tg3: Add tx and rx ring resource tracking
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2009 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
44
45 #include <net/checksum.h>
46 #include <net/ip.h>
47
48 #include <asm/system.h>
49 #include <asm/io.h>
50 #include <asm/byteorder.h>
51 #include <asm/uaccess.h>
52
53 #ifdef CONFIG_SPARC
54 #include <asm/idprom.h>
55 #include <asm/prom.h>
56 #endif
57
58 #define BAR_0 0
59 #define BAR_2 2
60
61 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
62 #define TG3_VLAN_TAG_USED 1
63 #else
64 #define TG3_VLAN_TAG_USED 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.101"
72 #define DRV_MODULE_RELDATE "August 28, 2009"
73
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
78 (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK | \
81 NETIF_MSG_TIMER | \
82 NETIF_MSG_IFDOWN | \
83 NETIF_MSG_IFUP | \
84 NETIF_MSG_RX_ERR | \
85 NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
89 */
90 #define TG3_TX_TIMEOUT (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
100 */
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
111 */
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
114
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
119 TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_ext_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
125 TG3_TX_RING_SIZE)
126 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
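/* Editor's note, not part of the original source: because TG3_TX_RING_SIZE is
 * a power of two, NEXT_TX() above wraps a ring index with a cheap bitwise
 * mask:
 *
 *     next = (idx + 1) & (TG3_TX_RING_SIZE - 1);
 *
 * which yields the same result as (idx + 1) % TG3_TX_RING_SIZE but avoids a
 * hardware divide; this is the '& (foo - 1)' form mentioned in the comment
 * above TG3_RX_RCB_RING_SIZE.
 */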
127
128 #define TG3_DMA_BYTE_ENAB 64
129
130 #define TG3_RX_STD_DMA_SZ 1536
131 #define TG3_RX_JMB_DMA_SZ 9046
132
133 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
134
135 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
136 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
137
138 /* minimum number of free TX descriptors required to wake up TX process */
139 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
140
141 #define TG3_RAW_IP_ALIGN 2
142
143 /* number of ETHTOOL_GSTATS u64's */
144 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
145
146 #define TG3_NUM_TEST 6
147
148 #define FIRMWARE_TG3 "tigon/tg3.bin"
149 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
150 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
151
152 static char version[] __devinitdata =
153 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
154
155 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
156 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
157 MODULE_LICENSE("GPL");
158 MODULE_VERSION(DRV_MODULE_VERSION);
159 MODULE_FIRMWARE(FIRMWARE_TG3);
160 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
161 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
162
163 #define TG3_RSS_MIN_NUM_MSIX_VECS 2
164
165 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
166 module_param(tg3_debug, int, 0);
167 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
168
169 static struct pci_device_id tg3_pci_tbl[] = {
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
236 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
237 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
238 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
239 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
240 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
241 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
242 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
243 {}
244 };
245
246 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
247
248 static const struct {
249 const char string[ETH_GSTRING_LEN];
250 } ethtool_stats_keys[TG3_NUM_STATS] = {
251 { "rx_octets" },
252 { "rx_fragments" },
253 { "rx_ucast_packets" },
254 { "rx_mcast_packets" },
255 { "rx_bcast_packets" },
256 { "rx_fcs_errors" },
257 { "rx_align_errors" },
258 { "rx_xon_pause_rcvd" },
259 { "rx_xoff_pause_rcvd" },
260 { "rx_mac_ctrl_rcvd" },
261 { "rx_xoff_entered" },
262 { "rx_frame_too_long_errors" },
263 { "rx_jabbers" },
264 { "rx_undersize_packets" },
265 { "rx_in_length_errors" },
266 { "rx_out_length_errors" },
267 { "rx_64_or_less_octet_packets" },
268 { "rx_65_to_127_octet_packets" },
269 { "rx_128_to_255_octet_packets" },
270 { "rx_256_to_511_octet_packets" },
271 { "rx_512_to_1023_octet_packets" },
272 { "rx_1024_to_1522_octet_packets" },
273 { "rx_1523_to_2047_octet_packets" },
274 { "rx_2048_to_4095_octet_packets" },
275 { "rx_4096_to_8191_octet_packets" },
276 { "rx_8192_to_9022_octet_packets" },
277
278 { "tx_octets" },
279 { "tx_collisions" },
280
281 { "tx_xon_sent" },
282 { "tx_xoff_sent" },
283 { "tx_flow_control" },
284 { "tx_mac_errors" },
285 { "tx_single_collisions" },
286 { "tx_mult_collisions" },
287 { "tx_deferred" },
288 { "tx_excessive_collisions" },
289 { "tx_late_collisions" },
290 { "tx_collide_2times" },
291 { "tx_collide_3times" },
292 { "tx_collide_4times" },
293 { "tx_collide_5times" },
294 { "tx_collide_6times" },
295 { "tx_collide_7times" },
296 { "tx_collide_8times" },
297 { "tx_collide_9times" },
298 { "tx_collide_10times" },
299 { "tx_collide_11times" },
300 { "tx_collide_12times" },
301 { "tx_collide_13times" },
302 { "tx_collide_14times" },
303 { "tx_collide_15times" },
304 { "tx_ucast_packets" },
305 { "tx_mcast_packets" },
306 { "tx_bcast_packets" },
307 { "tx_carrier_sense_errors" },
308 { "tx_discards" },
309 { "tx_errors" },
310
311 { "dma_writeq_full" },
312 { "dma_write_prioq_full" },
313 { "rxbds_empty" },
314 { "rx_discards" },
315 { "rx_errors" },
316 { "rx_threshold_hit" },
317
318 { "dma_readq_full" },
319 { "dma_read_prioq_full" },
320 { "tx_comp_queue_full" },
321
322 { "ring_set_send_prod_index" },
323 { "ring_status_update" },
324 { "nic_irqs" },
325 { "nic_avoided_irqs" },
326 { "nic_tx_threshold_hit" }
327 };
328
329 static const struct {
330 const char string[ETH_GSTRING_LEN];
331 } ethtool_test_keys[TG3_NUM_TEST] = {
332 { "nvram test (online) " },
333 { "link test (online) " },
334 { "register test (offline)" },
335 { "memory test (offline)" },
336 { "loopback test (offline)" },
337 { "interrupt test (offline)" },
338 };
339
340 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
341 {
342 writel(val, tp->regs + off);
343 }
344
345 static u32 tg3_read32(struct tg3 *tp, u32 off)
346 {
347 return (readl(tp->regs + off));
348 }
349
350 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
351 {
352 writel(val, tp->aperegs + off);
353 }
354
355 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
356 {
357 return (readl(tp->aperegs + off));
358 }
359
360 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
361 {
362 unsigned long flags;
363
364 spin_lock_irqsave(&tp->indirect_lock, flags);
365 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
366 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
367 spin_unlock_irqrestore(&tp->indirect_lock, flags);
368 }
369
370 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
371 {
372 writel(val, tp->regs + off);
373 readl(tp->regs + off);
374 }
375
376 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
377 {
378 unsigned long flags;
379 u32 val;
380
381 spin_lock_irqsave(&tp->indirect_lock, flags);
382 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
383 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
384 spin_unlock_irqrestore(&tp->indirect_lock, flags);
385 return val;
386 }
387
388 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
389 {
390 unsigned long flags;
391
392 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
393 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
394 TG3_64BIT_REG_LOW, val);
395 return;
396 }
397 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
398 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
399 TG3_64BIT_REG_LOW, val);
400 return;
401 }
402
403 spin_lock_irqsave(&tp->indirect_lock, flags);
404 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
405 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
406 spin_unlock_irqrestore(&tp->indirect_lock, flags);
407
408 /* In indirect mode when disabling interrupts, we also need
409 * to clear the interrupt bit in the GRC local ctrl register.
410 */
411 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
412 (val == 0x1)) {
413 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
414 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
415 }
416 }
417
418 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
419 {
420 unsigned long flags;
421 u32 val;
422
423 spin_lock_irqsave(&tp->indirect_lock, flags);
424 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
425 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
426 spin_unlock_irqrestore(&tp->indirect_lock, flags);
427 return val;
428 }
429
430 /* usec_wait specifies the wait time in usec when writing to certain registers
431 * where it is unsafe to read back the register without some delay.
432 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
433 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
434 */
435 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
436 {
437 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
438 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
439 /* Non-posted methods */
440 tp->write32(tp, off, val);
441 else {
442 /* Posted method */
443 tg3_write32(tp, off, val);
444 if (usec_wait)
445 udelay(usec_wait);
446 tp->read32(tp, off);
447 }
448 /* Wait again after the read for the posted method to guarantee that
449 * the wait time is met.
450 */
451 if (usec_wait)
452 udelay(usec_wait);
453 }
454
455 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
456 {
457 tp->write32_mbox(tp, off, val);
458 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
459 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
460 tp->read32_mbox(tp, off);
461 }
462
463 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
464 {
465 void __iomem *mbox = tp->regs + off;
466 writel(val, mbox);
467 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
468 writel(val, mbox);
469 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
470 readl(mbox);
471 }
472
473 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
474 {
475 return (readl(tp->regs + off + GRCMBOX_BASE));
476 }
477
478 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
479 {
480 writel(val, tp->regs + off + GRCMBOX_BASE);
481 }
482
483 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
484 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
485 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
486 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
487 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
488
489 #define tw32(reg,val) tp->write32(tp, reg, val)
490 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
491 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
492 #define tr32(reg) tp->read32(tp, reg)
493
494 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
495 {
496 unsigned long flags;
497
498 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
499 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
500 return;
501
502 spin_lock_irqsave(&tp->indirect_lock, flags);
503 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
504 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
505 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
506
507 /* Always leave this as zero. */
508 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
509 } else {
510 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
511 tw32_f(TG3PCI_MEM_WIN_DATA, val);
512
513 /* Always leave this as zero. */
514 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
515 }
516 spin_unlock_irqrestore(&tp->indirect_lock, flags);
517 }
518
519 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
520 {
521 unsigned long flags;
522
523 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
524 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
525 *val = 0;
526 return;
527 }
528
529 spin_lock_irqsave(&tp->indirect_lock, flags);
530 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
531 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
532 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
533
534 /* Always leave this as zero. */
535 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
536 } else {
537 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
538 *val = tr32(TG3PCI_MEM_WIN_DATA);
539
540 /* Always leave this as zero. */
541 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
542 }
543 spin_unlock_irqrestore(&tp->indirect_lock, flags);
544 }
545
546 static void tg3_ape_lock_init(struct tg3 *tp)
547 {
548 int i;
549
550 /* Make sure the driver hasn't any stale locks. */
551 for (i = 0; i < 8; i++)
552 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
553 APE_LOCK_GRANT_DRIVER);
554 }
555
556 static int tg3_ape_lock(struct tg3 *tp, int locknum)
557 {
558 int i, off;
559 int ret = 0;
560 u32 status;
561
562 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
563 return 0;
564
565 switch (locknum) {
566 case TG3_APE_LOCK_GRC:
567 case TG3_APE_LOCK_MEM:
568 break;
569 default:
570 return -EINVAL;
571 }
572
573 off = 4 * locknum;
574
575 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
576
577 /* Wait for up to 1 millisecond to acquire lock. */
578 for (i = 0; i < 100; i++) {
579 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
580 if (status == APE_LOCK_GRANT_DRIVER)
581 break;
582 udelay(10);
583 }
584
585 if (status != APE_LOCK_GRANT_DRIVER) {
586 /* Revoke the lock request. */
587 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
588 APE_LOCK_GRANT_DRIVER);
589
590 ret = -EBUSY;
591 }
592
593 return ret;
594 }
595
596 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
597 {
598 int off;
599
600 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
601 return;
602
603 switch (locknum) {
604 case TG3_APE_LOCK_GRC:
605 case TG3_APE_LOCK_MEM:
606 break;
607 default:
608 return;
609 }
610
611 off = 4 * locknum;
612 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
613 }
614
615 static void tg3_disable_ints(struct tg3 *tp)
616 {
617 tw32(TG3PCI_MISC_HOST_CTRL,
618 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
619 tw32_mailbox_f(tp->napi[0].int_mbox, 0x00000001);
620 }
621
622 static void tg3_enable_ints(struct tg3 *tp)
623 {
624 u32 coal_now;
625 struct tg3_napi *tnapi = &tp->napi[0];
626 tp->irq_sync = 0;
627 wmb();
628
629 tw32(TG3PCI_MISC_HOST_CTRL,
630 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
631 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
632 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
633 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
634
635 coal_now = tnapi->coal_now;
636
637 /* Force an initial interrupt */
638 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
639 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
640 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
641 else
642 tw32(HOSTCC_MODE, tp->coalesce_mode |
643 HOSTCC_MODE_ENABLE | coal_now);
644 }
645
646 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
647 {
648 struct tg3 *tp = tnapi->tp;
649 struct tg3_hw_status *sblk = tnapi->hw_status;
650 unsigned int work_exists = 0;
651
652 /* check for phy events */
653 if (!(tp->tg3_flags &
654 (TG3_FLAG_USE_LINKCHG_REG |
655 TG3_FLAG_POLL_SERDES))) {
656 if (sblk->status & SD_STATUS_LINK_CHG)
657 work_exists = 1;
658 }
659 /* check for RX/TX work to do */
660 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
661 sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
662 work_exists = 1;
663
664 return work_exists;
665 }
666
667 /* tg3_int_reenable
668 * similar to tg3_enable_ints, but it accurately determines whether there
669 * is new work pending and can return without flushing the PIO write
670 * which reenables interrupts
671 */
672 static void tg3_int_reenable(struct tg3_napi *tnapi)
673 {
674 struct tg3 *tp = tnapi->tp;
675
676 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
677 mmiowb();
678
679 /* When doing tagged status, this work check is unnecessary.
680 * The last_tag we write above tells the chip which piece of
681 * work we've completed.
682 */
683 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
684 tg3_has_work(tnapi))
685 tw32(HOSTCC_MODE, tp->coalesce_mode |
686 HOSTCC_MODE_ENABLE | tnapi->coal_now);
687 }
688
689 static inline void tg3_netif_stop(struct tg3 *tp)
690 {
691 tp->dev->trans_start = jiffies; /* prevent tx timeout */
692 napi_disable(&tp->napi[0].napi);
693 netif_tx_disable(tp->dev);
694 }
695
696 static inline void tg3_netif_start(struct tg3 *tp)
697 {
698 struct tg3_napi *tnapi = &tp->napi[0];
699 netif_wake_queue(tp->dev);
700 /* NOTE: unconditional netif_wake_queue is only appropriate
701 * so long as all callers are assured to have free tx slots
702 * (such as after tg3_init_hw)
703 */
704 napi_enable(&tnapi->napi);
705 tnapi->hw_status->status |= SD_STATUS_UPDATED;
706 tg3_enable_ints(tp);
707 }
708
709 static void tg3_switch_clocks(struct tg3 *tp)
710 {
711 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
712 u32 orig_clock_ctrl;
713
714 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
715 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
716 return;
717
718 orig_clock_ctrl = clock_ctrl;
719 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
720 CLOCK_CTRL_CLKRUN_OENABLE |
721 0x1f);
722 tp->pci_clock_ctrl = clock_ctrl;
723
724 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
725 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
726 tw32_wait_f(TG3PCI_CLOCK_CTRL,
727 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
728 }
729 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
730 tw32_wait_f(TG3PCI_CLOCK_CTRL,
731 clock_ctrl |
732 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
733 40);
734 tw32_wait_f(TG3PCI_CLOCK_CTRL,
735 clock_ctrl | (CLOCK_CTRL_ALTCLK),
736 40);
737 }
738 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
739 }
740
741 #define PHY_BUSY_LOOPS 5000
742
743 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
744 {
745 u32 frame_val;
746 unsigned int loops;
747 int ret;
748
749 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
750 tw32_f(MAC_MI_MODE,
751 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
752 udelay(80);
753 }
754
755 *val = 0x0;
756
757 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
758 MI_COM_PHY_ADDR_MASK);
759 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
760 MI_COM_REG_ADDR_MASK);
761 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
762
763 tw32_f(MAC_MI_COM, frame_val);
764
765 loops = PHY_BUSY_LOOPS;
766 while (loops != 0) {
767 udelay(10);
768 frame_val = tr32(MAC_MI_COM);
769
770 if ((frame_val & MI_COM_BUSY) == 0) {
771 udelay(5);
772 frame_val = tr32(MAC_MI_COM);
773 break;
774 }
775 loops -= 1;
776 }
777
778 ret = -EBUSY;
779 if (loops != 0) {
780 *val = frame_val & MI_COM_DATA_MASK;
781 ret = 0;
782 }
783
784 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
785 tw32_f(MAC_MI_MODE, tp->mi_mode);
786 udelay(80);
787 }
788
789 return ret;
790 }
791
792 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
793 {
794 u32 frame_val;
795 unsigned int loops;
796 int ret;
797
798 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
799 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
800 return 0;
801
802 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
803 tw32_f(MAC_MI_MODE,
804 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
805 udelay(80);
806 }
807
808 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
809 MI_COM_PHY_ADDR_MASK);
810 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
811 MI_COM_REG_ADDR_MASK);
812 frame_val |= (val & MI_COM_DATA_MASK);
813 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
814
815 tw32_f(MAC_MI_COM, frame_val);
816
817 loops = PHY_BUSY_LOOPS;
818 while (loops != 0) {
819 udelay(10);
820 frame_val = tr32(MAC_MI_COM);
821 if ((frame_val & MI_COM_BUSY) == 0) {
822 udelay(5);
823 frame_val = tr32(MAC_MI_COM);
824 break;
825 }
826 loops -= 1;
827 }
828
829 ret = -EBUSY;
830 if (loops != 0)
831 ret = 0;
832
833 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
834 tw32_f(MAC_MI_MODE, tp->mi_mode);
835 udelay(80);
836 }
837
838 return ret;
839 }
840
841 static int tg3_bmcr_reset(struct tg3 *tp)
842 {
843 u32 phy_control;
844 int limit, err;
845
846 /* OK, reset it, and poll the BMCR_RESET bit until it
847 * clears or we time out.
848 */
849 phy_control = BMCR_RESET;
850 err = tg3_writephy(tp, MII_BMCR, phy_control);
851 if (err != 0)
852 return -EBUSY;
853
854 limit = 5000;
855 while (limit--) {
856 err = tg3_readphy(tp, MII_BMCR, &phy_control);
857 if (err != 0)
858 return -EBUSY;
859
860 if ((phy_control & BMCR_RESET) == 0) {
861 udelay(40);
862 break;
863 }
864 udelay(10);
865 }
866 if (limit < 0)
867 return -EBUSY;
868
869 return 0;
870 }
871
872 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
873 {
874 struct tg3 *tp = bp->priv;
875 u32 val;
876
877 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
878 return -EAGAIN;
879
880 if (tg3_readphy(tp, reg, &val))
881 return -EIO;
882
883 return val;
884 }
885
886 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
887 {
888 struct tg3 *tp = bp->priv;
889
890 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
891 return -EAGAIN;
892
893 if (tg3_writephy(tp, reg, val))
894 return -EIO;
895
896 return 0;
897 }
898
899 static int tg3_mdio_reset(struct mii_bus *bp)
900 {
901 return 0;
902 }
903
904 static void tg3_mdio_config_5785(struct tg3 *tp)
905 {
906 u32 val;
907 struct phy_device *phydev;
908
909 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
910 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
911 case TG3_PHY_ID_BCM50610:
912 val = MAC_PHYCFG2_50610_LED_MODES;
913 break;
914 case TG3_PHY_ID_BCMAC131:
915 val = MAC_PHYCFG2_AC131_LED_MODES;
916 break;
917 case TG3_PHY_ID_RTL8211C:
918 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
919 break;
920 case TG3_PHY_ID_RTL8201E:
921 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
922 break;
923 default:
924 return;
925 }
926
927 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
928 tw32(MAC_PHYCFG2, val);
929
930 val = tr32(MAC_PHYCFG1);
931 val &= ~(MAC_PHYCFG1_RGMII_INT |
932 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
933 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
934 tw32(MAC_PHYCFG1, val);
935
936 return;
937 }
938
939 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
940 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
941 MAC_PHYCFG2_FMODE_MASK_MASK |
942 MAC_PHYCFG2_GMODE_MASK_MASK |
943 MAC_PHYCFG2_ACT_MASK_MASK |
944 MAC_PHYCFG2_QUAL_MASK_MASK |
945 MAC_PHYCFG2_INBAND_ENABLE;
946
947 tw32(MAC_PHYCFG2, val);
948
949 val = tr32(MAC_PHYCFG1);
950 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
951 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
952 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
953 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
954 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
955 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
956 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
957 }
958 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
959 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
960 tw32(MAC_PHYCFG1, val);
961
962 val = tr32(MAC_EXT_RGMII_MODE);
963 val &= ~(MAC_RGMII_MODE_RX_INT_B |
964 MAC_RGMII_MODE_RX_QUALITY |
965 MAC_RGMII_MODE_RX_ACTIVITY |
966 MAC_RGMII_MODE_RX_ENG_DET |
967 MAC_RGMII_MODE_TX_ENABLE |
968 MAC_RGMII_MODE_TX_LOWPWR |
969 MAC_RGMII_MODE_TX_RESET);
970 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
971 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
972 val |= MAC_RGMII_MODE_RX_INT_B |
973 MAC_RGMII_MODE_RX_QUALITY |
974 MAC_RGMII_MODE_RX_ACTIVITY |
975 MAC_RGMII_MODE_RX_ENG_DET;
976 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
977 val |= MAC_RGMII_MODE_TX_ENABLE |
978 MAC_RGMII_MODE_TX_LOWPWR |
979 MAC_RGMII_MODE_TX_RESET;
980 }
981 tw32(MAC_EXT_RGMII_MODE, val);
982 }
983
984 static void tg3_mdio_start(struct tg3 *tp)
985 {
986 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
987 mutex_lock(&tp->mdio_bus->mdio_lock);
988 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
989 mutex_unlock(&tp->mdio_bus->mdio_lock);
990 }
991
992 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
993 tw32_f(MAC_MI_MODE, tp->mi_mode);
994 udelay(80);
995
996 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
997 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
998 tg3_mdio_config_5785(tp);
999 }
1000
1001 static void tg3_mdio_stop(struct tg3 *tp)
1002 {
1003 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1004 mutex_lock(&tp->mdio_bus->mdio_lock);
1005 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
1006 mutex_unlock(&tp->mdio_bus->mdio_lock);
1007 }
1008 }
1009
1010 static int tg3_mdio_init(struct tg3 *tp)
1011 {
1012 int i;
1013 u32 reg;
1014 struct phy_device *phydev;
1015
1016 tg3_mdio_start(tp);
1017
1018 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1019 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1020 return 0;
1021
1022 tp->mdio_bus = mdiobus_alloc();
1023 if (tp->mdio_bus == NULL)
1024 return -ENOMEM;
1025
1026 tp->mdio_bus->name = "tg3 mdio bus";
1027 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1028 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1029 tp->mdio_bus->priv = tp;
1030 tp->mdio_bus->parent = &tp->pdev->dev;
1031 tp->mdio_bus->read = &tg3_mdio_read;
1032 tp->mdio_bus->write = &tg3_mdio_write;
1033 tp->mdio_bus->reset = &tg3_mdio_reset;
1034 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1035 tp->mdio_bus->irq = &tp->mdio_irq[0];
1036
1037 for (i = 0; i < PHY_MAX_ADDR; i++)
1038 tp->mdio_bus->irq[i] = PHY_POLL;
1039
1040 /* The bus registration will look for all the PHYs on the mdio bus.
1041 * Unfortunately, it does not ensure the PHY is powered up before
1042 * accessing the PHY ID registers. A chip reset is the
1043 * quickest way to bring the device back to an operational state.
1044 */
1045 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1046 tg3_bmcr_reset(tp);
1047
1048 i = mdiobus_register(tp->mdio_bus);
1049 if (i) {
1050 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1051 tp->dev->name, i);
1052 mdiobus_free(tp->mdio_bus);
1053 return i;
1054 }
1055
1056 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1057
1058 if (!phydev || !phydev->drv) {
1059 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1060 mdiobus_unregister(tp->mdio_bus);
1061 mdiobus_free(tp->mdio_bus);
1062 return -ENODEV;
1063 }
1064
1065 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1066 case TG3_PHY_ID_BCM57780:
1067 phydev->interface = PHY_INTERFACE_MODE_GMII;
1068 break;
1069 case TG3_PHY_ID_BCM50610:
1070 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1071 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1072 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1073 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1074 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1075 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1076 /* fallthru */
1077 case TG3_PHY_ID_RTL8211C:
1078 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1079 break;
1080 case TG3_PHY_ID_RTL8201E:
1081 case TG3_PHY_ID_BCMAC131:
1082 phydev->interface = PHY_INTERFACE_MODE_MII;
1083 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1084 break;
1085 }
1086
1087 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1088
1089 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1090 tg3_mdio_config_5785(tp);
1091
1092 return 0;
1093 }
1094
1095 static void tg3_mdio_fini(struct tg3 *tp)
1096 {
1097 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1098 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1099 mdiobus_unregister(tp->mdio_bus);
1100 mdiobus_free(tp->mdio_bus);
1101 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1102 }
1103 }
1104
1105 /* tp->lock is held. */
1106 static inline void tg3_generate_fw_event(struct tg3 *tp)
1107 {
1108 u32 val;
1109
1110 val = tr32(GRC_RX_CPU_EVENT);
1111 val |= GRC_RX_CPU_DRIVER_EVENT;
1112 tw32_f(GRC_RX_CPU_EVENT, val);
1113
1114 tp->last_event_jiffies = jiffies;
1115 }
1116
1117 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1118
1119 /* tp->lock is held. */
1120 static void tg3_wait_for_event_ack(struct tg3 *tp)
1121 {
1122 int i;
1123 unsigned int delay_cnt;
1124 long time_remain;
1125
1126 /* If enough time has passed, no wait is necessary. */
1127 time_remain = (long)(tp->last_event_jiffies + 1 +
1128 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1129 (long)jiffies;
1130 if (time_remain < 0)
1131 return;
1132
1133 /* Check if we can shorten the wait time. */
1134 delay_cnt = jiffies_to_usecs(time_remain);
1135 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1136 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1137 delay_cnt = (delay_cnt >> 3) + 1;
1138
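/* Editor's note, not part of the original source: the loop below polls
 * GRC_RX_CPU_EVENT in roughly 8 usec steps (udelay(8)), so the
 * '(delay_cnt >> 3) + 1' computation above converts the remaining wait time
 * from microseconds into a loop iteration count.
 */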
1139 for (i = 0; i < delay_cnt; i++) {
1140 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1141 break;
1142 udelay(8);
1143 }
1144 }
1145
1146 /* tp->lock is held. */
1147 static void tg3_ump_link_report(struct tg3 *tp)
1148 {
1149 u32 reg;
1150 u32 val;
1151
1152 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1153 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1154 return;
1155
1156 tg3_wait_for_event_ack(tp);
1157
1158 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1159
1160 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1161
1162 val = 0;
1163 if (!tg3_readphy(tp, MII_BMCR, &reg))
1164 val = reg << 16;
1165 if (!tg3_readphy(tp, MII_BMSR, &reg))
1166 val |= (reg & 0xffff);
1167 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1168
1169 val = 0;
1170 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1171 val = reg << 16;
1172 if (!tg3_readphy(tp, MII_LPA, &reg))
1173 val |= (reg & 0xffff);
1174 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1175
1176 val = 0;
1177 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1178 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1179 val = reg << 16;
1180 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1181 val |= (reg & 0xffff);
1182 }
1183 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1184
1185 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1186 val = reg << 16;
1187 else
1188 val = 0;
1189 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1190
1191 tg3_generate_fw_event(tp);
1192 }
1193
1194 static void tg3_link_report(struct tg3 *tp)
1195 {
1196 if (!netif_carrier_ok(tp->dev)) {
1197 if (netif_msg_link(tp))
1198 printk(KERN_INFO PFX "%s: Link is down.\n",
1199 tp->dev->name);
1200 tg3_ump_link_report(tp);
1201 } else if (netif_msg_link(tp)) {
1202 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1203 tp->dev->name,
1204 (tp->link_config.active_speed == SPEED_1000 ?
1205 1000 :
1206 (tp->link_config.active_speed == SPEED_100 ?
1207 100 : 10)),
1208 (tp->link_config.active_duplex == DUPLEX_FULL ?
1209 "full" : "half"));
1210
1211 printk(KERN_INFO PFX
1212 "%s: Flow control is %s for TX and %s for RX.\n",
1213 tp->dev->name,
1214 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1215 "on" : "off",
1216 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1217 "on" : "off");
1218 tg3_ump_link_report(tp);
1219 }
1220 }
1221
1222 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1223 {
1224 u16 miireg;
1225
1226 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1227 miireg = ADVERTISE_PAUSE_CAP;
1228 else if (flow_ctrl & FLOW_CTRL_TX)
1229 miireg = ADVERTISE_PAUSE_ASYM;
1230 else if (flow_ctrl & FLOW_CTRL_RX)
1231 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1232 else
1233 miireg = 0;
1234
1235 return miireg;
1236 }
1237
1238 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1239 {
1240 u16 miireg;
1241
1242 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1243 miireg = ADVERTISE_1000XPAUSE;
1244 else if (flow_ctrl & FLOW_CTRL_TX)
1245 miireg = ADVERTISE_1000XPSE_ASYM;
1246 else if (flow_ctrl & FLOW_CTRL_RX)
1247 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1248 else
1249 miireg = 0;
1250
1251 return miireg;
1252 }
1253
1254 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1255 {
1256 u8 cap = 0;
1257
1258 if (lcladv & ADVERTISE_1000XPAUSE) {
1259 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1260 if (rmtadv & LPA_1000XPAUSE)
1261 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1262 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1263 cap = FLOW_CTRL_RX;
1264 } else {
1265 if (rmtadv & LPA_1000XPAUSE)
1266 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1267 }
1268 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1269 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1270 cap = FLOW_CTRL_TX;
1271 }
1272
1273 return cap;
1274 }
1275
1276 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1277 {
1278 u8 autoneg;
1279 u8 flowctrl = 0;
1280 u32 old_rx_mode = tp->rx_mode;
1281 u32 old_tx_mode = tp->tx_mode;
1282
1283 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1284 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1285 else
1286 autoneg = tp->link_config.autoneg;
1287
1288 if (autoneg == AUTONEG_ENABLE &&
1289 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1290 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1291 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1292 else
1293 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1294 } else
1295 flowctrl = tp->link_config.flowctrl;
1296
1297 tp->link_config.active_flowctrl = flowctrl;
1298
1299 if (flowctrl & FLOW_CTRL_RX)
1300 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1301 else
1302 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1303
1304 if (old_rx_mode != tp->rx_mode)
1305 tw32_f(MAC_RX_MODE, tp->rx_mode);
1306
1307 if (flowctrl & FLOW_CTRL_TX)
1308 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1309 else
1310 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1311
1312 if (old_tx_mode != tp->tx_mode)
1313 tw32_f(MAC_TX_MODE, tp->tx_mode);
1314 }
1315
1316 static void tg3_adjust_link(struct net_device *dev)
1317 {
1318 u8 oldflowctrl, linkmesg = 0;
1319 u32 mac_mode, lcl_adv, rmt_adv;
1320 struct tg3 *tp = netdev_priv(dev);
1321 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1322
1323 spin_lock(&tp->lock);
1324
1325 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1326 MAC_MODE_HALF_DUPLEX);
1327
1328 oldflowctrl = tp->link_config.active_flowctrl;
1329
1330 if (phydev->link) {
1331 lcl_adv = 0;
1332 rmt_adv = 0;
1333
1334 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1335 mac_mode |= MAC_MODE_PORT_MODE_MII;
1336 else
1337 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1338
1339 if (phydev->duplex == DUPLEX_HALF)
1340 mac_mode |= MAC_MODE_HALF_DUPLEX;
1341 else {
1342 lcl_adv = tg3_advert_flowctrl_1000T(
1343 tp->link_config.flowctrl);
1344
1345 if (phydev->pause)
1346 rmt_adv = LPA_PAUSE_CAP;
1347 if (phydev->asym_pause)
1348 rmt_adv |= LPA_PAUSE_ASYM;
1349 }
1350
1351 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1352 } else
1353 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1354
1355 if (mac_mode != tp->mac_mode) {
1356 tp->mac_mode = mac_mode;
1357 tw32_f(MAC_MODE, tp->mac_mode);
1358 udelay(40);
1359 }
1360
1361 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1362 if (phydev->speed == SPEED_10)
1363 tw32(MAC_MI_STAT,
1364 MAC_MI_STAT_10MBPS_MODE |
1365 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1366 else
1367 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1368 }
1369
1370 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1371 tw32(MAC_TX_LENGTHS,
1372 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1373 (6 << TX_LENGTHS_IPG_SHIFT) |
1374 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1375 else
1376 tw32(MAC_TX_LENGTHS,
1377 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1378 (6 << TX_LENGTHS_IPG_SHIFT) |
1379 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1380
1381 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1382 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1383 phydev->speed != tp->link_config.active_speed ||
1384 phydev->duplex != tp->link_config.active_duplex ||
1385 oldflowctrl != tp->link_config.active_flowctrl)
1386 linkmesg = 1;
1387
1388 tp->link_config.active_speed = phydev->speed;
1389 tp->link_config.active_duplex = phydev->duplex;
1390
1391 spin_unlock(&tp->lock);
1392
1393 if (linkmesg)
1394 tg3_link_report(tp);
1395 }
1396
1397 static int tg3_phy_init(struct tg3 *tp)
1398 {
1399 struct phy_device *phydev;
1400
1401 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1402 return 0;
1403
1404 /* Bring the PHY back to a known state. */
1405 tg3_bmcr_reset(tp);
1406
1407 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1408
1409 /* Attach the MAC to the PHY. */
1410 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1411 phydev->dev_flags, phydev->interface);
1412 if (IS_ERR(phydev)) {
1413 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1414 return PTR_ERR(phydev);
1415 }
1416
1417 /* Mask with MAC supported features. */
1418 switch (phydev->interface) {
1419 case PHY_INTERFACE_MODE_GMII:
1420 case PHY_INTERFACE_MODE_RGMII:
1421 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1422 phydev->supported &= (PHY_GBIT_FEATURES |
1423 SUPPORTED_Pause |
1424 SUPPORTED_Asym_Pause);
1425 break;
1426 }
1427 /* fallthru */
1428 case PHY_INTERFACE_MODE_MII:
1429 phydev->supported &= (PHY_BASIC_FEATURES |
1430 SUPPORTED_Pause |
1431 SUPPORTED_Asym_Pause);
1432 break;
1433 default:
1434 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1435 return -EINVAL;
1436 }
1437
1438 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1439
1440 phydev->advertising = phydev->supported;
1441
1442 return 0;
1443 }
1444
1445 static void tg3_phy_start(struct tg3 *tp)
1446 {
1447 struct phy_device *phydev;
1448
1449 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1450 return;
1451
1452 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1453
1454 if (tp->link_config.phy_is_low_power) {
1455 tp->link_config.phy_is_low_power = 0;
1456 phydev->speed = tp->link_config.orig_speed;
1457 phydev->duplex = tp->link_config.orig_duplex;
1458 phydev->autoneg = tp->link_config.orig_autoneg;
1459 phydev->advertising = tp->link_config.orig_advertising;
1460 }
1461
1462 phy_start(phydev);
1463
1464 phy_start_aneg(phydev);
1465 }
1466
1467 static void tg3_phy_stop(struct tg3 *tp)
1468 {
1469 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1470 return;
1471
1472 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1473 }
1474
1475 static void tg3_phy_fini(struct tg3 *tp)
1476 {
1477 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1478 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1479 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1480 }
1481 }
1482
1483 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1484 {
1485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1486 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1487 }
1488
1489 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1490 {
1491 u32 phytest;
1492
1493 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1494 u32 phy;
1495
1496 tg3_writephy(tp, MII_TG3_FET_TEST,
1497 phytest | MII_TG3_FET_SHADOW_EN);
1498 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1499 if (enable)
1500 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1501 else
1502 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1503 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1504 }
1505 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1506 }
1507 }
1508
1509 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1510 {
1511 u32 reg;
1512
1513 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
1514 return;
1515
1516 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1517 tg3_phy_fet_toggle_apd(tp, enable);
1518 return;
1519 }
1520
1521 reg = MII_TG3_MISC_SHDW_WREN |
1522 MII_TG3_MISC_SHDW_SCR5_SEL |
1523 MII_TG3_MISC_SHDW_SCR5_LPED |
1524 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1525 MII_TG3_MISC_SHDW_SCR5_SDTL |
1526 MII_TG3_MISC_SHDW_SCR5_C125OE;
1527 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1528 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1529
1530 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1531
1532
1533 reg = MII_TG3_MISC_SHDW_WREN |
1534 MII_TG3_MISC_SHDW_APD_SEL |
1535 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1536 if (enable)
1537 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1538
1539 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1540 }
1541
1542 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1543 {
1544 u32 phy;
1545
1546 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1547 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1548 return;
1549
1550 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1551 u32 ephy;
1552
1553 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1554 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1555
1556 tg3_writephy(tp, MII_TG3_FET_TEST,
1557 ephy | MII_TG3_FET_SHADOW_EN);
1558 if (!tg3_readphy(tp, reg, &phy)) {
1559 if (enable)
1560 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1561 else
1562 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1563 tg3_writephy(tp, reg, phy);
1564 }
1565 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1566 }
1567 } else {
1568 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1569 MII_TG3_AUXCTL_SHDWSEL_MISC;
1570 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1571 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1572 if (enable)
1573 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1574 else
1575 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1576 phy |= MII_TG3_AUXCTL_MISC_WREN;
1577 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1578 }
1579 }
1580 }
1581
1582 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1583 {
1584 u32 val;
1585
1586 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1587 return;
1588
1589 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1590 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1591 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1592 (val | (1 << 15) | (1 << 4)));
1593 }
1594
1595 static void tg3_phy_apply_otp(struct tg3 *tp)
1596 {
1597 u32 otp, phy;
1598
1599 if (!tp->phy_otp)
1600 return;
1601
1602 otp = tp->phy_otp;
1603
1604 /* Enable SM_DSP clock and tx 6dB coding. */
1605 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1606 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1607 MII_TG3_AUXCTL_ACTL_TX_6DB;
1608 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1609
1610 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1611 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1612 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1613
1614 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1615 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1616 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1617
1618 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1619 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1620 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1621
1622 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1623 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1624
1625 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1626 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1627
1628 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1629 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1630 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1631
1632 /* Turn off SM_DSP clock. */
1633 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1634 MII_TG3_AUXCTL_ACTL_TX_6DB;
1635 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1636 }
1637
1638 static int tg3_wait_macro_done(struct tg3 *tp)
1639 {
1640 int limit = 100;
1641
1642 while (limit--) {
1643 u32 tmp32;
1644
1645 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1646 if ((tmp32 & 0x1000) == 0)
1647 break;
1648 }
1649 }
1650 if (limit < 0)
1651 return -EBUSY;
1652
1653 return 0;
1654 }
1655
1656 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1657 {
1658 static const u32 test_pat[4][6] = {
1659 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1660 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1661 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1662 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1663 };
1664 int chan;
1665
1666 for (chan = 0; chan < 4; chan++) {
1667 int i;
1668
1669 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1670 (chan * 0x2000) | 0x0200);
1671 tg3_writephy(tp, 0x16, 0x0002);
1672
1673 for (i = 0; i < 6; i++)
1674 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1675 test_pat[chan][i]);
1676
1677 tg3_writephy(tp, 0x16, 0x0202);
1678 if (tg3_wait_macro_done(tp)) {
1679 *resetp = 1;
1680 return -EBUSY;
1681 }
1682
1683 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1684 (chan * 0x2000) | 0x0200);
1685 tg3_writephy(tp, 0x16, 0x0082);
1686 if (tg3_wait_macro_done(tp)) {
1687 *resetp = 1;
1688 return -EBUSY;
1689 }
1690
1691 tg3_writephy(tp, 0x16, 0x0802);
1692 if (tg3_wait_macro_done(tp)) {
1693 *resetp = 1;
1694 return -EBUSY;
1695 }
1696
1697 for (i = 0; i < 6; i += 2) {
1698 u32 low, high;
1699
1700 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1701 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1702 tg3_wait_macro_done(tp)) {
1703 *resetp = 1;
1704 return -EBUSY;
1705 }
1706 low &= 0x7fff;
1707 high &= 0x000f;
1708 if (low != test_pat[chan][i] ||
1709 high != test_pat[chan][i+1]) {
1710 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1711 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1712 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1713
1714 return -EBUSY;
1715 }
1716 }
1717 }
1718
1719 return 0;
1720 }
1721
1722 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1723 {
1724 int chan;
1725
1726 for (chan = 0; chan < 4; chan++) {
1727 int i;
1728
1729 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1730 (chan * 0x2000) | 0x0200);
1731 tg3_writephy(tp, 0x16, 0x0002);
1732 for (i = 0; i < 6; i++)
1733 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1734 tg3_writephy(tp, 0x16, 0x0202);
1735 if (tg3_wait_macro_done(tp))
1736 return -EBUSY;
1737 }
1738
1739 return 0;
1740 }
1741
1742 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1743 {
1744 u32 reg32, phy9_orig;
1745 int retries, do_phy_reset, err;
1746
1747 retries = 10;
1748 do_phy_reset = 1;
1749 do {
1750 if (do_phy_reset) {
1751 err = tg3_bmcr_reset(tp);
1752 if (err)
1753 return err;
1754 do_phy_reset = 0;
1755 }
1756
1757 /* Disable transmitter and interrupt. */
1758 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1759 continue;
1760
1761 reg32 |= 0x3000;
1762 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1763
1764 /* Set full-duplex, 1000 mbps. */
1765 tg3_writephy(tp, MII_BMCR,
1766 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1767
1768 /* Set to master mode. */
1769 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1770 continue;
1771
1772 tg3_writephy(tp, MII_TG3_CTRL,
1773 (MII_TG3_CTRL_AS_MASTER |
1774 MII_TG3_CTRL_ENABLE_AS_MASTER));
1775
1776 /* Enable SM_DSP_CLOCK and 6dB. */
1777 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1778
1779 /* Block the PHY control access. */
1780 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1781 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1782
1783 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1784 if (!err)
1785 break;
1786 } while (--retries);
1787
1788 err = tg3_phy_reset_chanpat(tp);
1789 if (err)
1790 return err;
1791
1792 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1793 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1794
1795 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1796 tg3_writephy(tp, 0x16, 0x0000);
1797
1798 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1799 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1800 /* Set Extended packet length bit for jumbo frames */
1801 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1802 }
1803 else {
1804 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1805 }
1806
1807 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1808
1809 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1810 reg32 &= ~0x3000;
1811 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1812 } else if (!err)
1813 err = -EBUSY;
1814
1815 return err;
1816 }
1817
1818 /* This will reset the tigon3 PHY if there is no valid
1819 * link unless the FORCE argument is non-zero.
1820 */
1821 static int tg3_phy_reset(struct tg3 *tp)
1822 {
1823 u32 cpmuctrl;
1824 u32 phy_status;
1825 int err;
1826
1827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1828 u32 val;
1829
1830 val = tr32(GRC_MISC_CFG);
1831 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1832 udelay(40);
1833 }
1834 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1835 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1836 if (err != 0)
1837 return -EBUSY;
1838
1839 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1840 netif_carrier_off(tp->dev);
1841 tg3_link_report(tp);
1842 }
1843
1844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1845 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1846 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1847 err = tg3_phy_reset_5703_4_5(tp);
1848 if (err)
1849 return err;
1850 goto out;
1851 }
1852
1853 cpmuctrl = 0;
1854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1855 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1856 cpmuctrl = tr32(TG3_CPMU_CTRL);
1857 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1858 tw32(TG3_CPMU_CTRL,
1859 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1860 }
1861
1862 err = tg3_bmcr_reset(tp);
1863 if (err)
1864 return err;
1865
1866 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1867 u32 phy;
1868
1869 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1870 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1871
1872 tw32(TG3_CPMU_CTRL, cpmuctrl);
1873 }
1874
1875 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1876 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1877 u32 val;
1878
1879 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1880 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1881 CPMU_LSPD_1000MB_MACCLK_12_5) {
1882 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1883 udelay(40);
1884 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1885 }
1886 }
1887
1888 tg3_phy_apply_otp(tp);
1889
1890 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1891 tg3_phy_toggle_apd(tp, true);
1892 else
1893 tg3_phy_toggle_apd(tp, false);
1894
1895 out:
1896 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1897 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1898 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1899 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1900 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1901 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1902 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1903 }
1904 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1905 tg3_writephy(tp, 0x1c, 0x8d68);
1906 tg3_writephy(tp, 0x1c, 0x8d68);
1907 }
1908 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1909 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1910 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1911 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1912 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1913 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1914 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1915 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1916 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1917 } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1919 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1920 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1921 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1922 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1923 tg3_writephy(tp, MII_TG3_TEST1,
1924 MII_TG3_TEST1_TRIM_EN | 0x4);
1925 } else
1926 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1927 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1928 }
1929 /* Set Extended packet length bit (bit 14) on all chips
1930  * that support jumbo frames. */
1931 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1932 /* Cannot do read-modify-write on 5401 */
1933 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1934 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1935 u32 phy_reg;
1936
1937 /* Set bit 14 with read-modify-write to preserve other bits */
1938 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1939 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1940 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1941 }
1942
1943 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1944 * jumbo frames transmission.
1945 */
1946 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
1947 u32 phy_reg;
1948
1949 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1950 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1951 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1952 }
1953
1954 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1955 /* adjust output voltage */
1956 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
1957 }
1958
1959 tg3_phy_toggle_automdix(tp, 1);
1960 tg3_phy_set_wirespeed(tp);
1961 return 0;
1962 }
1963
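/* Configure the GPIO-controlled auxiliary (Vaux) power pins.  On dual-port
 * chips (5704/5714) the decision is coordinated with the peer function so
 * that the auxiliary supply stays enabled while either port needs WOL or
 * ASF; otherwise the GPIOs are driven back to their power-saving state.
 * The per-chip bit sequences below follow the hardware requirements (note
 * that 5761 parts swap GPIO 0 and GPIO 2).
 */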
1964 static void tg3_frob_aux_power(struct tg3 *tp)
1965 {
1966 struct tg3 *tp_peer = tp;
1967
1968 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1969 return;
1970
1971 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1972 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1973 struct net_device *dev_peer;
1974
1975 dev_peer = pci_get_drvdata(tp->pdev_peer);
1976 /* remove_one() may have been run on the peer. */
1977 if (!dev_peer)
1978 tp_peer = tp;
1979 else
1980 tp_peer = netdev_priv(dev_peer);
1981 }
1982
1983 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1984 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1985 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1986 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1987 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1988 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1989 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1990 (GRC_LCLCTRL_GPIO_OE0 |
1991 GRC_LCLCTRL_GPIO_OE1 |
1992 GRC_LCLCTRL_GPIO_OE2 |
1993 GRC_LCLCTRL_GPIO_OUTPUT0 |
1994 GRC_LCLCTRL_GPIO_OUTPUT1),
1995 100);
1996 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
1997 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
1998 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1999 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2000 GRC_LCLCTRL_GPIO_OE1 |
2001 GRC_LCLCTRL_GPIO_OE2 |
2002 GRC_LCLCTRL_GPIO_OUTPUT0 |
2003 GRC_LCLCTRL_GPIO_OUTPUT1 |
2004 tp->grc_local_ctrl;
2005 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2006
2007 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2008 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2009
2010 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2011 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2012 } else {
2013 u32 no_gpio2;
2014 u32 grc_local_ctrl = 0;
2015
2016 if (tp_peer != tp &&
2017 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2018 return;
2019
2020 /* Workaround to prevent the device from drawing too much current. */
2021 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2022 ASIC_REV_5714) {
2023 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2024 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2025 grc_local_ctrl, 100);
2026 }
2027
2028 /* On 5753 and variants, GPIO2 cannot be used. */
2029 no_gpio2 = tp->nic_sram_data_cfg &
2030 NIC_SRAM_DATA_CFG_NO_GPIO2;
2031
2032 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2033 GRC_LCLCTRL_GPIO_OE1 |
2034 GRC_LCLCTRL_GPIO_OE2 |
2035 GRC_LCLCTRL_GPIO_OUTPUT1 |
2036 GRC_LCLCTRL_GPIO_OUTPUT2;
2037 if (no_gpio2) {
2038 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2039 GRC_LCLCTRL_GPIO_OUTPUT2);
2040 }
2041 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2042 grc_local_ctrl, 100);
2043
2044 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2045
2046 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2047 grc_local_ctrl, 100);
2048
2049 if (!no_gpio2) {
2050 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2051 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2052 grc_local_ctrl, 100);
2053 }
2054 }
2055 } else {
2056 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2057 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2058 if (tp_peer != tp &&
2059 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2060 return;
2061
2062 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2063 (GRC_LCLCTRL_GPIO_OE1 |
2064 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2065
2066 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2067 GRC_LCLCTRL_GPIO_OE1, 100);
2068
2069 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2070 (GRC_LCLCTRL_GPIO_OE1 |
2071 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2072 }
2073 }
2074 }
2075
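/* Return 1 if MAC_MODE_LINK_POLARITY should be set for the given link
 * speed on 5700-class devices; the answer depends on the LED mode and on
 * whether a BCM5411 PHY is fitted.
 */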
2076 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2077 {
2078 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2079 return 1;
2080 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2081 if (speed != SPEED_10)
2082 return 1;
2083 } else if (speed == SPEED_10)
2084 return 1;
2085
2086 return 0;
2087 }
2088
2089 static int tg3_setup_phy(struct tg3 *, int);
2090
2091 #define RESET_KIND_SHUTDOWN 0
2092 #define RESET_KIND_INIT 1
2093 #define RESET_KIND_SUSPEND 2
2094
2095 static void tg3_write_sig_post_reset(struct tg3 *, int);
2096 static int tg3_halt_cpu(struct tg3 *, u32);
2097
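/* Put the PHY into its lowest safe power state before suspend/WOL.
 * SerDes devices instead hold the SG_DIG block in soft reset, the 5906
 * internal PHY is placed in IDDQ, and chips whose PHYs must not be powered
 * down because of bugs simply return before BMCR_PDOWN is written.
 */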
2098 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2099 {
2100 u32 val;
2101
2102 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2104 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2105 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2106
2107 sg_dig_ctrl |=
2108 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2109 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2110 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2111 }
2112 return;
2113 }
2114
2115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2116 tg3_bmcr_reset(tp);
2117 val = tr32(GRC_MISC_CFG);
2118 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2119 udelay(40);
2120 return;
2121 } else if (do_low_power) {
2122 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2123 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2124
2125 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2126 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2127 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2128 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2129 MII_TG3_AUXCTL_PCTL_VREG_11V);
2130 }
2131
2132 /* The PHY should not be powered down on some chips because
2133 * of bugs.
2134 */
2135 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2137 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2138 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2139 return;
2140
2141 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2142 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2143 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2144 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2145 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2146 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2147 }
2148
2149 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2150 }
2151
2152 /* tp->lock is held. */
2153 static int tg3_nvram_lock(struct tg3 *tp)
2154 {
2155 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2156 int i;
2157
2158 if (tp->nvram_lock_cnt == 0) {
2159 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2160 for (i = 0; i < 8000; i++) {
2161 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2162 break;
2163 udelay(20);
2164 }
2165 if (i == 8000) {
2166 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2167 return -ENODEV;
2168 }
2169 }
2170 tp->nvram_lock_cnt++;
2171 }
2172 return 0;
2173 }
2174
2175 /* tp->lock is held. */
2176 static void tg3_nvram_unlock(struct tg3 *tp)
2177 {
2178 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2179 if (tp->nvram_lock_cnt > 0)
2180 tp->nvram_lock_cnt--;
2181 if (tp->nvram_lock_cnt == 0)
2182 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2183 }
2184 }
2185
2186 /* tp->lock is held. */
2187 static void tg3_enable_nvram_access(struct tg3 *tp)
2188 {
2189 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2190 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2191 u32 nvaccess = tr32(NVRAM_ACCESS);
2192
2193 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2194 }
2195 }
2196
2197 /* tp->lock is held. */
2198 static void tg3_disable_nvram_access(struct tg3 *tp)
2199 {
2200 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2201 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2202 u32 nvaccess = tr32(NVRAM_ACCESS);
2203
2204 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2205 }
2206 }
2207
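/* Fallback read path for devices without an NVRAM interface: fetch one
 * 32-bit word through the GRC EEPROM registers.  The offset must be
 * 32-bit aligned and no larger than EEPROM_ADDR_ADDR_MASK.
 */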
2208 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2209 u32 offset, u32 *val)
2210 {
2211 u32 tmp;
2212 int i;
2213
2214 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2215 return -EINVAL;
2216
2217 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2218 EEPROM_ADDR_DEVID_MASK |
2219 EEPROM_ADDR_READ);
2220 tw32(GRC_EEPROM_ADDR,
2221 tmp |
2222 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2223 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2224 EEPROM_ADDR_ADDR_MASK) |
2225 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2226
2227 for (i = 0; i < 1000; i++) {
2228 tmp = tr32(GRC_EEPROM_ADDR);
2229
2230 if (tmp & EEPROM_ADDR_COMPLETE)
2231 break;
2232 msleep(1);
2233 }
2234 if (!(tmp & EEPROM_ADDR_COMPLETE))
2235 return -EBUSY;
2236
2237 tmp = tr32(GRC_EEPROM_DATA);
2238
2239 /*
2240 * The data will always be opposite the native endian
2241 * format. Perform a blind byteswap to compensate.
2242 */
2243 *val = swab32(tmp);
2244
2245 return 0;
2246 }
2247
2248 #define NVRAM_CMD_TIMEOUT 10000
2249
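/* Issue a command to the NVRAM controller and poll for completion,
 * giving up after NVRAM_CMD_TIMEOUT polls (about 100 ms at 10 us each).
 */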
2250 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2251 {
2252 int i;
2253
2254 tw32(NVRAM_CMD, nvram_cmd);
2255 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2256 udelay(10);
2257 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2258 udelay(10);
2259 break;
2260 }
2261 }
2262
2263 if (i == NVRAM_CMD_TIMEOUT)
2264 return -EBUSY;
2265
2266 return 0;
2267 }
2268
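/* Translate a logical NVRAM offset into the physical layout used by
 * Atmel AT45DB0x1B-style buffered flash, where the page number sits at a
 * fixed bit position instead of following the byte offset linearly.
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */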
2269 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2270 {
2271 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2272 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2273 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2274 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2275 (tp->nvram_jedecnum == JEDEC_ATMEL))
2276
2277 addr = ((addr / tp->nvram_pagesize) <<
2278 ATMEL_AT45DB0X1B_PAGE_POS) +
2279 (addr % tp->nvram_pagesize);
2280
2281 return addr;
2282 }
2283
2284 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2285 {
2286 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2287 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2288 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2289 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2290 (tp->nvram_jedecnum == JEDEC_ATMEL))
2291
2292 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2293 tp->nvram_pagesize) +
2294 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2295
2296 return addr;
2297 }
2298
2299 /* NOTE: Data read in from NVRAM is byteswapped according to
2300 * the byteswapping settings for all other register accesses.
2301 * tg3 devices are BE devices, so on a BE machine, the data
2302 * returned will be exactly as it is seen in NVRAM. On a LE
2303 * machine, the 32-bit value will be byteswapped.
2304 */
2305 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2306 {
2307 int ret;
2308
2309 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2310 return tg3_nvram_read_using_eeprom(tp, offset, val);
2311
2312 offset = tg3_nvram_phys_addr(tp, offset);
2313
2314 if (offset > NVRAM_ADDR_MSK)
2315 return -EINVAL;
2316
2317 ret = tg3_nvram_lock(tp);
2318 if (ret)
2319 return ret;
2320
2321 tg3_enable_nvram_access(tp);
2322
2323 tw32(NVRAM_ADDR, offset);
2324 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2325 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2326
2327 if (ret == 0)
2328 *val = tr32(NVRAM_RDDATA);
2329
2330 tg3_disable_nvram_access(tp);
2331
2332 tg3_nvram_unlock(tp);
2333
2334 return ret;
2335 }
2336
2337 /* Ensures NVRAM data is in bytestream format. */
2338 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2339 {
2340 u32 v;
2341 int res = tg3_nvram_read(tp, offset, &v);
2342 if (!res)
2343 *val = cpu_to_be32(v);
2344 return res;
2345 }
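
/* Illustrative use of the byte-stream helper above when filling a buffer
 * that is later treated as raw bytes (VPD strings, MAC addresses and the
 * like); this is a sketch with a hypothetical base offset "off", not a
 * call site in this driver:
 *
 *	__be32 buf[4];
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		if (tg3_nvram_read_be32(tp, off + i * 4, &buf[i]))
 *			break;
 */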
2346
2347 /* tp->lock is held. */
2348 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2349 {
2350 u32 addr_high, addr_low;
2351 int i;
2352
2353 addr_high = ((tp->dev->dev_addr[0] << 8) |
2354 tp->dev->dev_addr[1]);
2355 addr_low = ((tp->dev->dev_addr[2] << 24) |
2356 (tp->dev->dev_addr[3] << 16) |
2357 (tp->dev->dev_addr[4] << 8) |
2358 (tp->dev->dev_addr[5] << 0));
2359 for (i = 0; i < 4; i++) {
2360 if (i == 1 && skip_mac_1)
2361 continue;
2362 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2363 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2364 }
2365
2366 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2367 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2368 for (i = 0; i < 12; i++) {
2369 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2370 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2371 }
2372 }
2373
2374 addr_high = (tp->dev->dev_addr[0] +
2375 tp->dev->dev_addr[1] +
2376 tp->dev->dev_addr[2] +
2377 tp->dev->dev_addr[3] +
2378 tp->dev->dev_addr[4] +
2379 tp->dev->dev_addr[5]) &
2380 TX_BACKOFF_SEED_MASK;
2381 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2382 }
2383
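/* Move the device into the requested PCI power state.  D0 simply switches
 * back from Vaux; the low-power states reprogram the PHY advertisement for
 * WOL, select reduced MAC/clock modes, power the PHY down when nothing
 * needs to wake the system, and finally arm PME and set the PCI state.
 */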
2384 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2385 {
2386 u32 misc_host_ctrl;
2387 bool device_should_wake, do_low_power;
2388
2389 /* Make sure register accesses (indirect or otherwise)
2390 * will function correctly.
2391 */
2392 pci_write_config_dword(tp->pdev,
2393 TG3PCI_MISC_HOST_CTRL,
2394 tp->misc_host_ctrl);
2395
2396 switch (state) {
2397 case PCI_D0:
2398 pci_enable_wake(tp->pdev, state, false);
2399 pci_set_power_state(tp->pdev, PCI_D0);
2400
2401 /* Switch out of Vaux if it is a NIC */
2402 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2403 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2404
2405 return 0;
2406
2407 case PCI_D1:
2408 case PCI_D2:
2409 case PCI_D3hot:
2410 break;
2411
2412 default:
2413 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2414 tp->dev->name, state);
2415 return -EINVAL;
2416 }
2417
2418 /* Restore the CLKREQ setting. */
2419 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2420 u16 lnkctl;
2421
2422 pci_read_config_word(tp->pdev,
2423 tp->pcie_cap + PCI_EXP_LNKCTL,
2424 &lnkctl);
2425 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2426 pci_write_config_word(tp->pdev,
2427 tp->pcie_cap + PCI_EXP_LNKCTL,
2428 lnkctl);
2429 }
2430
2431 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2432 tw32(TG3PCI_MISC_HOST_CTRL,
2433 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2434
2435 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2436 device_may_wakeup(&tp->pdev->dev) &&
2437 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2438
2439 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2440 do_low_power = false;
2441 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2442 !tp->link_config.phy_is_low_power) {
2443 struct phy_device *phydev;
2444 u32 phyid, advertising;
2445
2446 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2447
2448 tp->link_config.phy_is_low_power = 1;
2449
2450 tp->link_config.orig_speed = phydev->speed;
2451 tp->link_config.orig_duplex = phydev->duplex;
2452 tp->link_config.orig_autoneg = phydev->autoneg;
2453 tp->link_config.orig_advertising = phydev->advertising;
2454
2455 advertising = ADVERTISED_TP |
2456 ADVERTISED_Pause |
2457 ADVERTISED_Autoneg |
2458 ADVERTISED_10baseT_Half;
2459
2460 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2461 device_should_wake) {
2462 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2463 advertising |=
2464 ADVERTISED_100baseT_Half |
2465 ADVERTISED_100baseT_Full |
2466 ADVERTISED_10baseT_Full;
2467 else
2468 advertising |= ADVERTISED_10baseT_Full;
2469 }
2470
2471 phydev->advertising = advertising;
2472
2473 phy_start_aneg(phydev);
2474
2475 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2476 if (phyid != TG3_PHY_ID_BCMAC131) {
2477 phyid &= TG3_PHY_OUI_MASK;
2478 if (phyid == TG3_PHY_OUI_1 ||
2479 phyid == TG3_PHY_OUI_2 ||
2480 phyid == TG3_PHY_OUI_3)
2481 do_low_power = true;
2482 }
2483 }
2484 } else {
2485 do_low_power = true;
2486
2487 if (tp->link_config.phy_is_low_power == 0) {
2488 tp->link_config.phy_is_low_power = 1;
2489 tp->link_config.orig_speed = tp->link_config.speed;
2490 tp->link_config.orig_duplex = tp->link_config.duplex;
2491 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2492 }
2493
2494 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2495 tp->link_config.speed = SPEED_10;
2496 tp->link_config.duplex = DUPLEX_HALF;
2497 tp->link_config.autoneg = AUTONEG_ENABLE;
2498 tg3_setup_phy(tp, 0);
2499 }
2500 }
2501
2502 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2503 u32 val;
2504
2505 val = tr32(GRC_VCPU_EXT_CTRL);
2506 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2507 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2508 int i;
2509 u32 val;
2510
2511 for (i = 0; i < 200; i++) {
2512 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2513 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2514 break;
2515 msleep(1);
2516 }
2517 }
2518 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2519 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2520 WOL_DRV_STATE_SHUTDOWN |
2521 WOL_DRV_WOL |
2522 WOL_SET_MAGIC_PKT);
2523
2524 if (device_should_wake) {
2525 u32 mac_mode;
2526
2527 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2528 if (do_low_power) {
2529 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2530 udelay(40);
2531 }
2532
2533 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2534 mac_mode = MAC_MODE_PORT_MODE_GMII;
2535 else
2536 mac_mode = MAC_MODE_PORT_MODE_MII;
2537
2538 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2539 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2540 ASIC_REV_5700) {
2541 u32 speed = (tp->tg3_flags &
2542 TG3_FLAG_WOL_SPEED_100MB) ?
2543 SPEED_100 : SPEED_10;
2544 if (tg3_5700_link_polarity(tp, speed))
2545 mac_mode |= MAC_MODE_LINK_POLARITY;
2546 else
2547 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2548 }
2549 } else {
2550 mac_mode = MAC_MODE_PORT_MODE_TBI;
2551 }
2552
2553 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2554 tw32(MAC_LED_CTRL, tp->led_ctrl);
2555
2556 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2557 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2558 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2559 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2560 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2561 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2562
2563 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2564 mac_mode |= tp->mac_mode &
2565 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2566 if (mac_mode & MAC_MODE_APE_TX_EN)
2567 mac_mode |= MAC_MODE_TDE_ENABLE;
2568 }
2569
2570 tw32_f(MAC_MODE, mac_mode);
2571 udelay(100);
2572
2573 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2574 udelay(10);
2575 }
2576
2577 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2578 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2580 u32 base_val;
2581
2582 base_val = tp->pci_clock_ctrl;
2583 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2584 CLOCK_CTRL_TXCLK_DISABLE);
2585
2586 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2587 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2588 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2589 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2590 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2591 /* do nothing */
2592 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2593 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2594 u32 newbits1, newbits2;
2595
2596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2598 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2599 CLOCK_CTRL_TXCLK_DISABLE |
2600 CLOCK_CTRL_ALTCLK);
2601 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2602 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2603 newbits1 = CLOCK_CTRL_625_CORE;
2604 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2605 } else {
2606 newbits1 = CLOCK_CTRL_ALTCLK;
2607 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2608 }
2609
2610 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2611 40);
2612
2613 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2614 40);
2615
2616 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2617 u32 newbits3;
2618
2619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2621 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2622 CLOCK_CTRL_TXCLK_DISABLE |
2623 CLOCK_CTRL_44MHZ_CORE);
2624 } else {
2625 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2626 }
2627
2628 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2629 tp->pci_clock_ctrl | newbits3, 40);
2630 }
2631 }
2632
2633 if (!(device_should_wake) &&
2634 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2635 tg3_power_down_phy(tp, do_low_power);
2636
2637 tg3_frob_aux_power(tp);
2638
2639 /* Workaround for unstable PLL clock */
2640 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2641 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2642 u32 val = tr32(0x7d00);
2643
2644 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2645 tw32(0x7d00, val);
2646 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2647 int err;
2648
2649 err = tg3_nvram_lock(tp);
2650 tg3_halt_cpu(tp, RX_CPU_BASE);
2651 if (!err)
2652 tg3_nvram_unlock(tp);
2653 }
2654 }
2655
2656 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2657
2658 if (device_should_wake)
2659 pci_enable_wake(tp->pdev, state, true);
2660
2661 /* Finally, set the new power state. */
2662 pci_set_power_state(tp->pdev, state);
2663
2664 return 0;
2665 }
2666
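/* Decode the speed/duplex fields of MII_TG3_AUX_STAT into generic SPEED_
 * and DUPLEX_ values.  FET-style PHYs encode the result differently and
 * are handled in the default case.
 */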
2667 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2668 {
2669 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2670 case MII_TG3_AUX_STAT_10HALF:
2671 *speed = SPEED_10;
2672 *duplex = DUPLEX_HALF;
2673 break;
2674
2675 case MII_TG3_AUX_STAT_10FULL:
2676 *speed = SPEED_10;
2677 *duplex = DUPLEX_FULL;
2678 break;
2679
2680 case MII_TG3_AUX_STAT_100HALF:
2681 *speed = SPEED_100;
2682 *duplex = DUPLEX_HALF;
2683 break;
2684
2685 case MII_TG3_AUX_STAT_100FULL:
2686 *speed = SPEED_100;
2687 *duplex = DUPLEX_FULL;
2688 break;
2689
2690 case MII_TG3_AUX_STAT_1000HALF:
2691 *speed = SPEED_1000;
2692 *duplex = DUPLEX_HALF;
2693 break;
2694
2695 case MII_TG3_AUX_STAT_1000FULL:
2696 *speed = SPEED_1000;
2697 *duplex = DUPLEX_FULL;
2698 break;
2699
2700 default:
2701 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2702 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2703 SPEED_10;
2704 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2705 DUPLEX_HALF;
2706 break;
2707 }
2708 *speed = SPEED_INVALID;
2709 *duplex = DUPLEX_INVALID;
2710 break;
2711 }
2712 }
2713
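/* Program the copper PHY advertisement and BMCR from tp->link_config.
 * Three cases: low-power mode (gigabit advertisement dropped), normal
 * autonegotiation with the configured advertisement mask, and a forced
 * speed/duplex, where the PHY is briefly looped back so the link drops
 * before the new BMCR value is written.
 */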
2714 static void tg3_phy_copper_begin(struct tg3 *tp)
2715 {
2716 u32 new_adv;
2717 int i;
2718
2719 if (tp->link_config.phy_is_low_power) {
2720 /* Entering low power mode. Disable gigabit and
2721 * 100baseT advertisements.
2722 */
2723 tg3_writephy(tp, MII_TG3_CTRL, 0);
2724
2725 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2726 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2727 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2728 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2729
2730 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2731 } else if (tp->link_config.speed == SPEED_INVALID) {
2732 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2733 tp->link_config.advertising &=
2734 ~(ADVERTISED_1000baseT_Half |
2735 ADVERTISED_1000baseT_Full);
2736
2737 new_adv = ADVERTISE_CSMA;
2738 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2739 new_adv |= ADVERTISE_10HALF;
2740 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2741 new_adv |= ADVERTISE_10FULL;
2742 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2743 new_adv |= ADVERTISE_100HALF;
2744 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2745 new_adv |= ADVERTISE_100FULL;
2746
2747 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2748
2749 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2750
2751 if (tp->link_config.advertising &
2752 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2753 new_adv = 0;
2754 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2755 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2756 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2757 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2758 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2759 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2760 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2761 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2762 MII_TG3_CTRL_ENABLE_AS_MASTER);
2763 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2764 } else {
2765 tg3_writephy(tp, MII_TG3_CTRL, 0);
2766 }
2767 } else {
2768 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2769 new_adv |= ADVERTISE_CSMA;
2770
2771 /* Asking for a specific link mode. */
2772 if (tp->link_config.speed == SPEED_1000) {
2773 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2774
2775 if (tp->link_config.duplex == DUPLEX_FULL)
2776 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2777 else
2778 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2779 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2780 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2781 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2782 MII_TG3_CTRL_ENABLE_AS_MASTER);
2783 } else {
2784 if (tp->link_config.speed == SPEED_100) {
2785 if (tp->link_config.duplex == DUPLEX_FULL)
2786 new_adv |= ADVERTISE_100FULL;
2787 else
2788 new_adv |= ADVERTISE_100HALF;
2789 } else {
2790 if (tp->link_config.duplex == DUPLEX_FULL)
2791 new_adv |= ADVERTISE_10FULL;
2792 else
2793 new_adv |= ADVERTISE_10HALF;
2794 }
2795 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2796
2797 new_adv = 0;
2798 }
2799
2800 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2801 }
2802
2803 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2804 tp->link_config.speed != SPEED_INVALID) {
2805 u32 bmcr, orig_bmcr;
2806
2807 tp->link_config.active_speed = tp->link_config.speed;
2808 tp->link_config.active_duplex = tp->link_config.duplex;
2809
2810 bmcr = 0;
2811 switch (tp->link_config.speed) {
2812 default:
2813 case SPEED_10:
2814 break;
2815
2816 case SPEED_100:
2817 bmcr |= BMCR_SPEED100;
2818 break;
2819
2820 case SPEED_1000:
2821 bmcr |= TG3_BMCR_SPEED1000;
2822 break;
2823 }
2824
2825 if (tp->link_config.duplex == DUPLEX_FULL)
2826 bmcr |= BMCR_FULLDPLX;
2827
2828 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2829 (bmcr != orig_bmcr)) {
2830 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2831 for (i = 0; i < 1500; i++) {
2832 u32 tmp;
2833
2834 udelay(10);
2835 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2836 tg3_readphy(tp, MII_BMSR, &tmp))
2837 continue;
2838 if (!(tmp & BMSR_LSTATUS)) {
2839 udelay(40);
2840 break;
2841 }
2842 }
2843 tg3_writephy(tp, MII_BMCR, bmcr);
2844 udelay(40);
2845 }
2846 } else {
2847 tg3_writephy(tp, MII_BMCR,
2848 BMCR_ANENABLE | BMCR_ANRESTART);
2849 }
2850 }
2851
2852 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2853 {
2854 int err;
2855
2856 /* Turn off tap power management. */
2857 /* Set Extended packet length bit */
2858 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2859
2860 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2861 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2862
2863 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2864 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2865
2866 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2867 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2868
2869 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2870 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2871
2872 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2873 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2874
2875 udelay(40);
2876
2877 return err;
2878 }
2879
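/* Return 1 if the PHY advertisement registers already contain every mode
 * in @mask (10/100 via MII_ADVERTISE, gigabit via MII_TG3_CTRL unless the
 * device is 10/100 only); return 0 if anything is missing or unreadable.
 */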
2880 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2881 {
2882 u32 adv_reg, all_mask = 0;
2883
2884 if (mask & ADVERTISED_10baseT_Half)
2885 all_mask |= ADVERTISE_10HALF;
2886 if (mask & ADVERTISED_10baseT_Full)
2887 all_mask |= ADVERTISE_10FULL;
2888 if (mask & ADVERTISED_100baseT_Half)
2889 all_mask |= ADVERTISE_100HALF;
2890 if (mask & ADVERTISED_100baseT_Full)
2891 all_mask |= ADVERTISE_100FULL;
2892
2893 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2894 return 0;
2895
2896 if ((adv_reg & all_mask) != all_mask)
2897 return 0;
2898 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2899 u32 tg3_ctrl;
2900
2901 all_mask = 0;
2902 if (mask & ADVERTISED_1000baseT_Half)
2903 all_mask |= ADVERTISE_1000HALF;
2904 if (mask & ADVERTISED_1000baseT_Full)
2905 all_mask |= ADVERTISE_1000FULL;
2906
2907 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2908 return 0;
2909
2910 if ((tg3_ctrl & all_mask) != all_mask)
2911 return 0;
2912 }
2913 return 1;
2914 }
2915
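/* Check that the advertised pause bits match the requested flow-control
 * settings.  A mismatch on a full-duplex link fails the check; on a
 * half-duplex link a mismatch only causes the advertisement to be
 * rewritten so the next renegotiation picks it up.  Returns 1 when the
 * current link can be kept.
 */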
2916 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2917 {
2918 u32 curadv, reqadv;
2919
2920 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2921 return 1;
2922
2923 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2924 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2925
2926 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2927 if (curadv != reqadv)
2928 return 0;
2929
2930 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2931 tg3_readphy(tp, MII_LPA, rmtadv);
2932 } else {
2933 /* Reprogram the advertisement register, even if it
2934 * does not affect the current link. If the link
2935 * gets renegotiated in the future, we can save an
2936 * additional renegotiation cycle by advertising
2937 * it correctly in the first place.
2938 */
2939 if (curadv != reqadv) {
2940 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2941 ADVERTISE_PAUSE_ASYM);
2942 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2943 }
2944 }
2945
2946 return 1;
2947 }
2948
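/* Bring up, or re-validate, the link on copper devices: clear stale MAC
 * status, apply per-PHY workarounds, poll BMSR for link, decode speed and
 * duplex from AUX_STAT, verify the advertisement, and finally program
 * MAC_MODE, flow control and the carrier state to match.
 */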
2949 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2950 {
2951 int current_link_up;
2952 u32 bmsr, dummy;
2953 u32 lcl_adv, rmt_adv;
2954 u16 current_speed;
2955 u8 current_duplex;
2956 int i, err;
2957
2958 tw32(MAC_EVENT, 0);
2959
2960 tw32_f(MAC_STATUS,
2961 (MAC_STATUS_SYNC_CHANGED |
2962 MAC_STATUS_CFG_CHANGED |
2963 MAC_STATUS_MI_COMPLETION |
2964 MAC_STATUS_LNKSTATE_CHANGED));
2965 udelay(40);
2966
2967 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2968 tw32_f(MAC_MI_MODE,
2969 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2970 udelay(80);
2971 }
2972
2973 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2974
2975 /* Some third-party PHYs need to be reset on link going
2976 * down.
2977 */
2978 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2980 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2981 netif_carrier_ok(tp->dev)) {
2982 tg3_readphy(tp, MII_BMSR, &bmsr);
2983 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2984 !(bmsr & BMSR_LSTATUS))
2985 force_reset = 1;
2986 }
2987 if (force_reset)
2988 tg3_phy_reset(tp);
2989
2990 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2991 tg3_readphy(tp, MII_BMSR, &bmsr);
2992 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2993 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2994 bmsr = 0;
2995
2996 if (!(bmsr & BMSR_LSTATUS)) {
2997 err = tg3_init_5401phy_dsp(tp);
2998 if (err)
2999 return err;
3000
3001 tg3_readphy(tp, MII_BMSR, &bmsr);
3002 for (i = 0; i < 1000; i++) {
3003 udelay(10);
3004 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3005 (bmsr & BMSR_LSTATUS)) {
3006 udelay(40);
3007 break;
3008 }
3009 }
3010
3011 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
3012 !(bmsr & BMSR_LSTATUS) &&
3013 tp->link_config.active_speed == SPEED_1000) {
3014 err = tg3_phy_reset(tp);
3015 if (!err)
3016 err = tg3_init_5401phy_dsp(tp);
3017 if (err)
3018 return err;
3019 }
3020 }
3021 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3022 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3023 /* 5701 {A0,B0} CRC bug workaround */
3024 tg3_writephy(tp, 0x15, 0x0a75);
3025 tg3_writephy(tp, 0x1c, 0x8c68);
3026 tg3_writephy(tp, 0x1c, 0x8d68);
3027 tg3_writephy(tp, 0x1c, 0x8c68);
3028 }
3029
3030 /* Clear pending interrupts... */
3031 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3032 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3033
3034 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3035 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3036 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3037 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3038
3039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3041 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3042 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3043 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3044 else
3045 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3046 }
3047
3048 current_link_up = 0;
3049 current_speed = SPEED_INVALID;
3050 current_duplex = DUPLEX_INVALID;
3051
3052 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3053 u32 val;
3054
3055 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3056 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3057 if (!(val & (1 << 10))) {
3058 val |= (1 << 10);
3059 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3060 goto relink;
3061 }
3062 }
3063
3064 bmsr = 0;
3065 for (i = 0; i < 100; i++) {
3066 tg3_readphy(tp, MII_BMSR, &bmsr);
3067 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3068 (bmsr & BMSR_LSTATUS))
3069 break;
3070 udelay(40);
3071 }
3072
3073 if (bmsr & BMSR_LSTATUS) {
3074 u32 aux_stat, bmcr;
3075
3076 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3077 for (i = 0; i < 2000; i++) {
3078 udelay(10);
3079 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3080 aux_stat)
3081 break;
3082 }
3083
3084 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3085 &current_speed,
3086 &current_duplex);
3087
3088 bmcr = 0;
3089 for (i = 0; i < 200; i++) {
3090 tg3_readphy(tp, MII_BMCR, &bmcr);
3091 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3092 continue;
3093 if (bmcr && bmcr != 0x7fff)
3094 break;
3095 udelay(10);
3096 }
3097
3098 lcl_adv = 0;
3099 rmt_adv = 0;
3100
3101 tp->link_config.active_speed = current_speed;
3102 tp->link_config.active_duplex = current_duplex;
3103
3104 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3105 if ((bmcr & BMCR_ANENABLE) &&
3106 tg3_copper_is_advertising_all(tp,
3107 tp->link_config.advertising)) {
3108 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3109 &rmt_adv))
3110 current_link_up = 1;
3111 }
3112 } else {
3113 if (!(bmcr & BMCR_ANENABLE) &&
3114 tp->link_config.speed == current_speed &&
3115 tp->link_config.duplex == current_duplex &&
3116 tp->link_config.flowctrl ==
3117 tp->link_config.active_flowctrl) {
3118 current_link_up = 1;
3119 }
3120 }
3121
3122 if (current_link_up == 1 &&
3123 tp->link_config.active_duplex == DUPLEX_FULL)
3124 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3125 }
3126
3127 relink:
3128 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3129 u32 tmp;
3130
3131 tg3_phy_copper_begin(tp);
3132
3133 tg3_readphy(tp, MII_BMSR, &tmp);
3134 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3135 (tmp & BMSR_LSTATUS))
3136 current_link_up = 1;
3137 }
3138
3139 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3140 if (current_link_up == 1) {
3141 if (tp->link_config.active_speed == SPEED_100 ||
3142 tp->link_config.active_speed == SPEED_10)
3143 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3144 else
3145 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3146 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3147 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3148 else
3149 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3150
3151 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3152 if (tp->link_config.active_duplex == DUPLEX_HALF)
3153 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3154
3155 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3156 if (current_link_up == 1 &&
3157 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3158 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3159 else
3160 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3161 }
3162
3163 /* ??? Without this setting Netgear GA302T PHY does not
3164 * ??? send/receive packets...
3165 */
3166 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3167 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3168 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3169 tw32_f(MAC_MI_MODE, tp->mi_mode);
3170 udelay(80);
3171 }
3172
3173 tw32_f(MAC_MODE, tp->mac_mode);
3174 udelay(40);
3175
3176 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3177 /* Polled via timer. */
3178 tw32_f(MAC_EVENT, 0);
3179 } else {
3180 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3181 }
3182 udelay(40);
3183
3184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3185 current_link_up == 1 &&
3186 tp->link_config.active_speed == SPEED_1000 &&
3187 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3188 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3189 udelay(120);
3190 tw32_f(MAC_STATUS,
3191 (MAC_STATUS_SYNC_CHANGED |
3192 MAC_STATUS_CFG_CHANGED));
3193 udelay(40);
3194 tg3_write_mem(tp,
3195 NIC_SRAM_FIRMWARE_MBOX,
3196 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3197 }
3198
3199 /* Prevent send BD corruption. */
3200 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3201 u16 oldlnkctl, newlnkctl;
3202
3203 pci_read_config_word(tp->pdev,
3204 tp->pcie_cap + PCI_EXP_LNKCTL,
3205 &oldlnkctl);
3206 if (tp->link_config.active_speed == SPEED_100 ||
3207 tp->link_config.active_speed == SPEED_10)
3208 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3209 else
3210 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3211 if (newlnkctl != oldlnkctl)
3212 pci_write_config_word(tp->pdev,
3213 tp->pcie_cap + PCI_EXP_LNKCTL,
3214 newlnkctl);
3215 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3216 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3217 if (tp->link_config.active_speed == SPEED_100 ||
3218 tp->link_config.active_speed == SPEED_10)
3219 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3220 else
3221 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3222 if (newreg != oldreg)
3223 tw32(TG3_PCIE_LNKCTL, newreg);
3224 }
3225
3226 if (current_link_up != netif_carrier_ok(tp->dev)) {
3227 if (current_link_up)
3228 netif_carrier_on(tp->dev);
3229 else
3230 netif_carrier_off(tp->dev);
3231 tg3_link_report(tp);
3232 }
3233
3234 return 0;
3235 }
3236
3237 struct tg3_fiber_aneginfo {
3238 int state;
3239 #define ANEG_STATE_UNKNOWN 0
3240 #define ANEG_STATE_AN_ENABLE 1
3241 #define ANEG_STATE_RESTART_INIT 2
3242 #define ANEG_STATE_RESTART 3
3243 #define ANEG_STATE_DISABLE_LINK_OK 4
3244 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3245 #define ANEG_STATE_ABILITY_DETECT 6
3246 #define ANEG_STATE_ACK_DETECT_INIT 7
3247 #define ANEG_STATE_ACK_DETECT 8
3248 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3249 #define ANEG_STATE_COMPLETE_ACK 10
3250 #define ANEG_STATE_IDLE_DETECT_INIT 11
3251 #define ANEG_STATE_IDLE_DETECT 12
3252 #define ANEG_STATE_LINK_OK 13
3253 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3254 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3255
3256 u32 flags;
3257 #define MR_AN_ENABLE 0x00000001
3258 #define MR_RESTART_AN 0x00000002
3259 #define MR_AN_COMPLETE 0x00000004
3260 #define MR_PAGE_RX 0x00000008
3261 #define MR_NP_LOADED 0x00000010
3262 #define MR_TOGGLE_TX 0x00000020
3263 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3264 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3265 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3266 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3267 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3268 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3269 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3270 #define MR_TOGGLE_RX 0x00002000
3271 #define MR_NP_RX 0x00004000
3272
3273 #define MR_LINK_OK 0x80000000
3274
3275 unsigned long link_time, cur_time;
3276
3277 u32 ability_match_cfg;
3278 int ability_match_count;
3279
3280 char ability_match, idle_match, ack_match;
3281
3282 u32 txconfig, rxconfig;
3283 #define ANEG_CFG_NP 0x00000080
3284 #define ANEG_CFG_ACK 0x00000040
3285 #define ANEG_CFG_RF2 0x00000020
3286 #define ANEG_CFG_RF1 0x00000010
3287 #define ANEG_CFG_PS2 0x00000001
3288 #define ANEG_CFG_PS1 0x00008000
3289 #define ANEG_CFG_HD 0x00004000
3290 #define ANEG_CFG_FD 0x00002000
3291 #define ANEG_CFG_INVAL 0x00001f06
3292
3293 };
3294 #define ANEG_OK 0
3295 #define ANEG_DONE 1
3296 #define ANEG_TIMER_ENAB 2
3297 #define ANEG_FAILED -1
3298
3299 #define ANEG_STATE_SETTLE_TIME 10000
3300
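/* Software 1000BASE-X autonegotiation state machine, stepped by the caller
 * roughly once per microsecond.  Configuration words are exchanged through
 * MAC_TX_AUTO_NEG / MAC_RX_AUTO_NEG, and progress is reported as ANEG_OK,
 * ANEG_TIMER_ENAB, ANEG_DONE or ANEG_FAILED.
 */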
3301 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3302 struct tg3_fiber_aneginfo *ap)
3303 {
3304 u16 flowctrl;
3305 unsigned long delta;
3306 u32 rx_cfg_reg;
3307 int ret;
3308
3309 if (ap->state == ANEG_STATE_UNKNOWN) {
3310 ap->rxconfig = 0;
3311 ap->link_time = 0;
3312 ap->cur_time = 0;
3313 ap->ability_match_cfg = 0;
3314 ap->ability_match_count = 0;
3315 ap->ability_match = 0;
3316 ap->idle_match = 0;
3317 ap->ack_match = 0;
3318 }
3319 ap->cur_time++;
3320
3321 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3322 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3323
3324 if (rx_cfg_reg != ap->ability_match_cfg) {
3325 ap->ability_match_cfg = rx_cfg_reg;
3326 ap->ability_match = 0;
3327 ap->ability_match_count = 0;
3328 } else {
3329 if (++ap->ability_match_count > 1) {
3330 ap->ability_match = 1;
3331 ap->ability_match_cfg = rx_cfg_reg;
3332 }
3333 }
3334 if (rx_cfg_reg & ANEG_CFG_ACK)
3335 ap->ack_match = 1;
3336 else
3337 ap->ack_match = 0;
3338
3339 ap->idle_match = 0;
3340 } else {
3341 ap->idle_match = 1;
3342 ap->ability_match_cfg = 0;
3343 ap->ability_match_count = 0;
3344 ap->ability_match = 0;
3345 ap->ack_match = 0;
3346
3347 rx_cfg_reg = 0;
3348 }
3349
3350 ap->rxconfig = rx_cfg_reg;
3351 ret = ANEG_OK;
3352
3353 switch (ap->state) {
3354 case ANEG_STATE_UNKNOWN:
3355 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3356 ap->state = ANEG_STATE_AN_ENABLE;
3357
3358 /* fallthru */
3359 case ANEG_STATE_AN_ENABLE:
3360 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3361 if (ap->flags & MR_AN_ENABLE) {
3362 ap->link_time = 0;
3363 ap->cur_time = 0;
3364 ap->ability_match_cfg = 0;
3365 ap->ability_match_count = 0;
3366 ap->ability_match = 0;
3367 ap->idle_match = 0;
3368 ap->ack_match = 0;
3369
3370 ap->state = ANEG_STATE_RESTART_INIT;
3371 } else {
3372 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3373 }
3374 break;
3375
3376 case ANEG_STATE_RESTART_INIT:
3377 ap->link_time = ap->cur_time;
3378 ap->flags &= ~(MR_NP_LOADED);
3379 ap->txconfig = 0;
3380 tw32(MAC_TX_AUTO_NEG, 0);
3381 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3382 tw32_f(MAC_MODE, tp->mac_mode);
3383 udelay(40);
3384
3385 ret = ANEG_TIMER_ENAB;
3386 ap->state = ANEG_STATE_RESTART;
3387
3388 /* fallthru */
3389 case ANEG_STATE_RESTART:
3390 delta = ap->cur_time - ap->link_time;
3391 if (delta > ANEG_STATE_SETTLE_TIME) {
3392 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3393 } else {
3394 ret = ANEG_TIMER_ENAB;
3395 }
3396 break;
3397
3398 case ANEG_STATE_DISABLE_LINK_OK:
3399 ret = ANEG_DONE;
3400 break;
3401
3402 case ANEG_STATE_ABILITY_DETECT_INIT:
3403 ap->flags &= ~(MR_TOGGLE_TX);
3404 ap->txconfig = ANEG_CFG_FD;
3405 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3406 if (flowctrl & ADVERTISE_1000XPAUSE)
3407 ap->txconfig |= ANEG_CFG_PS1;
3408 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3409 ap->txconfig |= ANEG_CFG_PS2;
3410 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3411 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3412 tw32_f(MAC_MODE, tp->mac_mode);
3413 udelay(40);
3414
3415 ap->state = ANEG_STATE_ABILITY_DETECT;
3416 break;
3417
3418 case ANEG_STATE_ABILITY_DETECT:
3419 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3420 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3421 }
3422 break;
3423
3424 case ANEG_STATE_ACK_DETECT_INIT:
3425 ap->txconfig |= ANEG_CFG_ACK;
3426 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3427 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3428 tw32_f(MAC_MODE, tp->mac_mode);
3429 udelay(40);
3430
3431 ap->state = ANEG_STATE_ACK_DETECT;
3432
3433 /* fallthru */
3434 case ANEG_STATE_ACK_DETECT:
3435 if (ap->ack_match != 0) {
3436 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3437 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3438 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3439 } else {
3440 ap->state = ANEG_STATE_AN_ENABLE;
3441 }
3442 } else if (ap->ability_match != 0 &&
3443 ap->rxconfig == 0) {
3444 ap->state = ANEG_STATE_AN_ENABLE;
3445 }
3446 break;
3447
3448 case ANEG_STATE_COMPLETE_ACK_INIT:
3449 if (ap->rxconfig & ANEG_CFG_INVAL) {
3450 ret = ANEG_FAILED;
3451 break;
3452 }
3453 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3454 MR_LP_ADV_HALF_DUPLEX |
3455 MR_LP_ADV_SYM_PAUSE |
3456 MR_LP_ADV_ASYM_PAUSE |
3457 MR_LP_ADV_REMOTE_FAULT1 |
3458 MR_LP_ADV_REMOTE_FAULT2 |
3459 MR_LP_ADV_NEXT_PAGE |
3460 MR_TOGGLE_RX |
3461 MR_NP_RX);
3462 if (ap->rxconfig & ANEG_CFG_FD)
3463 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3464 if (ap->rxconfig & ANEG_CFG_HD)
3465 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3466 if (ap->rxconfig & ANEG_CFG_PS1)
3467 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3468 if (ap->rxconfig & ANEG_CFG_PS2)
3469 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3470 if (ap->rxconfig & ANEG_CFG_RF1)
3471 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3472 if (ap->rxconfig & ANEG_CFG_RF2)
3473 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3474 if (ap->rxconfig & ANEG_CFG_NP)
3475 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3476
3477 ap->link_time = ap->cur_time;
3478
3479 ap->flags ^= (MR_TOGGLE_TX);
3480 if (ap->rxconfig & 0x0008)
3481 ap->flags |= MR_TOGGLE_RX;
3482 if (ap->rxconfig & ANEG_CFG_NP)
3483 ap->flags |= MR_NP_RX;
3484 ap->flags |= MR_PAGE_RX;
3485
3486 ap->state = ANEG_STATE_COMPLETE_ACK;
3487 ret = ANEG_TIMER_ENAB;
3488 break;
3489
3490 case ANEG_STATE_COMPLETE_ACK:
3491 if (ap->ability_match != 0 &&
3492 ap->rxconfig == 0) {
3493 ap->state = ANEG_STATE_AN_ENABLE;
3494 break;
3495 }
3496 delta = ap->cur_time - ap->link_time;
3497 if (delta > ANEG_STATE_SETTLE_TIME) {
3498 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3499 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3500 } else {
3501 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3502 !(ap->flags & MR_NP_RX)) {
3503 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3504 } else {
3505 ret = ANEG_FAILED;
3506 }
3507 }
3508 }
3509 break;
3510
3511 case ANEG_STATE_IDLE_DETECT_INIT:
3512 ap->link_time = ap->cur_time;
3513 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3514 tw32_f(MAC_MODE, tp->mac_mode);
3515 udelay(40);
3516
3517 ap->state = ANEG_STATE_IDLE_DETECT;
3518 ret = ANEG_TIMER_ENAB;
3519 break;
3520
3521 case ANEG_STATE_IDLE_DETECT:
3522 if (ap->ability_match != 0 &&
3523 ap->rxconfig == 0) {
3524 ap->state = ANEG_STATE_AN_ENABLE;
3525 break;
3526 }
3527 delta = ap->cur_time - ap->link_time;
3528 if (delta > ANEG_STATE_SETTLE_TIME) {
3529 /* XXX another gem from the Broadcom driver :( */
3530 ap->state = ANEG_STATE_LINK_OK;
3531 }
3532 break;
3533
3534 case ANEG_STATE_LINK_OK:
3535 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3536 ret = ANEG_DONE;
3537 break;
3538
3539 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3540 /* ??? unimplemented */
3541 break;
3542
3543 case ANEG_STATE_NEXT_PAGE_WAIT:
3544 /* ??? unimplemented */
3545 break;
3546
3547 default:
3548 ret = ANEG_FAILED;
3549 break;
3550 }
3551
3552 return ret;
3553 }
3554
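/* Run the software autonegotiation state machine for up to roughly 195 ms
 * and report whether a full-duplex link partner was negotiated.  The final
 * tx/rx config words are returned so the caller can derive flow control.
 */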
3555 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3556 {
3557 int res = 0;
3558 struct tg3_fiber_aneginfo aninfo;
3559 int status = ANEG_FAILED;
3560 unsigned int tick;
3561 u32 tmp;
3562
3563 tw32_f(MAC_TX_AUTO_NEG, 0);
3564
3565 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3566 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3567 udelay(40);
3568
3569 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3570 udelay(40);
3571
3572 memset(&aninfo, 0, sizeof(aninfo));
3573 aninfo.flags |= MR_AN_ENABLE;
3574 aninfo.state = ANEG_STATE_UNKNOWN;
3575 aninfo.cur_time = 0;
3576 tick = 0;
3577 while (++tick < 195000) {
3578 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3579 if (status == ANEG_DONE || status == ANEG_FAILED)
3580 break;
3581
3582 udelay(1);
3583 }
3584
3585 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3586 tw32_f(MAC_MODE, tp->mac_mode);
3587 udelay(40);
3588
3589 *txflags = aninfo.txconfig;
3590 *rxflags = aninfo.flags;
3591
3592 if (status == ANEG_DONE &&
3593 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3594 MR_LP_ADV_FULL_DUPLEX)))
3595 res = 1;
3596
3597 return res;
3598 }
3599
3600 static void tg3_init_bcm8002(struct tg3 *tp)
3601 {
3602 u32 mac_status = tr32(MAC_STATUS);
3603 int i;
3604
3605 /* Reset when initializing for the first time, or when we have a link. */
3606 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3607 !(mac_status & MAC_STATUS_PCS_SYNCED))
3608 return;
3609
3610 /* Set PLL lock range. */
3611 tg3_writephy(tp, 0x16, 0x8007);
3612
3613 /* SW reset */
3614 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3615
3616 /* Wait for reset to complete. */
3617 /* XXX schedule_timeout() ... */
3618 for (i = 0; i < 500; i++)
3619 udelay(10);
3620
3621 /* Config mode; select PMA/Ch 1 regs. */
3622 tg3_writephy(tp, 0x10, 0x8411);
3623
3624 /* Enable auto-lock and comdet, select txclk for tx. */
3625 tg3_writephy(tp, 0x11, 0x0a10);
3626
3627 tg3_writephy(tp, 0x18, 0x00a0);
3628 tg3_writephy(tp, 0x16, 0x41ff);
3629
3630 /* Assert and deassert POR. */
3631 tg3_writephy(tp, 0x13, 0x0400);
3632 udelay(40);
3633 tg3_writephy(tp, 0x13, 0x0000);
3634
3635 tg3_writephy(tp, 0x11, 0x0a50);
3636 udelay(40);
3637 tg3_writephy(tp, 0x11, 0x0a10);
3638
3639 /* Wait for signal to stabilize */
3640 /* XXX schedule_timeout() ... */
3641 for (i = 0; i < 15000; i++)
3642 udelay(10);
3643
3644 /* Deselect the channel register so we can read the PHYID
3645 * later.
3646 */
3647 tg3_writephy(tp, 0x10, 0x8011);
3648 }
3649
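/* Fiber link setup using the SG_DIG hardware autonegotiation block.
 * Forced mode bypasses autoneg entirely; otherwise hardware autoneg is
 * restarted whenever SG_DIG_CTRL does not hold the expected value, and
 * parallel detection is used as a fallback when the partner never
 * completes autonegotiation.  Returns 1 when the link is up.
 */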
3650 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3651 {
3652 u16 flowctrl;
3653 u32 sg_dig_ctrl, sg_dig_status;
3654 u32 serdes_cfg, expected_sg_dig_ctrl;
3655 int workaround, port_a;
3656 int current_link_up;
3657
3658 serdes_cfg = 0;
3659 expected_sg_dig_ctrl = 0;
3660 workaround = 0;
3661 port_a = 1;
3662 current_link_up = 0;
3663
3664 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3665 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3666 workaround = 1;
3667 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3668 port_a = 0;
3669
3670 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3671 /* preserve bits 20-23 for voltage regulator */
3672 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3673 }
3674
3675 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3676
3677 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3678 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3679 if (workaround) {
3680 u32 val = serdes_cfg;
3681
3682 if (port_a)
3683 val |= 0xc010000;
3684 else
3685 val |= 0x4010000;
3686 tw32_f(MAC_SERDES_CFG, val);
3687 }
3688
3689 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3690 }
3691 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3692 tg3_setup_flow_control(tp, 0, 0);
3693 current_link_up = 1;
3694 }
3695 goto out;
3696 }
3697
3698 /* Want auto-negotiation. */
3699 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3700
3701 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3702 if (flowctrl & ADVERTISE_1000XPAUSE)
3703 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3704 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3705 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3706
3707 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3708 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3709 tp->serdes_counter &&
3710 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3711 MAC_STATUS_RCVD_CFG)) ==
3712 MAC_STATUS_PCS_SYNCED)) {
3713 tp->serdes_counter--;
3714 current_link_up = 1;
3715 goto out;
3716 }
3717 restart_autoneg:
3718 if (workaround)
3719 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3720 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3721 udelay(5);
3722 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3723
3724 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3725 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3726 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3727 MAC_STATUS_SIGNAL_DET)) {
3728 sg_dig_status = tr32(SG_DIG_STATUS);
3729 mac_status = tr32(MAC_STATUS);
3730
3731 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3732 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3733 u32 local_adv = 0, remote_adv = 0;
3734
3735 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3736 local_adv |= ADVERTISE_1000XPAUSE;
3737 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3738 local_adv |= ADVERTISE_1000XPSE_ASYM;
3739
3740 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3741 remote_adv |= LPA_1000XPAUSE;
3742 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3743 remote_adv |= LPA_1000XPAUSE_ASYM;
3744
3745 tg3_setup_flow_control(tp, local_adv, remote_adv);
3746 current_link_up = 1;
3747 tp->serdes_counter = 0;
3748 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3749 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3750 if (tp->serdes_counter)
3751 tp->serdes_counter--;
3752 else {
3753 if (workaround) {
3754 u32 val = serdes_cfg;
3755
3756 if (port_a)
3757 val |= 0xc010000;
3758 else
3759 val |= 0x4010000;
3760
3761 tw32_f(MAC_SERDES_CFG, val);
3762 }
3763
3764 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3765 udelay(40);
3766
3767 /* Link parallel detection - link is up
3768  * only if we have PCS_SYNC and not
3769  * receiving config code words. */
3770 mac_status = tr32(MAC_STATUS);
3771 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3772 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3773 tg3_setup_flow_control(tp, 0, 0);
3774 current_link_up = 1;
3775 tp->tg3_flags2 |=
3776 TG3_FLG2_PARALLEL_DETECT;
3777 tp->serdes_counter =
3778 SERDES_PARALLEL_DET_TIMEOUT;
3779 } else
3780 goto restart_autoneg;
3781 }
3782 }
3783 } else {
3784 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3785 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3786 }
3787
3788 out:
3789 return current_link_up;
3790 }
3791
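/* Fiber link setup without the SG_DIG block: either run the software
 * autonegotiation state machine or simply force a 1000FD link, then derive
 * flow control from the exchanged config words.  Returns 1 when the link
 * is up.
 */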
3792 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3793 {
3794 int current_link_up = 0;
3795
3796 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3797 goto out;
3798
3799 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3800 u32 txflags, rxflags;
3801 int i;
3802
3803 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3804 u32 local_adv = 0, remote_adv = 0;
3805
3806 if (txflags & ANEG_CFG_PS1)
3807 local_adv |= ADVERTISE_1000XPAUSE;
3808 if (txflags & ANEG_CFG_PS2)
3809 local_adv |= ADVERTISE_1000XPSE_ASYM;
3810
3811 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3812 remote_adv |= LPA_1000XPAUSE;
3813 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3814 remote_adv |= LPA_1000XPAUSE_ASYM;
3815
3816 tg3_setup_flow_control(tp, local_adv, remote_adv);
3817
3818 current_link_up = 1;
3819 }
3820 for (i = 0; i < 30; i++) {
3821 udelay(20);
3822 tw32_f(MAC_STATUS,
3823 (MAC_STATUS_SYNC_CHANGED |
3824 MAC_STATUS_CFG_CHANGED));
3825 udelay(40);
3826 if ((tr32(MAC_STATUS) &
3827 (MAC_STATUS_SYNC_CHANGED |
3828 MAC_STATUS_CFG_CHANGED)) == 0)
3829 break;
3830 }
3831
3832 mac_status = tr32(MAC_STATUS);
3833 if (current_link_up == 0 &&
3834 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3835 !(mac_status & MAC_STATUS_RCVD_CFG))
3836 current_link_up = 1;
3837 } else {
3838 tg3_setup_flow_control(tp, 0, 0);
3839
3840 /* Forcing 1000FD link up. */
3841 current_link_up = 1;
3842
3843 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3844 udelay(40);
3845
3846 tw32_f(MAC_MODE, tp->mac_mode);
3847 udelay(40);
3848 }
3849
3850 out:
3851 return current_link_up;
3852 }
3853
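/* Top-level link setup for TBI/fiber ports.  Chooses between hardware and
 * software autonegotiation, updates MAC mode, the LED overrides and the
 * carrier state, and reports a link change whenever speed, duplex or flow
 * control differ from the previous values.
 */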
3854 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3855 {
3856 u32 orig_pause_cfg;
3857 u16 orig_active_speed;
3858 u8 orig_active_duplex;
3859 u32 mac_status;
3860 int current_link_up;
3861 int i;
3862
3863 orig_pause_cfg = tp->link_config.active_flowctrl;
3864 orig_active_speed = tp->link_config.active_speed;
3865 orig_active_duplex = tp->link_config.active_duplex;
3866
3867 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3868 netif_carrier_ok(tp->dev) &&
3869 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3870 mac_status = tr32(MAC_STATUS);
3871 mac_status &= (MAC_STATUS_PCS_SYNCED |
3872 MAC_STATUS_SIGNAL_DET |
3873 MAC_STATUS_CFG_CHANGED |
3874 MAC_STATUS_RCVD_CFG);
3875 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3876 MAC_STATUS_SIGNAL_DET)) {
3877 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3878 MAC_STATUS_CFG_CHANGED));
3879 return 0;
3880 }
3881 }
3882
3883 tw32_f(MAC_TX_AUTO_NEG, 0);
3884
3885 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3886 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3887 tw32_f(MAC_MODE, tp->mac_mode);
3888 udelay(40);
3889
3890 if (tp->phy_id == PHY_ID_BCM8002)
3891 tg3_init_bcm8002(tp);
3892
3893 /* Enable link change event even when serdes polling. */
3894 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3895 udelay(40);
3896
3897 current_link_up = 0;
3898 mac_status = tr32(MAC_STATUS);
3899
3900 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3901 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3902 else
3903 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3904
3905 tp->napi[0].hw_status->status =
3906 (SD_STATUS_UPDATED |
3907 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3908
3909 for (i = 0; i < 100; i++) {
3910 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3911 MAC_STATUS_CFG_CHANGED));
3912 udelay(5);
3913 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3914 MAC_STATUS_CFG_CHANGED |
3915 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3916 break;
3917 }
3918
3919 mac_status = tr32(MAC_STATUS);
3920 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3921 current_link_up = 0;
3922 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3923 tp->serdes_counter == 0) {
3924 tw32_f(MAC_MODE, (tp->mac_mode |
3925 MAC_MODE_SEND_CONFIGS));
3926 udelay(1);
3927 tw32_f(MAC_MODE, tp->mac_mode);
3928 }
3929 }
3930
3931 if (current_link_up == 1) {
3932 tp->link_config.active_speed = SPEED_1000;
3933 tp->link_config.active_duplex = DUPLEX_FULL;
3934 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3935 LED_CTRL_LNKLED_OVERRIDE |
3936 LED_CTRL_1000MBPS_ON));
3937 } else {
3938 tp->link_config.active_speed = SPEED_INVALID;
3939 tp->link_config.active_duplex = DUPLEX_INVALID;
3940 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3941 LED_CTRL_LNKLED_OVERRIDE |
3942 LED_CTRL_TRAFFIC_OVERRIDE));
3943 }
3944
3945 if (current_link_up != netif_carrier_ok(tp->dev)) {
3946 if (current_link_up)
3947 netif_carrier_on(tp->dev);
3948 else
3949 netif_carrier_off(tp->dev);
3950 tg3_link_report(tp);
3951 } else {
3952 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3953 if (orig_pause_cfg != now_pause_cfg ||
3954 orig_active_speed != tp->link_config.active_speed ||
3955 orig_active_duplex != tp->link_config.active_duplex)
3956 tg3_link_report(tp);
3957 }
3958
3959 return 0;
3960 }
3961
3962 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3963 {
3964 int current_link_up, err = 0;
3965 u32 bmsr, bmcr;
3966 u16 current_speed;
3967 u8 current_duplex;
3968 u32 local_adv, remote_adv;
3969
3970 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3971 tw32_f(MAC_MODE, tp->mac_mode);
3972 udelay(40);
3973
3974 tw32(MAC_EVENT, 0);
3975
3976 tw32_f(MAC_STATUS,
3977 (MAC_STATUS_SYNC_CHANGED |
3978 MAC_STATUS_CFG_CHANGED |
3979 MAC_STATUS_MI_COMPLETION |
3980 MAC_STATUS_LNKSTATE_CHANGED));
3981 udelay(40);
3982
3983 if (force_reset)
3984 tg3_phy_reset(tp);
3985
3986 current_link_up = 0;
3987 current_speed = SPEED_INVALID;
3988 current_duplex = DUPLEX_INVALID;
3989
3990 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3991 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3993 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3994 bmsr |= BMSR_LSTATUS;
3995 else
3996 bmsr &= ~BMSR_LSTATUS;
3997 }
3998
3999 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4000
4001 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4002 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4003 /* do nothing, just check for link up at the end */
4004 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4005 u32 adv, new_adv;
4006
4007 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4008 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4009 ADVERTISE_1000XPAUSE |
4010 ADVERTISE_1000XPSE_ASYM |
4011 ADVERTISE_SLCT);
4012
4013 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4014
4015 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4016 new_adv |= ADVERTISE_1000XHALF;
4017 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4018 new_adv |= ADVERTISE_1000XFULL;
4019
4020 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4021 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4022 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4023 tg3_writephy(tp, MII_BMCR, bmcr);
4024
4025 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4026 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4027 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4028
4029 return err;
4030 }
4031 } else {
4032 u32 new_bmcr;
4033
4034 bmcr &= ~BMCR_SPEED1000;
4035 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4036
4037 if (tp->link_config.duplex == DUPLEX_FULL)
4038 new_bmcr |= BMCR_FULLDPLX;
4039
4040 if (new_bmcr != bmcr) {
4041 /* BMCR_SPEED1000 is a reserved bit that needs
4042 * to be set on write.
4043 */
4044 new_bmcr |= BMCR_SPEED1000;
4045
4046 /* Force a linkdown */
4047 if (netif_carrier_ok(tp->dev)) {
4048 u32 adv;
4049
4050 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4051 adv &= ~(ADVERTISE_1000XFULL |
4052 ADVERTISE_1000XHALF |
4053 ADVERTISE_SLCT);
4054 tg3_writephy(tp, MII_ADVERTISE, adv);
4055 tg3_writephy(tp, MII_BMCR, bmcr |
4056 BMCR_ANRESTART |
4057 BMCR_ANENABLE);
4058 udelay(10);
4059 netif_carrier_off(tp->dev);
4060 }
4061 tg3_writephy(tp, MII_BMCR, new_bmcr);
4062 bmcr = new_bmcr;
4063 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4064 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4065 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4066 ASIC_REV_5714) {
4067 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4068 bmsr |= BMSR_LSTATUS;
4069 else
4070 bmsr &= ~BMSR_LSTATUS;
4071 }
4072 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4073 }
4074 }
4075
4076 if (bmsr & BMSR_LSTATUS) {
4077 current_speed = SPEED_1000;
4078 current_link_up = 1;
4079 if (bmcr & BMCR_FULLDPLX)
4080 current_duplex = DUPLEX_FULL;
4081 else
4082 current_duplex = DUPLEX_HALF;
4083
4084 local_adv = 0;
4085 remote_adv = 0;
4086
4087 if (bmcr & BMCR_ANENABLE) {
4088 u32 common;
4089
4090 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4091 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4092 common = local_adv & remote_adv;
4093 if (common & (ADVERTISE_1000XHALF |
4094 ADVERTISE_1000XFULL)) {
4095 if (common & ADVERTISE_1000XFULL)
4096 current_duplex = DUPLEX_FULL;
4097 else
4098 current_duplex = DUPLEX_HALF;
4099 }
4100 else
4101 current_link_up = 0;
4102 }
4103 }
4104
4105 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4106 tg3_setup_flow_control(tp, local_adv, remote_adv);
4107
4108 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4109 if (tp->link_config.active_duplex == DUPLEX_HALF)
4110 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4111
4112 tw32_f(MAC_MODE, tp->mac_mode);
4113 udelay(40);
4114
4115 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4116
4117 tp->link_config.active_speed = current_speed;
4118 tp->link_config.active_duplex = current_duplex;
4119
4120 if (current_link_up != netif_carrier_ok(tp->dev)) {
4121 if (current_link_up)
4122 netif_carrier_on(tp->dev);
4123 else {
4124 netif_carrier_off(tp->dev);
4125 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4126 }
4127 tg3_link_report(tp);
4128 }
4129 return err;
4130 }
4131
4132 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4133 {
4134 if (tp->serdes_counter) {
4135 /* Give autoneg time to complete. */
4136 tp->serdes_counter--;
4137 return;
4138 }
4139 if (!netif_carrier_ok(tp->dev) &&
4140 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4141 u32 bmcr;
4142
4143 tg3_readphy(tp, MII_BMCR, &bmcr);
4144 if (bmcr & BMCR_ANENABLE) {
4145 u32 phy1, phy2;
4146
4147 /* Select shadow register 0x1f */
4148 tg3_writephy(tp, 0x1c, 0x7c00);
4149 tg3_readphy(tp, 0x1c, &phy1);
4150
4151 /* Select expansion interrupt status register */
4152 tg3_writephy(tp, 0x17, 0x0f01);
4153 tg3_readphy(tp, 0x15, &phy2);
4154 tg3_readphy(tp, 0x15, &phy2);
4155
4156 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4157 /* We have signal detect and are not receiving
4158 * config code words; the link is up by parallel
4159 * detection.
4160 */
4161
4162 bmcr &= ~BMCR_ANENABLE;
4163 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4164 tg3_writephy(tp, MII_BMCR, bmcr);
4165 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4166 }
4167 }
4168 }
4169 else if (netif_carrier_ok(tp->dev) &&
4170 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4171 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4172 u32 phy2;
4173
4174 /* Select expansion interrupt status register */
4175 tg3_writephy(tp, 0x17, 0x0f01);
4176 tg3_readphy(tp, 0x15, &phy2);
4177 if (phy2 & 0x20) {
4178 u32 bmcr;
4179
4180 /* Config code words received, turn on autoneg. */
4181 tg3_readphy(tp, MII_BMCR, &bmcr);
4182 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4183
4184 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4185
4186 }
4187 }
4188 }
4189
4190 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4191 {
4192 int err;
4193
4194 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
4195 err = tg3_setup_fiber_phy(tp, force_reset);
4196 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
4197 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4198 } else {
4199 err = tg3_setup_copper_phy(tp, force_reset);
4200 }
4201
4202 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4203 u32 val, scale;
4204
4205 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4206 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4207 scale = 65;
4208 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4209 scale = 6;
4210 else
4211 scale = 12;
4212
4213 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4214 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4215 tw32(GRC_MISC_CFG, val);
4216 }
4217
4218 if (tp->link_config.active_speed == SPEED_1000 &&
4219 tp->link_config.active_duplex == DUPLEX_HALF)
4220 tw32(MAC_TX_LENGTHS,
4221 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4222 (6 << TX_LENGTHS_IPG_SHIFT) |
4223 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4224 else
4225 tw32(MAC_TX_LENGTHS,
4226 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4227 (6 << TX_LENGTHS_IPG_SHIFT) |
4228 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4229
4230 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4231 if (netif_carrier_ok(tp->dev)) {
4232 tw32(HOSTCC_STAT_COAL_TICKS,
4233 tp->coal.stats_block_coalesce_usecs);
4234 } else {
4235 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4236 }
4237 }
4238
4239 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4240 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4241 if (!netif_carrier_ok(tp->dev))
4242 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4243 tp->pwrmgmt_thresh;
4244 else
4245 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4246 tw32(PCIE_PWR_MGMT_THRESH, val);
4247 }
4248
4249 return err;
4250 }
4251
4252 /* This is called whenever we suspect that the system chipset is re-
4253 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4254 * is bogus tx completions. We try to recover by setting the
4255 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4256 * in the workqueue.
4257 */
4258 static void tg3_tx_recover(struct tg3 *tp)
4259 {
4260 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4261 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4262
4263 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
4264 "mapped I/O cycles to the network device, attempting to "
4265 "recover. Please report the problem to the driver maintainer "
4266 "and include system chipset information.\n", tp->dev->name);
4267
4268 spin_lock(&tp->lock);
4269 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4270 spin_unlock(&tp->lock);
4271 }
4272
4273 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4274 {
4275 smp_mb();
4276 return tnapi->tx_pending -
4277 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4278 }
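
/* Illustrative sketch (not part of the driver): the arithmetic above relies
 * on tx_prod and tx_cons being free-running indices masked to the
 * power-of-two ring size, so it stays correct across wrap-around.
 * Assuming TG3_TX_RING_SIZE == 512 and tx_pending == 511 for the example:
 *
 *	tx_prod = 510, tx_cons = 500  ->  in flight = (510 - 500) & 511 = 10
 *	tx_prod =   5, tx_cons = 500  ->  in flight = (  5 - 500) & 511 = 17
 *
 * and the space reported is tx_pending minus the in-flight count.
 */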
4279
4280 /* Tigon3 never reports partial packet sends. So we do not
4281 * need special logic to handle SKBs that have not had all
4282 * of their frags sent yet, like SunGEM does.
4283 */
4284 static void tg3_tx(struct tg3_napi *tnapi)
4285 {
4286 struct tg3 *tp = tnapi->tp;
4287 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4288 u32 sw_idx = tnapi->tx_cons;
4289
4290 while (sw_idx != hw_idx) {
4291 struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
4292 struct sk_buff *skb = ri->skb;
4293 int i, tx_bug = 0;
4294
4295 if (unlikely(skb == NULL)) {
4296 tg3_tx_recover(tp);
4297 return;
4298 }
4299
4300 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4301
4302 ri->skb = NULL;
4303
4304 sw_idx = NEXT_TX(sw_idx);
4305
4306 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4307 ri = &tnapi->tx_buffers[sw_idx];
4308 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4309 tx_bug = 1;
4310 sw_idx = NEXT_TX(sw_idx);
4311 }
4312
4313 dev_kfree_skb(skb);
4314
4315 if (unlikely(tx_bug)) {
4316 tg3_tx_recover(tp);
4317 return;
4318 }
4319 }
4320
4321 tnapi->tx_cons = sw_idx;
4322
4323 /* Need to make the tx_cons update visible to tg3_start_xmit()
4324 * before checking for netif_queue_stopped(). Without the
4325 * memory barrier, there is a small possibility that tg3_start_xmit()
4326 * will miss it and cause the queue to be stopped forever.
4327 */
4328 smp_mb();
4329
4330 if (unlikely(netif_queue_stopped(tp->dev) &&
4331 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4332 netif_tx_lock(tp->dev);
4333 if (netif_queue_stopped(tp->dev) &&
4334 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4335 netif_wake_queue(tp->dev);
4336 netif_tx_unlock(tp->dev);
4337 }
4338 }
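
/* Illustrative sketch of the barrier pairing described above (hypothetical
 * pseudo-code, not driver code).  The smp_mb() after the tx_cons update in
 * tg3_tx() pairs with the smp_mb() in tg3_tx_avail() on the xmit side:
 *
 *	tg3_tx():                         tg3_start_xmit():
 *	  tnapi->tx_cons = sw_idx;          netif_stop_queue(dev);
 *	  smp_mb();                         if (tg3_tx_avail(tnapi) >
 *	  if (queue stopped &&                  TG3_TX_WAKEUP_THRESH(tnapi))
 *	      avail > wakeup thresh)              netif_wake_queue(dev);
 *	          wake queue;
 *
 * Either the xmit path sees the freshly advanced tx_cons and un-stops the
 * queue itself, or tg3_tx() sees the stopped queue and wakes it, so the
 * queue cannot remain stopped forever.
 */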
4339
4340 /* Returns size of skb allocated or < 0 on error.
4341 *
4342 * We only need to fill in the address because the other members
4343 * of the RX descriptor are invariant, see tg3_init_rings.
4344 *
4345 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4346 * posting buffers we only dirty the first cache line of the RX
4347 * descriptor (containing the address). Whereas for the RX status
4348 * buffers the cpu only reads the last cacheline of the RX descriptor
4349 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4350 */
4351 static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4352 int src_idx, u32 dest_idx_unmasked)
4353 {
4354 struct tg3 *tp = tnapi->tp;
4355 struct tg3_rx_buffer_desc *desc;
4356 struct ring_info *map, *src_map;
4357 struct sk_buff *skb;
4358 dma_addr_t mapping;
4359 int skb_size, dest_idx;
4360 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4361
4362 src_map = NULL;
4363 switch (opaque_key) {
4364 case RXD_OPAQUE_RING_STD:
4365 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4366 desc = &tpr->rx_std[dest_idx];
4367 map = &tpr->rx_std_buffers[dest_idx];
4368 if (src_idx >= 0)
4369 src_map = &tpr->rx_std_buffers[src_idx];
4370 skb_size = tp->rx_pkt_map_sz;
4371 break;
4372
4373 case RXD_OPAQUE_RING_JUMBO:
4374 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4375 desc = &tpr->rx_jmb[dest_idx].std;
4376 map = &tpr->rx_jmb_buffers[dest_idx];
4377 if (src_idx >= 0)
4378 src_map = &tpr->rx_jmb_buffers[src_idx];
4379 skb_size = TG3_RX_JMB_MAP_SZ;
4380 break;
4381
4382 default:
4383 return -EINVAL;
4384 }
4385
4386 /* Do not overwrite any of the map or rp information
4387 * until we are sure we can commit to a new buffer.
4388 *
4389 * Callers depend upon this behavior and assume that
4390 * we leave everything unchanged if we fail.
4391 */
4392 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4393 if (skb == NULL)
4394 return -ENOMEM;
4395
4396 skb_reserve(skb, tp->rx_offset);
4397
4398 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4399 PCI_DMA_FROMDEVICE);
4400
4401 map->skb = skb;
4402 pci_unmap_addr_set(map, mapping, mapping);
4403
4404 if (src_map != NULL)
4405 src_map->skb = NULL;
4406
4407 desc->addr_hi = ((u64)mapping >> 32);
4408 desc->addr_lo = ((u64)mapping & 0xffffffff);
4409
4410 return skb_size;
4411 }
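
/* Worked example (illustrative only): the DMA address is split across the
 * two 32-bit descriptor words exactly as done above.  For a hypothetical
 * mapping of 0x0000000123456789:
 *
 *	desc->addr_hi = (u64)mapping >> 32;         ->  0x00000001
 *	desc->addr_lo = (u64)mapping & 0xffffffff;  ->  0x23456789
 *
 * Only this address portion of the descriptor is dirtied when posting a
 * buffer; the length, flags and opaque words written in
 * tg3_rx_prodring_alloc() never change afterwards.
 */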
4412
4413 /* We only need to move over in the address because the other
4414 * members of the RX descriptor are invariant. See notes above
4415 * tg3_alloc_rx_skb for full details.
4416 */
4417 static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4418 int src_idx, u32 dest_idx_unmasked)
4419 {
4420 struct tg3 *tp = tnapi->tp;
4421 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4422 struct ring_info *src_map, *dest_map;
4423 int dest_idx;
4424 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4425
4426 switch (opaque_key) {
4427 case RXD_OPAQUE_RING_STD:
4428 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4429 dest_desc = &tpr->rx_std[dest_idx];
4430 dest_map = &tpr->rx_std_buffers[dest_idx];
4431 src_desc = &tpr->rx_std[src_idx];
4432 src_map = &tpr->rx_std_buffers[src_idx];
4433 break;
4434
4435 case RXD_OPAQUE_RING_JUMBO:
4436 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4437 dest_desc = &tpr->rx_jmb[dest_idx].std;
4438 dest_map = &tpr->rx_jmb_buffers[dest_idx];
4439 src_desc = &tpr->rx_jmb[src_idx].std;
4440 src_map = &tpr->rx_jmb_buffers[src_idx];
4441 break;
4442
4443 default:
4444 return;
4445 }
4446
4447 dest_map->skb = src_map->skb;
4448 pci_unmap_addr_set(dest_map, mapping,
4449 pci_unmap_addr(src_map, mapping));
4450 dest_desc->addr_hi = src_desc->addr_hi;
4451 dest_desc->addr_lo = src_desc->addr_lo;
4452
4453 src_map->skb = NULL;
4454 }
4455
4456 /* The RX ring scheme is composed of multiple rings which post fresh
4457 * buffers to the chip, and one special ring the chip uses to report
4458 * status back to the host.
4459 *
4460 * The special ring reports the status of received packets to the
4461 * host. The chip does not write into the original descriptor the
4462 * RX buffer was obtained from. The chip simply takes the original
4463 * descriptor as provided by the host, updates the status and length
4464 * field, then writes this into the next status ring entry.
4465 *
4466 * Each ring the host uses to post buffers to the chip is described
4467 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4468 * it is first placed into the on-chip ram. When the packet's length
4469 * is known, it walks down the TG3_BDINFO entries to select the ring.
4470 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4471 * whose MAXLEN covers the new packet's length is chosen.
4472 *
4473 * The "separate ring for rx status" scheme may sound queer, but it makes
4474 * sense from a cache coherency perspective. If only the host writes
4475 * to the buffer post rings, and only the chip writes to the rx status
4476 * rings, then cache lines never move beyond shared-modified state.
4477 * If both the host and chip were to write into the same ring, cache line
4478 * eviction could occur since both entities want it in an exclusive state.
4479 */
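/* A rough sketch of that round trip (illustrative only, fields simplified):
 *
 *   host:  rx_std[i].addr   = DMA address of a fresh skb
 *          rx_std[i].opaque = RXD_OPAQUE_RING_STD |
 *                             (i << RXD_OPAQUE_INDEX_SHIFT)
 *          advance the standard ring producer mailbox
 *
 *   chip:  DMAs the received frame into that buffer, then writes an entry
 *          into the return (status) ring carrying the same opaque cookie
 *          plus the length, error, VLAN and checksum bits
 *
 *   host:  tg3_rx() reads the return ring entry, uses the opaque cookie to
 *          locate the original ring_info/skb, and reposts or recycles the
 *          standard ring slot
 */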
4480 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4481 {
4482 struct tg3 *tp = tnapi->tp;
4483 u32 work_mask, rx_std_posted = 0;
4484 u32 sw_idx = tnapi->rx_rcb_ptr;
4485 u16 hw_idx;
4486 int received;
4487 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4488
4489 hw_idx = tnapi->hw_status->idx[0].rx_producer;
4490 /*
4491 * We need to order the read of hw_idx and the read of
4492 * the opaque cookie.
4493 */
4494 rmb();
4495 work_mask = 0;
4496 received = 0;
4497 while (sw_idx != hw_idx && budget > 0) {
4498 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4499 unsigned int len;
4500 struct sk_buff *skb;
4501 dma_addr_t dma_addr;
4502 u32 opaque_key, desc_idx, *post_ptr;
4503
4504 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4505 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4506 if (opaque_key == RXD_OPAQUE_RING_STD) {
4507 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
4508 dma_addr = pci_unmap_addr(ri, mapping);
4509 skb = ri->skb;
4510 post_ptr = &tpr->rx_std_ptr;
4511 rx_std_posted++;
4512 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4513 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
4514 dma_addr = pci_unmap_addr(ri, mapping);
4515 skb = ri->skb;
4516 post_ptr = &tpr->rx_jmb_ptr;
4517 } else
4518 goto next_pkt_nopost;
4519
4520 work_mask |= opaque_key;
4521
4522 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4523 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4524 drop_it:
4525 tg3_recycle_rx(tnapi, opaque_key,
4526 desc_idx, *post_ptr);
4527 drop_it_no_recycle:
4528 /* Other statistics are kept track of by the card. */
4529 tp->net_stats.rx_dropped++;
4530 goto next_pkt;
4531 }
4532
4533 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4534 ETH_FCS_LEN;
4535
4536 if (len > RX_COPY_THRESHOLD
4537 && tp->rx_offset == NET_IP_ALIGN
4538 /* rx_offset will likely not equal NET_IP_ALIGN
4539 * if this is a 5701 card running in PCI-X mode
4540 * [see tg3_get_invariants()]
4541 */
4542 ) {
4543 int skb_size;
4544
4545 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
4546 desc_idx, *post_ptr);
4547 if (skb_size < 0)
4548 goto drop_it;
4549
4550 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4551 PCI_DMA_FROMDEVICE);
4552
4553 skb_put(skb, len);
4554 } else {
4555 struct sk_buff *copy_skb;
4556
4557 tg3_recycle_rx(tnapi, opaque_key,
4558 desc_idx, *post_ptr);
4559
4560 copy_skb = netdev_alloc_skb(tp->dev,
4561 len + TG3_RAW_IP_ALIGN);
4562 if (copy_skb == NULL)
4563 goto drop_it_no_recycle;
4564
4565 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4566 skb_put(copy_skb, len);
4567 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4568 skb_copy_from_linear_data(skb, copy_skb->data, len);
4569 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4570
4571 /* We'll reuse the original ring buffer. */
4572 skb = copy_skb;
4573 }
4574
4575 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4576 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4577 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4578 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4579 skb->ip_summed = CHECKSUM_UNNECESSARY;
4580 else
4581 skb->ip_summed = CHECKSUM_NONE;
4582
4583 skb->protocol = eth_type_trans(skb, tp->dev);
4584
4585 if (len > (tp->dev->mtu + ETH_HLEN) &&
4586 skb->protocol != htons(ETH_P_8021Q)) {
4587 dev_kfree_skb(skb);
4588 goto next_pkt;
4589 }
4590
4591 #if TG3_VLAN_TAG_USED
4592 if (tp->vlgrp != NULL &&
4593 desc->type_flags & RXD_FLAG_VLAN) {
4594 vlan_gro_receive(&tnapi->napi, tp->vlgrp,
4595 desc->err_vlan & RXD_VLAN_MASK, skb);
4596 } else
4597 #endif
4598 napi_gro_receive(&tnapi->napi, skb);
4599
4600 received++;
4601 budget--;
4602
4603 next_pkt:
4604 (*post_ptr)++;
4605
4606 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4607 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4608
4609 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4610 TG3_64BIT_REG_LOW, idx);
4611 work_mask &= ~RXD_OPAQUE_RING_STD;
4612 rx_std_posted = 0;
4613 }
4614 next_pkt_nopost:
4615 sw_idx++;
4616 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4617
4618 /* Refresh hw_idx to see if there is new work */
4619 if (sw_idx == hw_idx) {
4620 hw_idx = tnapi->hw_status->idx[0].rx_producer;
4621 rmb();
4622 }
4623 }
4624
4625 /* ACK the status ring. */
4626 tnapi->rx_rcb_ptr = sw_idx;
4627 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4628
4629 /* Refill RX ring(s). */
4630 if (work_mask & RXD_OPAQUE_RING_STD) {
4631 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
4632 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4633 sw_idx);
4634 }
4635 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4636 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
4637 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4638 sw_idx);
4639 }
4640 mmiowb();
4641
4642 return received;
4643 }
4644
4645 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4646 {
4647 struct tg3 *tp = tnapi->tp;
4648 struct tg3_hw_status *sblk = tnapi->hw_status;
4649
4650 /* handle link change and other phy events */
4651 if (!(tp->tg3_flags &
4652 (TG3_FLAG_USE_LINKCHG_REG |
4653 TG3_FLAG_POLL_SERDES))) {
4654 if (sblk->status & SD_STATUS_LINK_CHG) {
4655 sblk->status = SD_STATUS_UPDATED |
4656 (sblk->status & ~SD_STATUS_LINK_CHG);
4657 spin_lock(&tp->lock);
4658 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4659 tw32_f(MAC_STATUS,
4660 (MAC_STATUS_SYNC_CHANGED |
4661 MAC_STATUS_CFG_CHANGED |
4662 MAC_STATUS_MI_COMPLETION |
4663 MAC_STATUS_LNKSTATE_CHANGED));
4664 udelay(40);
4665 } else
4666 tg3_setup_phy(tp, 0);
4667 spin_unlock(&tp->lock);
4668 }
4669 }
4670
4671 /* run TX completion thread */
4672 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4673 tg3_tx(tnapi);
4674 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4675 return work_done;
4676 }
4677
4678 /* run RX thread, within the bounds set by NAPI.
4679 * All RX "locking" is done by ensuring outside
4680 * code synchronizes with tg3->napi.poll()
4681 */
4682 if (sblk->idx[0].rx_producer != tnapi->rx_rcb_ptr)
4683 work_done += tg3_rx(tnapi, budget - work_done);
4684
4685 return work_done;
4686 }
4687
4688 static int tg3_poll(struct napi_struct *napi, int budget)
4689 {
4690 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4691 struct tg3 *tp = tnapi->tp;
4692 int work_done = 0;
4693 struct tg3_hw_status *sblk = tnapi->hw_status;
4694
4695 while (1) {
4696 work_done = tg3_poll_work(tnapi, work_done, budget);
4697
4698 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4699 goto tx_recovery;
4700
4701 if (unlikely(work_done >= budget))
4702 break;
4703
4704 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4705 /* tp->last_tag is used in tg3_int_reenable() below
4706 * to tell the hw how much work has been processed,
4707 * so we must read it before checking for more work.
4708 */
4709 tnapi->last_tag = sblk->status_tag;
4710 tnapi->last_irq_tag = tnapi->last_tag;
4711 rmb();
4712 } else
4713 sblk->status &= ~SD_STATUS_UPDATED;
4714
4715 if (likely(!tg3_has_work(tnapi))) {
4716 napi_complete(napi);
4717 tg3_int_reenable(tnapi);
4718 break;
4719 }
4720 }
4721
4722 return work_done;
4723
4724 tx_recovery:
4725 /* work_done is guaranteed to be less than budget. */
4726 napi_complete(napi);
4727 schedule_work(&tp->reset_task);
4728 return work_done;
4729 }
4730
4731 static void tg3_irq_quiesce(struct tg3 *tp)
4732 {
4733 int i;
4734
4735 BUG_ON(tp->irq_sync);
4736
4737 tp->irq_sync = 1;
4738 smp_mb();
4739
4740 for (i = 0; i < tp->irq_cnt; i++)
4741 synchronize_irq(tp->napi[i].irq_vec);
4742 }
4743
4744 static inline int tg3_irq_sync(struct tg3 *tp)
4745 {
4746 return tp->irq_sync;
4747 }
4748
4749 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4750 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4751 * with as well. Most of the time, this is not necessary except when
4752 * shutting down the device.
4753 */
4754 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4755 {
4756 spin_lock_bh(&tp->lock);
4757 if (irq_sync)
4758 tg3_irq_quiesce(tp);
4759 }
4760
4761 static inline void tg3_full_unlock(struct tg3 *tp)
4762 {
4763 spin_unlock_bh(&tp->lock);
4764 }
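
/* Typical usage (sketch only): paths that reconfigure the chip quiesce the
 * interrupt handlers first, e.g.
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	... reprogram ...
 *	err = tg3_restart_hw(tp, 0);
 *	tg3_full_unlock(tp);
 *
 * while fast paths that only need mutual exclusion with the timer and
 * reset paths pass irq_sync == 0.
 */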
4765
4766 /* One-shot MSI handler - Chip automatically disables interrupt
4767 * after sending MSI so driver doesn't have to do it.
4768 */
4769 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4770 {
4771 struct tg3_napi *tnapi = dev_id;
4772 struct tg3 *tp = tnapi->tp;
4773
4774 prefetch(tnapi->hw_status);
4775 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4776
4777 if (likely(!tg3_irq_sync(tp)))
4778 napi_schedule(&tnapi->napi);
4779
4780 return IRQ_HANDLED;
4781 }
4782
4783 /* MSI ISR - No need to check for interrupt sharing and no need to
4784 * flush status block and interrupt mailbox. PCI ordering rules
4785 * guarantee that MSI will arrive after the status block.
4786 */
4787 static irqreturn_t tg3_msi(int irq, void *dev_id)
4788 {
4789 struct tg3_napi *tnapi = dev_id;
4790 struct tg3 *tp = tnapi->tp;
4791
4792 prefetch(tnapi->hw_status);
4793 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4794 /*
4795 * Writing any value to intr-mbox-0 clears PCI INTA# and
4796 * chip-internal interrupt pending events.
4797 * Writing non-zero to intr-mbox-0 additionally tells the
4798 * NIC to stop sending us irqs, engaging "in-intr-handler"
4799 * event coalescing.
4800 */
4801 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4802 if (likely(!tg3_irq_sync(tp)))
4803 napi_schedule(&tnapi->napi);
4804
4805 return IRQ_RETVAL(1);
4806 }
4807
4808 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4809 {
4810 struct tg3_napi *tnapi = dev_id;
4811 struct tg3 *tp = tnapi->tp;
4812 struct tg3_hw_status *sblk = tnapi->hw_status;
4813 unsigned int handled = 1;
4814
4815 /* In INTx mode, it is possible for the interrupt to arrive at
4816 * the CPU before the status block that was posted prior to the interrupt.
4817 * Reading the PCI State register will confirm whether the
4818 * interrupt is ours and will flush the status block.
4819 */
4820 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4821 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4822 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4823 handled = 0;
4824 goto out;
4825 }
4826 }
4827
4828 /*
4829 * Writing any value to intr-mbox-0 clears PCI INTA# and
4830 * chip-internal interrupt pending events.
4831 * Writing non-zero to intr-mbox-0 additionally tells the
4832 * NIC to stop sending us irqs, engaging "in-intr-handler"
4833 * event coalescing.
4834 *
4835 * Flush the mailbox to de-assert the IRQ immediately to prevent
4836 * spurious interrupts. The flush impacts performance but
4837 * excessive spurious interrupts can be worse in some cases.
4838 */
4839 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4840 if (tg3_irq_sync(tp))
4841 goto out;
4842 sblk->status &= ~SD_STATUS_UPDATED;
4843 if (likely(tg3_has_work(tnapi))) {
4844 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4845 napi_schedule(&tnapi->napi);
4846 } else {
4847 /* No work, shared interrupt perhaps? re-enable
4848 * interrupts, and flush that PCI write
4849 */
4850 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4851 0x00000000);
4852 }
4853 out:
4854 return IRQ_RETVAL(handled);
4855 }
4856
4857 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4858 {
4859 struct tg3_napi *tnapi = dev_id;
4860 struct tg3 *tp = tnapi->tp;
4861 struct tg3_hw_status *sblk = tnapi->hw_status;
4862 unsigned int handled = 1;
4863
4864 /* In INTx mode, it is possible for the interrupt to arrive at
4865 * the CPU before the status block that was posted prior to the interrupt.
4866 * Reading the PCI State register will confirm whether the
4867 * interrupt is ours and will flush the status block.
4868 */
4869 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
4870 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4871 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4872 handled = 0;
4873 goto out;
4874 }
4875 }
4876
4877 /*
4878 * writing any value to intr-mbox-0 clears PCI INTA# and
4879 * chip-internal interrupt pending events.
4880 * writing non-zero to intr-mbox-0 additionally tells the
4881 * NIC to stop sending us irqs, engaging "in-intr-handler"
4882 * event coalescing.
4883 *
4884 * Flush the mailbox to de-assert the IRQ immediately to prevent
4885 * spurious interrupts. The flush impacts performance but
4886 * excessive spurious interrupts can be worse in some cases.
4887 */
4888 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4889
4890 /*
4891 * In a shared interrupt configuration, sometimes other devices'
4892 * interrupts will scream. We record the current status tag here
4893 * so that the above check can report that the screaming interrupts
4894 * are unhandled. Eventually they will be silenced.
4895 */
4896 tnapi->last_irq_tag = sblk->status_tag;
4897
4898 if (tg3_irq_sync(tp))
4899 goto out;
4900
4901 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
4902
4903 napi_schedule(&tnapi->napi);
4904
4905 out:
4906 return IRQ_RETVAL(handled);
4907 }
4908
4909 /* ISR for interrupt test */
4910 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4911 {
4912 struct tg3_napi *tnapi = dev_id;
4913 struct tg3 *tp = tnapi->tp;
4914 struct tg3_hw_status *sblk = tnapi->hw_status;
4915
4916 if ((sblk->status & SD_STATUS_UPDATED) ||
4917 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4918 tg3_disable_ints(tp);
4919 return IRQ_RETVAL(1);
4920 }
4921 return IRQ_RETVAL(0);
4922 }
4923
4924 static int tg3_init_hw(struct tg3 *, int);
4925 static int tg3_halt(struct tg3 *, int, int);
4926
4927 /* Restart hardware after configuration changes, self-test, etc.
4928 * Invoked with tp->lock held.
4929 */
4930 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4931 __releases(tp->lock)
4932 __acquires(tp->lock)
4933 {
4934 int err;
4935
4936 err = tg3_init_hw(tp, reset_phy);
4937 if (err) {
4938 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4939 "aborting.\n", tp->dev->name);
4940 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4941 tg3_full_unlock(tp);
4942 del_timer_sync(&tp->timer);
4943 tp->irq_sync = 0;
4944 napi_enable(&tp->napi[0].napi);
4945 dev_close(tp->dev);
4946 tg3_full_lock(tp, 0);
4947 }
4948 return err;
4949 }
4950
4951 #ifdef CONFIG_NET_POLL_CONTROLLER
4952 static void tg3_poll_controller(struct net_device *dev)
4953 {
4954 int i;
4955 struct tg3 *tp = netdev_priv(dev);
4956
4957 for (i = 0; i < tp->irq_cnt; i++)
4958 tg3_interrupt(tp->napi[i].irq_vec, dev);
4959 }
4960 #endif
4961
4962 static void tg3_reset_task(struct work_struct *work)
4963 {
4964 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4965 int err;
4966 unsigned int restart_timer;
4967
4968 tg3_full_lock(tp, 0);
4969
4970 if (!netif_running(tp->dev)) {
4971 tg3_full_unlock(tp);
4972 return;
4973 }
4974
4975 tg3_full_unlock(tp);
4976
4977 tg3_phy_stop(tp);
4978
4979 tg3_netif_stop(tp);
4980
4981 tg3_full_lock(tp, 1);
4982
4983 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4984 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4985
4986 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4987 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4988 tp->write32_rx_mbox = tg3_write_flush_reg32;
4989 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4990 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4991 }
4992
4993 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4994 err = tg3_init_hw(tp, 1);
4995 if (err)
4996 goto out;
4997
4998 tg3_netif_start(tp);
4999
5000 if (restart_timer)
5001 mod_timer(&tp->timer, jiffies + 1);
5002
5003 out:
5004 tg3_full_unlock(tp);
5005
5006 if (!err)
5007 tg3_phy_start(tp);
5008 }
5009
5010 static void tg3_dump_short_state(struct tg3 *tp)
5011 {
5012 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5013 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5014 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5015 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5016 }
5017
5018 static void tg3_tx_timeout(struct net_device *dev)
5019 {
5020 struct tg3 *tp = netdev_priv(dev);
5021
5022 if (netif_msg_tx_err(tp)) {
5023 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
5024 dev->name);
5025 tg3_dump_short_state(tp);
5026 }
5027
5028 schedule_work(&tp->reset_task);
5029 }
5030
5031 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5032 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5033 {
5034 u32 base = (u32) mapping & 0xffffffff;
5035
5036 return ((base > 0xffffdcc0) &&
5037 (base + len + 8 < base));
5038 }
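
/* Worked example (illustrative): the test flags buffers whose DMA mapping
 * would wrap a 32-bit boundary.  With base = 0xffffff00 and len = 0x200,
 * base + len + 8 wraps to 0x108, which is less than base, so the buffer
 * straddles a 4GB line and needs the workaround.  With base = 0x7fffff00
 * and the same length the sum is 0x80000108 > base and the test passes.
 * The base > 0xffffdcc0 check is just a cheap pre-filter that lets the
 * common case bail out before the wrap arithmetic.
 */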
5039
5040 /* Test for DMA addresses > 40-bit */
5041 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5042 int len)
5043 {
5044 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5045 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5046 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5047 return 0;
5048 #else
5049 return 0;
5050 #endif
5051 }
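
/* Worked example (illustrative): with the 40-bit DMA bug flag set,
 * DMA_BIT_MASK(40) is 0xffffffffff, so a mapping of 0xfffffffe00 with
 * len = 0x400 sums to 0x10000000200 and is flagged, while the same length
 * at 0xfffffff000 stays within 40 bits and passes.
 */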
5052
5053 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5054
5055 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5056 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5057 u32 last_plus_one, u32 *start,
5058 u32 base_flags, u32 mss)
5059 {
5060 struct tg3_napi *tnapi = &tp->napi[0];
5061 struct sk_buff *new_skb;
5062 dma_addr_t new_addr = 0;
5063 u32 entry = *start;
5064 int i, ret = 0;
5065
5066 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5067 new_skb = skb_copy(skb, GFP_ATOMIC);
5068 else {
5069 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5070
5071 new_skb = skb_copy_expand(skb,
5072 skb_headroom(skb) + more_headroom,
5073 skb_tailroom(skb), GFP_ATOMIC);
5074 }
5075
5076 if (!new_skb) {
5077 ret = -1;
5078 } else {
5079 /* New SKB is guaranteed to be linear. */
5080 entry = *start;
5081 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
5082 new_addr = skb_shinfo(new_skb)->dma_head;
5083
5084 /* Make sure new skb does not cross any 4G boundaries.
5085 * Drop the packet if it does.
5086 */
5087 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
5088 if (!ret)
5089 skb_dma_unmap(&tp->pdev->dev, new_skb,
5090 DMA_TO_DEVICE);
5091 ret = -1;
5092 dev_kfree_skb(new_skb);
5093 new_skb = NULL;
5094 } else {
5095 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5096 base_flags, 1 | (mss << 1));
5097 *start = NEXT_TX(entry);
5098 }
5099 }
5100
5101 /* Now clean up the sw ring entries. */
5102 i = 0;
5103 while (entry != last_plus_one) {
5104 if (i == 0)
5105 tnapi->tx_buffers[entry].skb = new_skb;
5106 else
5107 tnapi->tx_buffers[entry].skb = NULL;
5108 entry = NEXT_TX(entry);
5109 i++;
5110 }
5111
5112 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5113 dev_kfree_skb(skb);
5114
5115 return ret;
5116 }
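
/* Sketch of the overall workaround flow (illustrative only): the xmit path
 * maps the skb first, and only if a mapping trips tg3_4g_overflow_test(),
 * tg3_40bit_overflow_test() or the 5701 DMA bug flag does it fall back
 * here.  The fallback copies the whole packet into one freshly allocated
 * linear skb, re-maps it, re-checks the 4GB test on the new mapping,
 * rewrites a single descriptor, and releases the original skb together
 * with its sw ring entries.  If even the copy lands badly, the packet is
 * silently dropped.
 */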
5117
5118 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5119 dma_addr_t mapping, int len, u32 flags,
5120 u32 mss_and_is_end)
5121 {
5122 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5123 int is_end = (mss_and_is_end & 0x1);
5124 u32 mss = (mss_and_is_end >> 1);
5125 u32 vlan_tag = 0;
5126
5127 if (is_end)
5128 flags |= TXD_FLAG_END;
5129 if (flags & TXD_FLAG_VLAN) {
5130 vlan_tag = flags >> 16;
5131 flags &= 0xffff;
5132 }
5133 vlan_tag |= (mss << TXD_MSS_SHIFT);
5134
5135 txd->addr_hi = ((u64) mapping >> 32);
5136 txd->addr_lo = ((u64) mapping & 0xffffffff);
5137 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5138 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5139 }
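
/* Worked example (illustrative): callers pack the "last fragment" flag and
 * the MSS into one argument as (is_last) | (mss << 1).  Queueing the final
 * fragment of a TSO packet with mss = 1460:
 *
 *	mss_and_is_end = 1 | (1460 << 1) = 0xb69
 *	is_end         = 0xb69 & 0x1    = 1       ->  TXD_FLAG_END set
 *	mss            = 0xb69 >> 1     = 1460    ->  shifted to TXD_MSS_SHIFT
 *
 * in the vlan_tag word, alongside any 802.1Q tag carried in the upper
 * half of 'flags'.
 */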
5140
5141 /* hard_start_xmit for devices that don't have any bugs and
5142 * support TG3_FLG2_HW_TSO_2 only.
5143 */
5144 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5145 struct net_device *dev)
5146 {
5147 struct tg3 *tp = netdev_priv(dev);
5148 u32 len, entry, base_flags, mss;
5149 struct skb_shared_info *sp;
5150 dma_addr_t mapping;
5151 struct tg3_napi *tnapi = &tp->napi[0];
5152
5153 len = skb_headlen(skb);
5154
5155 /* We are running in BH disabled context with netif_tx_lock
5156 * and TX reclaim runs via tp->napi.poll inside of a software
5157 * interrupt. Furthermore, IRQ processing runs lockless so we have
5158 * no IRQ context deadlocks to worry about either. Rejoice!
5159 */
5160 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5161 if (!netif_queue_stopped(dev)) {
5162 netif_stop_queue(dev);
5163
5164 /* This is a hard error, log it. */
5165 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5166 "queue awake!\n", dev->name);
5167 }
5168 return NETDEV_TX_BUSY;
5169 }
5170
5171 entry = tnapi->tx_prod;
5172 base_flags = 0;
5173 mss = 0;
5174 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5175 int tcp_opt_len, ip_tcp_len;
5176
5177 if (skb_header_cloned(skb) &&
5178 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5179 dev_kfree_skb(skb);
5180 goto out_unlock;
5181 }
5182
5183 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5184 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
5185 else {
5186 struct iphdr *iph = ip_hdr(skb);
5187
5188 tcp_opt_len = tcp_optlen(skb);
5189 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5190
5191 iph->check = 0;
5192 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5193 mss |= (ip_tcp_len + tcp_opt_len) << 9;
5194 }
5195
5196 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5197 TXD_FLAG_CPU_POST_DMA);
5198
5199 tcp_hdr(skb)->check = 0;
5200
5201 }
5202 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5203 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5204 #if TG3_VLAN_TAG_USED
5205 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5206 base_flags |= (TXD_FLAG_VLAN |
5207 (vlan_tx_tag_get(skb) << 16));
5208 #endif
5209
5210 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5211 dev_kfree_skb(skb);
5212 goto out_unlock;
5213 }
5214
5215 sp = skb_shinfo(skb);
5216
5217 mapping = sp->dma_head;
5218
5219 tnapi->tx_buffers[entry].skb = skb;
5220
5221 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5222 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5223
5224 entry = NEXT_TX(entry);
5225
5226 /* Now loop through additional data fragments, and queue them. */
5227 if (skb_shinfo(skb)->nr_frags > 0) {
5228 unsigned int i, last;
5229
5230 last = skb_shinfo(skb)->nr_frags - 1;
5231 for (i = 0; i <= last; i++) {
5232 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5233
5234 len = frag->size;
5235 mapping = sp->dma_maps[i];
5236 tnapi->tx_buffers[entry].skb = NULL;
5237
5238 tg3_set_txd(tnapi, entry, mapping, len,
5239 base_flags, (i == last) | (mss << 1));
5240
5241 entry = NEXT_TX(entry);
5242 }
5243 }
5244
5245 /* Packets are ready, update Tx producer idx local and on card. */
5246 tw32_tx_mbox(tnapi->prodmbox, entry);
5247
5248 tnapi->tx_prod = entry;
5249 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5250 netif_stop_queue(dev);
5251 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5252 netif_wake_queue(tp->dev);
5253 }
5254
5255 out_unlock:
5256 mmiowb();
5257
5258 return NETDEV_TX_OK;
5259 }
5260
5261 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5262 struct net_device *);
5263
5264 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5265 * TSO header is greater than 80 bytes.
5266 */
5267 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5268 {
5269 struct sk_buff *segs, *nskb;
5270 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5271
5272 /* Estimate the number of fragments in the worst case */
5273 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5274 netif_stop_queue(tp->dev);
5275 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5276 return NETDEV_TX_BUSY;
5277
5278 netif_wake_queue(tp->dev);
5279 }
5280
5281 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5282 if (IS_ERR(segs))
5283 goto tg3_tso_bug_end;
5284
5285 do {
5286 nskb = segs;
5287 segs = segs->next;
5288 nskb->next = NULL;
5289 tg3_start_xmit_dma_bug(nskb, tp->dev);
5290 } while (segs);
5291
5292 tg3_tso_bug_end:
5293 dev_kfree_skb(skb);
5294
5295 return NETDEV_TX_OK;
5296 }
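
/* Sketch of the fallback (illustrative only): skb_gso_segment() splits the
 * oversized-header TSO skb into a chain of already-segmented, MTU-sized
 * skbs, each of which is then sent through the normal
 * tg3_start_xmit_dma_bug() path with TSO disabled.  The gso_segs * 3
 * figure above is only a rough worst-case estimate of how many descriptors
 * the resulting chain might need before the queue is stopped pre-emptively.
 */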
5297
5298 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5299 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5300 */
5301 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5302 struct net_device *dev)
5303 {
5304 struct tg3 *tp = netdev_priv(dev);
5305 u32 len, entry, base_flags, mss;
5306 struct skb_shared_info *sp;
5307 int would_hit_hwbug;
5308 dma_addr_t mapping;
5309 struct tg3_napi *tnapi = &tp->napi[0];
5310
5311 len = skb_headlen(skb);
5312
5313 /* We are running in BH disabled context with netif_tx_lock
5314 * and TX reclaim runs via tp->napi.poll inside of a software
5315 * interrupt. Furthermore, IRQ processing runs lockless so we have
5316 * no IRQ context deadlocks to worry about either. Rejoice!
5317 */
5318 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5319 if (!netif_queue_stopped(dev)) {
5320 netif_stop_queue(dev);
5321
5322 /* This is a hard error, log it. */
5323 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5324 "queue awake!\n", dev->name);
5325 }
5326 return NETDEV_TX_BUSY;
5327 }
5328
5329 entry = tnapi->tx_prod;
5330 base_flags = 0;
5331 if (skb->ip_summed == CHECKSUM_PARTIAL)
5332 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5333 mss = 0;
5334 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5335 struct iphdr *iph;
5336 int tcp_opt_len, ip_tcp_len, hdr_len;
5337
5338 if (skb_header_cloned(skb) &&
5339 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5340 dev_kfree_skb(skb);
5341 goto out_unlock;
5342 }
5343
5344 tcp_opt_len = tcp_optlen(skb);
5345 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5346
5347 hdr_len = ip_tcp_len + tcp_opt_len;
5348 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5349 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5350 return (tg3_tso_bug(tp, skb));
5351
5352 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5353 TXD_FLAG_CPU_POST_DMA);
5354
5355 iph = ip_hdr(skb);
5356 iph->check = 0;
5357 iph->tot_len = htons(mss + hdr_len);
5358 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5359 tcp_hdr(skb)->check = 0;
5360 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5361 } else
5362 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5363 iph->daddr, 0,
5364 IPPROTO_TCP,
5365 0);
5366
5367 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5368 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5369 if (tcp_opt_len || iph->ihl > 5) {
5370 int tsflags;
5371
5372 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5373 mss |= (tsflags << 11);
5374 }
5375 } else {
5376 if (tcp_opt_len || iph->ihl > 5) {
5377 int tsflags;
5378
5379 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5380 base_flags |= tsflags << 12;
5381 }
5382 }
5383 }
5384 #if TG3_VLAN_TAG_USED
5385 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5386 base_flags |= (TXD_FLAG_VLAN |
5387 (vlan_tx_tag_get(skb) << 16));
5388 #endif
5389
5390 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5391 dev_kfree_skb(skb);
5392 goto out_unlock;
5393 }
5394
5395 sp = skb_shinfo(skb);
5396
5397 mapping = sp->dma_head;
5398
5399 tnapi->tx_buffers[entry].skb = skb;
5400
5401 would_hit_hwbug = 0;
5402
5403 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5404 would_hit_hwbug = 1;
5405 else if (tg3_4g_overflow_test(mapping, len))
5406 would_hit_hwbug = 1;
5407
5408 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5409 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5410
5411 entry = NEXT_TX(entry);
5412
5413 /* Now loop through additional data fragments, and queue them. */
5414 if (skb_shinfo(skb)->nr_frags > 0) {
5415 unsigned int i, last;
5416
5417 last = skb_shinfo(skb)->nr_frags - 1;
5418 for (i = 0; i <= last; i++) {
5419 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5420
5421 len = frag->size;
5422 mapping = sp->dma_maps[i];
5423
5424 tnapi->tx_buffers[entry].skb = NULL;
5425
5426 if (tg3_4g_overflow_test(mapping, len))
5427 would_hit_hwbug = 1;
5428
5429 if (tg3_40bit_overflow_test(tp, mapping, len))
5430 would_hit_hwbug = 1;
5431
5432 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5433 tg3_set_txd(tnapi, entry, mapping, len,
5434 base_flags, (i == last)|(mss << 1));
5435 else
5436 tg3_set_txd(tnapi, entry, mapping, len,
5437 base_flags, (i == last));
5438
5439 entry = NEXT_TX(entry);
5440 }
5441 }
5442
5443 if (would_hit_hwbug) {
5444 u32 last_plus_one = entry;
5445 u32 start;
5446
5447 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5448 start &= (TG3_TX_RING_SIZE - 1);
5449
5450 /* If the workaround fails due to memory/mapping
5451 * failure, silently drop this packet.
5452 */
5453 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5454 &start, base_flags, mss))
5455 goto out_unlock;
5456
5457 entry = start;
5458 }
5459
5460 /* Packets are ready, update Tx producer idx local and on card. */
5461 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry);
5462
5463 tnapi->tx_prod = entry;
5464 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5465 netif_stop_queue(dev);
5466 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5467 netif_wake_queue(tp->dev);
5468 }
5469
5470 out_unlock:
5471 mmiowb();
5472
5473 return NETDEV_TX_OK;
5474 }
5475
5476 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5477 int new_mtu)
5478 {
5479 dev->mtu = new_mtu;
5480
5481 if (new_mtu > ETH_DATA_LEN) {
5482 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5483 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5484 ethtool_op_set_tso(dev, 0);
5485 }
5486 else
5487 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5488 } else {
5489 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5490 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5491 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5492 }
5493 }
5494
5495 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5496 {
5497 struct tg3 *tp = netdev_priv(dev);
5498 int err;
5499
5500 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5501 return -EINVAL;
5502
5503 if (!netif_running(dev)) {
5504 /* We'll just catch it later when the
5505 * device is up'd.
5506 */
5507 tg3_set_mtu(dev, tp, new_mtu);
5508 return 0;
5509 }
5510
5511 tg3_phy_stop(tp);
5512
5513 tg3_netif_stop(tp);
5514
5515 tg3_full_lock(tp, 1);
5516
5517 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5518
5519 tg3_set_mtu(dev, tp, new_mtu);
5520
5521 err = tg3_restart_hw(tp, 0);
5522
5523 if (!err)
5524 tg3_netif_start(tp);
5525
5526 tg3_full_unlock(tp);
5527
5528 if (!err)
5529 tg3_phy_start(tp);
5530
5531 return err;
5532 }
5533
5534 static void tg3_rx_prodring_free(struct tg3 *tp,
5535 struct tg3_rx_prodring_set *tpr)
5536 {
5537 int i;
5538 struct ring_info *rxp;
5539
5540 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5541 rxp = &tpr->rx_std_buffers[i];
5542
5543 if (rxp->skb == NULL)
5544 continue;
5545
5546 pci_unmap_single(tp->pdev,
5547 pci_unmap_addr(rxp, mapping),
5548 tp->rx_pkt_map_sz,
5549 PCI_DMA_FROMDEVICE);
5550 dev_kfree_skb_any(rxp->skb);
5551 rxp->skb = NULL;
5552 }
5553
5554 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5555 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5556 rxp = &tpr->rx_jmb_buffers[i];
5557
5558 if (rxp->skb == NULL)
5559 continue;
5560
5561 pci_unmap_single(tp->pdev,
5562 pci_unmap_addr(rxp, mapping),
5563 TG3_RX_JMB_MAP_SZ,
5564 PCI_DMA_FROMDEVICE);
5565 dev_kfree_skb_any(rxp->skb);
5566 rxp->skb = NULL;
5567 }
5568 }
5569 }
5570
5571 /* Populate the RX producer ring(s) with descriptors and fresh buffers.
5572 *
5573 * The chip has been shut down and the driver detached from
5574 * the networking, so no interrupts or new tx packets will
5575 * end up in the driver. tp->{tx,}lock are held and thus
5576 * we may not sleep.
5577 */
5578 static int tg3_rx_prodring_alloc(struct tg3 *tp,
5579 struct tg3_rx_prodring_set *tpr)
5580 {
5581 u32 i, rx_pkt_dma_sz;
5582 struct tg3_napi *tnapi = &tp->napi[0];
5583
5584 /* Zero out all descriptors. */
5585 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
5586
5587 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
5588 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5589 tp->dev->mtu > ETH_DATA_LEN)
5590 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
5591 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
5592
5593 /* Initialize invariants of the rings, we only set this
5594 * stuff once. This works because the card does not
5595 * write into the rx buffer posting rings.
5596 */
5597 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5598 struct tg3_rx_buffer_desc *rxd;
5599
5600 rxd = &tpr->rx_std[i];
5601 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
5602 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5603 rxd->opaque = (RXD_OPAQUE_RING_STD |
5604 (i << RXD_OPAQUE_INDEX_SHIFT));
5605 }
5606
5607 /* Now allocate fresh SKBs for each rx ring. */
5608 for (i = 0; i < tp->rx_pending; i++) {
5609 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5610 printk(KERN_WARNING PFX
5611 "%s: Using a smaller RX standard ring, "
5612 "only %d out of %d buffers were allocated "
5613 "successfully.\n",
5614 tp->dev->name, i, tp->rx_pending);
5615 if (i == 0)
5616 goto initfail;
5617 tp->rx_pending = i;
5618 break;
5619 }
5620 }
5621
5622 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
5623 goto done;
5624
5625 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
5626
5627 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5628 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5629 struct tg3_rx_buffer_desc *rxd;
5630
5631 rxd = &tpr->rx_jmb[i].std;
5632 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
5633 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5634 RXD_FLAG_JUMBO;
5635 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5636 (i << RXD_OPAQUE_INDEX_SHIFT));
5637 }
5638
5639 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5640 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
5641 -1, i) < 0) {
5642 printk(KERN_WARNING PFX
5643 "%s: Using a smaller RX jumbo ring, "
5644 "only %d out of %d buffers were "
5645 "allocated successfully.\n",
5646 tp->dev->name, i, tp->rx_jumbo_pending);
5647 if (i == 0)
5648 goto initfail;
5649 tp->rx_jumbo_pending = i;
5650 break;
5651 }
5652 }
5653 }
5654
5655 done:
5656 return 0;
5657
5658 initfail:
5659 tg3_rx_prodring_free(tp, tpr);
5660 return -ENOMEM;
5661 }
5662
5663 static void tg3_rx_prodring_fini(struct tg3 *tp,
5664 struct tg3_rx_prodring_set *tpr)
5665 {
5666 kfree(tpr->rx_std_buffers);
5667 tpr->rx_std_buffers = NULL;
5668 kfree(tpr->rx_jmb_buffers);
5669 tpr->rx_jmb_buffers = NULL;
5670 if (tpr->rx_std) {
5671 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5672 tpr->rx_std, tpr->rx_std_mapping);
5673 tpr->rx_std = NULL;
5674 }
5675 if (tpr->rx_jmb) {
5676 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5677 tpr->rx_jmb, tpr->rx_jmb_mapping);
5678 tpr->rx_jmb = NULL;
5679 }
5680 }
5681
5682 static int tg3_rx_prodring_init(struct tg3 *tp,
5683 struct tg3_rx_prodring_set *tpr)
5684 {
5685 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
5686 TG3_RX_RING_SIZE, GFP_KERNEL);
5687 if (!tpr->rx_std_buffers)
5688 return -ENOMEM;
5689
5690 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5691 &tpr->rx_std_mapping);
5692 if (!tpr->rx_std)
5693 goto err_out;
5694
5695 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5696 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
5697 TG3_RX_JUMBO_RING_SIZE,
5698 GFP_KERNEL);
5699 if (!tpr->rx_jmb_buffers)
5700 goto err_out;
5701
5702 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
5703 TG3_RX_JUMBO_RING_BYTES,
5704 &tpr->rx_jmb_mapping);
5705 if (!tpr->rx_jmb)
5706 goto err_out;
5707 }
5708
5709 return 0;
5710
5711 err_out:
5712 tg3_rx_prodring_fini(tp, tpr);
5713 return -ENOMEM;
5714 }
5715
5716 /* Free up pending packets in all rx/tx rings.
5717 *
5718 * The chip has been shut down and the driver detached from
5719 * the networking, so no interrupts or new tx packets will
5720 * end up in the driver. tp->{tx,}lock is not held and we are not
5721 * in an interrupt context and thus may sleep.
5722 */
5723 static void tg3_free_rings(struct tg3 *tp)
5724 {
5725 int i, j;
5726
5727 for (j = 0; j < tp->irq_cnt; j++) {
5728 struct tg3_napi *tnapi = &tp->napi[j];
5729
5730 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5731 struct tx_ring_info *txp;
5732 struct sk_buff *skb;
5733
5734 txp = &tnapi->tx_buffers[i];
5735 skb = txp->skb;
5736
5737 if (skb == NULL) {
5738 i++;
5739 continue;
5740 }
5741
5742 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5743
5744 txp->skb = NULL;
5745
5746 i += skb_shinfo(skb)->nr_frags + 1;
5747
5748 dev_kfree_skb_any(skb);
5749 }
5750 }
5751
5752 tg3_rx_prodring_free(tp, &tp->prodring[0]);
5753 }
5754
5755 /* Initialize tx/rx rings for packet processing.
5756 *
5757 * The chip has been shut down and the driver detached from
5758 * the networking, so no interrupts or new tx packets will
5759 * end up in the driver. tp->{tx,}lock are held and thus
5760 * we may not sleep.
5761 */
5762 static int tg3_init_rings(struct tg3 *tp)
5763 {
5764 int i;
5765
5766 /* Free up all the SKBs. */
5767 tg3_free_rings(tp);
5768
5769 for (i = 0; i < tp->irq_cnt; i++) {
5770 struct tg3_napi *tnapi = &tp->napi[i];
5771
5772 tnapi->last_tag = 0;
5773 tnapi->last_irq_tag = 0;
5774 tnapi->hw_status->status = 0;
5775 tnapi->hw_status->status_tag = 0;
5776 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5777
5778 tnapi->tx_prod = 0;
5779 tnapi->tx_cons = 0;
5780 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
5781
5782 tnapi->rx_rcb_ptr = 0;
5783 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5784 }
5785
5786 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
5787 }
5788
5789 /*
5790 * Must not be invoked with interrupt sources disabled and
5791 * the hardware shut down.
5792 */
5793 static void tg3_free_consistent(struct tg3 *tp)
5794 {
5795 int i;
5796
5797 for (i = 0; i < tp->irq_cnt; i++) {
5798 struct tg3_napi *tnapi = &tp->napi[i];
5799
5800 if (tnapi->tx_ring) {
5801 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5802 tnapi->tx_ring, tnapi->tx_desc_mapping);
5803 tnapi->tx_ring = NULL;
5804 }
5805
5806 kfree(tnapi->tx_buffers);
5807 tnapi->tx_buffers = NULL;
5808
5809 if (tnapi->rx_rcb) {
5810 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5811 tnapi->rx_rcb,
5812 tnapi->rx_rcb_mapping);
5813 tnapi->rx_rcb = NULL;
5814 }
5815
5816 if (tnapi->hw_status) {
5817 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5818 tnapi->hw_status,
5819 tnapi->status_mapping);
5820 tnapi->hw_status = NULL;
5821 }
5822 }
5823
5824 if (tp->hw_stats) {
5825 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5826 tp->hw_stats, tp->stats_mapping);
5827 tp->hw_stats = NULL;
5828 }
5829
5830 tg3_rx_prodring_fini(tp, &tp->prodring[0]);
5831 }
5832
5833 /*
5834 * Must not be invoked with interrupt sources disabled and
5835 * the hardware shut down. Can sleep.
5836 */
5837 static int tg3_alloc_consistent(struct tg3 *tp)
5838 {
5839 int i;
5840
5841 if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
5842 return -ENOMEM;
5843
5844 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5845 sizeof(struct tg3_hw_stats),
5846 &tp->stats_mapping);
5847 if (!tp->hw_stats)
5848 goto err_out;
5849
5850 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5851
5852 for (i = 0; i < tp->irq_cnt; i++) {
5853 struct tg3_napi *tnapi = &tp->napi[i];
5854
5855 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
5856 TG3_HW_STATUS_SIZE,
5857 &tnapi->status_mapping);
5858 if (!tnapi->hw_status)
5859 goto err_out;
5860
5861 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5862
5863 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
5864 TG3_RX_RCB_RING_BYTES(tp),
5865 &tnapi->rx_rcb_mapping);
5866 if (!tnapi->rx_rcb)
5867 goto err_out;
5868
5869 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5870
5871 tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
5872 TG3_TX_RING_SIZE, GFP_KERNEL);
5873 if (!tnapi->tx_buffers)
5874 goto err_out;
5875
5876 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
5877 TG3_TX_RING_BYTES,
5878 &tnapi->tx_desc_mapping);
5879 if (!tnapi->tx_ring)
5880 goto err_out;
5881 }
5882
5883 return 0;
5884
5885 err_out:
5886 tg3_free_consistent(tp);
5887 return -ENOMEM;
5888 }
5889
5890 #define MAX_WAIT_CNT 1000
5891
5892 /* To stop a block, clear the enable bit and poll till it
5893 * clears. tp->lock is held.
5894 */
5895 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5896 {
5897 unsigned int i;
5898 u32 val;
5899
5900 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5901 switch (ofs) {
5902 case RCVLSC_MODE:
5903 case DMAC_MODE:
5904 case MBFREE_MODE:
5905 case BUFMGR_MODE:
5906 case MEMARB_MODE:
5907 /* We can't enable/disable these bits on the
5908 * 5705/5750, so just say success.
5909 */
5910 return 0;
5911
5912 default:
5913 break;
5914 }
5915 }
5916
5917 val = tr32(ofs);
5918 val &= ~enable_bit;
5919 tw32_f(ofs, val);
5920
5921 for (i = 0; i < MAX_WAIT_CNT; i++) {
5922 udelay(100);
5923 val = tr32(ofs);
5924 if ((val & enable_bit) == 0)
5925 break;
5926 }
5927
5928 if (i == MAX_WAIT_CNT && !silent) {
5929 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5930 "ofs=%lx enable_bit=%x\n",
5931 ofs, enable_bit);
5932 return -ENODEV;
5933 }
5934
5935 return 0;
5936 }
5937
5938 /* tp->lock is held. */
5939 static int tg3_abort_hw(struct tg3 *tp, int silent)
5940 {
5941 int i, err;
5942
5943 tg3_disable_ints(tp);
5944
5945 tp->rx_mode &= ~RX_MODE_ENABLE;
5946 tw32_f(MAC_RX_MODE, tp->rx_mode);
5947 udelay(10);
5948
5949 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5950 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5951 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5952 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5953 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5954 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5955
5956 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5957 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5958 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5959 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5960 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5961 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5962 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
5963
5964 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5965 tw32_f(MAC_MODE, tp->mac_mode);
5966 udelay(40);
5967
5968 tp->tx_mode &= ~TX_MODE_ENABLE;
5969 tw32_f(MAC_TX_MODE, tp->tx_mode);
5970
5971 for (i = 0; i < MAX_WAIT_CNT; i++) {
5972 udelay(100);
5973 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5974 break;
5975 }
5976 if (i >= MAX_WAIT_CNT) {
5977 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5978 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5979 tp->dev->name, tr32(MAC_TX_MODE));
5980 err |= -ENODEV;
5981 }
5982
5983 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5984 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5985 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
5986
5987 tw32(FTQ_RESET, 0xffffffff);
5988 tw32(FTQ_RESET, 0x00000000);
5989
5990 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5991 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
5992
5993 for (i = 0; i < tp->irq_cnt; i++) {
5994 struct tg3_napi *tnapi = &tp->napi[i];
5995 if (tnapi->hw_status)
5996 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
5997 }
5998 if (tp->hw_stats)
5999 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6000
6001 return err;
6002 }
6003
6004 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6005 {
6006 int i;
6007 u32 apedata;
6008
6009 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6010 if (apedata != APE_SEG_SIG_MAGIC)
6011 return;
6012
6013 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6014 if (!(apedata & APE_FW_STATUS_READY))
6015 return;
6016
6017 /* Wait for up to 1 millisecond for APE to service previous event. */
6018 for (i = 0; i < 10; i++) {
6019 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6020 return;
6021
6022 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6023
6024 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6025 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6026 event | APE_EVENT_STATUS_EVENT_PENDING);
6027
6028 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6029
6030 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6031 break;
6032
6033 udelay(100);
6034 }
6035
6036 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6037 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6038 }
6039
6040 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6041 {
6042 u32 event;
6043 u32 apedata;
6044
6045 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6046 return;
6047
6048 switch (kind) {
6049 case RESET_KIND_INIT:
6050 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6051 APE_HOST_SEG_SIG_MAGIC);
6052 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6053 APE_HOST_SEG_LEN_MAGIC);
6054 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6055 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6056 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6057 APE_HOST_DRIVER_ID_MAGIC);
6058 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6059 APE_HOST_BEHAV_NO_PHYLOCK);
6060
6061 event = APE_EVENT_STATUS_STATE_START;
6062 break;
6063 case RESET_KIND_SHUTDOWN:
6064 /* With the interface we are currently using,
6065 * APE does not track driver state. Wiping
6066 * out the HOST SEGMENT SIGNATURE forces
6067 * the APE to assume OS absent status.
6068 */
6069 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6070
6071 event = APE_EVENT_STATUS_STATE_UNLOAD;
6072 break;
6073 case RESET_KIND_SUSPEND:
6074 event = APE_EVENT_STATUS_STATE_SUSPEND;
6075 break;
6076 default:
6077 return;
6078 }
6079
6080 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6081
6082 tg3_ape_send_event(tp, event);
6083 }
6084
6085 /* tp->lock is held. */
6086 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6087 {
6088 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6089 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6090
6091 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6092 switch (kind) {
6093 case RESET_KIND_INIT:
6094 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6095 DRV_STATE_START);
6096 break;
6097
6098 case RESET_KIND_SHUTDOWN:
6099 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6100 DRV_STATE_UNLOAD);
6101 break;
6102
6103 case RESET_KIND_SUSPEND:
6104 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6105 DRV_STATE_SUSPEND);
6106 break;
6107
6108 default:
6109 break;
6110 }
6111 }
6112
6113 if (kind == RESET_KIND_INIT ||
6114 kind == RESET_KIND_SUSPEND)
6115 tg3_ape_driver_state_change(tp, kind);
6116 }
6117
6118 /* tp->lock is held. */
6119 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6120 {
6121 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6122 switch (kind) {
6123 case RESET_KIND_INIT:
6124 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6125 DRV_STATE_START_DONE);
6126 break;
6127
6128 case RESET_KIND_SHUTDOWN:
6129 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6130 DRV_STATE_UNLOAD_DONE);
6131 break;
6132
6133 default:
6134 break;
6135 }
6136 }
6137
6138 if (kind == RESET_KIND_SHUTDOWN)
6139 tg3_ape_driver_state_change(tp, kind);
6140 }
6141
6142 /* tp->lock is held. */
6143 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6144 {
6145 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6146 switch (kind) {
6147 case RESET_KIND_INIT:
6148 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6149 DRV_STATE_START);
6150 break;
6151
6152 case RESET_KIND_SHUTDOWN:
6153 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6154 DRV_STATE_UNLOAD);
6155 break;
6156
6157 case RESET_KIND_SUSPEND:
6158 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6159 DRV_STATE_SUSPEND);
6160 break;
6161
6162 default:
6163 break;
6164 }
6165 }
6166 }
6167
6168 static int tg3_poll_fw(struct tg3 *tp)
6169 {
6170 int i;
6171 u32 val;
6172
6173 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6174 /* Wait up to 20ms for init done. */
6175 for (i = 0; i < 200; i++) {
6176 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6177 return 0;
6178 udelay(100);
6179 }
6180 return -ENODEV;
6181 }
6182
6183 /* Wait for firmware initialization to complete. */
6184 for (i = 0; i < 100000; i++) {
6185 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6186 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6187 break;
6188 udelay(10);
6189 }
6190
6191 /* Chip might not be fitted with firmware. Some Sun onboard
6192 * parts are configured like that. So don't signal the timeout
6193 * of the above loop as an error, but do report the lack of
6194 * running firmware once.
6195 */
6196 if (i >= 100000 &&
6197 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6198 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6199
6200 printk(KERN_INFO PFX "%s: No firmware running.\n",
6201 tp->dev->name);
6202 }
6203
6204 return 0;
6205 }
6206
6207 /* Save PCI command register before chip reset */
6208 static void tg3_save_pci_state(struct tg3 *tp)
6209 {
6210 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6211 }
6212
6213 /* Restore PCI state after chip reset */
6214 static void tg3_restore_pci_state(struct tg3 *tp)
6215 {
6216 u32 val;
6217
6218 /* Re-enable indirect register accesses. */
6219 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6220 tp->misc_host_ctrl);
6221
6222 /* Set MAX PCI retry to zero. */
6223 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6224 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6225 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6226 val |= PCISTATE_RETRY_SAME_DMA;
6227 /* Allow reads and writes to the APE register and memory space. */
6228 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6229 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6230 PCISTATE_ALLOW_APE_SHMEM_WR;
6231 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6232
6233 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6234
6235 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6236 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6237 pcie_set_readrq(tp->pdev, 4096);
6238 else {
6239 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6240 tp->pci_cacheline_sz);
6241 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6242 tp->pci_lat_timer);
6243 }
6244 }
6245
6246 /* Make sure PCI-X relaxed ordering bit is clear. */
6247 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6248 u16 pcix_cmd;
6249
6250 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6251 &pcix_cmd);
6252 pcix_cmd &= ~PCI_X_CMD_ERO;
6253 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6254 pcix_cmd);
6255 }
6256
6257 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6258
6259 /* Chip reset on 5780 will reset MSI enable bit,
6260 * so we need to restore it.
6261 */
6262 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6263 u16 ctrl;
6264
6265 pci_read_config_word(tp->pdev,
6266 tp->msi_cap + PCI_MSI_FLAGS,
6267 &ctrl);
6268 pci_write_config_word(tp->pdev,
6269 tp->msi_cap + PCI_MSI_FLAGS,
6270 ctrl | PCI_MSI_FLAGS_ENABLE);
6271 val = tr32(MSGINT_MODE);
6272 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6273 }
6274 }
6275 }
6276
6277 static void tg3_stop_fw(struct tg3 *);
6278
6279 /* tp->lock is held. */
6280 static int tg3_chip_reset(struct tg3 *tp)
6281 {
6282 u32 val;
6283 void (*write_op)(struct tg3 *, u32, u32);
6284 int i, err;
6285
6286 tg3_nvram_lock(tp);
6287
6288 tg3_mdio_stop(tp);
6289
6290 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6291
6292 /* No matching tg3_nvram_unlock() after this because
6293 * chip reset below will undo the nvram lock.
6294 */
6295 tp->nvram_lock_cnt = 0;
6296
6297 /* GRC_MISC_CFG core clock reset will clear the memory
6298 * enable bit in PCI register 4 and the MSI enable bit
6299 * on some chips, so we save relevant registers here.
6300 */
6301 tg3_save_pci_state(tp);
6302
6303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6304 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6305 tw32(GRC_FASTBOOT_PC, 0);
6306
6307 /*
6308 * We must avoid the readl() that normally takes place.
6309 * It locks machines, causes machine checks, and other
6310 * fun things. So, temporarily disable the 5701
6311 * hardware workaround, while we do the reset.
6312 */
6313 write_op = tp->write32;
6314 if (write_op == tg3_write_flush_reg32)
6315 tp->write32 = tg3_write32;
6316
6317 /* Prevent the irq handler from reading or writing PCI registers
6318 * during chip reset when the memory enable bit in the PCI command
6319 * register may be cleared. The chip does not generate an interrupt
6320 * at this time, but the irq handler may still be called due to irq
6321 * sharing or irqpoll.
6322 */
6323 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6324 for (i = 0; i < tp->irq_cnt; i++) {
6325 struct tg3_napi *tnapi = &tp->napi[i];
6326 if (tnapi->hw_status) {
6327 tnapi->hw_status->status = 0;
6328 tnapi->hw_status->status_tag = 0;
6329 }
6330 tnapi->last_tag = 0;
6331 tnapi->last_irq_tag = 0;
6332 }
6333 smp_mb();
6334
6335 for (i = 0; i < tp->irq_cnt; i++)
6336 synchronize_irq(tp->napi[i].irq_vec);
6337
6338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6339 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6340 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6341 }
6342
6343 /* do the reset */
6344 val = GRC_MISC_CFG_CORECLK_RESET;
6345
6346 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6347 if (tr32(0x7e2c) == 0x60) {
6348 tw32(0x7e2c, 0x20);
6349 }
6350 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6351 tw32(GRC_MISC_CFG, (1 << 29));
6352 val |= (1 << 29);
6353 }
6354 }
6355
6356 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6357 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6358 tw32(GRC_VCPU_EXT_CTRL,
6359 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6360 }
6361
6362 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6363 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6364 tw32(GRC_MISC_CFG, val);
6365
6366 /* restore 5701 hardware bug workaround write method */
6367 tp->write32 = write_op;
6368
6369 /* Unfortunately, we have to delay before the PCI read back.
6370 * Some 575X chips even will not respond to a PCI cfg access
6371 * when the reset command is given to the chip.
6372 *
6373 * How do these hardware designers expect things to work
6374 * properly if the PCI write is posted for a long period
6375 * of time? It is always necessary to have some method by
6376 * which a register read back can occur to push the write
6377 * out which does the reset.
6378 *
6379 * For most tg3 variants the trick below was working.
6380 * Ho hum...
6381 */
6382 udelay(120);
6383
6384 /* Flush PCI posted writes. The normal MMIO registers
6385 * are inaccessible at this time so this is the only
6386 * way to do this reliably (actually, this is no longer
6387 * the case, see above). I tried to use indirect
6388 * register read/write but this upset some 5701 variants.
6389 */
6390 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6391
6392 udelay(120);
6393
6394 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6395 u16 val16;
6396
6397 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6398 int i;
6399 u32 cfg_val;
6400
6401 /* Wait for link training to complete. */
6402 for (i = 0; i < 5000; i++)
6403 udelay(100);
6404
6405 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6406 pci_write_config_dword(tp->pdev, 0xc4,
6407 cfg_val | (1 << 15));
6408 }
6409
6410 /* Clear the "no snoop" and "relaxed ordering" bits. */
6411 pci_read_config_word(tp->pdev,
6412 tp->pcie_cap + PCI_EXP_DEVCTL,
6413 &val16);
6414 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6415 PCI_EXP_DEVCTL_NOSNOOP_EN);
6416 /*
6417 * Older PCIe devices only support the 128 byte
6418 * MPS setting. Enforce the restriction.
6419 */
6420 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6421 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6422 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6423 pci_write_config_word(tp->pdev,
6424 tp->pcie_cap + PCI_EXP_DEVCTL,
6425 val16);
6426
6427 pcie_set_readrq(tp->pdev, 4096);
6428
6429 /* Clear error status */
6430 pci_write_config_word(tp->pdev,
6431 tp->pcie_cap + PCI_EXP_DEVSTA,
6432 PCI_EXP_DEVSTA_CED |
6433 PCI_EXP_DEVSTA_NFED |
6434 PCI_EXP_DEVSTA_FED |
6435 PCI_EXP_DEVSTA_URD);
6436 }
6437
6438 tg3_restore_pci_state(tp);
6439
6440 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6441
6442 val = 0;
6443 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6444 val = tr32(MEMARB_MODE);
6445 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6446
6447 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6448 tg3_stop_fw(tp);
6449 tw32(0x5000, 0x400);
6450 }
6451
6452 tw32(GRC_MODE, tp->grc_mode);
6453
6454 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6455 val = tr32(0xc4);
6456
6457 tw32(0xc4, val | (1 << 15));
6458 }
6459
6460 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6461 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6462 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6463 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6464 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6465 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6466 }
6467
6468 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6469 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6470 tw32_f(MAC_MODE, tp->mac_mode);
6471 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6472 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6473 tw32_f(MAC_MODE, tp->mac_mode);
6474 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6475 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6476 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6477 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6478 tw32_f(MAC_MODE, tp->mac_mode);
6479 } else
6480 tw32_f(MAC_MODE, 0);
6481 udelay(40);
6482
6483 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6484
6485 err = tg3_poll_fw(tp);
6486 if (err)
6487 return err;
6488
6489 tg3_mdio_start(tp);
6490
6491 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6492 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6493 val = tr32(0x7c00);
6494
6495 tw32(0x7c00, val | (1 << 25));
6496 }
6497
6498 /* Reprobe ASF enable state. */
6499 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6500 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6501 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6502 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6503 u32 nic_cfg;
6504
6505 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6506 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6507 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6508 tp->last_event_jiffies = jiffies;
6509 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6510 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6511 }
6512 }
6513
6514 return 0;
6515 }
6516
6517 /* tp->lock is held. */
6518 static void tg3_stop_fw(struct tg3 *tp)
6519 {
6520 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6521 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6522 /* Wait for RX cpu to ACK the previous event. */
6523 tg3_wait_for_event_ack(tp);
6524
6525 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6526
6527 tg3_generate_fw_event(tp);
6528
6529 /* Wait for RX cpu to ACK this event. */
6530 tg3_wait_for_event_ack(tp);
6531 }
6532 }
6533
6534 /* tp->lock is held. */
6535 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6536 {
6537 int err;
6538
6539 tg3_stop_fw(tp);
6540
6541 tg3_write_sig_pre_reset(tp, kind);
6542
6543 tg3_abort_hw(tp, silent);
6544 err = tg3_chip_reset(tp);
6545
6546 __tg3_set_mac_addr(tp, 0);
6547
6548 tg3_write_sig_legacy(tp, kind);
6549 tg3_write_sig_post_reset(tp, kind);
6550
6551 if (err)
6552 return err;
6553
6554 return 0;
6555 }
6556
6557 #define RX_CPU_SCRATCH_BASE 0x30000
6558 #define RX_CPU_SCRATCH_SIZE 0x04000
6559 #define TX_CPU_SCRATCH_BASE 0x34000
6560 #define TX_CPU_SCRATCH_SIZE 0x04000
6561
6562 /* tp->lock is held. */
6563 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
6564 {
6565 int i;
6566
6567 BUG_ON(offset == TX_CPU_BASE &&
6568 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
6569
6570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6571 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6572
6573 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
6574 return 0;
6575 }
6576 if (offset == RX_CPU_BASE) {
6577 for (i = 0; i < 10000; i++) {
6578 tw32(offset + CPU_STATE, 0xffffffff);
6579 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6580 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6581 break;
6582 }
6583
6584 tw32(offset + CPU_STATE, 0xffffffff);
6585 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
6586 udelay(10);
6587 } else {
6588 for (i = 0; i < 10000; i++) {
6589 tw32(offset + CPU_STATE, 0xffffffff);
6590 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6591 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
6592 break;
6593 }
6594 }
6595
6596 if (i >= 10000) {
6597 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6598 "and %s CPU\n",
6599 tp->dev->name,
6600 (offset == RX_CPU_BASE ? "RX" : "TX"));
6601 return -ENODEV;
6602 }
6603
6604 /* Clear firmware's nvram arbitration. */
6605 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6606 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
6607 return 0;
6608 }
6609
6610 struct fw_info {
6611 unsigned int fw_base;
6612 unsigned int fw_len;
6613 const __be32 *fw_data;
6614 };
6615
6616 /* tp->lock is held. */
6617 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6618 int cpu_scratch_size, struct fw_info *info)
6619 {
6620 int err, lock_err, i;
6621 void (*write_op)(struct tg3 *, u32, u32);
6622
6623 if (cpu_base == TX_CPU_BASE &&
6624 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6625 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6626 "TX cpu firmware on %s which is 5705.\n",
6627 tp->dev->name);
6628 return -EINVAL;
6629 }
6630
6631 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6632 write_op = tg3_write_mem;
6633 else
6634 write_op = tg3_write_indirect_reg32;
6635
6636 /* It is possible that bootcode is still loading at this point.
6637 * Get the nvram lock first before halting the cpu.
6638 */
6639 lock_err = tg3_nvram_lock(tp);
6640 err = tg3_halt_cpu(tp, cpu_base);
6641 if (!lock_err)
6642 tg3_nvram_unlock(tp);
6643 if (err)
6644 goto out;
6645
6646 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6647 write_op(tp, cpu_scratch_base + i, 0);
6648 tw32(cpu_base + CPU_STATE, 0xffffffff);
6649 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
6650 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
6651 write_op(tp, (cpu_scratch_base +
6652 (info->fw_base & 0xffff) +
6653 (i * sizeof(u32))),
6654 be32_to_cpu(info->fw_data[i]));
6655
6656 err = 0;
6657
6658 out:
6659 return err;
6660 }
6661
6662 /* tp->lock is held. */
6663 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6664 {
6665 struct fw_info info;
6666 const __be32 *fw_data;
6667 int err, i;
6668
6669 fw_data = (void *)tp->fw->data;
6670
6671 /* The firmware blob starts with version numbers, followed by the
6672 start address and length. We use the complete length:
6673 length = end_address_of_bss - start_address_of_text.
6674 The remainder is the blob to be loaded contiguously
6675 from the start address. */
6676
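/* Illustrative word layout as consumed by the assignments below
 * (inferred from this function and the comment above, not from a
 * formal header definition):
 *
 *   fw_data[0]    firmware version
 *   fw_data[1]    load/start address  -> info.fw_base
 *   fw_data[2]    advertised length   (the driver instead uses
 *                                      tp->fw->size - 12)
 *   fw_data[3..]  firmware image      -> info.fw_data
 */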
6677 info.fw_base = be32_to_cpu(fw_data[1]);
6678 info.fw_len = tp->fw->size - 12;
6679 info.fw_data = &fw_data[3];
6680
6681 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6682 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6683 &info);
6684 if (err)
6685 return err;
6686
6687 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6688 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6689 &info);
6690 if (err)
6691 return err;
6692
6693 /* Now startup only the RX cpu. */
6694 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6695 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6696
6697 for (i = 0; i < 5; i++) {
6698 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
6699 break;
6700 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6701 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6702 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
6703 udelay(1000);
6704 }
6705 if (i >= 5) {
6706 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6707 "to set RX CPU PC, is %08x should be %08x\n",
6708 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
6709 info.fw_base);
6710 return -ENODEV;
6711 }
6712 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6713 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6714
6715 return 0;
6716 }
6717
6718 /* 5705 needs a special version of the TSO firmware. */
6719
6720 /* tp->lock is held. */
6721 static int tg3_load_tso_firmware(struct tg3 *tp)
6722 {
6723 struct fw_info info;
6724 const __be32 *fw_data;
6725 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6726 int err, i;
6727
6728 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6729 return 0;
6730
6731 fw_data = (void *)tp->fw->data;
6732
6733 /* The firmware blob starts with version numbers, followed by the
6734 start address and length. We use the complete length:
6735 length = end_address_of_bss - start_address_of_text.
6736 The remainder is the blob to be loaded contiguously
6737 from the start address. */
6738
6739 info.fw_base = be32_to_cpu(fw_data[1]);
6740 cpu_scratch_size = tp->fw_len;
6741 info.fw_len = tp->fw->size - 12;
6742 info.fw_data = &fw_data[3];
6743
6744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6745 cpu_base = RX_CPU_BASE;
6746 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6747 } else {
6748 cpu_base = TX_CPU_BASE;
6749 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6750 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6751 }
6752
6753 err = tg3_load_firmware_cpu(tp, cpu_base,
6754 cpu_scratch_base, cpu_scratch_size,
6755 &info);
6756 if (err)
6757 return err;
6758
6759 /* Now startup the cpu. */
6760 tw32(cpu_base + CPU_STATE, 0xffffffff);
6761 tw32_f(cpu_base + CPU_PC, info.fw_base);
6762
6763 for (i = 0; i < 5; i++) {
6764 if (tr32(cpu_base + CPU_PC) == info.fw_base)
6765 break;
6766 tw32(cpu_base + CPU_STATE, 0xffffffff);
6767 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6768 tw32_f(cpu_base + CPU_PC, info.fw_base);
6769 udelay(1000);
6770 }
6771 if (i >= 5) {
6772 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6773 "to set CPU PC, is %08x should be %08x\n",
6774 tp->dev->name, tr32(cpu_base + CPU_PC),
6775 info.fw_base);
6776 return -ENODEV;
6777 }
6778 tw32(cpu_base + CPU_STATE, 0xffffffff);
6779 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6780 return 0;
6781 }
6782
6783
6784 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6785 {
6786 struct tg3 *tp = netdev_priv(dev);
6787 struct sockaddr *addr = p;
6788 int err = 0, skip_mac_1 = 0;
6789
6790 if (!is_valid_ether_addr(addr->sa_data))
6791 return -EINVAL;
6792
6793 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6794
6795 if (!netif_running(dev))
6796 return 0;
6797
6798 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6799 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6800
6801 addr0_high = tr32(MAC_ADDR_0_HIGH);
6802 addr0_low = tr32(MAC_ADDR_0_LOW);
6803 addr1_high = tr32(MAC_ADDR_1_HIGH);
6804 addr1_low = tr32(MAC_ADDR_1_LOW);
6805
6806 /* Skip MAC addr 1 if ASF is using it. */
6807 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6808 !(addr1_high == 0 && addr1_low == 0))
6809 skip_mac_1 = 1;
6810 }
6811 spin_lock_bh(&tp->lock);
6812 __tg3_set_mac_addr(tp, skip_mac_1);
6813 spin_unlock_bh(&tp->lock);
6814
6815 return err;
6816 }
6817
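/* Program one TG3_BDINFO control block in NIC SRAM: the high and low
 * halves of the ring's host DMA address, its maxlen/flags word, and,
 * on pre-5705 chips only, the NIC-local descriptor address.
 */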
6818 /* tp->lock is held. */
6819 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6820 dma_addr_t mapping, u32 maxlen_flags,
6821 u32 nic_addr)
6822 {
6823 tg3_write_mem(tp,
6824 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6825 ((u64) mapping >> 32));
6826 tg3_write_mem(tp,
6827 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6828 ((u64) mapping & 0xffffffff));
6829 tg3_write_mem(tp,
6830 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6831 maxlen_flags);
6832
6833 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6834 tg3_write_mem(tp,
6835 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6836 nic_addr);
6837 }
6838
6839 static void __tg3_set_rx_mode(struct net_device *);
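/* Program the host coalescing engine from the ethtool_coalesce
 * parameters. The interrupt-time tick registers and the statistics
 * block coalescing ticks are only written on pre-5705 chips; the
 * statistics ticks are forced to zero while the carrier is down.
 */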
6840 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6841 {
6842 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6843 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6844 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6845 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6846 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6847 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6848 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6849 }
6850 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6851 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6852 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6853 u32 val = ec->stats_block_coalesce_usecs;
6854
6855 if (!netif_carrier_ok(tp->dev))
6856 val = 0;
6857
6858 tw32(HOSTCC_STAT_COAL_TICKS, val);
6859 }
6860 }
6861
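/* Summary of the ring reset sequence below: disable every send and
 * receive return ring except the first, quiesce the interrupt and
 * producer/consumer mailboxes, clear the host status block(s), and
 * reprogram the status block DMA addresses and the TX/RX-return
 * BDINFO blocks for each interrupt vector.
 */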
6862 /* tp->lock is held. */
6863 static void tg3_rings_reset(struct tg3 *tp)
6864 {
6865 int i;
6866 u32 stblk, txrcb, rxrcb, limit;
6867 struct tg3_napi *tnapi = &tp->napi[0];
6868
6869 /* Disable all transmit rings but the first. */
6870 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6871 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
6872 else
6873 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
6874
6875 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
6876 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
6877 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
6878 BDINFO_FLAGS_DISABLED);
6879
6880
6881 /* Disable all receive return rings but the first. */
6882 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6883 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
6884 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6885 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
6886 else
6887 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
6888
6889 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
6890 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
6891 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
6892 BDINFO_FLAGS_DISABLED);
6893
6894 /* Disable interrupts */
6895 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
6896
6897 /* Zero mailbox registers. */
6898 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
6899 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
6900 tp->napi[i].tx_prod = 0;
6901 tp->napi[i].tx_cons = 0;
6902 tw32_mailbox(tp->napi[i].prodmbox, 0);
6903 tw32_rx_mbox(tp->napi[i].consmbox, 0);
6904 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
6905 }
6906 } else {
6907 tp->napi[0].tx_prod = 0;
6908 tp->napi[0].tx_cons = 0;
6909 tw32_mailbox(tp->napi[0].prodmbox, 0);
6910 tw32_rx_mbox(tp->napi[0].consmbox, 0);
6911 }
6912
6913 /* Make sure the NIC-based send BD rings are disabled. */
6914 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6915 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
6916 for (i = 0; i < 16; i++)
6917 tw32_tx_mbox(mbox + i * 8, 0);
6918 }
6919
6920 txrcb = NIC_SRAM_SEND_RCB;
6921 rxrcb = NIC_SRAM_RCV_RET_RCB;
6922
6923 /* Clear status block in ram. */
6924 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6925
6926 /* Set status block DMA address */
6927 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6928 ((u64) tnapi->status_mapping >> 32));
6929 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6930 ((u64) tnapi->status_mapping & 0xffffffff));
6931
6932 if (tnapi->tx_ring) {
6933 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
6934 (TG3_TX_RING_SIZE <<
6935 BDINFO_FLAGS_MAXLEN_SHIFT),
6936 NIC_SRAM_TX_BUFFER_DESC);
6937 txrcb += TG3_BDINFO_SIZE;
6938 }
6939
6940 if (tnapi->rx_rcb) {
6941 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
6942 (TG3_RX_RCB_RING_SIZE(tp) <<
6943 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
6944 rxrcb += TG3_BDINFO_SIZE;
6945 }
6946
6947 stblk = HOSTCC_STATBLCK_RING1;
6948
6949 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
6950 u64 mapping = (u64)tnapi->status_mapping;
6951 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
6952 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
6953
6954 /* Clear status block in ram. */
6955 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6956
6957 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
6958 (TG3_TX_RING_SIZE <<
6959 BDINFO_FLAGS_MAXLEN_SHIFT),
6960 NIC_SRAM_TX_BUFFER_DESC);
6961
6962 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
6963 (TG3_RX_RCB_RING_SIZE(tp) <<
6964 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
6965
6966 stblk += 8;
6967 txrcb += TG3_BDINFO_SIZE;
6968 rxrcb += TG3_BDINFO_SIZE;
6969 }
6970 }
6971
6972 /* tp->lock is held. */
6973 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6974 {
6975 u32 val, rdmac_mode;
6976 int i, err, limit;
6977 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
6978
6979 tg3_disable_ints(tp);
6980
6981 tg3_stop_fw(tp);
6982
6983 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6984
6985 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6986 tg3_abort_hw(tp, 1);
6987 }
6988
6989 if (reset_phy &&
6990 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6991 tg3_phy_reset(tp);
6992
6993 err = tg3_chip_reset(tp);
6994 if (err)
6995 return err;
6996
6997 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6998
6999 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7000 val = tr32(TG3_CPMU_CTRL);
7001 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7002 tw32(TG3_CPMU_CTRL, val);
7003
7004 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7005 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7006 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7007 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7008
7009 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7010 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7011 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7012 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7013
7014 val = tr32(TG3_CPMU_HST_ACC);
7015 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7016 val |= CPMU_HST_ACC_MACCLK_6_25;
7017 tw32(TG3_CPMU_HST_ACC, val);
7018 }
7019
7020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7021 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7022 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7023 PCIE_PWR_MGMT_L1_THRESH_4MS;
7024 tw32(PCIE_PWR_MGMT_THRESH, val);
7025
7026 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7027 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7028
7029 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7030 }
7031
7032 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
7033 val = tr32(TG3_PCIE_LNKCTL);
7034 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
7035 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7036 else
7037 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7038 tw32(TG3_PCIE_LNKCTL, val);
7039 }
7040
7041 /* This works around an issue with Athlon chipsets on
7042 * B3 tigon3 silicon. This bit has no effect on any
7043 * other revision. But do not set this on PCI Express
7044 * chips and don't even touch the clocks if the CPMU is present.
7045 */
7046 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7047 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7048 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7049 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7050 }
7051
7052 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7053 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7054 val = tr32(TG3PCI_PCISTATE);
7055 val |= PCISTATE_RETRY_SAME_DMA;
7056 tw32(TG3PCI_PCISTATE, val);
7057 }
7058
7059 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7060 /* Allow reads and writes to the
7061 * APE register and memory space.
7062 */
7063 val = tr32(TG3PCI_PCISTATE);
7064 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7065 PCISTATE_ALLOW_APE_SHMEM_WR;
7066 tw32(TG3PCI_PCISTATE, val);
7067 }
7068
7069 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7070 /* Enable some hw fixes. */
7071 val = tr32(TG3PCI_MSI_DATA);
7072 val |= (1 << 26) | (1 << 28) | (1 << 29);
7073 tw32(TG3PCI_MSI_DATA, val);
7074 }
7075
7076 /* Descriptor ring init may make accesses to the
7077 * NIC SRAM area to setup the TX descriptors, so we
7078 * can only do this after the hardware has been
7079 * successfully reset.
7080 */
7081 err = tg3_init_rings(tp);
7082 if (err)
7083 return err;
7084
7085 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7086 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7087 /* This value is determined during the probe time DMA
7088 * engine test, tg3_test_dma.
7089 */
7090 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7091 }
7092
7093 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7094 GRC_MODE_4X_NIC_SEND_RINGS |
7095 GRC_MODE_NO_TX_PHDR_CSUM |
7096 GRC_MODE_NO_RX_PHDR_CSUM);
7097 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7098
7099 /* Pseudo-header checksum is done by hardware logic and not
7100 * the offload processors, so make the chip do the pseudo-
7101 * header checksums on receive. For transmit it is more
7102 * convenient to do the pseudo-header checksum in software
7103 * as Linux does that on transmit for us in all cases.
7104 */
7105 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7106
7107 tw32(GRC_MODE,
7108 tp->grc_mode |
7109 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7110
7111 /* Set up the timer prescaler register. The clock is always 66MHz. */
7112 val = tr32(GRC_MISC_CFG);
7113 val &= ~0xff;
7114 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7115 tw32(GRC_MISC_CFG, val);
7116
7117 /* Initialize MBUF/DESC pool. */
7118 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7119 /* Do nothing. */
7120 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7121 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7122 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7123 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7124 else
7125 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7126 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7127 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7128 }
7129 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7130 int fw_len;
7131
7132 fw_len = tp->fw_len;
7133 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7134 tw32(BUFMGR_MB_POOL_ADDR,
7135 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7136 tw32(BUFMGR_MB_POOL_SIZE,
7137 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7138 }
7139
7140 if (tp->dev->mtu <= ETH_DATA_LEN) {
7141 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7142 tp->bufmgr_config.mbuf_read_dma_low_water);
7143 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7144 tp->bufmgr_config.mbuf_mac_rx_low_water);
7145 tw32(BUFMGR_MB_HIGH_WATER,
7146 tp->bufmgr_config.mbuf_high_water);
7147 } else {
7148 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7149 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7150 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7151 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7152 tw32(BUFMGR_MB_HIGH_WATER,
7153 tp->bufmgr_config.mbuf_high_water_jumbo);
7154 }
7155 tw32(BUFMGR_DMA_LOW_WATER,
7156 tp->bufmgr_config.dma_low_water);
7157 tw32(BUFMGR_DMA_HIGH_WATER,
7158 tp->bufmgr_config.dma_high_water);
7159
7160 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7161 for (i = 0; i < 2000; i++) {
7162 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7163 break;
7164 udelay(10);
7165 }
7166 if (i >= 2000) {
7167 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7168 tp->dev->name);
7169 return -ENODEV;
7170 }
7171
7172 /* Setup replenish threshold. */
7173 val = tp->rx_pending / 8;
7174 if (val == 0)
7175 val = 1;
7176 else if (val > tp->rx_std_max_post)
7177 val = tp->rx_std_max_post;
7178 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7179 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7180 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7181
7182 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7183 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7184 }
7185
7186 tw32(RCVBDI_STD_THRESH, val);
7187
7188 /* Initialize TG3_BDINFO's at:
7189 * RCVDBDI_STD_BD: standard eth size rx ring
7190 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7191 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7192 *
7193 * like so:
7194 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7195 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7196 * ring attribute flags
7197 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7198 *
7199 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7200 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7201 *
7202 * The size of each ring is fixed in the firmware, but the location is
7203 * configurable.
7204 */
7205 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7206 ((u64) tpr->rx_std_mapping >> 32));
7207 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7208 ((u64) tpr->rx_std_mapping & 0xffffffff));
7209 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7210 NIC_SRAM_RX_BUFFER_DESC);
7211
7212 /* Disable the mini ring */
7213 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7214 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7215 BDINFO_FLAGS_DISABLED);
7216
7217 /* Program the jumbo buffer descriptor ring control
7218 * blocks on those devices that have them.
7219 */
7220 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7221 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7222 /* Setup replenish threshold. */
7223 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7224
7225 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7226 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7227 ((u64) tpr->rx_jmb_mapping >> 32));
7228 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7229 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7230 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7231 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7232 BDINFO_FLAGS_USE_EXT_RECV);
7233 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7234 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7235 } else {
7236 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7237 BDINFO_FLAGS_DISABLED);
7238 }
7239
7240 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT;
7241 } else
7242 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7243
7244 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7245
7246 tpr->rx_std_ptr = tp->rx_pending;
7247 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7248 tpr->rx_std_ptr);
7249
7250 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7251 tp->rx_jumbo_pending : 0;
7252 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7253 tpr->rx_jmb_ptr);
7254
7255 tg3_rings_reset(tp);
7256
7257 /* Initialize MAC address and backoff seed. */
7258 __tg3_set_mac_addr(tp, 0);
7259
7260 /* MTU + ethernet header + FCS + optional VLAN tag */
7261 tw32(MAC_RX_MTU_SIZE,
7262 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7263
7264 /* The slot time is changed by tg3_setup_phy if we
7265 * run at gigabit with half duplex.
7266 */
7267 tw32(MAC_TX_LENGTHS,
7268 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7269 (6 << TX_LENGTHS_IPG_SHIFT) |
7270 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7271
7272 /* Receive rules. */
7273 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7274 tw32(RCVLPC_CONFIG, 0x0181);
7275
7276 /* Calculate the RDMAC_MODE setting early; we need it to determine
7277 * the RCVLPC_STATS_ENABLE mask.
7278 */
7279 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7280 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7281 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7282 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7283 RDMAC_MODE_LNGREAD_ENAB);
7284
7285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7286 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7287 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7288 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7289 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7290 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7291
7292 /* If statement applies to 5705 and 5750 PCI devices only */
7293 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7294 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7295 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7296 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7298 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7299 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7300 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7301 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7302 }
7303 }
7304
7305 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7306 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7307
7308 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7309 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7310
7311 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7312 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7313 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7314
7315 /* Receive/send statistics. */
7316 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7317 val = tr32(RCVLPC_STATS_ENABLE);
7318 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7319 tw32(RCVLPC_STATS_ENABLE, val);
7320 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7321 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7322 val = tr32(RCVLPC_STATS_ENABLE);
7323 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7324 tw32(RCVLPC_STATS_ENABLE, val);
7325 } else {
7326 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7327 }
7328 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7329 tw32(SNDDATAI_STATSENAB, 0xffffff);
7330 tw32(SNDDATAI_STATSCTRL,
7331 (SNDDATAI_SCTRL_ENABLE |
7332 SNDDATAI_SCTRL_FASTUPD));
7333
7334 /* Setup host coalescing engine. */
7335 tw32(HOSTCC_MODE, 0);
7336 for (i = 0; i < 2000; i++) {
7337 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7338 break;
7339 udelay(10);
7340 }
7341
7342 __tg3_set_coalesce(tp, &tp->coal);
7343
7344 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7345 /* Status/statistics block address. See tg3_timer,
7346 * the tg3_periodic_fetch_stats call there, and
7347 * tg3_get_stats to see how this works for 5705/5750 chips.
7348 */
7349 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7350 ((u64) tp->stats_mapping >> 32));
7351 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7352 ((u64) tp->stats_mapping & 0xffffffff));
7353 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7354
7355 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7356
7357 /* Clear statistics and status block memory areas */
7358 for (i = NIC_SRAM_STATS_BLK;
7359 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7360 i += sizeof(u32)) {
7361 tg3_write_mem(tp, i, 0);
7362 udelay(40);
7363 }
7364 }
7365
7366 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7367
7368 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7369 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7370 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7371 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7372
7373 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7374 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7375 /* reset to prevent losing 1st rx packet intermittently */
7376 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7377 udelay(10);
7378 }
7379
7380 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7381 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7382 else
7383 tp->mac_mode = 0;
7384 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7385 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7386 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7387 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7388 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7389 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7390 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7391 udelay(40);
7392
7393 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7394 * If TG3_FLG2_IS_NIC is zero, we should read the
7395 * register to preserve the GPIO settings for LOMs. The GPIOs,
7396 * whether used as inputs or outputs, are set by boot code after
7397 * reset.
7398 */
7399 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7400 u32 gpio_mask;
7401
7402 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7403 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7404 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7405
7406 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7407 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7408 GRC_LCLCTRL_GPIO_OUTPUT3;
7409
7410 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7411 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7412
7413 tp->grc_local_ctrl &= ~gpio_mask;
7414 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7415
7416 /* GPIO1 must be driven high for eeprom write protect */
7417 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7418 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7419 GRC_LCLCTRL_GPIO_OUTPUT1);
7420 }
7421 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7422 udelay(100);
7423
7424 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7425 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7426 udelay(40);
7427 }
7428
7429 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7430 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7431 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7432 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7433 WDMAC_MODE_LNGREAD_ENAB);
7434
7435 /* If statement applies to 5705 and 5750 PCI devices only */
7436 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7437 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7438 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7439 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7440 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7441 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7442 /* nothing */
7443 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7444 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7445 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7446 val |= WDMAC_MODE_RX_ACCEL;
7447 }
7448 }
7449
7450 /* Enable host coalescing bug fix */
7451 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7452 val |= WDMAC_MODE_STATUS_TAG_FIX;
7453
7454 tw32_f(WDMAC_MODE, val);
7455 udelay(40);
7456
7457 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7458 u16 pcix_cmd;
7459
7460 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7461 &pcix_cmd);
7462 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7463 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7464 pcix_cmd |= PCI_X_CMD_READ_2K;
7465 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7466 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7467 pcix_cmd |= PCI_X_CMD_READ_2K;
7468 }
7469 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7470 pcix_cmd);
7471 }
7472
7473 tw32_f(RDMAC_MODE, rdmac_mode);
7474 udelay(40);
7475
7476 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7477 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7478 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7479
7480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7481 tw32(SNDDATAC_MODE,
7482 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7483 else
7484 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7485
7486 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7487 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7488 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7489 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7490 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7491 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7492 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7493 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7494
7495 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7496 err = tg3_load_5701_a0_firmware_fix(tp);
7497 if (err)
7498 return err;
7499 }
7500
7501 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7502 err = tg3_load_tso_firmware(tp);
7503 if (err)
7504 return err;
7505 }
7506
7507 tp->tx_mode = TX_MODE_ENABLE;
7508 tw32_f(MAC_TX_MODE, tp->tx_mode);
7509 udelay(100);
7510
7511 tp->rx_mode = RX_MODE_ENABLE;
7512 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7513 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7514
7515 tw32_f(MAC_RX_MODE, tp->rx_mode);
7516 udelay(10);
7517
7518 tw32(MAC_LED_CTRL, tp->led_ctrl);
7519
7520 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7521 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7522 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7523 udelay(10);
7524 }
7525 tw32_f(MAC_RX_MODE, tp->rx_mode);
7526 udelay(10);
7527
7528 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7529 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7530 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7531 /* Set drive transmission level to 1.2V */
7532 /* only if the signal pre-emphasis bit is not set */
7533 val = tr32(MAC_SERDES_CFG);
7534 val &= 0xfffff000;
7535 val |= 0x880;
7536 tw32(MAC_SERDES_CFG, val);
7537 }
7538 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7539 tw32(MAC_SERDES_CFG, 0x616000);
7540 }
7541
7542 /* Prevent chip from dropping frames when flow control
7543 * is enabled.
7544 */
7545 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7546
7547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7548 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7549 /* Use hardware link auto-negotiation */
7550 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7551 }
7552
7553 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7554 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7555 u32 tmp;
7556
7557 tmp = tr32(SERDES_RX_CTRL);
7558 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7559 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7560 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7561 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7562 }
7563
7564 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7565 if (tp->link_config.phy_is_low_power) {
7566 tp->link_config.phy_is_low_power = 0;
7567 tp->link_config.speed = tp->link_config.orig_speed;
7568 tp->link_config.duplex = tp->link_config.orig_duplex;
7569 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7570 }
7571
7572 err = tg3_setup_phy(tp, 0);
7573 if (err)
7574 return err;
7575
7576 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7577 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
7578 u32 tmp;
7579
7580 /* Clear CRC stats. */
7581 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7582 tg3_writephy(tp, MII_TG3_TEST1,
7583 tmp | MII_TG3_TEST1_CRC_EN);
7584 tg3_readphy(tp, 0x14, &tmp);
7585 }
7586 }
7587 }
7588
7589 __tg3_set_rx_mode(tp->dev);
7590
7591 /* Initialize receive rules. */
7592 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7593 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7594 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7595 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7596
7597 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7598 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7599 limit = 8;
7600 else
7601 limit = 16;
7602 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7603 limit -= 4;
7604 switch (limit) {
7605 case 16:
7606 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7607 case 15:
7608 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7609 case 14:
7610 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7611 case 13:
7612 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7613 case 12:
7614 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7615 case 11:
7616 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7617 case 10:
7618 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7619 case 9:
7620 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7621 case 8:
7622 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7623 case 7:
7624 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7625 case 6:
7626 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7627 case 5:
7628 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7629 case 4:
7630 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7631 case 3:
7632 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7633 case 2:
7634 case 1:
7635
7636 default:
7637 break;
7638 }
7639
7640 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7641 /* Write our heartbeat update interval to APE. */
7642 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7643 APE_HOST_HEARTBEAT_INT_DISABLE);
7644
7645 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7646
7647 return 0;
7648 }
7649
7650 /* Called at device open time to get the chip ready for
7651 * packet processing. Invoked with tp->lock held.
7652 */
7653 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7654 {
7655 tg3_switch_clocks(tp);
7656
7657 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7658
7659 return tg3_reset_hw(tp, reset_phy);
7660 }
7661
7662 #define TG3_STAT_ADD32(PSTAT, REG) \
7663 do { u32 __val = tr32(REG); \
7664 (PSTAT)->low += __val; \
7665 if ((PSTAT)->low < __val) \
7666 (PSTAT)->high += 1; \
7667 } while (0)
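
/* TG3_STAT_ADD32 folds a free-running 32-bit hardware counter into a
 * 64-bit (high/low) software accumulator.  The carry test works
 * because, after adding __val to the low word, a result smaller than
 * __val can only mean the 32-bit addition wrapped.  A stand-alone
 * sketch of the same carry logic (illustrative only, not driver code):
 */
#if 0
static void stat_add32_sketch(u64 *acc, u32 hw_val)
{
	u64 high = *acc >> 32;
	u32 low = (u32)*acc;

	low += hw_val;		/* 32-bit add, may wrap */
	if (low < hw_val)	/* wrapped: carry into the high word */
		high++;
	*acc = (high << 32) | low;
}
#endif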
7668
7669 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7670 {
7671 struct tg3_hw_stats *sp = tp->hw_stats;
7672
7673 if (!netif_carrier_ok(tp->dev))
7674 return;
7675
7676 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7677 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7678 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7679 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7680 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7681 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7682 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7683 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7684 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7685 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7686 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7687 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7688 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7689
7690 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7691 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7692 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7693 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7694 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7695 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7696 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7697 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7698 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7699 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7700 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7701 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7702 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7703 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7704
7705 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7706 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7707 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7708 }
7709
7710 static void tg3_timer(unsigned long __opaque)
7711 {
7712 struct tg3 *tp = (struct tg3 *) __opaque;
7713
7714 if (tp->irq_sync)
7715 goto restart_timer;
7716
7717 spin_lock(&tp->lock);
7718
7719 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7720 /* All of this garbage is because, when using non-tagged
7721 * IRQ status, the mailbox/status_block protocol the chip
7722 * uses with the cpu is race prone.
7723 */
7724 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
7725 tw32(GRC_LOCAL_CTRL,
7726 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7727 } else {
7728 tw32(HOSTCC_MODE, tp->coalesce_mode |
7729 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
7730 }
7731
7732 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7733 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7734 spin_unlock(&tp->lock);
7735 schedule_work(&tp->reset_task);
7736 return;
7737 }
7738 }
7739
7740 /* This part only runs once per second. */
7741 if (!--tp->timer_counter) {
7742 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7743 tg3_periodic_fetch_stats(tp);
7744
7745 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7746 u32 mac_stat;
7747 int phy_event;
7748
7749 mac_stat = tr32(MAC_STATUS);
7750
7751 phy_event = 0;
7752 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7753 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7754 phy_event = 1;
7755 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7756 phy_event = 1;
7757
7758 if (phy_event)
7759 tg3_setup_phy(tp, 0);
7760 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7761 u32 mac_stat = tr32(MAC_STATUS);
7762 int need_setup = 0;
7763
7764 if (netif_carrier_ok(tp->dev) &&
7765 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7766 need_setup = 1;
7767 }
7768 if (!netif_carrier_ok(tp->dev) &&
7769 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7770 MAC_STATUS_SIGNAL_DET))) {
7771 need_setup = 1;
7772 }
7773 if (need_setup) {
7774 if (!tp->serdes_counter) {
7775 tw32_f(MAC_MODE,
7776 (tp->mac_mode &
7777 ~MAC_MODE_PORT_MODE_MASK));
7778 udelay(40);
7779 tw32_f(MAC_MODE, tp->mac_mode);
7780 udelay(40);
7781 }
7782 tg3_setup_phy(tp, 0);
7783 }
7784 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7785 tg3_serdes_parallel_detect(tp);
7786
7787 tp->timer_counter = tp->timer_multiplier;
7788 }
7789
7790 /* Heartbeat is only sent once every 2 seconds.
7791 *
7792 * The heartbeat is to tell the ASF firmware that the host
7793 * driver is still alive. In the event that the OS crashes,
7794 * ASF needs to reset the hardware to free up the FIFO space
7795 * that may be filled with rx packets destined for the host.
7796 * If the FIFO is full, ASF will no longer function properly.
7797 *
7798 * Unintended resets have been reported on real-time kernels
7799 * where the timer doesn't run on time. Netpoll will also have
7800 * the same problem.
7801 *
7802 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7803 * to check the ring condition when the heartbeat is expiring
7804 * before doing the reset. This will prevent most unintended
7805 * resets.
7806 */
7807 if (!--tp->asf_counter) {
7808 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7809 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7810 tg3_wait_for_event_ack(tp);
7811
7812 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7813 FWCMD_NICDRV_ALIVE3);
7814 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7815 /* 5 seconds timeout */
7816 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7817
7818 tg3_generate_fw_event(tp);
7819 }
7820 tp->asf_counter = tp->asf_multiplier;
7821 }
7822
7823 spin_unlock(&tp->lock);
7824
7825 restart_timer:
7826 tp->timer.expires = jiffies + tp->timer_offset;
7827 add_timer(&tp->timer);
7828 }
7829
7830 static int tg3_request_irq(struct tg3 *tp, int irq_num)
7831 {
7832 irq_handler_t fn;
7833 unsigned long flags;
7834 char *name;
7835 struct tg3_napi *tnapi = &tp->napi[irq_num];
7836
7837 if (tp->irq_cnt == 1)
7838 name = tp->dev->name;
7839 else {
7840 name = &tnapi->irq_lbl[0];
7841 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
7842 name[IFNAMSIZ-1] = 0;
7843 }
7844
7845 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
7846 fn = tg3_msi;
7847 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7848 fn = tg3_msi_1shot;
7849 flags = IRQF_SAMPLE_RANDOM;
7850 } else {
7851 fn = tg3_interrupt;
7852 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7853 fn = tg3_interrupt_tagged;
7854 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7855 }
7856
7857 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
7858 }
7859
7860 static int tg3_test_interrupt(struct tg3 *tp)
7861 {
7862 struct tg3_napi *tnapi = &tp->napi[0];
7863 struct net_device *dev = tp->dev;
7864 int err, i, intr_ok = 0;
7865
7866 if (!netif_running(dev))
7867 return -ENODEV;
7868
7869 tg3_disable_ints(tp);
7870
7871 free_irq(tnapi->irq_vec, tnapi);
7872
7873 err = request_irq(tnapi->irq_vec, tg3_test_isr,
7874 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
7875 if (err)
7876 return err;
7877
7878 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
7879 tg3_enable_ints(tp);
7880
7881 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7882 tnapi->coal_now);
7883
7884 for (i = 0; i < 5; i++) {
7885 u32 int_mbox, misc_host_ctrl;
7886
7887 int_mbox = tr32_mailbox(tnapi->int_mbox);
7888 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7889
7890 if ((int_mbox != 0) ||
7891 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7892 intr_ok = 1;
7893 break;
7894 }
7895
7896 msleep(10);
7897 }
7898
7899 tg3_disable_ints(tp);
7900
7901 free_irq(tnapi->irq_vec, tnapi);
7902
7903 err = tg3_request_irq(tp, 0);
7904
7905 if (err)
7906 return err;
7907
7908 if (intr_ok)
7909 return 0;
7910
7911 return -EIO;
7912 }
7913
7914 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
7915 * INTx mode is successfully restored.
7916 */
7917 static int tg3_test_msi(struct tg3 *tp)
7918 {
7919 int err;
7920 u16 pci_cmd;
7921
7922 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7923 return 0;
7924
7925 /* Turn off SERR reporting in case MSI terminates with Master
7926 * Abort.
7927 */
7928 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7929 pci_write_config_word(tp->pdev, PCI_COMMAND,
7930 pci_cmd & ~PCI_COMMAND_SERR);
7931
7932 err = tg3_test_interrupt(tp);
7933
7934 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7935
7936 if (!err)
7937 return 0;
7938
7939 /* other failures */
7940 if (err != -EIO)
7941 return err;
7942
7943 /* MSI test failed, go back to INTx mode */
7944 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7945 "switching to INTx mode. Please report this failure to "
7946 "the PCI maintainer and include system chipset information.\n",
7947 tp->dev->name);
7948
7949 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
7950
7951 pci_disable_msi(tp->pdev);
7952
7953 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7954
7955 err = tg3_request_irq(tp, 0);
7956 if (err)
7957 return err;
7958
7959 /* Need to reset the chip because the MSI cycle may have terminated
7960 * with Master Abort.
7961 */
7962 tg3_full_lock(tp, 1);
7963
7964 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7965 err = tg3_init_hw(tp, 1);
7966
7967 tg3_full_unlock(tp);
7968
7969 if (err)
7970 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
7971
7972 return err;
7973 }
7974
7975 static int tg3_request_firmware(struct tg3 *tp)
7976 {
7977 const __be32 *fw_data;
7978
7979 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7980 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7981 tp->dev->name, tp->fw_needed);
7982 return -ENOENT;
7983 }
7984
7985 fw_data = (void *)tp->fw->data;
7986
7987 /* Firmware blob starts with version numbers, followed by
7988 * start address and _full_ length including BSS sections
7989 * (which must be longer than the actual data, of course).
7990 */
7991
7992 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
7993 if (tp->fw_len < (tp->fw->size - 12)) {
7994 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7995 tp->dev->name, tp->fw_len, tp->fw_needed);
7996 release_firmware(tp->fw);
7997 tp->fw = NULL;
7998 return -EINVAL;
7999 }
8000
8001 /* We no longer need to request the firmware; we already have it. */
8002 tp->fw_needed = NULL;
8003 return 0;
8004 }
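
/* A sketch of the firmware header layout implied by the code above.
 * This is an assumption inferred from the comment, the fw_data[2]
 * access, and the 12-byte (3 x __be32) offset in the length check,
 * not a published format:
 */
#if 0
struct tg3_fw_hdr_sketch {
	__be32 version;		/* fw_data[0] */
	__be32 base_addr;	/* fw_data[1]: load address on the NIC */
	__be32 len;		/* fw_data[2]: full length including BSS */
	/* payload follows: tp->fw->size - 12 bytes of text/data */
};
#endif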
8005
8006 static bool tg3_enable_msix(struct tg3 *tp)
8007 {
8008 int i, rc, cpus = num_online_cpus();
8009 struct msix_entry msix_ent[tp->irq_max];
8010
8011 if (cpus == 1)
8012 /* Just fall back to the simpler MSI mode. */
8013 return false;
8014
8015 /*
8016 * We want as many rx rings enabled as there are cpus.
8017 * The first MSI-X vector only deals with link interrupts, etc.,
8018 * so we add one to the number of vectors we are requesting.
8019 */
8020 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
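/* Illustrative example: on a 4-CPU system with irq_max >= 5, five
 * vectors are requested -- vector 0 for link/error events and
 * vectors 1-4 serving one rx ring per CPU.
 */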
8021
8022 for (i = 0; i < tp->irq_max; i++) {
8023 msix_ent[i].entry = i;
8024 msix_ent[i].vector = 0;
8025 }
8026
8027 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8028 if (rc != 0) {
8029 if (rc < TG3_RSS_MIN_NUM_MSIX_VECS)
8030 return false;
8031 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8032 return false;
8033 printk(KERN_NOTICE
8034 "%s: Requested %d MSI-X vectors, received %d\n",
8035 tp->dev->name, tp->irq_cnt, rc);
8036 tp->irq_cnt = rc;
8037 }
8038
8039 for (i = 0; i < tp->irq_max; i++)
8040 tp->napi[i].irq_vec = msix_ent[i].vector;
8041
8042 return true;
8043 }
8044
8045 static void tg3_ints_init(struct tg3 *tp)
8046 {
8047 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8048 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8049 /* All MSI-supporting chips should support tagged
8050 * status. Assert that this is the case.
8051 */
8052 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8053 "Not using MSI.\n", tp->dev->name);
8054 goto defcfg;
8055 }
8056
8057 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8058 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8059 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8060 pci_enable_msi(tp->pdev) == 0)
8061 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8062
8063 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8064 u32 msi_mode = tr32(MSGINT_MODE);
8065 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8066 }
8067 defcfg:
8068 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8069 tp->irq_cnt = 1;
8070 tp->napi[0].irq_vec = tp->pdev->irq;
8071 }
8072 }
8073
8074 static void tg3_ints_fini(struct tg3 *tp)
8075 {
8076 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8077 pci_disable_msix(tp->pdev);
8078 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8079 pci_disable_msi(tp->pdev);
8080 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8081 }
8082
8083 static int tg3_open(struct net_device *dev)
8084 {
8085 struct tg3 *tp = netdev_priv(dev);
8086 int i, err;
8087
8088 if (tp->fw_needed) {
8089 err = tg3_request_firmware(tp);
8090 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8091 if (err)
8092 return err;
8093 } else if (err) {
8094 printk(KERN_WARNING "%s: TSO capability disabled.\n",
8095 tp->dev->name);
8096 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8097 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8098 printk(KERN_NOTICE "%s: TSO capability restored.\n",
8099 tp->dev->name);
8100 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8101 }
8102 }
8103
8104 netif_carrier_off(tp->dev);
8105
8106 err = tg3_set_power_state(tp, PCI_D0);
8107 if (err)
8108 return err;
8109
8110 tg3_full_lock(tp, 0);
8111
8112 tg3_disable_ints(tp);
8113 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8114
8115 tg3_full_unlock(tp);
8116
8117 /*
8118 * Set up interrupts first so we know how
8119 * many NAPI resources to allocate
8120 */
8121 tg3_ints_init(tp);
8122
8123 /* The placement of this call is tied
8124 * to the setup and use of Host TX descriptors.
8125 */
8126 err = tg3_alloc_consistent(tp);
8127 if (err)
8128 goto err_out1;
8129
8130 napi_enable(&tp->napi[0].napi);
8131
8132 for (i = 0; i < tp->irq_cnt; i++) {
8133 err = tg3_request_irq(tp, i);
8134 if (err) {
8135 /* free only the vectors we did request */
8136 for (i--; i >= 0; i--)
8137 free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
8138 break;
8139 }
8140 }
8141
8142 if (err)
8143 goto err_out2;
8144
8145 tg3_full_lock(tp, 0);
8146
8147 err = tg3_init_hw(tp, 1);
8148 if (err) {
8149 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8150 tg3_free_rings(tp);
8151 } else {
8152 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8153 tp->timer_offset = HZ;
8154 else
8155 tp->timer_offset = HZ / 10;
8156
8157 BUG_ON(tp->timer_offset > HZ);
8158 tp->timer_counter = tp->timer_multiplier =
8159 (HZ / tp->timer_offset);
8160 tp->asf_counter = tp->asf_multiplier =
8161 ((HZ / tp->timer_offset) * 2);
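/* Worked example: with tagged status the timer fires once per second
 * (timer_offset == HZ), giving timer_counter == 1 and asf_counter == 2
 * (a heartbeat every 2 seconds).  Without tagged status it fires ten
 * times per second, giving timer_counter == 10 and asf_counter == 20,
 * i.e. the same wall-clock intervals.
 */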
8162
8163 init_timer(&tp->timer);
8164 tp->timer.expires = jiffies + tp->timer_offset;
8165 tp->timer.data = (unsigned long) tp;
8166 tp->timer.function = tg3_timer;
8167 }
8168
8169 tg3_full_unlock(tp);
8170
8171 if (err)
8172 goto err_out3;
8173
8174 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8175 err = tg3_test_msi(tp);
8176
8177 if (err) {
8178 tg3_full_lock(tp, 0);
8179 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8180 tg3_free_rings(tp);
8181 tg3_full_unlock(tp);
8182
8183 goto err_out2;
8184 }
8185
8186 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8187 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8188 u32 val = tr32(PCIE_TRANSACTION_CFG);
8189
8190 tw32(PCIE_TRANSACTION_CFG,
8191 val | PCIE_TRANS_CFG_1SHOT_MSI);
8192 }
8193 }
8194 }
8195
8196 tg3_phy_start(tp);
8197
8198 tg3_full_lock(tp, 0);
8199
8200 add_timer(&tp->timer);
8201 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8202 tg3_enable_ints(tp);
8203
8204 tg3_full_unlock(tp);
8205
8206 netif_start_queue(dev);
8207
8208 return 0;
8209
8210 err_out3:
8211 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8212 struct tg3_napi *tnapi = &tp->napi[i];
8213 free_irq(tnapi->irq_vec, tnapi);
8214 }
8215
8216 err_out2:
8217 napi_disable(&tp->napi[0].napi);
8218 tg3_free_consistent(tp);
8219
8220 err_out1:
8221 tg3_ints_fini(tp);
8222 return err;
8223 }
8224
8225 #if 0
8226 /*static*/ void tg3_dump_state(struct tg3 *tp)
8227 {
8228 u32 val32, val32_2, val32_3, val32_4, val32_5;
8229 u16 val16;
8230 int i;
8231 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
8232
8233 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8234 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8235 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8236 val16, val32);
8237
8238 /* MAC block */
8239 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8240 tr32(MAC_MODE), tr32(MAC_STATUS));
8241 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8242 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8243 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8244 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8245 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8246 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8247
8248 /* Send data initiator control block */
8249 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8250 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8251 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8252 tr32(SNDDATAI_STATSCTRL));
8253
8254 /* Send data completion control block */
8255 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8256
8257 /* Send BD ring selector block */
8258 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8259 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8260
8261 /* Send BD initiator control block */
8262 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8263 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8264
8265 /* Send BD completion control block */
8266 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8267
8268 /* Receive list placement control block */
8269 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8270 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8271 printk(" RCVLPC_STATSCTRL[%08x]\n",
8272 tr32(RCVLPC_STATSCTRL));
8273
8274 /* Receive data and receive BD initiator control block */
8275 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8276 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8277
8278 /* Receive data completion control block */
8279 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8280 tr32(RCVDCC_MODE));
8281
8282 /* Receive BD initiator control block */
8283 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8284 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8285
8286 /* Receive BD completion control block */
8287 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8288 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8289
8290 /* Receive list selector control block */
8291 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8292 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8293
8294 /* Mbuf cluster free block */
8295 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8296 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8297
8298 /* Host coalescing control block */
8299 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8300 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8301 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8302 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8303 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8304 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8305 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8306 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8307 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8308 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8309 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8310 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8311
8312 /* Memory arbiter control block */
8313 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8314 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8315
8316 /* Buffer manager control block */
8317 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8318 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8319 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8320 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8321 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8322 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8323 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8324 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8325
8326 /* Read DMA control block */
8327 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8328 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8329
8330 /* Write DMA control block */
8331 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8332 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8333
8334 /* DMA completion block */
8335 printk("DEBUG: DMAC_MODE[%08x]\n",
8336 tr32(DMAC_MODE));
8337
8338 /* GRC block */
8339 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8340 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8341 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8342 tr32(GRC_LOCAL_CTRL));
8343
8344 /* TG3_BDINFOs */
8345 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8346 tr32(RCVDBDI_JUMBO_BD + 0x0),
8347 tr32(RCVDBDI_JUMBO_BD + 0x4),
8348 tr32(RCVDBDI_JUMBO_BD + 0x8),
8349 tr32(RCVDBDI_JUMBO_BD + 0xc));
8350 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8351 tr32(RCVDBDI_STD_BD + 0x0),
8352 tr32(RCVDBDI_STD_BD + 0x4),
8353 tr32(RCVDBDI_STD_BD + 0x8),
8354 tr32(RCVDBDI_STD_BD + 0xc));
8355 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8356 tr32(RCVDBDI_MINI_BD + 0x0),
8357 tr32(RCVDBDI_MINI_BD + 0x4),
8358 tr32(RCVDBDI_MINI_BD + 0x8),
8359 tr32(RCVDBDI_MINI_BD + 0xc));
8360
8361 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8362 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8363 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8364 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8365 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8366 val32, val32_2, val32_3, val32_4);
8367
8368 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8369 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8370 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8371 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8372 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8373 val32, val32_2, val32_3, val32_4);
8374
8375 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8376 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8377 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8378 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8379 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8380 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8381 val32, val32_2, val32_3, val32_4, val32_5);
8382
8383 /* SW status block */
8384 printk(KERN_DEBUG
8385 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8386 sblk->status,
8387 sblk->status_tag,
8388 sblk->rx_jumbo_consumer,
8389 sblk->rx_consumer,
8390 sblk->rx_mini_consumer,
8391 sblk->idx[0].rx_producer,
8392 sblk->idx[0].tx_consumer);
8393
8394 /* SW statistics block */
8395 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8396 ((u32 *)tp->hw_stats)[0],
8397 ((u32 *)tp->hw_stats)[1],
8398 ((u32 *)tp->hw_stats)[2],
8399 ((u32 *)tp->hw_stats)[3]);
8400
8401 /* Mailboxes */
8402 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8403 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8404 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8405 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8406 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8407
8408 /* NIC side send descriptors. */
8409 for (i = 0; i < 6; i++) {
8410 void __iomem *txd;
8411
8412 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8413 + (i * sizeof(struct tg3_tx_buffer_desc));
8414 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8415 i,
8416 readl(txd + 0x0), readl(txd + 0x4),
8417 readl(txd + 0x8), readl(txd + 0xc));
8418 }
8419
8420 /* NIC side RX descriptors. */
8421 for (i = 0; i < 6; i++) {
8422 void __iomem *rxd;
8423
8424 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8425 + (i * sizeof(struct tg3_rx_buffer_desc));
8426 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8427 i,
8428 readl(rxd + 0x0), readl(rxd + 0x4),
8429 readl(rxd + 0x8), readl(rxd + 0xc));
8430 rxd += (4 * sizeof(u32));
8431 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8432 i,
8433 readl(rxd + 0x0), readl(rxd + 0x4),
8434 readl(rxd + 0x8), readl(rxd + 0xc));
8435 }
8436
8437 for (i = 0; i < 6; i++) {
8438 void __iomem *rxd;
8439
8440 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8441 + (i * sizeof(struct tg3_rx_buffer_desc));
8442 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8443 i,
8444 readl(rxd + 0x0), readl(rxd + 0x4),
8445 readl(rxd + 0x8), readl(rxd + 0xc));
8446 rxd += (4 * sizeof(u32));
8447 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8448 i,
8449 readl(rxd + 0x0), readl(rxd + 0x4),
8450 readl(rxd + 0x8), readl(rxd + 0xc));
8451 }
8452 }
8453 #endif
8454
8455 static struct net_device_stats *tg3_get_stats(struct net_device *);
8456 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8457
8458 static int tg3_close(struct net_device *dev)
8459 {
8460 int i;
8461 struct tg3 *tp = netdev_priv(dev);
8462
8463 napi_disable(&tp->napi[0].napi);
8464 cancel_work_sync(&tp->reset_task);
8465
8466 netif_stop_queue(dev);
8467
8468 del_timer_sync(&tp->timer);
8469
8470 tg3_full_lock(tp, 1);
8471 #if 0
8472 tg3_dump_state(tp);
8473 #endif
8474
8475 tg3_disable_ints(tp);
8476
8477 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8478 tg3_free_rings(tp);
8479 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8480
8481 tg3_full_unlock(tp);
8482
8483 for (i = tp->irq_cnt - 1; i >= 0; i--) {
8484 struct tg3_napi *tnapi = &tp->napi[i];
8485 free_irq(tnapi->irq_vec, tnapi);
8486 }
8487
8488 tg3_ints_fini(tp);
8489
8490 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8491 sizeof(tp->net_stats_prev));
8492 memcpy(&tp->estats_prev, tg3_get_estats(tp),
8493 sizeof(tp->estats_prev));
8494
8495 tg3_free_consistent(tp);
8496
8497 tg3_set_power_state(tp, PCI_D3hot);
8498
8499 netif_carrier_off(tp->dev);
8500
8501 return 0;
8502 }
8503
8504 static inline unsigned long get_stat64(tg3_stat64_t *val)
8505 {
8506 unsigned long ret;
8507
8508 #if (BITS_PER_LONG == 32)
8509 ret = val->low;
8510 #else
8511 ret = ((u64)val->high << 32) | ((u64)val->low);
8512 #endif
8513 return ret;
8514 }
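
/* On 32-bit machines get_stat64() can only return the low 32 bits of
 * the (high, low) pair in an unsigned long; get_estat64() below always
 * widens to a full u64.
 */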
8515
8516 static inline u64 get_estat64(tg3_stat64_t *val)
8517 {
8518 return ((u64)val->high << 32) | ((u64)val->low);
8519 }
8520
8521 static unsigned long calc_crc_errors(struct tg3 *tp)
8522 {
8523 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8524
8525 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8526 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8527 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8528 u32 val;
8529
8530 spin_lock_bh(&tp->lock);
8531 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8532 tg3_writephy(tp, MII_TG3_TEST1,
8533 val | MII_TG3_TEST1_CRC_EN);
8534 tg3_readphy(tp, 0x14, &val);
8535 } else
8536 val = 0;
8537 spin_unlock_bh(&tp->lock);
8538
8539 tp->phy_crc_errors += val;
8540
8541 return tp->phy_crc_errors;
8542 }
8543
8544 return get_stat64(&hw_stats->rx_fcs_errors);
8545 }
8546
8547 #define ESTAT_ADD(member) \
8548 estats->member = old_estats->member + \
8549 get_estat64(&hw_stats->member)
8550
8551 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8552 {
8553 struct tg3_ethtool_stats *estats = &tp->estats;
8554 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8555 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8556
8557 if (!hw_stats)
8558 return old_estats;
8559
8560 ESTAT_ADD(rx_octets);
8561 ESTAT_ADD(rx_fragments);
8562 ESTAT_ADD(rx_ucast_packets);
8563 ESTAT_ADD(rx_mcast_packets);
8564 ESTAT_ADD(rx_bcast_packets);
8565 ESTAT_ADD(rx_fcs_errors);
8566 ESTAT_ADD(rx_align_errors);
8567 ESTAT_ADD(rx_xon_pause_rcvd);
8568 ESTAT_ADD(rx_xoff_pause_rcvd);
8569 ESTAT_ADD(rx_mac_ctrl_rcvd);
8570 ESTAT_ADD(rx_xoff_entered);
8571 ESTAT_ADD(rx_frame_too_long_errors);
8572 ESTAT_ADD(rx_jabbers);
8573 ESTAT_ADD(rx_undersize_packets);
8574 ESTAT_ADD(rx_in_length_errors);
8575 ESTAT_ADD(rx_out_length_errors);
8576 ESTAT_ADD(rx_64_or_less_octet_packets);
8577 ESTAT_ADD(rx_65_to_127_octet_packets);
8578 ESTAT_ADD(rx_128_to_255_octet_packets);
8579 ESTAT_ADD(rx_256_to_511_octet_packets);
8580 ESTAT_ADD(rx_512_to_1023_octet_packets);
8581 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8582 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8583 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8584 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8585 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8586
8587 ESTAT_ADD(tx_octets);
8588 ESTAT_ADD(tx_collisions);
8589 ESTAT_ADD(tx_xon_sent);
8590 ESTAT_ADD(tx_xoff_sent);
8591 ESTAT_ADD(tx_flow_control);
8592 ESTAT_ADD(tx_mac_errors);
8593 ESTAT_ADD(tx_single_collisions);
8594 ESTAT_ADD(tx_mult_collisions);
8595 ESTAT_ADD(tx_deferred);
8596 ESTAT_ADD(tx_excessive_collisions);
8597 ESTAT_ADD(tx_late_collisions);
8598 ESTAT_ADD(tx_collide_2times);
8599 ESTAT_ADD(tx_collide_3times);
8600 ESTAT_ADD(tx_collide_4times);
8601 ESTAT_ADD(tx_collide_5times);
8602 ESTAT_ADD(tx_collide_6times);
8603 ESTAT_ADD(tx_collide_7times);
8604 ESTAT_ADD(tx_collide_8times);
8605 ESTAT_ADD(tx_collide_9times);
8606 ESTAT_ADD(tx_collide_10times);
8607 ESTAT_ADD(tx_collide_11times);
8608 ESTAT_ADD(tx_collide_12times);
8609 ESTAT_ADD(tx_collide_13times);
8610 ESTAT_ADD(tx_collide_14times);
8611 ESTAT_ADD(tx_collide_15times);
8612 ESTAT_ADD(tx_ucast_packets);
8613 ESTAT_ADD(tx_mcast_packets);
8614 ESTAT_ADD(tx_bcast_packets);
8615 ESTAT_ADD(tx_carrier_sense_errors);
8616 ESTAT_ADD(tx_discards);
8617 ESTAT_ADD(tx_errors);
8618
8619 ESTAT_ADD(dma_writeq_full);
8620 ESTAT_ADD(dma_write_prioq_full);
8621 ESTAT_ADD(rxbds_empty);
8622 ESTAT_ADD(rx_discards);
8623 ESTAT_ADD(rx_errors);
8624 ESTAT_ADD(rx_threshold_hit);
8625
8626 ESTAT_ADD(dma_readq_full);
8627 ESTAT_ADD(dma_read_prioq_full);
8628 ESTAT_ADD(tx_comp_queue_full);
8629
8630 ESTAT_ADD(ring_set_send_prod_index);
8631 ESTAT_ADD(ring_status_update);
8632 ESTAT_ADD(nic_irqs);
8633 ESTAT_ADD(nic_avoided_irqs);
8634 ESTAT_ADD(nic_tx_threshold_hit);
8635
8636 return estats;
8637 }
8638
8639 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8640 {
8641 struct tg3 *tp = netdev_priv(dev);
8642 struct net_device_stats *stats = &tp->net_stats;
8643 struct net_device_stats *old_stats = &tp->net_stats_prev;
8644 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8645
8646 if (!hw_stats)
8647 return old_stats;
8648
8649 stats->rx_packets = old_stats->rx_packets +
8650 get_stat64(&hw_stats->rx_ucast_packets) +
8651 get_stat64(&hw_stats->rx_mcast_packets) +
8652 get_stat64(&hw_stats->rx_bcast_packets);
8653
8654 stats->tx_packets = old_stats->tx_packets +
8655 get_stat64(&hw_stats->tx_ucast_packets) +
8656 get_stat64(&hw_stats->tx_mcast_packets) +
8657 get_stat64(&hw_stats->tx_bcast_packets);
8658
8659 stats->rx_bytes = old_stats->rx_bytes +
8660 get_stat64(&hw_stats->rx_octets);
8661 stats->tx_bytes = old_stats->tx_bytes +
8662 get_stat64(&hw_stats->tx_octets);
8663
8664 stats->rx_errors = old_stats->rx_errors +
8665 get_stat64(&hw_stats->rx_errors);
8666 stats->tx_errors = old_stats->tx_errors +
8667 get_stat64(&hw_stats->tx_errors) +
8668 get_stat64(&hw_stats->tx_mac_errors) +
8669 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8670 get_stat64(&hw_stats->tx_discards);
8671
8672 stats->multicast = old_stats->multicast +
8673 get_stat64(&hw_stats->rx_mcast_packets);
8674 stats->collisions = old_stats->collisions +
8675 get_stat64(&hw_stats->tx_collisions);
8676
8677 stats->rx_length_errors = old_stats->rx_length_errors +
8678 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8679 get_stat64(&hw_stats->rx_undersize_packets);
8680
8681 stats->rx_over_errors = old_stats->rx_over_errors +
8682 get_stat64(&hw_stats->rxbds_empty);
8683 stats->rx_frame_errors = old_stats->rx_frame_errors +
8684 get_stat64(&hw_stats->rx_align_errors);
8685 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8686 get_stat64(&hw_stats->tx_discards);
8687 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8688 get_stat64(&hw_stats->tx_carrier_sense_errors);
8689
8690 stats->rx_crc_errors = old_stats->rx_crc_errors +
8691 calc_crc_errors(tp);
8692
8693 stats->rx_missed_errors = old_stats->rx_missed_errors +
8694 get_stat64(&hw_stats->rx_discards);
8695
8696 return stats;
8697 }
8698
8699 static inline u32 calc_crc(unsigned char *buf, int len)
8700 {
8701 u32 reg;
8702 u32 tmp;
8703 int j, k;
8704
8705 reg = 0xffffffff;
8706
8707 for (j = 0; j < len; j++) {
8708 reg ^= buf[j];
8709
8710 for (k = 0; k < 8; k++) {
8711 tmp = reg & 0x01;
8712
8713 reg >>= 1;
8714
8715 if (tmp) {
8716 reg ^= 0xedb88320;
8717 }
8718 }
8719 }
8720
8721 return ~reg;
8722 }
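
/* calc_crc() is the bit-at-a-time, reflected (little-endian) Ethernet
 * CRC-32, polynomial 0xedb88320.  __tg3_set_rx_mode() below keeps 7
 * bits of the inverted CRC and spreads them across the four 32-bit
 * hash registers; a stand-alone sketch of that mapping (illustrative
 * only, not driver code):
 */
#if 0
static void hash_bit_sketch(u32 crc, u32 *regidx, u32 *bitpos)
{
	u32 bit = ~crc & 0x7f;		/* 7-bit hash index, 0..127 */

	*regidx = (bit & 0x60) >> 5;	/* selects MAC_HASH_REG_0..3 */
	*bitpos = bit & 0x1f;		/* bit within that register */
}
#endif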
8723
8724 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8725 {
8726 /* accept or reject all multicast frames */
8727 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8728 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8729 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8730 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8731 }
8732
8733 static void __tg3_set_rx_mode(struct net_device *dev)
8734 {
8735 struct tg3 *tp = netdev_priv(dev);
8736 u32 rx_mode;
8737
8738 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8739 RX_MODE_KEEP_VLAN_TAG);
8740
8741 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8742 * flag clear.
8743 */
8744 #if TG3_VLAN_TAG_USED
8745 if (!tp->vlgrp &&
8746 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8747 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8748 #else
8749 /* By definition, VLAN is always disabled in this
8750 * case.
8751 */
8752 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8753 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8754 #endif
8755
8756 if (dev->flags & IFF_PROMISC) {
8757 /* Promiscuous mode. */
8758 rx_mode |= RX_MODE_PROMISC;
8759 } else if (dev->flags & IFF_ALLMULTI) {
8760 /* Accept all multicast. */
8761 tg3_set_multi(tp, 1);
8762 } else if (dev->mc_count < 1) {
8763 /* Reject all multicast. */
8764 tg3_set_multi(tp, 0);
8765 } else {
8766 /* Accept one or more multicast(s). */
8767 struct dev_mc_list *mclist;
8768 unsigned int i;
8769 u32 mc_filter[4] = { 0, };
8770 u32 regidx;
8771 u32 bit;
8772 u32 crc;
8773
8774 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8775 i++, mclist = mclist->next) {
8776
8777 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8778 bit = ~crc & 0x7f;
8779 regidx = (bit & 0x60) >> 5;
8780 bit &= 0x1f;
8781 mc_filter[regidx] |= (1 << bit);
8782 }
8783
8784 tw32(MAC_HASH_REG_0, mc_filter[0]);
8785 tw32(MAC_HASH_REG_1, mc_filter[1]);
8786 tw32(MAC_HASH_REG_2, mc_filter[2]);
8787 tw32(MAC_HASH_REG_3, mc_filter[3]);
8788 }
8789
8790 if (rx_mode != tp->rx_mode) {
8791 tp->rx_mode = rx_mode;
8792 tw32_f(MAC_RX_MODE, rx_mode);
8793 udelay(10);
8794 }
8795 }
8796
8797 static void tg3_set_rx_mode(struct net_device *dev)
8798 {
8799 struct tg3 *tp = netdev_priv(dev);
8800
8801 if (!netif_running(dev))
8802 return;
8803
8804 tg3_full_lock(tp, 0);
8805 __tg3_set_rx_mode(dev);
8806 tg3_full_unlock(tp);
8807 }
8808
8809 #define TG3_REGDUMP_LEN (32 * 1024)
8810
8811 static int tg3_get_regs_len(struct net_device *dev)
8812 {
8813 return TG3_REGDUMP_LEN;
8814 }
8815
8816 static void tg3_get_regs(struct net_device *dev,
8817 struct ethtool_regs *regs, void *_p)
8818 {
8819 u32 *p = _p;
8820 struct tg3 *tp = netdev_priv(dev);
8821 u8 *orig_p = _p;
8822 int i;
8823
8824 regs->version = 0;
8825
8826 memset(p, 0, TG3_REGDUMP_LEN);
8827
8828 if (tp->link_config.phy_is_low_power)
8829 return;
8830
8831 tg3_full_lock(tp, 0);
8832
8833 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8834 #define GET_REG32_LOOP(base,len) \
8835 do { p = (u32 *)(orig_p + (base)); \
8836 for (i = 0; i < len; i += 4) \
8837 __GET_REG32((base) + i); \
8838 } while (0)
8839 #define GET_REG32_1(reg) \
8840 do { p = (u32 *)(orig_p + (reg)); \
8841 __GET_REG32((reg)); \
8842 } while (0)
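
/* Each helper above stores register values at their own offsets inside
 * the 32 KB dump buffer (p is rebased to orig_p + (base) before each
 * run), so the result is a sparse image of register space, with unread
 * ranges left as the zeroes from the memset() above.
 */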
8843
8844 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8845 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8846 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8847 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8848 GET_REG32_1(SNDDATAC_MODE);
8849 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8850 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8851 GET_REG32_1(SNDBDC_MODE);
8852 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8853 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8854 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8855 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8856 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8857 GET_REG32_1(RCVDCC_MODE);
8858 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8859 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8860 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8861 GET_REG32_1(MBFREE_MODE);
8862 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8863 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8864 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8865 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8866 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8867 GET_REG32_1(RX_CPU_MODE);
8868 GET_REG32_1(RX_CPU_STATE);
8869 GET_REG32_1(RX_CPU_PGMCTR);
8870 GET_REG32_1(RX_CPU_HWBKPT);
8871 GET_REG32_1(TX_CPU_MODE);
8872 GET_REG32_1(TX_CPU_STATE);
8873 GET_REG32_1(TX_CPU_PGMCTR);
8874 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8875 GET_REG32_LOOP(FTQ_RESET, 0x120);
8876 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8877 GET_REG32_1(DMAC_MODE);
8878 GET_REG32_LOOP(GRC_MODE, 0x4c);
8879 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8880 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8881
8882 #undef __GET_REG32
8883 #undef GET_REG32_LOOP
8884 #undef GET_REG32_1
8885
8886 tg3_full_unlock(tp);
8887 }
8888
8889 static int tg3_get_eeprom_len(struct net_device *dev)
8890 {
8891 struct tg3 *tp = netdev_priv(dev);
8892
8893 return tp->nvram_size;
8894 }
8895
8896 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8897 {
8898 struct tg3 *tp = netdev_priv(dev);
8899 int ret;
8900 u8 *pd;
8901 u32 i, offset, len, b_offset, b_count;
8902 __be32 val;
8903
8904 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
8905 return -EINVAL;
8906
8907 if (tp->link_config.phy_is_low_power)
8908 return -EAGAIN;
8909
8910 offset = eeprom->offset;
8911 len = eeprom->len;
8912 eeprom->len = 0;
8913
8914 eeprom->magic = TG3_EEPROM_MAGIC;
8915
8916 if (offset & 3) {
8917 /* adjustments to start on required 4-byte boundary */
8918 b_offset = offset & 3;
8919 b_count = 4 - b_offset;
8920 if (b_count > len) {
8921 /* i.e. offset=1 len=2 */
8922 b_count = len;
8923 }
8924 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
8925 if (ret)
8926 return ret;
8927 memcpy(data, ((char *)&val) + b_offset, b_count);
8928 len -= b_count;
8929 offset += b_count;
8930 eeprom->len += b_count;
8931 }
8932
8933 /* read bytes up to the last 4-byte boundary */
8934 pd = &data[eeprom->len];
8935 for (i = 0; i < (len - (len & 3)); i += 4) {
8936 ret = tg3_nvram_read_be32(tp, offset + i, &val);
8937 if (ret) {
8938 eeprom->len += i;
8939 return ret;
8940 }
8941 memcpy(pd + i, &val, 4);
8942 }
8943 eeprom->len += i;
8944
8945 if (len & 3) {
8946 /* read last bytes not ending on a 4-byte boundary */
8947 pd = &data[eeprom->len];
8948 b_count = len & 3;
8949 b_offset = offset + len - b_count;
8950 ret = tg3_nvram_read_be32(tp, b_offset, &val);
8951 if (ret)
8952 return ret;
8953 memcpy(pd, &val, b_count);
8954 eeprom->len += b_count;
8955 }
8956 return 0;
8957 }
8958
8959 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8960
8961 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8962 {
8963 struct tg3 *tp = netdev_priv(dev);
8964 int ret;
8965 u32 offset, len, b_offset, odd_len;
8966 u8 *buf;
8967 __be32 start, end;
8968
8969 if (tp->link_config.phy_is_low_power)
8970 return -EAGAIN;
8971
8972 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
8973 eeprom->magic != TG3_EEPROM_MAGIC)
8974 return -EINVAL;
8975
8976 offset = eeprom->offset;
8977 len = eeprom->len;
8978
8979 if ((b_offset = (offset & 3))) {
8980 /* adjustments to start on required 4-byte boundary */
8981 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
8982 if (ret)
8983 return ret;
8984 len += b_offset;
8985 offset &= ~3;
8986 if (len < 4)
8987 len = 4;
8988 }
8989
8990 odd_len = 0;
8991 if (len & 3) {
8992 /* adjustments to end on required 4-byte boundary */
8993 odd_len = 1;
8994 len = (len + 3) & ~3;
8995 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
8996 if (ret)
8997 return ret;
8998 }
8999
9000 buf = data;
9001 if (b_offset || odd_len) {
9002 buf = kmalloc(len, GFP_KERNEL);
9003 if (!buf)
9004 return -ENOMEM;
9005 if (b_offset)
9006 memcpy(buf, &start, 4);
9007 if (odd_len)
9008 memcpy(buf+len-4, &end, 4);
9009 memcpy(buf + b_offset, data, eeprom->len);
9010 }
9011
9012 ret = tg3_nvram_write_block(tp, offset, len, buf);
9013
9014 if (buf != data)
9015 kfree(buf);
9016
9017 return ret;
9018 }
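
/* Worked example of the read-modify-write alignment in tg3_set_eeprom()
 * (numbers are illustrative): a write with offset=5, len=6 yields
 * b_offset=1, so the word at offset 4 is read into 'start' and the
 * request becomes offset=4, len=7; len is then rounded up to 8 and the
 * word at offset 8 is read into 'end'.  The bounce buffer holds one
 * preserved leading byte, the six caller bytes, and one preserved
 * trailing byte, and is written back as a single aligned 8-byte block.
 */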
9019
9020 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9021 {
9022 struct tg3 *tp = netdev_priv(dev);
9023
9024 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9025 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9026 return -EAGAIN;
9027 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9028 }
9029
9030 cmd->supported = (SUPPORTED_Autoneg);
9031
9032 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9033 cmd->supported |= (SUPPORTED_1000baseT_Half |
9034 SUPPORTED_1000baseT_Full);
9035
9036 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9037 cmd->supported |= (SUPPORTED_100baseT_Half |
9038 SUPPORTED_100baseT_Full |
9039 SUPPORTED_10baseT_Half |
9040 SUPPORTED_10baseT_Full |
9041 SUPPORTED_TP);
9042 cmd->port = PORT_TP;
9043 } else {
9044 cmd->supported |= SUPPORTED_FIBRE;
9045 cmd->port = PORT_FIBRE;
9046 }
9047
9048 cmd->advertising = tp->link_config.advertising;
9049 if (netif_running(dev)) {
9050 cmd->speed = tp->link_config.active_speed;
9051 cmd->duplex = tp->link_config.active_duplex;
9052 }
9053 cmd->phy_address = PHY_ADDR;
9054 cmd->transceiver = XCVR_INTERNAL;
9055 cmd->autoneg = tp->link_config.autoneg;
9056 cmd->maxtxpkt = 0;
9057 cmd->maxrxpkt = 0;
9058 return 0;
9059 }
9060
9061 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9062 {
9063 struct tg3 *tp = netdev_priv(dev);
9064
9065 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9066 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9067 return -EAGAIN;
9068 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9069 }
9070
9071 if (cmd->autoneg != AUTONEG_ENABLE &&
9072 cmd->autoneg != AUTONEG_DISABLE)
9073 return -EINVAL;
9074
9075 if (cmd->autoneg == AUTONEG_DISABLE &&
9076 cmd->duplex != DUPLEX_FULL &&
9077 cmd->duplex != DUPLEX_HALF)
9078 return -EINVAL;
9079
9080 if (cmd->autoneg == AUTONEG_ENABLE) {
9081 u32 mask = ADVERTISED_Autoneg |
9082 ADVERTISED_Pause |
9083 ADVERTISED_Asym_Pause;
9084
9085 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9086 mask |= ADVERTISED_1000baseT_Half |
9087 ADVERTISED_1000baseT_Full;
9088
9089 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9090 mask |= ADVERTISED_100baseT_Half |
9091 ADVERTISED_100baseT_Full |
9092 ADVERTISED_10baseT_Half |
9093 ADVERTISED_10baseT_Full |
9094 ADVERTISED_TP;
9095 else
9096 mask |= ADVERTISED_FIBRE;
9097
9098 if (cmd->advertising & ~mask)
9099 return -EINVAL;
9100
9101 mask &= (ADVERTISED_1000baseT_Half |
9102 ADVERTISED_1000baseT_Full |
9103 ADVERTISED_100baseT_Half |
9104 ADVERTISED_100baseT_Full |
9105 ADVERTISED_10baseT_Half |
9106 ADVERTISED_10baseT_Full);
9107
9108 cmd->advertising &= mask;
9109 } else {
9110 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9111 if (cmd->speed != SPEED_1000)
9112 return -EINVAL;
9113
9114 if (cmd->duplex != DUPLEX_FULL)
9115 return -EINVAL;
9116 } else {
9117 if (cmd->speed != SPEED_100 &&
9118 cmd->speed != SPEED_10)
9119 return -EINVAL;
9120 }
9121 }
9122
9123 tg3_full_lock(tp, 0);
9124
9125 tp->link_config.autoneg = cmd->autoneg;
9126 if (cmd->autoneg == AUTONEG_ENABLE) {
9127 tp->link_config.advertising = (cmd->advertising |
9128 ADVERTISED_Autoneg);
9129 tp->link_config.speed = SPEED_INVALID;
9130 tp->link_config.duplex = DUPLEX_INVALID;
9131 } else {
9132 tp->link_config.advertising = 0;
9133 tp->link_config.speed = cmd->speed;
9134 tp->link_config.duplex = cmd->duplex;
9135 }
9136
9137 tp->link_config.orig_speed = tp->link_config.speed;
9138 tp->link_config.orig_duplex = tp->link_config.duplex;
9139 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9140
9141 if (netif_running(dev))
9142 tg3_setup_phy(tp, 1);
9143
9144 tg3_full_unlock(tp);
9145
9146 return 0;
9147 }
9148
9149 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9150 {
9151 struct tg3 *tp = netdev_priv(dev);
9152
9153 strcpy(info->driver, DRV_MODULE_NAME);
9154 strcpy(info->version, DRV_MODULE_VERSION);
9155 strcpy(info->fw_version, tp->fw_ver);
9156 strcpy(info->bus_info, pci_name(tp->pdev));
9157 }
9158
9159 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9160 {
9161 struct tg3 *tp = netdev_priv(dev);
9162
9163 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9164 device_can_wakeup(&tp->pdev->dev))
9165 wol->supported = WAKE_MAGIC;
9166 else
9167 wol->supported = 0;
9168 wol->wolopts = 0;
9169 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9170 device_can_wakeup(&tp->pdev->dev))
9171 wol->wolopts = WAKE_MAGIC;
9172 memset(&wol->sopass, 0, sizeof(wol->sopass));
9173 }
9174
9175 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9176 {
9177 struct tg3 *tp = netdev_priv(dev);
9178 struct device *dp = &tp->pdev->dev;
9179
9180 if (wol->wolopts & ~WAKE_MAGIC)
9181 return -EINVAL;
9182 if ((wol->wolopts & WAKE_MAGIC) &&
9183 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9184 return -EINVAL;
9185
9186 spin_lock_bh(&tp->lock);
9187 if (wol->wolopts & WAKE_MAGIC) {
9188 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9189 device_set_wakeup_enable(dp, true);
9190 } else {
9191 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9192 device_set_wakeup_enable(dp, false);
9193 }
9194 spin_unlock_bh(&tp->lock);
9195
9196 return 0;
9197 }
9198
9199 static u32 tg3_get_msglevel(struct net_device *dev)
9200 {
9201 struct tg3 *tp = netdev_priv(dev);
9202 return tp->msg_enable;
9203 }
9204
9205 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9206 {
9207 struct tg3 *tp = netdev_priv(dev);
9208 tp->msg_enable = value;
9209 }
9210
9211 static int tg3_set_tso(struct net_device *dev, u32 value)
9212 {
9213 struct tg3 *tp = netdev_priv(dev);
9214
9215 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9216 if (value)
9217 return -EINVAL;
9218 return 0;
9219 }
9220 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9221 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
9222 if (value) {
9223 dev->features |= NETIF_F_TSO6;
9224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9225 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9226 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9229 dev->features |= NETIF_F_TSO_ECN;
9230 } else
9231 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9232 }
9233 return ethtool_op_set_tso(dev, value);
9234 }
9235
9236 static int tg3_nway_reset(struct net_device *dev)
9237 {
9238 struct tg3 *tp = netdev_priv(dev);
9239 int r;
9240
9241 if (!netif_running(dev))
9242 return -EAGAIN;
9243
9244 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9245 return -EINVAL;
9246
9247 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9248 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9249 return -EAGAIN;
9250 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9251 } else {
9252 u32 bmcr;
9253
9254 spin_lock_bh(&tp->lock);
9255 r = -EINVAL;
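/* BMCR is deliberately read twice here; the first result is discarded
 * (presumably to flush a stale value) and only the second is used.
 */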
9256 tg3_readphy(tp, MII_BMCR, &bmcr);
9257 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9258 ((bmcr & BMCR_ANENABLE) ||
9259 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9260 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9261 BMCR_ANENABLE);
9262 r = 0;
9263 }
9264 spin_unlock_bh(&tp->lock);
9265 }
9266
9267 return r;
9268 }
9269
9270 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9271 {
9272 struct tg3 *tp = netdev_priv(dev);
9273
9274 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9275 ering->rx_mini_max_pending = 0;
9276 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9277 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9278 else
9279 ering->rx_jumbo_max_pending = 0;
9280
9281 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9282
9283 ering->rx_pending = tp->rx_pending;
9284 ering->rx_mini_pending = 0;
9285 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9286 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9287 else
9288 ering->rx_jumbo_pending = 0;
9289
9290 ering->tx_pending = tp->napi[0].tx_pending;
9291 }
9292
9293 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9294 {
9295 struct tg3 *tp = netdev_priv(dev);
9296 int i, irq_sync = 0, err = 0;
9297
9298 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9299 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9300 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9301 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9302 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9303 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9304 return -EINVAL;
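
/* The MAX_SKB_FRAGS bounds above exist because a single frame can
 * consume up to MAX_SKB_FRAGS + 1 tx descriptors (and the TSO
 * workaround path can need roughly three times that), so the tx ring
 * must always be larger than one worst-case packet.
 */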
9305
9306 if (netif_running(dev)) {
9307 tg3_phy_stop(tp);
9308 tg3_netif_stop(tp);
9309 irq_sync = 1;
9310 }
9311
9312 tg3_full_lock(tp, irq_sync);
9313
9314 tp->rx_pending = ering->rx_pending;
9315
9316 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9317 tp->rx_pending > 63)
9318 tp->rx_pending = 63;
9319 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9320
9321 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9322 tp->napi[i].tx_pending = ering->tx_pending;
9323
9324 if (netif_running(dev)) {
9325 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9326 err = tg3_restart_hw(tp, 1);
9327 if (!err)
9328 tg3_netif_start(tp);
9329 }
9330
9331 tg3_full_unlock(tp);
9332
9333 if (irq_sync && !err)
9334 tg3_phy_start(tp);
9335
9336 return err;
9337 }
9338
9339 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9340 {
9341 struct tg3 *tp = netdev_priv(dev);
9342
9343 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9344
9345 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9346 epause->rx_pause = 1;
9347 else
9348 epause->rx_pause = 0;
9349
9350 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9351 epause->tx_pause = 1;
9352 else
9353 epause->tx_pause = 0;
9354 }
9355
9356 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9357 {
9358 struct tg3 *tp = netdev_priv(dev);
9359 int err = 0;
9360
9361 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9362 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9363 return -EAGAIN;
9364
9365 if (epause->autoneg) {
9366 u32 newadv;
9367 struct phy_device *phydev;
9368
9369 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9370
9371 if (epause->rx_pause) {
9372 if (epause->tx_pause)
9373 newadv = ADVERTISED_Pause;
9374 else
9375 newadv = ADVERTISED_Pause |
9376 ADVERTISED_Asym_Pause;
9377 } else if (epause->tx_pause) {
9378 newadv = ADVERTISED_Asym_Pause;
9379 } else
9380 newadv = 0;
9381
9382 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9383 u32 oldadv = phydev->advertising &
9384 (ADVERTISED_Pause |
9385 ADVERTISED_Asym_Pause);
9386 if (oldadv != newadv) {
9387 phydev->advertising &=
9388 ~(ADVERTISED_Pause |
9389 ADVERTISED_Asym_Pause);
9390 phydev->advertising |= newadv;
9391 err = phy_start_aneg(phydev);
9392 }
9393 } else {
9394 tp->link_config.advertising &=
9395 ~(ADVERTISED_Pause |
9396 ADVERTISED_Asym_Pause);
9397 tp->link_config.advertising |= newadv;
9398 }
9399 } else {
9400 if (epause->rx_pause)
9401 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9402 else
9403 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9404
9405 if (epause->tx_pause)
9406 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9407 else
9408 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9409
9410 if (netif_running(dev))
9411 tg3_setup_flow_control(tp, 0, 0);
9412 }
9413 } else {
9414 int irq_sync = 0;
9415
9416 if (netif_running(dev)) {
9417 tg3_netif_stop(tp);
9418 irq_sync = 1;
9419 }
9420
9421 tg3_full_lock(tp, irq_sync);
9422
9423 if (epause->autoneg)
9424 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9425 else
9426 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9427 if (epause->rx_pause)
9428 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9429 else
9430 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9431 if (epause->tx_pause)
9432 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9433 else
9434 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9435
9436 if (netif_running(dev)) {
9437 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9438 err = tg3_restart_hw(tp, 1);
9439 if (!err)
9440 tg3_netif_start(tp);
9441 }
9442
9443 tg3_full_unlock(tp);
9444 }
9445
9446 return err;
9447 }
9448
9449 static u32 tg3_get_rx_csum(struct net_device *dev)
9450 {
9451 struct tg3 *tp = netdev_priv(dev);
9452 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9453 }
9454
9455 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9456 {
9457 struct tg3 *tp = netdev_priv(dev);
9458
9459 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9460 if (data != 0)
9461 return -EINVAL;
9462 return 0;
9463 }
9464
9465 spin_lock_bh(&tp->lock);
9466 if (data)
9467 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9468 else
9469 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9470 spin_unlock_bh(&tp->lock);
9471
9472 return 0;
9473 }
9474
9475 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9476 {
9477 struct tg3 *tp = netdev_priv(dev);
9478
9479 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9480 if (data != 0)
9481 return -EINVAL;
9482 return 0;
9483 }
9484
9485 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9486 ethtool_op_set_tx_ipv6_csum(dev, data);
9487 else
9488 ethtool_op_set_tx_csum(dev, data);
9489
9490 return 0;
9491 }
9492
9493 static int tg3_get_sset_count(struct net_device *dev, int sset)
9494 {
9495 switch (sset) {
9496 case ETH_SS_TEST:
9497 return TG3_NUM_TEST;
9498 case ETH_SS_STATS:
9499 return TG3_NUM_STATS;
9500 default:
9501 return -EOPNOTSUPP;
9502 }
9503 }
9504
9505 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9506 {
9507 switch (stringset) {
9508 case ETH_SS_STATS:
9509 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9510 break;
9511 case ETH_SS_TEST:
9512 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9513 break;
9514 default:
9515 WARN_ON(1); /* should be a WARN() with a message */
9516 break;
9517 }
9518 }
9519
9520 static int tg3_phys_id(struct net_device *dev, u32 data)
9521 {
9522 struct tg3 *tp = netdev_priv(dev);
9523 int i;
9524
9525 if (!netif_running(tp->dev))
9526 return -EAGAIN;
9527
9528 if (data == 0)
9529 data = UINT_MAX / 2;
9530
9531 for (i = 0; i < (data * 2); i++) {
9532 if ((i % 2) == 0)
9533 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9534 LED_CTRL_1000MBPS_ON |
9535 LED_CTRL_100MBPS_ON |
9536 LED_CTRL_10MBPS_ON |
9537 LED_CTRL_TRAFFIC_OVERRIDE |
9538 LED_CTRL_TRAFFIC_BLINK |
9539 LED_CTRL_TRAFFIC_LED);
9540
9541 else
9542 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9543 LED_CTRL_TRAFFIC_OVERRIDE);
9544
9545 if (msleep_interruptible(500))
9546 break;
9547 }
9548 tw32(MAC_LED_CTRL, tp->led_ctrl);
9549 return 0;
9550 }
9551
9552 static void tg3_get_ethtool_stats(struct net_device *dev,
9553 struct ethtool_stats *estats, u64 *tmp_stats)
9554 {
9555 struct tg3 *tp = netdev_priv(dev);
9556 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9557 }
9558
9559 #define NVRAM_TEST_SIZE 0x100
9560 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9561 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9562 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9563 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9564 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9565
9566 static int tg3_test_nvram(struct tg3 *tp)
9567 {
9568 u32 csum, magic;
9569 __be32 *buf;
9570 int i, j, k, err = 0, size;
9571
9572 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9573 return 0;
9574
9575 if (tg3_nvram_read(tp, 0, &magic) != 0)
9576 return -EIO;
9577
9578 if (magic == TG3_EEPROM_MAGIC)
9579 size = NVRAM_TEST_SIZE;
9580 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9581 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9582 TG3_EEPROM_SB_FORMAT_1) {
9583 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9584 case TG3_EEPROM_SB_REVISION_0:
9585 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9586 break;
9587 case TG3_EEPROM_SB_REVISION_2:
9588 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9589 break;
9590 case TG3_EEPROM_SB_REVISION_3:
9591 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9592 break;
9593 default:
9594 return 0;
9595 }
9596 } else
9597 return 0;
9598 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9599 size = NVRAM_SELFBOOT_HW_SIZE;
9600 else
9601 return -EIO;
9602
9603 buf = kmalloc(size, GFP_KERNEL);
9604 if (buf == NULL)
9605 return -ENOMEM;
9606
9607 err = -EIO;
9608 for (i = 0, j = 0; i < size; i += 4, j++) {
9609 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9610 if (err)
9611 break;
9612 }
9613 if (i < size)
9614 goto out;
9615
9616 /* Selfboot format */
9617 magic = be32_to_cpu(buf[0]);
9618 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9619 TG3_EEPROM_MAGIC_FW) {
9620 u8 *buf8 = (u8 *) buf, csum8 = 0;
9621
9622 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9623 TG3_EEPROM_SB_REVISION_2) {
9624 /* For rev 2, the csum doesn't include the MBA. */
9625 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9626 csum8 += buf8[i];
9627 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9628 csum8 += buf8[i];
9629 } else {
9630 for (i = 0; i < size; i++)
9631 csum8 += buf8[i];
9632 }
9633
9634 if (csum8 == 0) {
9635 err = 0;
9636 goto out;
9637 }
9638
9639 err = -EIO;
9640 goto out;
9641 }
9642
9643 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9644 TG3_EEPROM_MAGIC_HW) {
9645 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9646 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9647 u8 *buf8 = (u8 *) buf;
9648
9649 /* Separate the parity bits and the data bytes. */
9650 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9651 if ((i == 0) || (i == 8)) {
9652 int l;
9653 u8 msk;
9654
9655 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9656 parity[k++] = buf8[i] & msk;
9657 i++;
9658 }
9659 else if (i == 16) {
9660 int l;
9661 u8 msk;
9662
9663 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9664 parity[k++] = buf8[i] & msk;
9665 i++;
9666
9667 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9668 parity[k++] = buf8[i] & msk;
9669 i++;
9670 }
9671 data[j++] = buf8[i];
9672 }
9673
9674 err = -EIO;
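		/* The selfboot HW format uses odd parity: each data byte
		 * together with its stored parity bit must contain an odd
		 * number of set bits.
		 */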
9675 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9676 u8 hw8 = hweight8(data[i]);
9677
9678 if ((hw8 & 0x1) && parity[i])
9679 goto out;
9680 else if (!(hw8 & 0x1) && !parity[i])
9681 goto out;
9682 }
9683 err = 0;
9684 goto out;
9685 }
9686
9687 /* Bootstrap checksum at offset 0x10 */
9688 csum = calc_crc((unsigned char *) buf, 0x10);
9689 if (csum != be32_to_cpu(buf[0x10/4]))
9690 goto out;
9691
9692 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9693 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9694 if (csum != be32_to_cpu(buf[0xfc/4]))
9695 goto out;
9696
9697 err = 0;
9698
9699 out:
9700 kfree(buf);
9701 return err;
9702 }
9703
9704 #define TG3_SERDES_TIMEOUT_SEC 2
9705 #define TG3_COPPER_TIMEOUT_SEC 6
9706
9707 static int tg3_test_link(struct tg3 *tp)
9708 {
9709 int i, max;
9710
9711 if (!netif_running(tp->dev))
9712 return -ENODEV;
9713
9714 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9715 max = TG3_SERDES_TIMEOUT_SEC;
9716 else
9717 max = TG3_COPPER_TIMEOUT_SEC;
9718
9719 for (i = 0; i < max; i++) {
9720 if (netif_carrier_ok(tp->dev))
9721 return 0;
9722
9723 if (msleep_interruptible(1000))
9724 break;
9725 }
9726
9727 return -EIO;
9728 }
9729
9730 /* Only test the commonly used registers */
9731 static int tg3_test_registers(struct tg3 *tp)
9732 {
9733 int i, is_5705, is_5750;
9734 u32 offset, read_mask, write_mask, val, save_val, read_val;
9735 static struct {
9736 u16 offset;
9737 u16 flags;
9738 #define TG3_FL_5705 0x1
9739 #define TG3_FL_NOT_5705 0x2
9740 #define TG3_FL_NOT_5788 0x4
9741 #define TG3_FL_NOT_5750 0x8
9742 u32 read_mask;
9743 u32 write_mask;
9744 } reg_tbl[] = {
9745 /* MAC Control Registers */
9746 { MAC_MODE, TG3_FL_NOT_5705,
9747 0x00000000, 0x00ef6f8c },
9748 { MAC_MODE, TG3_FL_5705,
9749 0x00000000, 0x01ef6b8c },
9750 { MAC_STATUS, TG3_FL_NOT_5705,
9751 0x03800107, 0x00000000 },
9752 { MAC_STATUS, TG3_FL_5705,
9753 0x03800100, 0x00000000 },
9754 { MAC_ADDR_0_HIGH, 0x0000,
9755 0x00000000, 0x0000ffff },
9756 { MAC_ADDR_0_LOW, 0x0000,
9757 0x00000000, 0xffffffff },
9758 { MAC_RX_MTU_SIZE, 0x0000,
9759 0x00000000, 0x0000ffff },
9760 { MAC_TX_MODE, 0x0000,
9761 0x00000000, 0x00000070 },
9762 { MAC_TX_LENGTHS, 0x0000,
9763 0x00000000, 0x00003fff },
9764 { MAC_RX_MODE, TG3_FL_NOT_5705,
9765 0x00000000, 0x000007fc },
9766 { MAC_RX_MODE, TG3_FL_5705,
9767 0x00000000, 0x000007dc },
9768 { MAC_HASH_REG_0, 0x0000,
9769 0x00000000, 0xffffffff },
9770 { MAC_HASH_REG_1, 0x0000,
9771 0x00000000, 0xffffffff },
9772 { MAC_HASH_REG_2, 0x0000,
9773 0x00000000, 0xffffffff },
9774 { MAC_HASH_REG_3, 0x0000,
9775 0x00000000, 0xffffffff },
9776
9777 /* Receive Data and Receive BD Initiator Control Registers. */
9778 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9779 0x00000000, 0xffffffff },
9780 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9781 0x00000000, 0xffffffff },
9782 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9783 0x00000000, 0x00000003 },
9784 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9785 0x00000000, 0xffffffff },
9786 { RCVDBDI_STD_BD+0, 0x0000,
9787 0x00000000, 0xffffffff },
9788 { RCVDBDI_STD_BD+4, 0x0000,
9789 0x00000000, 0xffffffff },
9790 { RCVDBDI_STD_BD+8, 0x0000,
9791 0x00000000, 0xffff0002 },
9792 { RCVDBDI_STD_BD+0xc, 0x0000,
9793 0x00000000, 0xffffffff },
9794
9795 /* Receive BD Initiator Control Registers. */
9796 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9797 0x00000000, 0xffffffff },
9798 { RCVBDI_STD_THRESH, TG3_FL_5705,
9799 0x00000000, 0x000003ff },
9800 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9801 0x00000000, 0xffffffff },
9802
9803 /* Host Coalescing Control Registers. */
9804 { HOSTCC_MODE, TG3_FL_NOT_5705,
9805 0x00000000, 0x00000004 },
9806 { HOSTCC_MODE, TG3_FL_5705,
9807 0x00000000, 0x000000f6 },
9808 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9809 0x00000000, 0xffffffff },
9810 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9811 0x00000000, 0x000003ff },
9812 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9813 0x00000000, 0xffffffff },
9814 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9815 0x00000000, 0x000003ff },
9816 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9817 0x00000000, 0xffffffff },
9818 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9819 0x00000000, 0x000000ff },
9820 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9821 0x00000000, 0xffffffff },
9822 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9823 0x00000000, 0x000000ff },
9824 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9825 0x00000000, 0xffffffff },
9826 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9827 0x00000000, 0xffffffff },
9828 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9829 0x00000000, 0xffffffff },
9830 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9831 0x00000000, 0x000000ff },
9832 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9833 0x00000000, 0xffffffff },
9834 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9835 0x00000000, 0x000000ff },
9836 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9837 0x00000000, 0xffffffff },
9838 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9839 0x00000000, 0xffffffff },
9840 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9841 0x00000000, 0xffffffff },
9842 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9843 0x00000000, 0xffffffff },
9844 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9845 0x00000000, 0xffffffff },
9846 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9847 0xffffffff, 0x00000000 },
9848 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9849 0xffffffff, 0x00000000 },
9850
9851 /* Buffer Manager Control Registers. */
9852 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9853 0x00000000, 0x007fff80 },
9854 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9855 0x00000000, 0x007fffff },
9856 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9857 0x00000000, 0x0000003f },
9858 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9859 0x00000000, 0x000001ff },
9860 { BUFMGR_MB_HIGH_WATER, 0x0000,
9861 0x00000000, 0x000001ff },
9862 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9863 0xffffffff, 0x00000000 },
9864 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9865 0xffffffff, 0x00000000 },
9866
9867 /* Mailbox Registers */
9868 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9869 0x00000000, 0x000001ff },
9870 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9871 0x00000000, 0x000001ff },
9872 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9873 0x00000000, 0x000007ff },
9874 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9875 0x00000000, 0x000001ff },
9876
9877 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9878 };
9879
9880 is_5705 = is_5750 = 0;
9881 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9882 is_5705 = 1;
9883 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9884 is_5750 = 1;
9885 }
9886
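	/* Walk the register table, skipping entries that do not apply to
	 * this chip, and verify each register's read-only and read/write
	 * bits against the masks above.
	 */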
9887 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9888 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9889 continue;
9890
9891 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9892 continue;
9893
9894 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9895 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9896 continue;
9897
9898 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9899 continue;
9900
9901 offset = (u32) reg_tbl[i].offset;
9902 read_mask = reg_tbl[i].read_mask;
9903 write_mask = reg_tbl[i].write_mask;
9904
9905 /* Save the original register content */
9906 save_val = tr32(offset);
9907
9908 /* Determine the read-only value. */
9909 read_val = save_val & read_mask;
9910
9911 /* Write zero to the register, then make sure the read-only bits
9912 * are not changed and the read/write bits are all zeros.
9913 */
9914 tw32(offset, 0);
9915
9916 val = tr32(offset);
9917
9918 /* Test the read-only and read/write bits. */
9919 if (((val & read_mask) != read_val) || (val & write_mask))
9920 goto out;
9921
9922 /* Write ones to all the bits defined by RdMask and WrMask, then
9923 * make sure the read-only bits are not changed and the
9924 * read/write bits are all ones.
9925 */
9926 tw32(offset, read_mask | write_mask);
9927
9928 val = tr32(offset);
9929
9930 /* Test the read-only bits. */
9931 if ((val & read_mask) != read_val)
9932 goto out;
9933
9934 /* Test the read/write bits. */
9935 if ((val & write_mask) != write_mask)
9936 goto out;
9937
9938 tw32(offset, save_val);
9939 }
9940
9941 return 0;
9942
9943 out:
9944 if (netif_msg_hw(tp))
9945 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9946 offset);
9947 tw32(offset, save_val);
9948 return -EIO;
9949 }
9950
9951 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9952 {
9953 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9954 int i;
9955 u32 j;
9956
9957 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9958 for (j = 0; j < len; j += 4) {
9959 u32 val;
9960
9961 tg3_write_mem(tp, offset + j, test_pattern[i]);
9962 tg3_read_mem(tp, offset + j, &val);
9963 if (val != test_pattern[i])
9964 return -EIO;
9965 }
9966 }
9967 return 0;
9968 }
9969
9970 static int tg3_test_memory(struct tg3 *tp)
9971 {
9972 static struct mem_entry {
9973 u32 offset;
9974 u32 len;
9975 } mem_tbl_570x[] = {
9976 { 0x00000000, 0x00b50},
9977 { 0x00002000, 0x1c000},
9978 { 0xffffffff, 0x00000}
9979 }, mem_tbl_5705[] = {
9980 { 0x00000100, 0x0000c},
9981 { 0x00000200, 0x00008},
9982 { 0x00004000, 0x00800},
9983 { 0x00006000, 0x01000},
9984 { 0x00008000, 0x02000},
9985 { 0x00010000, 0x0e000},
9986 { 0xffffffff, 0x00000}
9987 }, mem_tbl_5755[] = {
9988 { 0x00000200, 0x00008},
9989 { 0x00004000, 0x00800},
9990 { 0x00006000, 0x00800},
9991 { 0x00008000, 0x02000},
9992 { 0x00010000, 0x0c000},
9993 { 0xffffffff, 0x00000}
9994 }, mem_tbl_5906[] = {
9995 { 0x00000200, 0x00008},
9996 { 0x00004000, 0x00400},
9997 { 0x00006000, 0x00400},
9998 { 0x00008000, 0x01000},
9999 { 0x00010000, 0x01000},
10000 { 0xffffffff, 0x00000}
10001 };
10002 struct mem_entry *mem_tbl;
10003 int err = 0;
10004 int i;
10005
10006 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10007 mem_tbl = mem_tbl_5755;
10008 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10009 mem_tbl = mem_tbl_5906;
10010 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10011 mem_tbl = mem_tbl_5705;
10012 else
10013 mem_tbl = mem_tbl_570x;
10014
10015 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10016 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10017 mem_tbl[i].len)) != 0)
10018 break;
10019 }
10020
10021 return err;
10022 }
10023
10024 #define TG3_MAC_LOOPBACK 0
10025 #define TG3_PHY_LOOPBACK 1
10026
10027 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10028 {
10029 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10030 u32 desc_idx, coal_now;
10031 struct sk_buff *skb, *rx_skb;
10032 u8 *tx_data;
10033 dma_addr_t map;
10034 int num_pkts, tx_len, rx_len, i, err;
10035 struct tg3_rx_buffer_desc *desc;
10036 struct tg3_napi *tnapi, *rnapi;
10037 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10038
10039 tnapi = &tp->napi[0];
10040 rnapi = &tp->napi[0];
10041 coal_now = tnapi->coal_now | rnapi->coal_now;
10042
10043 if (loopback_mode == TG3_MAC_LOOPBACK) {
10044 /* HW errata - mac loopback fails in some cases on 5780.
10045 * Normal traffic and PHY loopback are not affected by
10046 * errata.
10047 */
10048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10049 return 0;
10050
10051 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10052 MAC_MODE_PORT_INT_LPBACK;
10053 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10054 mac_mode |= MAC_MODE_LINK_POLARITY;
10055 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10056 mac_mode |= MAC_MODE_PORT_MODE_MII;
10057 else
10058 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10059 tw32(MAC_MODE, mac_mode);
10060 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10061 u32 val;
10062
10063 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10064 tg3_phy_fet_toggle_apd(tp, false);
10065 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10066 } else
10067 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10068
10069 tg3_phy_toggle_automdix(tp, 0);
10070
10071 tg3_writephy(tp, MII_BMCR, val);
10072 udelay(40);
10073
10074 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10075 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10076 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10077 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
10078 mac_mode |= MAC_MODE_PORT_MODE_MII;
10079 } else
10080 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10081
10082 /* reset to prevent losing 1st rx packet intermittently */
10083 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10084 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10085 udelay(10);
10086 tw32_f(MAC_RX_MODE, tp->rx_mode);
10087 }
10088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10089 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10090 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10091 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10092 mac_mode |= MAC_MODE_LINK_POLARITY;
10093 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10094 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10095 }
10096 tw32(MAC_MODE, mac_mode);
10097 }
10098 else
10099 return -EINVAL;
10100
10101 err = -EIO;
10102
10103 tx_len = 1514;
10104 skb = netdev_alloc_skb(tp->dev, tx_len);
10105 if (!skb)
10106 return -ENOMEM;
10107
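	/* Build the loopback test frame: our own MAC address as the
	 * destination, a zeroed source/type field, and (further below)
	 * an incrementing byte pattern as the payload.
	 */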
10108 tx_data = skb_put(skb, tx_len);
10109 memcpy(tx_data, tp->dev->dev_addr, 6);
10110 memset(tx_data + 6, 0x0, 8);
10111
10112 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10113
10114 for (i = 14; i < tx_len; i++)
10115 tx_data[i] = (u8) (i & 0xff);
10116
10117 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10118
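	/* Kick the coalescing engine so the status block is up to date
	 * before rx_start_idx is sampled below.
	 */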
10119 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10120 rnapi->coal_now);
10121
10122 udelay(10);
10123
10124 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10125
10126 num_pkts = 0;
10127
10128 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10129
10130 tnapi->tx_prod++;
10131 num_pkts++;
10132
10133 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10134 tr32_mailbox(tnapi->prodmbox);
10135
10136 udelay(10);
10137
10138 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10139 for (i = 0; i < 25; i++) {
10140 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10141 coal_now);
10142
10143 udelay(10);
10144
10145 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10146 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10147 if ((tx_idx == tnapi->tx_prod) &&
10148 (rx_idx == (rx_start_idx + num_pkts)))
10149 break;
10150 }
10151
10152 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10153 dev_kfree_skb(skb);
10154
10155 if (tx_idx != tnapi->tx_prod)
10156 goto out;
10157
10158 if (rx_idx != rx_start_idx + num_pkts)
10159 goto out;
10160
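	/* Exactly one frame should have arrived on the standard ring;
	 * check its descriptor and compare the payload byte-for-byte
	 * against what was transmitted.
	 */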
10161 desc = &rnapi->rx_rcb[rx_start_idx];
10162 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10163 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10164 if (opaque_key != RXD_OPAQUE_RING_STD)
10165 goto out;
10166
10167 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10168 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10169 goto out;
10170
10171 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10172 if (rx_len != tx_len)
10173 goto out;
10174
10175 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10176
10177 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10178 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10179
10180 for (i = 14; i < tx_len; i++) {
10181 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10182 goto out;
10183 }
10184 err = 0;
10185
10186 /* tg3_free_rings will unmap and free the rx_skb */
10187 out:
10188 return err;
10189 }
10190
10191 #define TG3_MAC_LOOPBACK_FAILED 1
10192 #define TG3_PHY_LOOPBACK_FAILED 2
10193 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10194 TG3_PHY_LOOPBACK_FAILED)
10195
10196 static int tg3_test_loopback(struct tg3 *tp)
10197 {
10198 int err = 0;
10199 u32 cpmuctrl = 0;
10200
10201 if (!netif_running(tp->dev))
10202 return TG3_LOOPBACK_FAILED;
10203
10204 err = tg3_reset_hw(tp, 1);
10205 if (err)
10206 return TG3_LOOPBACK_FAILED;
10207
10208 /* Turn off gphy autopowerdown. */
10209 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10210 tg3_phy_toggle_apd(tp, false);
10211
10212 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10213 int i;
10214 u32 status;
10215
10216 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10217
10218 /* Wait for up to 40 microseconds to acquire lock. */
10219 for (i = 0; i < 4; i++) {
10220 status = tr32(TG3_CPMU_MUTEX_GNT);
10221 if (status == CPMU_MUTEX_GNT_DRIVER)
10222 break;
10223 udelay(10);
10224 }
10225
10226 if (status != CPMU_MUTEX_GNT_DRIVER)
10227 return TG3_LOOPBACK_FAILED;
10228
10229 /* Turn off link-based power management. */
10230 cpmuctrl = tr32(TG3_CPMU_CTRL);
10231 tw32(TG3_CPMU_CTRL,
10232 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10233 CPMU_CTRL_LINK_AWARE_MODE));
10234 }
10235
10236 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10237 err |= TG3_MAC_LOOPBACK_FAILED;
10238
10239 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10240 tw32(TG3_CPMU_CTRL, cpmuctrl);
10241
10242 /* Release the mutex */
10243 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10244 }
10245
10246 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10247 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10248 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10249 err |= TG3_PHY_LOOPBACK_FAILED;
10250 }
10251
10252 /* Re-enable gphy autopowerdown. */
10253 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10254 tg3_phy_toggle_apd(tp, true);
10255
10256 return err;
10257 }
10258
10259 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10260 u64 *data)
10261 {
10262 struct tg3 *tp = netdev_priv(dev);
10263
10264 if (tp->link_config.phy_is_low_power)
10265 tg3_set_power_state(tp, PCI_D0);
10266
10267 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10268
10269 if (tg3_test_nvram(tp) != 0) {
10270 etest->flags |= ETH_TEST_FL_FAILED;
10271 data[0] = 1;
10272 }
10273 if (tg3_test_link(tp) != 0) {
10274 etest->flags |= ETH_TEST_FL_FAILED;
10275 data[1] = 1;
10276 }
10277 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10278 int err, err2 = 0, irq_sync = 0;
10279
10280 if (netif_running(dev)) {
10281 tg3_phy_stop(tp);
10282 tg3_netif_stop(tp);
10283 irq_sync = 1;
10284 }
10285
10286 tg3_full_lock(tp, irq_sync);
10287
10288 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10289 err = tg3_nvram_lock(tp);
10290 tg3_halt_cpu(tp, RX_CPU_BASE);
10291 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10292 tg3_halt_cpu(tp, TX_CPU_BASE);
10293 if (!err)
10294 tg3_nvram_unlock(tp);
10295
10296 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10297 tg3_phy_reset(tp);
10298
10299 if (tg3_test_registers(tp) != 0) {
10300 etest->flags |= ETH_TEST_FL_FAILED;
10301 data[2] = 1;
10302 }
10303 if (tg3_test_memory(tp) != 0) {
10304 etest->flags |= ETH_TEST_FL_FAILED;
10305 data[3] = 1;
10306 }
10307 if ((data[4] = tg3_test_loopback(tp)) != 0)
10308 etest->flags |= ETH_TEST_FL_FAILED;
10309
10310 tg3_full_unlock(tp);
10311
10312 if (tg3_test_interrupt(tp) != 0) {
10313 etest->flags |= ETH_TEST_FL_FAILED;
10314 data[5] = 1;
10315 }
10316
10317 tg3_full_lock(tp, 0);
10318
10319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10320 if (netif_running(dev)) {
10321 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10322 err2 = tg3_restart_hw(tp, 1);
10323 if (!err2)
10324 tg3_netif_start(tp);
10325 }
10326
10327 tg3_full_unlock(tp);
10328
10329 if (irq_sync && !err2)
10330 tg3_phy_start(tp);
10331 }
10332 if (tp->link_config.phy_is_low_power)
10333 tg3_set_power_state(tp, PCI_D3hot);
10334
10335 }
10336
10337 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10338 {
10339 struct mii_ioctl_data *data = if_mii(ifr);
10340 struct tg3 *tp = netdev_priv(dev);
10341 int err;
10342
10343 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10344 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10345 return -EAGAIN;
10346 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10347 }
10348
10349 switch (cmd) {
10350 case SIOCGMIIPHY:
10351 data->phy_id = PHY_ADDR;
10352
10353 /* fallthru */
10354 case SIOCGMIIREG: {
10355 u32 mii_regval;
10356
10357 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10358 break; /* We have no PHY */
10359
10360 if (tp->link_config.phy_is_low_power)
10361 return -EAGAIN;
10362
10363 spin_lock_bh(&tp->lock);
10364 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10365 spin_unlock_bh(&tp->lock);
10366
10367 data->val_out = mii_regval;
10368
10369 return err;
10370 }
10371
10372 case SIOCSMIIREG:
10373 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10374 break; /* We have no PHY */
10375
10376 if (!capable(CAP_NET_ADMIN))
10377 return -EPERM;
10378
10379 if (tp->link_config.phy_is_low_power)
10380 return -EAGAIN;
10381
10382 spin_lock_bh(&tp->lock);
10383 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10384 spin_unlock_bh(&tp->lock);
10385
10386 return err;
10387
10388 default:
10389 /* do nothing */
10390 break;
10391 }
10392 return -EOPNOTSUPP;
10393 }
10394
10395 #if TG3_VLAN_TAG_USED
10396 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10397 {
10398 struct tg3 *tp = netdev_priv(dev);
10399
10400 if (!netif_running(dev)) {
10401 tp->vlgrp = grp;
10402 return;
10403 }
10404
10405 tg3_netif_stop(tp);
10406
10407 tg3_full_lock(tp, 0);
10408
10409 tp->vlgrp = grp;
10410
10411 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10412 __tg3_set_rx_mode(dev);
10413
10414 tg3_netif_start(tp);
10415
10416 tg3_full_unlock(tp);
10417 }
10418 #endif
10419
10420 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10421 {
10422 struct tg3 *tp = netdev_priv(dev);
10423
10424 memcpy(ec, &tp->coal, sizeof(*ec));
10425 return 0;
10426 }
10427
10428 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10429 {
10430 struct tg3 *tp = netdev_priv(dev);
10431 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10432 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10433
10434 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10435 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10436 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10437 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10438 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10439 }
10440
10441 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10442 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10443 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10444 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10445 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10446 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10447 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10448 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10449 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10450 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10451 return -EINVAL;
10452
10453 /* No rx interrupts will be generated if both are zero */
10454 if ((ec->rx_coalesce_usecs == 0) &&
10455 (ec->rx_max_coalesced_frames == 0))
10456 return -EINVAL;
10457
10458 /* No tx interrupts will be generated if both are zero */
10459 if ((ec->tx_coalesce_usecs == 0) &&
10460 (ec->tx_max_coalesced_frames == 0))
10461 return -EINVAL;
10462
10463 /* Only copy relevant parameters, ignore all others. */
10464 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10465 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10466 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10467 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10468 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10469 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10470 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10471 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10472 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10473
10474 if (netif_running(dev)) {
10475 tg3_full_lock(tp, 0);
10476 __tg3_set_coalesce(tp, &tp->coal);
10477 tg3_full_unlock(tp);
10478 }
10479 return 0;
10480 }
10481
10482 static const struct ethtool_ops tg3_ethtool_ops = {
10483 .get_settings = tg3_get_settings,
10484 .set_settings = tg3_set_settings,
10485 .get_drvinfo = tg3_get_drvinfo,
10486 .get_regs_len = tg3_get_regs_len,
10487 .get_regs = tg3_get_regs,
10488 .get_wol = tg3_get_wol,
10489 .set_wol = tg3_set_wol,
10490 .get_msglevel = tg3_get_msglevel,
10491 .set_msglevel = tg3_set_msglevel,
10492 .nway_reset = tg3_nway_reset,
10493 .get_link = ethtool_op_get_link,
10494 .get_eeprom_len = tg3_get_eeprom_len,
10495 .get_eeprom = tg3_get_eeprom,
10496 .set_eeprom = tg3_set_eeprom,
10497 .get_ringparam = tg3_get_ringparam,
10498 .set_ringparam = tg3_set_ringparam,
10499 .get_pauseparam = tg3_get_pauseparam,
10500 .set_pauseparam = tg3_set_pauseparam,
10501 .get_rx_csum = tg3_get_rx_csum,
10502 .set_rx_csum = tg3_set_rx_csum,
10503 .set_tx_csum = tg3_set_tx_csum,
10504 .set_sg = ethtool_op_set_sg,
10505 .set_tso = tg3_set_tso,
10506 .self_test = tg3_self_test,
10507 .get_strings = tg3_get_strings,
10508 .phys_id = tg3_phys_id,
10509 .get_ethtool_stats = tg3_get_ethtool_stats,
10510 .get_coalesce = tg3_get_coalesce,
10511 .set_coalesce = tg3_set_coalesce,
10512 .get_sset_count = tg3_get_sset_count,
10513 };
10514
10515 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10516 {
10517 u32 cursize, val, magic;
10518
10519 tp->nvram_size = EEPROM_CHIP_SIZE;
10520
10521 if (tg3_nvram_read(tp, 0, &magic) != 0)
10522 return;
10523
10524 if ((magic != TG3_EEPROM_MAGIC) &&
10525 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10526 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10527 return;
10528
10529 /*
10530 * Size the chip by reading offsets at increasing powers of two.
10531 * When we encounter our validation signature, we know the addressing
10532 * has wrapped around, and thus have our chip size.
10533 */
10534 cursize = 0x10;
10535
10536 while (cursize < tp->nvram_size) {
10537 if (tg3_nvram_read(tp, cursize, &val) != 0)
10538 return;
10539
10540 if (val == magic)
10541 break;
10542
10543 cursize <<= 1;
10544 }
10545
10546 tp->nvram_size = cursize;
10547 }
10548
10549 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10550 {
10551 u32 val;
10552
10553 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10554 tg3_nvram_read(tp, 0, &val) != 0)
10555 return;
10556
10557 /* Selfboot format */
10558 if (val != TG3_EEPROM_MAGIC) {
10559 tg3_get_eeprom_size(tp);
10560 return;
10561 }
10562
10563 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10564 if (val != 0) {
10565 /* This is confusing. We want to operate on the
10566 * 16-bit value at offset 0xf2. The tg3_nvram_read()
10567 * call will read from NVRAM and byteswap the data
10568 * according to the byteswapping settings for all
10569 * other register accesses. This ensures the data we
10570 * want will always reside in the lower 16-bits.
10571 * However, the data in NVRAM is in LE format, which
10572 * means the data from the NVRAM read will always be
10573 * opposite the endianness of the CPU. The 16-bit
10574 * byteswap then brings the data to CPU endianness.
10575 */
10576 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
10577 return;
10578 }
10579 }
10580 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10581 }
10582
10583 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10584 {
10585 u32 nvcfg1;
10586
10587 nvcfg1 = tr32(NVRAM_CFG1);
10588 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10589 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10590 } else {
10591 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10592 tw32(NVRAM_CFG1, nvcfg1);
10593 }
10594
10595 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10596 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10597 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10598 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10599 tp->nvram_jedecnum = JEDEC_ATMEL;
10600 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10601 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10602 break;
10603 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10604 tp->nvram_jedecnum = JEDEC_ATMEL;
10605 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10606 break;
10607 case FLASH_VENDOR_ATMEL_EEPROM:
10608 tp->nvram_jedecnum = JEDEC_ATMEL;
10609 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10610 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10611 break;
10612 case FLASH_VENDOR_ST:
10613 tp->nvram_jedecnum = JEDEC_ST;
10614 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10615 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10616 break;
10617 case FLASH_VENDOR_SAIFUN:
10618 tp->nvram_jedecnum = JEDEC_SAIFUN;
10619 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10620 break;
10621 case FLASH_VENDOR_SST_SMALL:
10622 case FLASH_VENDOR_SST_LARGE:
10623 tp->nvram_jedecnum = JEDEC_SST;
10624 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10625 break;
10626 }
10627 } else {
10628 tp->nvram_jedecnum = JEDEC_ATMEL;
10629 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10630 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10631 }
10632 }
10633
10634 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10635 {
10636 u32 nvcfg1;
10637
10638 nvcfg1 = tr32(NVRAM_CFG1);
10639
10640 /* NVRAM protection for TPM */
10641 if (nvcfg1 & (1 << 27))
10642 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10643
10644 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10645 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10646 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10647 tp->nvram_jedecnum = JEDEC_ATMEL;
10648 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10649 break;
10650 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10651 tp->nvram_jedecnum = JEDEC_ATMEL;
10652 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10653 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10654 break;
10655 case FLASH_5752VENDOR_ST_M45PE10:
10656 case FLASH_5752VENDOR_ST_M45PE20:
10657 case FLASH_5752VENDOR_ST_M45PE40:
10658 tp->nvram_jedecnum = JEDEC_ST;
10659 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10660 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10661 break;
10662 }
10663
10664 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10665 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10666 case FLASH_5752PAGE_SIZE_256:
10667 tp->nvram_pagesize = 256;
10668 break;
10669 case FLASH_5752PAGE_SIZE_512:
10670 tp->nvram_pagesize = 512;
10671 break;
10672 case FLASH_5752PAGE_SIZE_1K:
10673 tp->nvram_pagesize = 1024;
10674 break;
10675 case FLASH_5752PAGE_SIZE_2K:
10676 tp->nvram_pagesize = 2048;
10677 break;
10678 case FLASH_5752PAGE_SIZE_4K:
10679 tp->nvram_pagesize = 4096;
10680 break;
10681 case FLASH_5752PAGE_SIZE_264:
10682 tp->nvram_pagesize = 264;
10683 break;
10684 }
10685 } else {
10686 /* For eeprom, set pagesize to maximum eeprom size */
10687 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10688
10689 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10690 tw32(NVRAM_CFG1, nvcfg1);
10691 }
10692 }
10693
10694 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10695 {
10696 u32 nvcfg1, protect = 0;
10697
10698 nvcfg1 = tr32(NVRAM_CFG1);
10699
10700 /* NVRAM protection for TPM */
10701 if (nvcfg1 & (1 << 27)) {
10702 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10703 protect = 1;
10704 }
10705
10706 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10707 switch (nvcfg1) {
10708 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10709 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10710 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10711 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10712 tp->nvram_jedecnum = JEDEC_ATMEL;
10713 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10714 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10715 tp->nvram_pagesize = 264;
10716 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10717 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10718 tp->nvram_size = (protect ? 0x3e200 :
10719 TG3_NVRAM_SIZE_512KB);
10720 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10721 tp->nvram_size = (protect ? 0x1f200 :
10722 TG3_NVRAM_SIZE_256KB);
10723 else
10724 tp->nvram_size = (protect ? 0x1f200 :
10725 TG3_NVRAM_SIZE_128KB);
10726 break;
10727 case FLASH_5752VENDOR_ST_M45PE10:
10728 case FLASH_5752VENDOR_ST_M45PE20:
10729 case FLASH_5752VENDOR_ST_M45PE40:
10730 tp->nvram_jedecnum = JEDEC_ST;
10731 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10732 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10733 tp->nvram_pagesize = 256;
10734 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10735 tp->nvram_size = (protect ?
10736 TG3_NVRAM_SIZE_64KB :
10737 TG3_NVRAM_SIZE_128KB);
10738 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10739 tp->nvram_size = (protect ?
10740 TG3_NVRAM_SIZE_64KB :
10741 TG3_NVRAM_SIZE_256KB);
10742 else
10743 tp->nvram_size = (protect ?
10744 TG3_NVRAM_SIZE_128KB :
10745 TG3_NVRAM_SIZE_512KB);
10746 break;
10747 }
10748 }
10749
10750 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10751 {
10752 u32 nvcfg1;
10753
10754 nvcfg1 = tr32(NVRAM_CFG1);
10755
10756 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10757 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10758 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10759 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10760 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10761 tp->nvram_jedecnum = JEDEC_ATMEL;
10762 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10763 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10764
10765 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10766 tw32(NVRAM_CFG1, nvcfg1);
10767 break;
10768 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10769 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10770 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10771 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10772 tp->nvram_jedecnum = JEDEC_ATMEL;
10773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10774 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10775 tp->nvram_pagesize = 264;
10776 break;
10777 case FLASH_5752VENDOR_ST_M45PE10:
10778 case FLASH_5752VENDOR_ST_M45PE20:
10779 case FLASH_5752VENDOR_ST_M45PE40:
10780 tp->nvram_jedecnum = JEDEC_ST;
10781 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10782 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10783 tp->nvram_pagesize = 256;
10784 break;
10785 }
10786 }
10787
10788 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10789 {
10790 u32 nvcfg1, protect = 0;
10791
10792 nvcfg1 = tr32(NVRAM_CFG1);
10793
10794 /* NVRAM protection for TPM */
10795 if (nvcfg1 & (1 << 27)) {
10796 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10797 protect = 1;
10798 }
10799
10800 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10801 switch (nvcfg1) {
10802 case FLASH_5761VENDOR_ATMEL_ADB021D:
10803 case FLASH_5761VENDOR_ATMEL_ADB041D:
10804 case FLASH_5761VENDOR_ATMEL_ADB081D:
10805 case FLASH_5761VENDOR_ATMEL_ADB161D:
10806 case FLASH_5761VENDOR_ATMEL_MDB021D:
10807 case FLASH_5761VENDOR_ATMEL_MDB041D:
10808 case FLASH_5761VENDOR_ATMEL_MDB081D:
10809 case FLASH_5761VENDOR_ATMEL_MDB161D:
10810 tp->nvram_jedecnum = JEDEC_ATMEL;
10811 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10812 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10813 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10814 tp->nvram_pagesize = 256;
10815 break;
10816 case FLASH_5761VENDOR_ST_A_M45PE20:
10817 case FLASH_5761VENDOR_ST_A_M45PE40:
10818 case FLASH_5761VENDOR_ST_A_M45PE80:
10819 case FLASH_5761VENDOR_ST_A_M45PE16:
10820 case FLASH_5761VENDOR_ST_M_M45PE20:
10821 case FLASH_5761VENDOR_ST_M_M45PE40:
10822 case FLASH_5761VENDOR_ST_M_M45PE80:
10823 case FLASH_5761VENDOR_ST_M_M45PE16:
10824 tp->nvram_jedecnum = JEDEC_ST;
10825 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10826 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10827 tp->nvram_pagesize = 256;
10828 break;
10829 }
10830
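	/* When TPM protection is on, the lockout address register bounds
	 * the usable region, so report it as the NVRAM size; otherwise
	 * derive the size from the flash part detected above.
	 */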
10831 if (protect) {
10832 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10833 } else {
10834 switch (nvcfg1) {
10835 case FLASH_5761VENDOR_ATMEL_ADB161D:
10836 case FLASH_5761VENDOR_ATMEL_MDB161D:
10837 case FLASH_5761VENDOR_ST_A_M45PE16:
10838 case FLASH_5761VENDOR_ST_M_M45PE16:
10839 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10840 break;
10841 case FLASH_5761VENDOR_ATMEL_ADB081D:
10842 case FLASH_5761VENDOR_ATMEL_MDB081D:
10843 case FLASH_5761VENDOR_ST_A_M45PE80:
10844 case FLASH_5761VENDOR_ST_M_M45PE80:
10845 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10846 break;
10847 case FLASH_5761VENDOR_ATMEL_ADB041D:
10848 case FLASH_5761VENDOR_ATMEL_MDB041D:
10849 case FLASH_5761VENDOR_ST_A_M45PE40:
10850 case FLASH_5761VENDOR_ST_M_M45PE40:
10851 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10852 break;
10853 case FLASH_5761VENDOR_ATMEL_ADB021D:
10854 case FLASH_5761VENDOR_ATMEL_MDB021D:
10855 case FLASH_5761VENDOR_ST_A_M45PE20:
10856 case FLASH_5761VENDOR_ST_M_M45PE20:
10857 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10858 break;
10859 }
10860 }
10861 }
10862
10863 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10864 {
10865 tp->nvram_jedecnum = JEDEC_ATMEL;
10866 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10867 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10868 }
10869
10870 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
10871 {
10872 u32 nvcfg1;
10873
10874 nvcfg1 = tr32(NVRAM_CFG1);
10875
10876 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10877 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10878 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10879 tp->nvram_jedecnum = JEDEC_ATMEL;
10880 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10881 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10882
10883 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10884 tw32(NVRAM_CFG1, nvcfg1);
10885 return;
10886 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10887 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10888 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10889 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10890 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10891 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10892 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10893 tp->nvram_jedecnum = JEDEC_ATMEL;
10894 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10895 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10896
10897 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10898 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10899 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
10900 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
10901 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10902 break;
10903 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
10904 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
10905 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10906 break;
10907 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
10908 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
10909 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10910 break;
10911 }
10912 break;
10913 case FLASH_5752VENDOR_ST_M45PE10:
10914 case FLASH_5752VENDOR_ST_M45PE20:
10915 case FLASH_5752VENDOR_ST_M45PE40:
10916 tp->nvram_jedecnum = JEDEC_ST;
10917 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10918 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10919
10920 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10921 case FLASH_5752VENDOR_ST_M45PE10:
10922 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
10923 break;
10924 case FLASH_5752VENDOR_ST_M45PE20:
10925 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10926 break;
10927 case FLASH_5752VENDOR_ST_M45PE40:
10928 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10929 break;
10930 }
10931 break;
10932 default:
10933 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
10934 return;
10935 }
10936
10937 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10938 case FLASH_5752PAGE_SIZE_256:
10939 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10940 tp->nvram_pagesize = 256;
10941 break;
10942 case FLASH_5752PAGE_SIZE_512:
10943 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10944 tp->nvram_pagesize = 512;
10945 break;
10946 case FLASH_5752PAGE_SIZE_1K:
10947 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10948 tp->nvram_pagesize = 1024;
10949 break;
10950 case FLASH_5752PAGE_SIZE_2K:
10951 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10952 tp->nvram_pagesize = 2048;
10953 break;
10954 case FLASH_5752PAGE_SIZE_4K:
10955 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10956 tp->nvram_pagesize = 4096;
10957 break;
10958 case FLASH_5752PAGE_SIZE_264:
10959 tp->nvram_pagesize = 264;
10960 break;
10961 case FLASH_5752PAGE_SIZE_528:
10962 tp->nvram_pagesize = 528;
10963 break;
10964 }
10965 }
10966
10967 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10968 static void __devinit tg3_nvram_init(struct tg3 *tp)
10969 {
10970 tw32_f(GRC_EEPROM_ADDR,
10971 (EEPROM_ADDR_FSM_RESET |
10972 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10973 EEPROM_ADDR_CLKPERD_SHIFT)));
10974
10975 msleep(1);
10976
10977 /* Enable seeprom accesses. */
10978 tw32_f(GRC_LOCAL_CTRL,
10979 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10980 udelay(100);
10981
10982 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10983 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10984 tp->tg3_flags |= TG3_FLAG_NVRAM;
10985
10986 if (tg3_nvram_lock(tp)) {
10987 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10988 "tg3_nvram_init failed.\n", tp->dev->name);
10989 return;
10990 }
10991 tg3_enable_nvram_access(tp);
10992
10993 tp->nvram_size = 0;
10994
10995 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10996 tg3_get_5752_nvram_info(tp);
10997 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10998 tg3_get_5755_nvram_info(tp);
10999 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11002 tg3_get_5787_nvram_info(tp);
11003 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11004 tg3_get_5761_nvram_info(tp);
11005 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11006 tg3_get_5906_nvram_info(tp);
11007 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11008 tg3_get_57780_nvram_info(tp);
11009 else
11010 tg3_get_nvram_info(tp);
11011
11012 if (tp->nvram_size == 0)
11013 tg3_get_nvram_size(tp);
11014
11015 tg3_disable_nvram_access(tp);
11016 tg3_nvram_unlock(tp);
11017
11018 } else {
11019 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11020
11021 tg3_get_eeprom_size(tp);
11022 }
11023 }
11024
11025 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11026 u32 offset, u32 len, u8 *buf)
11027 {
11028 int i, j, rc = 0;
11029 u32 val;
11030
11031 for (i = 0; i < len; i += 4) {
11032 u32 addr;
11033 __be32 data;
11034
11035 addr = offset + i;
11036
11037 memcpy(&data, buf + i, 4);
11038
11039 /*
11040 * The SEEPROM interface expects the data to always be opposite
11041 * the native endian format. We accomplish this by reversing
11042 * all the operations that would have been performed on the
11043 * data from a call to tg3_nvram_read_be32().
11044 */
11045 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11046
11047 val = tr32(GRC_EEPROM_ADDR);
11048 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11049
11050 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11051 EEPROM_ADDR_READ);
11052 tw32(GRC_EEPROM_ADDR, val |
11053 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11054 (addr & EEPROM_ADDR_ADDR_MASK) |
11055 EEPROM_ADDR_START |
11056 EEPROM_ADDR_WRITE);
11057
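		/* Poll (roughly one millisecond per iteration) until the
		 * controller reports the write as complete.
		 */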
11058 for (j = 0; j < 1000; j++) {
11059 val = tr32(GRC_EEPROM_ADDR);
11060
11061 if (val & EEPROM_ADDR_COMPLETE)
11062 break;
11063 msleep(1);
11064 }
11065 if (!(val & EEPROM_ADDR_COMPLETE)) {
11066 rc = -EBUSY;
11067 break;
11068 }
11069 }
11070
11071 return rc;
11072 }
11073
11074 /* offset and length are dword aligned */
11075 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11076 u8 *buf)
11077 {
11078 int ret = 0;
11079 u32 pagesize = tp->nvram_pagesize;
11080 u32 pagemask = pagesize - 1;
11081 u32 nvram_cmd;
11082 u8 *tmp;
11083
11084 tmp = kmalloc(pagesize, GFP_KERNEL);
11085 if (tmp == NULL)
11086 return -ENOMEM;
11087
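	/* Unbuffered flash can only be programmed a page at a time, so do a
	 * read-modify-write cycle: read the page into tmp, merge in the new
	 * data, erase the page, then write the whole page back.
	 */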
11088 while (len) {
11089 int j;
11090 u32 phy_addr, page_off, size;
11091
11092 phy_addr = offset & ~pagemask;
11093
11094 for (j = 0; j < pagesize; j += 4) {
11095 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11096 (__be32 *) (tmp + j));
11097 if (ret)
11098 break;
11099 }
11100 if (ret)
11101 break;
11102
11103 page_off = offset & pagemask;
11104 size = pagesize;
11105 if (len < size)
11106 size = len;
11107
11108 len -= size;
11109
11110 memcpy(tmp + page_off, buf, size);
11111
11112 offset = offset + (pagesize - page_off);
11113
11114 tg3_enable_nvram_access(tp);
11115
11116 /*
11117 * Before we can erase the flash page, we need
11118 * to issue a special "write enable" command.
11119 */
11120 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11121
11122 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11123 break;
11124
11125 /* Erase the target page */
11126 tw32(NVRAM_ADDR, phy_addr);
11127
11128 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11129 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11130
11131 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11132 break;
11133
11134 /* Issue another write enable to start the write. */
11135 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11136
11137 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11138 break;
11139
11140 for (j = 0; j < pagesize; j += 4) {
11141 __be32 data;
11142
11143 data = *((__be32 *) (tmp + j));
11144
11145 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11146
11147 tw32(NVRAM_ADDR, phy_addr + j);
11148
11149 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11150 NVRAM_CMD_WR;
11151
11152 if (j == 0)
11153 nvram_cmd |= NVRAM_CMD_FIRST;
11154 else if (j == (pagesize - 4))
11155 nvram_cmd |= NVRAM_CMD_LAST;
11156
11157 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11158 break;
11159 }
11160 if (ret)
11161 break;
11162 }
11163
11164 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11165 tg3_nvram_exec_cmd(tp, nvram_cmd);
11166
11167 kfree(tmp);
11168
11169 return ret;
11170 }
11171
11172 /* offset and length are dword aligned */
11173 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11174 u8 *buf)
11175 {
11176 int i, ret = 0;
11177
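	/* Buffered parts accept word-at-a-time writes; tag the first word
	 * of each page with NVRAM_CMD_FIRST and the last word of a page
	 * (or of the whole transfer) with NVRAM_CMD_LAST.
	 */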
11178 for (i = 0; i < len; i += 4, offset += 4) {
11179 u32 page_off, phy_addr, nvram_cmd;
11180 __be32 data;
11181
11182 memcpy(&data, buf + i, 4);
11183 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11184
11185 page_off = offset % tp->nvram_pagesize;
11186
11187 phy_addr = tg3_nvram_phys_addr(tp, offset);
11188
11189 tw32(NVRAM_ADDR, phy_addr);
11190
11191 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11192
11193 if ((page_off == 0) || (i == 0))
11194 nvram_cmd |= NVRAM_CMD_FIRST;
11195 if (page_off == (tp->nvram_pagesize - 4))
11196 nvram_cmd |= NVRAM_CMD_LAST;
11197
11198 if (i == (len - 4))
11199 nvram_cmd |= NVRAM_CMD_LAST;
11200
11201 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11202 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11203 (tp->nvram_jedecnum == JEDEC_ST) &&
11204 (nvram_cmd & NVRAM_CMD_FIRST)) {
11205
11206 if ((ret = tg3_nvram_exec_cmd(tp,
11207 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11208 NVRAM_CMD_DONE)))
11209
11210 break;
11211 }
11212 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11213 /* We always do complete word writes to eeprom. */
11214 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11215 }
11216
11217 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11218 break;
11219 }
11220 return ret;
11221 }
11222
11223 /* offset and length are dword aligned */
11224 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11225 {
11226 int ret;
11227
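	/* On boards with the EEPROM write-protect flag set, GPIO output 1
	 * is deasserted here so the part can be programmed; the original
	 * GPIO state is restored at the end of this function.
	 */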
11228 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11229 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11230 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11231 udelay(40);
11232 }
11233
11234 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11235 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11236 }
11237 else {
11238 u32 grc_mode;
11239
11240 ret = tg3_nvram_lock(tp);
11241 if (ret)
11242 return ret;
11243
11244 tg3_enable_nvram_access(tp);
11245 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11246 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11247 tw32(NVRAM_WRITE1, 0x406);
11248
11249 grc_mode = tr32(GRC_MODE);
11250 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11251
11252 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11253 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11254
11255 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11256 buf);
11257 }
11258 else {
11259 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11260 buf);
11261 }
11262
11263 grc_mode = tr32(GRC_MODE);
11264 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11265
11266 tg3_disable_nvram_access(tp);
11267 tg3_nvram_unlock(tp);
11268 }
11269
11270 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11271 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11272 udelay(40);
11273 }
11274
11275 return ret;
11276 }
11277
11278 struct subsys_tbl_ent {
11279 u16 subsys_vendor, subsys_devid;
11280 u32 phy_id;
11281 };
11282
11283 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11284 /* Broadcom boards. */
11285 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11286 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11287 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11288 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11289 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11290 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11291 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11292 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11293 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11294 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11295 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11296
11297 /* 3com boards. */
11298 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11299 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11300 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11301 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11302 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11303
11304 /* DELL boards. */
11305 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11306 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11307 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11308 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11309
11310 /* Compaq boards. */
11311 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11312 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11313 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11314 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11315 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11316
11317 /* IBM boards. */
11318 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
11319 };
11320
11321 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11322 {
11323 int i;
11324
11325 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11326 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11327 tp->pdev->subsystem_vendor) &&
11328 (subsys_id_to_phy_id[i].subsys_devid ==
11329 tp->pdev->subsystem_device))
11330 return &subsys_id_to_phy_id[i];
11331 }
11332 return NULL;
11333 }
11334
11335 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11336 {
11337 u32 val;
11338 u16 pmcsr;
11339
11340 /* On some early chips the SRAM cannot be accessed in D3hot state,
11341 * so we need to make sure we're in D0.
11342 */
11343 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11344 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11345 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11346 msleep(1);
11347
11348 /* Make sure register accesses (indirect or otherwise)
11349 * will function correctly.
11350 */
11351 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11352 tp->misc_host_ctrl);
11353
11354 /* The memory arbiter has to be enabled in order for SRAM accesses
11355 * to succeed. Normally on powerup the tg3 chip firmware will make
11356 * sure it is enabled, but other entities such as system netboot
11357 * code might disable it.
11358 */
11359 val = tr32(MEMARB_MODE);
11360 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11361
11362 tp->phy_id = PHY_ID_INVALID;
11363 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11364
11365 /* Assume an onboard device and WOL capable by default. */
11366 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11367
11368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11369 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11370 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11371 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11372 }
11373 val = tr32(VCPU_CFGSHDW);
11374 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11375 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11376 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11377 (val & VCPU_CFGSHDW_WOL_MAGPKT))
11378 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11379 goto done;
11380 }
11381
11382 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11383 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11384 u32 nic_cfg, led_cfg;
11385 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11386 int eeprom_phy_serdes = 0;
11387
11388 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11389 tp->nic_sram_data_cfg = nic_cfg;
11390
11391 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11392 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11393 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11394 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11395 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11396 (ver > 0) && (ver < 0x100))
11397 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11398
11399 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11400 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11401
11402 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11403 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11404 eeprom_phy_serdes = 1;
11405
11406 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
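			/* Reassemble the 32-bit PHY ID from the two packed
			 * fields stored in SRAM.
			 */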
11407 if (nic_phy_id != 0) {
11408 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11409 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11410
11411 eeprom_phy_id = (id1 >> 16) << 10;
11412 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11413 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11414 } else
11415 eeprom_phy_id = 0;
11416
11417 tp->phy_id = eeprom_phy_id;
11418 if (eeprom_phy_serdes) {
11419 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11420 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11421 else
11422 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11423 }
11424
11425 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11426 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11427 SHASTA_EXT_LED_MODE_MASK);
11428 else
11429 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11430
11431 switch (led_cfg) {
11432 default:
11433 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11434 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11435 break;
11436
11437 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11438 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11439 break;
11440
11441 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11442 tp->led_ctrl = LED_CTRL_MODE_MAC;
11443
11444 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11445 * read on some older 5700/5701 bootcode.
11446 */
11447 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11448 ASIC_REV_5700 ||
11449 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11450 ASIC_REV_5701)
11451 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11452
11453 break;
11454
11455 case SHASTA_EXT_LED_SHARED:
11456 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11457 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11458 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11459 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11460 LED_CTRL_MODE_PHY_2);
11461 break;
11462
11463 case SHASTA_EXT_LED_MAC:
11464 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11465 break;
11466
11467 case SHASTA_EXT_LED_COMBO:
11468 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11469 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11470 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11471 LED_CTRL_MODE_PHY_2);
11472 break;
11473
11474 }
11475
11476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11477 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11478 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11479 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11480
11481 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11482 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11483
11484 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11485 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11486 if ((tp->pdev->subsystem_vendor ==
11487 PCI_VENDOR_ID_ARIMA) &&
11488 (tp->pdev->subsystem_device == 0x205a ||
11489 tp->pdev->subsystem_device == 0x2063))
11490 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11491 } else {
11492 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11493 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11494 }
11495
11496 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11497 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11498 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11499 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11500 }
11501
11502 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11503 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11504 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11505
11506 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11507 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11508 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11509
11510 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11511 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11512 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11513
11514 if (cfg2 & (1 << 17))
11515 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11516
11517  /* SerDes signal pre-emphasis in register 0x590 is set by the
11518   * bootcode if bit 18 is set. */
11519 if (cfg2 & (1 << 18))
11520 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11521
11522 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
11523 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
11524 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
11525 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
11526
11527 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11528 u32 cfg3;
11529
11530 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11531 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11532 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11533 }
11534
11535 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11536 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11537 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11538 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11539 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11540 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11541 }
11542 done:
11543 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11544 device_set_wakeup_enable(&tp->pdev->dev,
11545 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
11546 }
11547
11548 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11549 {
11550 int i;
11551 u32 val;
11552
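 /* The start bit is pulsed: the command is written once with
  * OTP_CTRL_OTP_CMD_START set and then again with it cleared, which
  * appears to be what latches the command into the OTP controller.
  */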
11553 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11554 tw32(OTP_CTRL, cmd);
11555
11556 /* Wait for up to 1 ms for command to execute. */
11557 for (i = 0; i < 100; i++) {
11558 val = tr32(OTP_STATUS);
11559 if (val & OTP_STATUS_CMD_DONE)
11560 break;
11561 udelay(10);
11562 }
11563
11564 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11565 }
11566
11567 /* Read the gphy configuration from the OTP region of the chip. The gphy
11568 * configuration is a 32-bit value that straddles the alignment boundary.
11569 * We do two 32-bit reads and then shift and merge the results.
11570 */
11571 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11572 {
11573 u32 bhalf_otp, thalf_otp;
11574
11575 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11576
11577 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11578 return 0;
11579
11580 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11581
11582 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11583 return 0;
11584
11585 thalf_otp = tr32(OTP_READ_DATA);
11586
11587 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11588
11589 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11590 return 0;
11591
11592 bhalf_otp = tr32(OTP_READ_DATA);
11593
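 /* The low half-word of the first read supplies the upper 16 bits of
  * the result and the high half-word of the second read supplies the
  * lower 16 bits, re-joining the value that straddles the alignment
  * boundary described above.
  */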
11594 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11595 }
11596
11597 static int __devinit tg3_phy_probe(struct tg3 *tp)
11598 {
11599 u32 hw_phy_id_1, hw_phy_id_2;
11600 u32 hw_phy_id, hw_phy_id_masked;
11601 int err;
11602
11603 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11604 return tg3_phy_init(tp);
11605
11606 /* Reading the PHY ID register can conflict with ASF
11607 * firmware access to the PHY hardware.
11608 */
11609 err = 0;
11610 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11611 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11612 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11613 } else {
11614 /* Now read the physical PHY_ID from the chip and verify
11615  * that it is sane. If it doesn't look good, we fall back
11616  * to the hard-coded, table-based PHY_ID and, failing that,
11617  * to the value found in the eeprom area.
11618 */
11619 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11620 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
11621
11622 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11623 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11624 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11625
11626 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11627 }
11628
11629 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11630 tp->phy_id = hw_phy_id;
11631 if (hw_phy_id_masked == PHY_ID_BCM8002)
11632 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11633 else
11634 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11635 } else {
11636 if (tp->phy_id != PHY_ID_INVALID) {
11637 /* Do nothing, phy ID already set up in
11638 * tg3_get_eeprom_hw_cfg().
11639 */
11640 } else {
11641 struct subsys_tbl_ent *p;
11642
11643 /* No eeprom signature? Try the hardcoded
11644 * subsys device table.
11645 */
11646 p = lookup_by_subsys(tp);
11647 if (!p)
11648 return -ENODEV;
11649
11650 tp->phy_id = p->phy_id;
11651 if (!tp->phy_id ||
11652 tp->phy_id == PHY_ID_BCM8002)
11653 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11654 }
11655 }
11656
11657 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11658 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11659 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11660 u32 bmsr, adv_reg, tg3_ctrl, mask;
11661
11662 tg3_readphy(tp, MII_BMSR, &bmsr);
11663 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11664 (bmsr & BMSR_LSTATUS))
11665 goto skip_phy_reset;
11666
11667 err = tg3_phy_reset(tp);
11668 if (err)
11669 return err;
11670
11671 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11672 ADVERTISE_100HALF | ADVERTISE_100FULL |
11673 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11674 tg3_ctrl = 0;
11675 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11676 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11677 MII_TG3_CTRL_ADV_1000_FULL);
11678 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11679 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11680 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11681 MII_TG3_CTRL_ENABLE_AS_MASTER);
11682 }
11683
11684 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11685 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11686 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
11687 if (!tg3_copper_is_advertising_all(tp, mask)) {
11688 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11689
11690 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11691 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11692
11693 tg3_writephy(tp, MII_BMCR,
11694 BMCR_ANENABLE | BMCR_ANRESTART);
11695 }
11696 tg3_phy_set_wirespeed(tp);
11697
11698 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11699 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11700 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11701 }
11702
11703 skip_phy_reset:
11704 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11705 err = tg3_init_5401phy_dsp(tp);
11706 if (err)
11707 return err;
11708 }
11709
11710 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11711 err = tg3_init_5401phy_dsp(tp);
11712 }
11713
11714 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11715 tp->link_config.advertising =
11716 (ADVERTISED_1000baseT_Half |
11717 ADVERTISED_1000baseT_Full |
11718 ADVERTISED_Autoneg |
11719 ADVERTISED_FIBRE);
11720 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11721 tp->link_config.advertising &=
11722 ~(ADVERTISED_1000baseT_Half |
11723 ADVERTISED_1000baseT_Full);
11724
11725 return err;
11726 }
11727
11728 static void __devinit tg3_read_partno(struct tg3 *tp)
11729 {
11730 unsigned char vpd_data[256]; /* in little-endian format */
11731 unsigned int i;
11732 u32 magic;
11733
11734 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11735 tg3_nvram_read(tp, 0x0, &magic))
11736 goto out_not_found;
11737
11738 if (magic == TG3_EEPROM_MAGIC) {
11739 for (i = 0; i < 256; i += 4) {
11740 u32 tmp;
11741
11742 /* The data is in little-endian format in NVRAM.
11743 * Use the big-endian read routines to preserve
11744 * the byte order as it exists in NVRAM.
11745 */
11746 if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
11747 goto out_not_found;
11748
11749 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
11750 }
11751 } else {
11752 int vpd_cap;
11753
11754 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11755 for (i = 0; i < 256; i += 4) {
11756 u32 tmp, j = 0;
11757 __le32 v;
11758 u16 tmp16;
11759
11760 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11761 i);
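 /* Writing PCI_VPD_ADDR with the flag bit (bit 15) clear starts a
  * read; the device sets the flag once the 32-bit word for this
  * address is ready in PCI_VPD_DATA, so poll for it for roughly
  * 100 ms before giving up.
  */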
11762 while (j++ < 100) {
11763 pci_read_config_word(tp->pdev, vpd_cap +
11764 PCI_VPD_ADDR, &tmp16);
11765 if (tmp16 & 0x8000)
11766 break;
11767 msleep(1);
11768 }
11769 if (!(tmp16 & 0x8000))
11770 goto out_not_found;
11771
11772 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11773 &tmp);
11774 v = cpu_to_le32(tmp);
11775 memcpy(&vpd_data[i], &v, sizeof(v));
11776 }
11777 }
11778
11779 /* Now parse and find the part number. */
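 /* The buffer holds standard PCI VPD: large-resource tags 0x82
  * (identifier string) and 0x91 (VPD-W) are skipped using their
  * 16-bit little-endian length, and the 0x90 (VPD-R) area is scanned
  * for the 'P' 'N' keyword record, whose layout is a two-byte keyword,
  * a one-byte length and then the part number itself.
  */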
11780 for (i = 0; i < 254; ) {
11781 unsigned char val = vpd_data[i];
11782 unsigned int block_end;
11783
11784 if (val == 0x82 || val == 0x91) {
11785 i = (i + 3 +
11786 (vpd_data[i + 1] +
11787 (vpd_data[i + 2] << 8)));
11788 continue;
11789 }
11790
11791 if (val != 0x90)
11792 goto out_not_found;
11793
11794 block_end = (i + 3 +
11795 (vpd_data[i + 1] +
11796 (vpd_data[i + 2] << 8)));
11797 i += 3;
11798
11799 if (block_end > 256)
11800 goto out_not_found;
11801
11802 while (i < (block_end - 2)) {
11803 if (vpd_data[i + 0] == 'P' &&
11804 vpd_data[i + 1] == 'N') {
11805 int partno_len = vpd_data[i + 2];
11806
11807 i += 3;
11808 if (partno_len > 24 || (partno_len + i) > 256)
11809 goto out_not_found;
11810
11811 memcpy(tp->board_part_number,
11812 &vpd_data[i], partno_len);
11813
11814 /* Success. */
11815 return;
11816 }
11817 i += 3 + vpd_data[i + 2];
11818 }
11819
11820 /* Part number not found. */
11821 goto out_not_found;
11822 }
11823
11824 out_not_found:
11825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11826 strcpy(tp->board_part_number, "BCM95906");
11827 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11828 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
11829 strcpy(tp->board_part_number, "BCM57780");
11830 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11831 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
11832 strcpy(tp->board_part_number, "BCM57760");
11833 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11834 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
11835 strcpy(tp->board_part_number, "BCM57790");
11836 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
11837 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
11838 strcpy(tp->board_part_number, "BCM57788");
11839 else
11840 strcpy(tp->board_part_number, "none");
11841 }
11842
11843 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11844 {
11845 u32 val;
11846
11847 if (tg3_nvram_read(tp, offset, &val) ||
11848 (val & 0xfc000000) != 0x0c000000 ||
11849 tg3_nvram_read(tp, offset + 4, &val) ||
11850 val != 0)
11851 return 0;
11852
11853 return 1;
11854 }
11855
11856 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
11857 {
11858 u32 val, offset, start, ver_offset;
11859 int i;
11860 bool newver = false;
11861
11862 if (tg3_nvram_read(tp, 0xc, &offset) ||
11863 tg3_nvram_read(tp, 0x4, &start))
11864 return;
11865
11866 offset = tg3_nvram_logical_addr(tp, offset);
11867
11868 if (tg3_nvram_read(tp, offset, &val))
11869 return;
11870
11871 if ((val & 0xfc000000) == 0x0c000000) {
11872 if (tg3_nvram_read(tp, offset + 4, &val))
11873 return;
11874
11875 if (val == 0)
11876 newver = true;
11877 }
11878
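 /* Newer bootcode images are recognized by the 0x0c000000 signature
  * with a zero second word; they carry an ASCII version string whose
  * location is stored at image offset 8. Older images only record a
  * packed major/minor pair (read from TG3_NVM_PTREV_BCVER in the else
  * branch below).
  */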
11879 if (newver) {
11880 if (tg3_nvram_read(tp, offset + 8, &ver_offset))
11881 return;
11882
11883 offset = offset + ver_offset - start;
11884 for (i = 0; i < 16; i += 4) {
11885 __be32 v;
11886 if (tg3_nvram_read_be32(tp, offset + i, &v))
11887 return;
11888
11889 memcpy(tp->fw_ver + i, &v, sizeof(v));
11890 }
11891 } else {
11892 u32 major, minor;
11893
11894 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
11895 return;
11896
11897 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
11898 TG3_NVM_BCVER_MAJSFT;
11899 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
11900 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
11901 }
11902 }
11903
11904 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
11905 {
11906 u32 val, major, minor;
11907
11908 /* Use native endian representation */
11909 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
11910 return;
11911
11912 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
11913 TG3_NVM_HWSB_CFG1_MAJSFT;
11914 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
11915 TG3_NVM_HWSB_CFG1_MINSFT;
11916
11917 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
11918 }
11919
11920 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
11921 {
11922 u32 offset, major, minor, build;
11923
11924 tp->fw_ver[0] = 's';
11925 tp->fw_ver[1] = 'b';
11926 tp->fw_ver[2] = '\0';
11927
11928 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
11929 return;
11930
11931 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
11932 case TG3_EEPROM_SB_REVISION_0:
11933 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
11934 break;
11935 case TG3_EEPROM_SB_REVISION_2:
11936 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
11937 break;
11938 case TG3_EEPROM_SB_REVISION_3:
11939 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
11940 break;
11941 default:
11942 return;
11943 }
11944
11945 if (tg3_nvram_read(tp, offset, &val))
11946 return;
11947
11948 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
11949 TG3_EEPROM_SB_EDH_BLD_SHFT;
11950 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
11951 TG3_EEPROM_SB_EDH_MAJ_SHFT;
11952 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
11953
11954 if (minor > 99 || build > 26)
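 /* Cap minor at two digits and build at 26 so that, assuming a
  * single-digit major, the " vM.mm" text below stays a fixed width and
  * the build letter written at fw_ver[8] stays within 'a'..'z'.
  */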
11955 return;
11956
11957 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);
11958
11959 if (build > 0) {
11960 tp->fw_ver[8] = 'a' + build - 1;
11961 tp->fw_ver[9] = '\0';
11962 }
11963 }
11964
11965 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
11966 {
11967 u32 val, offset, start;
11968 int i, vlen;
11969
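 /* Walk the NVRAM directory for the ASF init-code entry; its image
  * header is then used to locate the management firmware version
  * string, which gets appended to the bootcode version already in
  * tp->fw_ver.
  */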
11970 for (offset = TG3_NVM_DIR_START;
11971 offset < TG3_NVM_DIR_END;
11972 offset += TG3_NVM_DIRENT_SIZE) {
11973 if (tg3_nvram_read(tp, offset, &val))
11974 return;
11975
11976 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11977 break;
11978 }
11979
11980 if (offset == TG3_NVM_DIR_END)
11981 return;
11982
11983 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11984 start = 0x08000000;
11985 else if (tg3_nvram_read(tp, offset - 4, &start))
11986 return;
11987
11988 if (tg3_nvram_read(tp, offset + 4, &offset) ||
11989 !tg3_fw_img_is_valid(tp, offset) ||
11990 tg3_nvram_read(tp, offset + 8, &val))
11991 return;
11992
11993 offset += val - start;
11994
11995 vlen = strlen(tp->fw_ver);
11996
11997 tp->fw_ver[vlen++] = ',';
11998 tp->fw_ver[vlen++] = ' ';
11999
12000 for (i = 0; i < 4; i++) {
12001 __be32 v;
12002 if (tg3_nvram_read_be32(tp, offset, &v))
12003 return;
12004
12005 offset += sizeof(v);
12006
12007 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12008 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12009 break;
12010 }
12011
12012 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12013 vlen += sizeof(v);
12014 }
12015 }
12016
12017 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12018 {
12019 int vlen;
12020 u32 apedata;
12021
12022 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12023 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12024 return;
12025
12026 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12027 if (apedata != APE_SEG_SIG_MAGIC)
12028 return;
12029
12030 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12031 if (!(apedata & APE_FW_STATUS_READY))
12032 return;
12033
12034 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12035
12036 vlen = strlen(tp->fw_ver);
12037
12038 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12039 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12040 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12041 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12042 (apedata & APE_FW_VERSION_BLDMSK));
12043 }
12044
12045 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12046 {
12047 u32 val;
12048
12049 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12050 tp->fw_ver[0] = 's';
12051 tp->fw_ver[1] = 'b';
12052 tp->fw_ver[2] = '\0';
12053
12054 return;
12055 }
12056
12057 if (tg3_nvram_read(tp, 0, &val))
12058 return;
12059
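 /* Dispatch on the NVRAM signature word: a full EEPROM image carries a
  * bootcode version, while the two self-boot formats (firmware-based
  * and hardware-based) store their versions differently and are
  * handled by the sb/hwsb helpers.
  */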
12060 if (val == TG3_EEPROM_MAGIC)
12061 tg3_read_bc_ver(tp);
12062 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12063 tg3_read_sb_ver(tp, val);
12064 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12065 tg3_read_hwsb_ver(tp);
12066 else
12067 return;
12068
12069 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12070 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
12071 return;
12072
12073 tg3_read_mgmtfw_ver(tp);
12074
12075 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12076 }
12077
12078 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12079
12080 static int __devinit tg3_get_invariants(struct tg3 *tp)
12081 {
12082 static struct pci_device_id write_reorder_chipsets[] = {
12083 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12084 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12085 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12086 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12087 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12088 PCI_DEVICE_ID_VIA_8385_0) },
12089 { },
12090 };
12091 u32 misc_ctrl_reg;
12092 u32 pci_state_reg, grc_misc_cfg;
12093 u32 val;
12094 u16 pci_cmd;
12095 int err;
12096
12097 /* Force memory write invalidate off. If we leave it on,
12098 * then on 5700_BX chips we have to enable a workaround.
12099 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12100  * to match the cacheline size. The Broadcom driver has this
12101  * workaround but turns MWI off all the time and so never uses
12102  * it. This seems to suggest that the workaround is insufficient.
12103 */
12104 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12105 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12106 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12107
12108 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12109 * has the register indirect write enable bit set before
12110 * we try to access any of the MMIO registers. It is also
12111 * critical that the PCI-X hw workaround situation is decided
12112 * before that as well.
12113 */
12114 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12115 &misc_ctrl_reg);
12116
12117 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12118 MISC_HOST_CTRL_CHIPREV_SHIFT);
12119 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12120 u32 prod_id_asic_rev;
12121
12122 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12123 &prod_id_asic_rev);
12124 tp->pci_chip_rev_id = prod_id_asic_rev;
12125 }
12126
12127 /* Wrong chip ID in 5752 A0. This code can be removed later
12128 * as A0 is not in production.
12129 */
12130 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12131 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12132
12133 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12134 * we need to disable memory and use config. cycles
12135 * only to access all registers. The 5702/03 chips
12136 * can mistakenly decode the special cycles from the
12137 * ICH chipsets as memory write cycles, causing corruption
12138 * of register and memory space. Only certain ICH bridges
12139 * will drive special cycles with non-zero data during the
12140 * address phase which can fall within the 5703's address
12141 * range. This is not an ICH bug as the PCI spec allows
12142 * non-zero address during special cycles. However, only
12143 * these ICH bridges are known to drive non-zero addresses
12144 * during special cycles.
12145 *
12146 * Since special cycles do not cross PCI bridges, we only
12147 * enable this workaround if the 5703 is on the secondary
12148 * bus of these ICH bridges.
12149 */
12150 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12151 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12152 static struct tg3_dev_id {
12153 u32 vendor;
12154 u32 device;
12155 u32 rev;
12156 } ich_chipsets[] = {
12157 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12158 PCI_ANY_ID },
12159 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12160 PCI_ANY_ID },
12161 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12162 0xa },
12163 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12164 PCI_ANY_ID },
12165 { },
12166 };
12167 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12168 struct pci_dev *bridge = NULL;
12169
12170 while (pci_id->vendor != 0) {
12171 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12172 bridge);
12173 if (!bridge) {
12174 pci_id++;
12175 continue;
12176 }
12177 if (pci_id->rev != PCI_ANY_ID) {
12178 if (bridge->revision > pci_id->rev)
12179 continue;
12180 }
12181 if (bridge->subordinate &&
12182 (bridge->subordinate->number ==
12183 tp->pdev->bus->number)) {
12184
12185 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12186 pci_dev_put(bridge);
12187 break;
12188 }
12189 }
12190 }
12191
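 /* The 5701 has a DMA erratum when it sits behind these Intel PXH
  * PCI-X bridges. Detect that topology here and set
  * TG3_FLG3_5701_DMA_BUG so the rest of the driver can apply its
  * workaround.
  */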
12192 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12193 static struct tg3_dev_id {
12194 u32 vendor;
12195 u32 device;
12196 } bridge_chipsets[] = {
12197 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12198 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12199 { },
12200 };
12201 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12202 struct pci_dev *bridge = NULL;
12203
12204 while (pci_id->vendor != 0) {
12205 bridge = pci_get_device(pci_id->vendor,
12206 pci_id->device,
12207 bridge);
12208 if (!bridge) {
12209 pci_id++;
12210 continue;
12211 }
12212 if (bridge->subordinate &&
12213 (bridge->subordinate->number <=
12214 tp->pdev->bus->number) &&
12215 (bridge->subordinate->subordinate >=
12216 tp->pdev->bus->number)) {
12217 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12218 pci_dev_put(bridge);
12219 break;
12220 }
12221 }
12222 }
12223
12224 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12225  * DMA addresses > 40-bit. This bridge may have additional
12226 * 57xx devices behind it in some 4-port NIC designs for example.
12227 * Any tg3 device found behind the bridge will also need the 40-bit
12228 * DMA workaround.
12229 */
12230 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12231 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12232 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12233 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12234 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12235 }
12236 else {
12237 struct pci_dev *bridge = NULL;
12238
12239 do {
12240 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12241 PCI_DEVICE_ID_SERVERWORKS_EPB,
12242 bridge);
12243 if (bridge && bridge->subordinate &&
12244 (bridge->subordinate->number <=
12245 tp->pdev->bus->number) &&
12246 (bridge->subordinate->subordinate >=
12247 tp->pdev->bus->number)) {
12248 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12249 pci_dev_put(bridge);
12250 break;
12251 }
12252 } while (bridge);
12253 }
12254
12255 /* Initialize misc host control in PCI block. */
12256 tp->misc_host_ctrl |= (misc_ctrl_reg &
12257 MISC_HOST_CTRL_CHIPREV);
12258 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12259 tp->misc_host_ctrl);
12260
12261 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12262 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12263 tp->pdev_peer = tg3_find_peer(tp);
12264
12265 /* Intentionally exclude ASIC_REV_5906 */
12266 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12268 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12269 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12270 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12271 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12272 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12273
12274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12275 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12276 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12277 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12278 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12279 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12280
12281 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12282 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12283 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12284
12285 /* 5700 B0 chips do not support checksumming correctly due
12286 * to hardware bugs.
12287 */
12288 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12289 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12290 else {
12291 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12292 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12293 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12294 tp->dev->features |= NETIF_F_IPV6_CSUM;
12295 }
12296
12297 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12298 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12299 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12300 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12301 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12302 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12303 tp->pdev_peer == tp->pdev))
12304 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12305
12306 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12307 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12308 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12309 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12310 } else {
12311 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12312 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12313 ASIC_REV_5750 &&
12314 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12315 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12316 }
12317 }
12318
12319 tp->irq_max = 1;
12320
12321 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12322 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12323 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
12324
12325 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12326 &pci_state_reg);
12327
12328 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12329 if (tp->pcie_cap != 0) {
12330 u16 lnkctl;
12331
12332 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12333
12334 pcie_set_readrq(tp->pdev, 4096);
12335
12336 pci_read_config_word(tp->pdev,
12337 tp->pcie_cap + PCI_EXP_LNKCTL,
12338 &lnkctl);
12339 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12340 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12341 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12343 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12344 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12345 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12346 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12347 }
12348 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12349 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12350 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12351 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12352 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12353 if (!tp->pcix_cap) {
12354 printk(KERN_ERR PFX "Cannot find PCI-X "
12355 "capability, aborting.\n");
12356 return -EIO;
12357 }
12358
12359 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12360 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12361 }
12362
12363 /* If we have an AMD 762 or VIA K8T800 chipset, write
12364 * reordering to the mailbox registers done by the host
12365  * controller can cause major trouble. We read back from
12366 * every mailbox register write to force the writes to be
12367 * posted to the chip in order.
12368 */
12369 if (pci_dev_present(write_reorder_chipsets) &&
12370 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12371 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12372
12373 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12374 &tp->pci_cacheline_sz);
12375 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12376 &tp->pci_lat_timer);
12377 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12378 tp->pci_lat_timer < 64) {
12379 tp->pci_lat_timer = 64;
12380 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12381 tp->pci_lat_timer);
12382 }
12383
12384 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12385 /* 5700 BX chips need to have their TX producer index
12386 * mailboxes written twice to workaround a bug.
12387 */
12388 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12389
12390 /* If we are in PCI-X mode, enable register write workaround.
12391 *
12392 * The workaround is to use indirect register accesses
12393 * for all chip writes not to mailbox registers.
12394 */
12395 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12396 u32 pm_reg;
12397
12398 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12399
12400  /* The chip can have its power management PCI config
12401 * space registers clobbered due to this bug.
12402 * So explicitly force the chip into D0 here.
12403 */
12404 pci_read_config_dword(tp->pdev,
12405 tp->pm_cap + PCI_PM_CTRL,
12406 &pm_reg);
12407 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12408 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12409 pci_write_config_dword(tp->pdev,
12410 tp->pm_cap + PCI_PM_CTRL,
12411 pm_reg);
12412
12413 /* Also, force SERR#/PERR# in PCI command. */
12414 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12415 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12416 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12417 }
12418 }
12419
12420 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12421 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12422 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12423 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12424
12425 /* Chip-specific fixup from Broadcom driver */
12426 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12427 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12428 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12429 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12430 }
12431
12432 /* Default fast path register access methods */
12433 tp->read32 = tg3_read32;
12434 tp->write32 = tg3_write32;
12435 tp->read32_mbox = tg3_read32;
12436 tp->write32_mbox = tg3_write32;
12437 tp->write32_tx_mbox = tg3_write32;
12438 tp->write32_rx_mbox = tg3_write32;
12439
12440 /* Various workaround register access methods */
12441 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12442 tp->write32 = tg3_write_indirect_reg32;
12443 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12444 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12445 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12446 /*
12447  * Back-to-back register writes can cause problems on these
12448  * chips; the workaround is to read back all reg writes
12449 * except those to mailbox regs.
12450 *
12451 * See tg3_write_indirect_reg32().
12452 */
12453 tp->write32 = tg3_write_flush_reg32;
12454 }
12455
12456
12457 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12458 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12459 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12460 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12461 tp->write32_rx_mbox = tg3_write_flush_reg32;
12462 }
12463
12464 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12465 tp->read32 = tg3_read_indirect_reg32;
12466 tp->write32 = tg3_write_indirect_reg32;
12467 tp->read32_mbox = tg3_read_indirect_mbox;
12468 tp->write32_mbox = tg3_write_indirect_mbox;
12469 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12470 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12471
12472 iounmap(tp->regs);
12473 tp->regs = NULL;
12474
12475 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12476 pci_cmd &= ~PCI_COMMAND_MEMORY;
12477 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12478 }
12479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12480 tp->read32_mbox = tg3_read32_mbox_5906;
12481 tp->write32_mbox = tg3_write32_mbox_5906;
12482 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12483 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12484 }
12485
12486 if (tp->write32 == tg3_write_indirect_reg32 ||
12487 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12488 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12489 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12490 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12491
12492 /* Get eeprom hw config before calling tg3_set_power_state().
12493 * In particular, the TG3_FLG2_IS_NIC flag must be
12494 * determined before calling tg3_set_power_state() so that
12495 * we know whether or not to switch out of Vaux power.
12496 * When the flag is set, it means that GPIO1 is used for eeprom
12497 * write protect and also implies that it is a LOM where GPIOs
12498 * are not used to switch power.
12499 */
12500 tg3_get_eeprom_hw_cfg(tp);
12501
12502 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12503 /* Allow reads and writes to the
12504 * APE register and memory space.
12505 */
12506 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12507 PCISTATE_ALLOW_APE_SHMEM_WR;
12508 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12509 pci_state_reg);
12510 }
12511
12512 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12513 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12514 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12515 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12516 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12517
12518 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12519 * GPIO1 driven high will bring 5700's external PHY out of reset.
12520 * It is also used as eeprom write protect on LOMs.
12521 */
12522 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12523 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12524 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12525 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12526 GRC_LCLCTRL_GPIO_OUTPUT1);
12527 /* Unused GPIO3 must be driven as output on 5752 because there
12528 * are no pull-up resistors on unused GPIO pins.
12529 */
12530 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12531 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12532
12533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12534 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12535 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12536
12537 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12538 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12539 /* Turn off the debug UART. */
12540 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12541 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12542 /* Keep VMain power. */
12543 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12544 GRC_LCLCTRL_GPIO_OUTPUT0;
12545 }
12546
12547 /* Force the chip into D0. */
12548 err = tg3_set_power_state(tp, PCI_D0);
12549 if (err) {
12550 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12551 pci_name(tp->pdev));
12552 return err;
12553 }
12554
12555 /* Derive initial jumbo mode from MTU assigned in
12556 * ether_setup() via the alloc_etherdev() call
12557 */
12558 if (tp->dev->mtu > ETH_DATA_LEN &&
12559 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12560 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12561
12562 /* Determine WakeOnLan speed to use. */
12563 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12564 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12565 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12566 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12567 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12568 } else {
12569 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12570 }
12571
12572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12573 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
12574
12575 /* A few boards don't want Ethernet@WireSpeed phy feature */
12576 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12577 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12578 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12579 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12580 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
12581 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12582 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12583
12584 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12585 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12586 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12587 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12588 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12589
12590 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12591 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
12592 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12593 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12595 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12596 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12598 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12599 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12600 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12601 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12602 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12603 } else
12604 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12605 }
12606
12607 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12608 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12609 tp->phy_otp = tg3_read_otp_phycfg(tp);
12610 if (tp->phy_otp == 0)
12611 tp->phy_otp = TG3_OTP_DEFAULT;
12612 }
12613
12614 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12615 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12616 else
12617 tp->mi_mode = MAC_MI_MODE_BASE;
12618
12619 tp->coalesce_mode = 0;
12620 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12621 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12622 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12623
12624 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12626 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12627
12628 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12629 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12630 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12631 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12632
12633 err = tg3_mdio_init(tp);
12634 if (err)
12635 return err;
12636
12637 /* Initialize data/descriptor byte/word swapping. */
12638 val = tr32(GRC_MODE);
12639 val &= GRC_MODE_HOST_STACKUP;
12640 tw32(GRC_MODE, val | tp->grc_mode);
12641
12642 tg3_switch_clocks(tp);
12643
12644 /* Clear this out for sanity. */
12645 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12646
12647 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12648 &pci_state_reg);
12649 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12650 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12651 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12652
12653 if (chiprevid == CHIPREV_ID_5701_A0 ||
12654 chiprevid == CHIPREV_ID_5701_B0 ||
12655 chiprevid == CHIPREV_ID_5701_B2 ||
12656 chiprevid == CHIPREV_ID_5701_B5) {
12657 void __iomem *sram_base;
12658
12659 /* Write some dummy words into the SRAM status block
12660  * area and see if they read back correctly. If the return
12661 * value is bad, force enable the PCIX workaround.
12662 */
12663 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12664
12665 writel(0x00000000, sram_base);
12666 writel(0x00000000, sram_base + 4);
12667 writel(0xffffffff, sram_base + 4);
12668 if (readl(sram_base) != 0x00000000)
12669 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12670 }
12671 }
12672
12673 udelay(50);
12674 tg3_nvram_init(tp);
12675
12676 grc_misc_cfg = tr32(GRC_MISC_CFG);
12677 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12678
12679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12680 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12681 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12682 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12683
12684 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12685 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12686 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12687 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12688 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12689 HOSTCC_MODE_CLRTICK_TXBD);
12690
12691 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12692 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12693 tp->misc_host_ctrl);
12694 }
12695
12696 /* Preserve the APE MAC_MODE bits */
12697 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12698 tp->mac_mode = tr32(MAC_MODE) |
12699 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12700 else
12701 tp->mac_mode = TG3_DEF_MAC_MODE;
12702
12703 /* these are limited to 10/100 only */
12704 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12705 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12706 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12707 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12708 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12709 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12710 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12711 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12712 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12713 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12714 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12715 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12716 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
12717 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12718
12719 err = tg3_phy_probe(tp);
12720 if (err) {
12721 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12722 pci_name(tp->pdev), err);
12723 /* ... but do not return immediately ... */
12724 tg3_mdio_fini(tp);
12725 }
12726
12727 tg3_read_partno(tp);
12728 tg3_read_fw_ver(tp);
12729
12730 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12731 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12732 } else {
12733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12734 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12735 else
12736 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12737 }
12738
12739 /* 5700 {AX,BX} chips have a broken status block link
12740 * change bit implementation, so we must use the
12741 * status register in those cases.
12742 */
12743 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12744 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12745 else
12746 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12747
12748  /* The led_ctrl is set during tg3_phy_probe; here we might
12749 * have to force the link status polling mechanism based
12750 * upon subsystem IDs.
12751 */
12752 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12754 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12755 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12756 TG3_FLAG_USE_LINKCHG_REG);
12757 }
12758
12759 /* For all SERDES we poll the MAC status register. */
12760 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12761 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12762 else
12763 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12764
12765 tp->rx_offset = NET_IP_ALIGN;
12766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12767 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12768 tp->rx_offset = 0;
12769
12770 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12771
12772 /* Increment the rx prod index on the rx std ring by at most
12773  * 8 for these chips to work around hw errata.
12774 */
12775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12778 tp->rx_std_max_post = 8;
12779
12780 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12781 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12782 PCIE_PWR_MGMT_L1_THRESH_MSK;
12783
12784 return err;
12785 }
12786
12787 #ifdef CONFIG_SPARC
12788 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12789 {
12790 struct net_device *dev = tp->dev;
12791 struct pci_dev *pdev = tp->pdev;
12792 struct device_node *dp = pci_device_to_OF_node(pdev);
12793 const unsigned char *addr;
12794 int len;
12795
12796 addr = of_get_property(dp, "local-mac-address", &len);
12797 if (addr && len == 6) {
12798 memcpy(dev->dev_addr, addr, 6);
12799 memcpy(dev->perm_addr, dev->dev_addr, 6);
12800 return 0;
12801 }
12802 return -ENODEV;
12803 }
12804
12805 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12806 {
12807 struct net_device *dev = tp->dev;
12808
12809 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12810 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12811 return 0;
12812 }
12813 #endif
12814
12815 static int __devinit tg3_get_device_address(struct tg3 *tp)
12816 {
12817 struct net_device *dev = tp->dev;
12818 u32 hi, lo, mac_offset;
12819 int addr_ok = 0;
12820
12821 #ifdef CONFIG_SPARC
12822 if (!tg3_get_macaddr_sparc(tp))
12823 return 0;
12824 #endif
12825
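 /* NVRAM location of the MAC address: 0x7c by default, 0xcc for what
  * appears to be the second port of dual-MAC (5704/5780-class)
  * devices, and 0x10 on the 5906.
  */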
12826 mac_offset = 0x7c;
12827 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12828 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12829 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12830 mac_offset = 0xcc;
12831 if (tg3_nvram_lock(tp))
12832 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
12833 else
12834 tg3_nvram_unlock(tp);
12835 }
12836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12837 mac_offset = 0x10;
12838
12839 /* First try to get it from MAC address mailbox. */
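 /* A magic 0x484b ("HK" in ASCII) in the upper half of the high
  * mailbox word marks an address deposited there by the bootcode.
  */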
12840 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
12841 if ((hi >> 16) == 0x484b) {
12842 dev->dev_addr[0] = (hi >> 8) & 0xff;
12843 dev->dev_addr[1] = (hi >> 0) & 0xff;
12844
12845 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12846 dev->dev_addr[2] = (lo >> 24) & 0xff;
12847 dev->dev_addr[3] = (lo >> 16) & 0xff;
12848 dev->dev_addr[4] = (lo >> 8) & 0xff;
12849 dev->dev_addr[5] = (lo >> 0) & 0xff;
12850
12851 /* Some old bootcode may report a 0 MAC address in SRAM */
12852 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12853 }
12854 if (!addr_ok) {
12855 /* Next, try NVRAM. */
12856 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
12857 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
12858 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
12859 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
12860 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
12861 }
12862 /* Finally just fetch it out of the MAC control regs. */
12863 else {
12864 hi = tr32(MAC_ADDR_0_HIGH);
12865 lo = tr32(MAC_ADDR_0_LOW);
12866
12867 dev->dev_addr[5] = lo & 0xff;
12868 dev->dev_addr[4] = (lo >> 8) & 0xff;
12869 dev->dev_addr[3] = (lo >> 16) & 0xff;
12870 dev->dev_addr[2] = (lo >> 24) & 0xff;
12871 dev->dev_addr[1] = hi & 0xff;
12872 dev->dev_addr[0] = (hi >> 8) & 0xff;
12873 }
12874 }
12875
12876 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12877 #ifdef CONFIG_SPARC
12878 if (!tg3_get_default_macaddr_sparc(tp))
12879 return 0;
12880 #endif
12881 return -EINVAL;
12882 }
12883 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12884 return 0;
12885 }
12886
12887 #define BOUNDARY_SINGLE_CACHELINE 1
12888 #define BOUNDARY_MULTI_CACHELINE 2
12889
12890 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12891 {
12892 int cacheline_size;
12893 u8 byte;
12894 int goal;
12895
12896 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12897 if (byte == 0)
12898 cacheline_size = 1024;
12899 else
12900 cacheline_size = (int) byte * 4;
12901
12902 /* On 5703 and later chips, the boundary bits have no
12903 * effect.
12904 */
12905 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12906 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12907 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12908 goto out;
12909
12910 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12911 goal = BOUNDARY_MULTI_CACHELINE;
12912 #else
12913 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12914 goal = BOUNDARY_SINGLE_CACHELINE;
12915 #else
12916 goal = 0;
12917 #endif
12918 #endif
12919
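 /* goal selects how the DMA burst boundary should relate to the host
  * cacheline size: BOUNDARY_SINGLE_CACHELINE breaks bursts at (roughly)
  * every cacheline, BOUNDARY_MULTI_CACHELINE permits longer bursts, and
  * 0 leaves the boundary bits untouched.
  */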
12920 if (!goal)
12921 goto out;
12922
12923 /* PCI controllers on most RISC systems tend to disconnect
12924 * when a device tries to burst across a cache-line boundary.
12925 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12926 *
12927 * Unfortunately, for PCI-E there are only limited
12928 * write-side controls for this, and thus for reads
12929 * we will still get the disconnects. We'll also waste
12930 * these PCI cycles for both read and write for chips
12931 * other than 5700 and 5701 which do not implement the
12932 * boundary bits.
12933 */
12934 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12935 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12936 switch (cacheline_size) {
12937 case 16:
12938 case 32:
12939 case 64:
12940 case 128:
12941 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12942 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12943 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12944 } else {
12945 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12946 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12947 }
12948 break;
12949
12950 case 256:
12951 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12952 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12953 break;
12954
12955 default:
12956 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12957 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12958 break;
12959 }
12960 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12961 switch (cacheline_size) {
12962 case 16:
12963 case 32:
12964 case 64:
12965 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12966 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12967 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12968 break;
12969 }
12970 /* fallthrough */
12971 case 128:
12972 default:
12973 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12974 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
12975 break;
12976 }
12977 } else {
12978 switch (cacheline_size) {
12979 case 16:
12980 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12981 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12982 DMA_RWCTRL_WRITE_BNDRY_16);
12983 break;
12984 }
12985 /* fallthrough */
12986 case 32:
12987 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12988 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12989 DMA_RWCTRL_WRITE_BNDRY_32);
12990 break;
12991 }
12992 /* fallthrough */
12993 case 64:
12994 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12995 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12996 DMA_RWCTRL_WRITE_BNDRY_64);
12997 break;
12998 }
12999 /* fallthrough */
13000 case 128:
13001 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13002 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13003 DMA_RWCTRL_WRITE_BNDRY_128);
13004 break;
13005 }
13006 /* fallthrough */
13007 case 256:
13008 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13009 DMA_RWCTRL_WRITE_BNDRY_256);
13010 break;
13011 case 512:
13012 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13013 DMA_RWCTRL_WRITE_BNDRY_512);
13014 break;
13015 case 1024:
13016 default:
13017 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13018 DMA_RWCTRL_WRITE_BNDRY_1024);
13019 break;
13020 }
13021 }
13022
13023 out:
13024 return val;
13025 }
13026
13027 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13028 {
13029 struct tg3_internal_buffer_desc test_desc;
13030 u32 sram_dma_descs;
13031 int i, ret;
13032
13033 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13034
13035 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13036 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13037 tw32(RDMAC_STATUS, 0);
13038 tw32(WDMAC_STATUS, 0);
13039
13040 tw32(BUFMGR_MODE, 0);
13041 tw32(FTQ_RESET, 0);
13042
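 /* Build an internal buffer descriptor by hand: it points the DMA
  * engine at the host buffer on one side and at NIC SRAM offset 0x2100
  * (the nic_mbuf value below) on the other, and is later copied into
  * the descriptor pool SRAM through the PCI memory window.
  */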
13043 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13044 test_desc.addr_lo = buf_dma & 0xffffffff;
13045 test_desc.nic_mbuf = 0x00002100;
13046 test_desc.len = size;
13047
13048 /*
13049  * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
13050 * the *second* time the tg3 driver was getting loaded after an
13051 * initial scan.
13052 *
13053 * Broadcom tells me:
13054 * ...the DMA engine is connected to the GRC block and a DMA
13055 * reset may affect the GRC block in some unpredictable way...
13056 * The behavior of resets to individual blocks has not been tested.
13057 *
13058 * Broadcom noted the GRC reset will also reset all sub-components.
13059 */
13060 if (to_device) {
13061 test_desc.cqid_sqid = (13 << 8) | 2;
13062
13063 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13064 udelay(40);
13065 } else {
13066 test_desc.cqid_sqid = (16 << 8) | 7;
13067
13068 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13069 udelay(40);
13070 }
13071 test_desc.flags = 0x00000005;
13072
13073 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13074 u32 val;
13075
13076 val = *(((u32 *)&test_desc) + i);
13077 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13078 sram_dma_descs + (i * sizeof(u32)));
13079 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13080 }
13081 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13082
13083 if (to_device) {
13084 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13085 } else {
13086 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13087 }
13088
13089 ret = -ENODEV;
13090 for (i = 0; i < 40; i++) {
13091 u32 val;
13092
13093 if (to_device)
13094 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13095 else
13096 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13097 if ((val & 0xffff) == sram_dma_descs) {
13098 ret = 0;
13099 break;
13100 }
13101
13102 udelay(100);
13103 }
13104
13105 return ret;
13106 }
13107
13108 #define TEST_BUFFER_SIZE 0x2000
13109
13110 static int __devinit tg3_test_dma(struct tg3 *tp)
13111 {
13112 dma_addr_t buf_dma;
13113 u32 *buf, saved_dma_rwctrl;
13114 int ret;
13115
13116 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13117 if (!buf) {
13118 ret = -ENOMEM;
13119 goto out_nofree;
13120 }
13121
13122 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13123 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13124
13125 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13126
13127 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13128 /* DMA read watermark not used on PCIE */
13129 tp->dma_rwctrl |= 0x00180000;
13130 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13133 tp->dma_rwctrl |= 0x003f0000;
13134 else
13135 tp->dma_rwctrl |= 0x003f000f;
13136 } else {
13137 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13139 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13140 u32 read_water = 0x7;
13141
13142 /* If the 5704 is behind the EPB bridge, we can
13143 * do the less restrictive ONE_DMA workaround for
13144 * better performance.
13145 */
13146 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13148 tp->dma_rwctrl |= 0x8000;
13149 else if (ccval == 0x6 || ccval == 0x7)
13150 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13151
13152 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13153 read_water = 4;
13154 /* Set bit 23 to enable PCIX hw bug fix */
13155 tp->dma_rwctrl |=
13156 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13157 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13158 (1 << 23);
13159 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13160 /* 5780 always in PCIX mode */
13161 tp->dma_rwctrl |= 0x00144000;
13162 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13163 /* 5714 always in PCIX mode */
13164 tp->dma_rwctrl |= 0x00148000;
13165 } else {
13166 tp->dma_rwctrl |= 0x001b000f;
13167 }
13168 }
13169
13170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13172 tp->dma_rwctrl &= 0xfffffff0;
13173
13174 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13175 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13176 /* Remove this if it causes problems for some boards. */
13177 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13178
13179 /* On 5700/5701 chips, we need to set this bit.
13180 * Otherwise the chip will issue cacheline transactions
13181 * to streamable DMA memory without all of the byte
13182 * enables turned on. This is an error on several
13183 * RISC PCI controllers, in particular sparc64.
13184 *
13185 * On 5703/5704 chips, this bit has been reassigned
13186 * a different meaning. In particular, it is used
13187 * on those chips to enable a PCI-X workaround.
13188 */
13189 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13190 }
13191
13192 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13193
13194 #if 0
13195 /* Unneeded, already done by tg3_get_invariants. */
13196 tg3_switch_clocks(tp);
13197 #endif
13198
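/* Only the 5700 and 5701 need the write-DMA loopback test below;
 * every other chip keeps the dma_rwctrl value computed above.
 */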
13199 ret = 0;
13200 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13201 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13202 goto out;
13203
13204 /* It is best to perform the DMA test with the maximum write burst
13205 * size to expose the 5700/5701 write DMA bug.
13206 */
13207 saved_dma_rwctrl = tp->dma_rwctrl;
13208 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13209 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13210
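/* Fill the buffer with a known pattern, DMA it to the chip and back,
 * then verify it.  On corruption, retry with a 16-byte write boundary
 * before giving up.
 */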
13211 while (1) {
13212 u32 *p = buf, i;
13213
13214 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13215 p[i] = i;
13216
13217 /* Send the buffer to the chip. */
13218 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13219 if (ret) {
13220 printk(KERN_ERR "tg3_test_dma(): write to device failed (%d)\n", ret);
13221 break;
13222 }
13223
13224 #if 0
13225 /* validate data reached card RAM correctly. */
13226 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13227 u32 val;
13228 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13229 if (le32_to_cpu(val) != p[i]) {
13230 printk(KERN_ERR "tg3_test_dma(): card buffer corrupted on write! (%d != %d)\n", val, i);
13231 /* ret = -ENODEV here? */
13232 }
13233 p[i] = 0;
13234 }
13235 #endif
13236 /* Now read it back. */
13237 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13238 if (ret) {
13239 printk(KERN_ERR "tg3_test_dma(): read from device failed (%d)\n", ret);
13240
13241 break;
13242 }
13243
13244 /* Verify it. */
13245 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13246 if (p[i] == i)
13247 continue;
13248
13249 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13250 DMA_RWCTRL_WRITE_BNDRY_16) {
13251 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13252 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13253 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13254 break;
13255 } else {
13256 printk(KERN_ERR "tg3_test_dma(): buffer corrupted on read back! (%d != %d)\n", p[i], i);
13257 ret = -ENODEV;
13258 goto out;
13259 }
13260 }
13261
13262 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13263 /* Success. */
13264 ret = 0;
13265 break;
13266 }
13267 }
13268 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13269 DMA_RWCTRL_WRITE_BNDRY_16) {
13270 static struct pci_device_id dma_wait_state_chipsets[] = {
13271 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13272 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13273 { },
13274 };
13275
13276 /* DMA test passed without adjusting DMA boundary,
13277 * now look for chipsets that are known to expose the
13278 * DMA bug without failing the test.
13279 */
13280 if (pci_dev_present(dma_wait_state_chipsets)) {
13281 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13282 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13283 } else {
13284 /* Safe to use the calculated DMA boundary. */
13285 tp->dma_rwctrl = saved_dma_rwctrl;
13286 }
13287
13288 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13289 }
13290
13291 out:
13292 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13293 out_nofree:
13294 return ret;
13295 }
13296
13297 static void __devinit tg3_init_link_config(struct tg3 *tp)
13298 {
13299 tp->link_config.advertising =
13300 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13301 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13302 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13303 ADVERTISED_Autoneg | ADVERTISED_MII);
13304 tp->link_config.speed = SPEED_INVALID;
13305 tp->link_config.duplex = DUPLEX_INVALID;
13306 tp->link_config.autoneg = AUTONEG_ENABLE;
13307 tp->link_config.active_speed = SPEED_INVALID;
13308 tp->link_config.active_duplex = DUPLEX_INVALID;
13309 tp->link_config.phy_is_low_power = 0;
13310 tp->link_config.orig_speed = SPEED_INVALID;
13311 tp->link_config.orig_duplex = DUPLEX_INVALID;
13312 tp->link_config.orig_autoneg = AUTONEG_INVALID;
13313 }
13314
13315 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13316 {
13317 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13318 tp->bufmgr_config.mbuf_read_dma_low_water =
13319 DEFAULT_MB_RDMA_LOW_WATER_5705;
13320 tp->bufmgr_config.mbuf_mac_rx_low_water =
13321 DEFAULT_MB_MACRX_LOW_WATER_5705;
13322 tp->bufmgr_config.mbuf_high_water =
13323 DEFAULT_MB_HIGH_WATER_5705;
13324 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13325 tp->bufmgr_config.mbuf_mac_rx_low_water =
13326 DEFAULT_MB_MACRX_LOW_WATER_5906;
13327 tp->bufmgr_config.mbuf_high_water =
13328 DEFAULT_MB_HIGH_WATER_5906;
13329 }
13330
13331 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13332 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13333 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13334 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13335 tp->bufmgr_config.mbuf_high_water_jumbo =
13336 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13337 } else {
13338 tp->bufmgr_config.mbuf_read_dma_low_water =
13339 DEFAULT_MB_RDMA_LOW_WATER;
13340 tp->bufmgr_config.mbuf_mac_rx_low_water =
13341 DEFAULT_MB_MACRX_LOW_WATER;
13342 tp->bufmgr_config.mbuf_high_water =
13343 DEFAULT_MB_HIGH_WATER;
13344
13345 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13346 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13347 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13348 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13349 tp->bufmgr_config.mbuf_high_water_jumbo =
13350 DEFAULT_MB_HIGH_WATER_JUMBO;
13351 }
13352
13353 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13354 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13355 }
13356
13357 static char * __devinit tg3_phy_string(struct tg3 *tp)
13358 {
13359 switch (tp->phy_id & PHY_ID_MASK) {
13360 case PHY_ID_BCM5400: return "5400";
13361 case PHY_ID_BCM5401: return "5401";
13362 case PHY_ID_BCM5411: return "5411";
13363 case PHY_ID_BCM5701: return "5701";
13364 case PHY_ID_BCM5703: return "5703";
13365 case PHY_ID_BCM5704: return "5704";
13366 case PHY_ID_BCM5705: return "5705";
13367 case PHY_ID_BCM5750: return "5750";
13368 case PHY_ID_BCM5752: return "5752";
13369 case PHY_ID_BCM5714: return "5714";
13370 case PHY_ID_BCM5780: return "5780";
13371 case PHY_ID_BCM5755: return "5755";
13372 case PHY_ID_BCM5787: return "5787";
13373 case PHY_ID_BCM5784: return "5784";
13374 case PHY_ID_BCM5756: return "5722/5756";
13375 case PHY_ID_BCM5906: return "5906";
13376 case PHY_ID_BCM5761: return "5761";
13377 case PHY_ID_BCM8002: return "8002/serdes";
13378 case 0: return "serdes";
13379 default: return "unknown";
13380 }
13381 }
13382
13383 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13384 {
13385 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13386 strcpy(str, "PCI Express");
13387 return str;
13388 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13389 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13390
13391 strcpy(str, "PCIX:");
13392
13393 if ((clock_ctrl == 7) ||
13394 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13395 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13396 strcat(str, "133MHz");
13397 else if (clock_ctrl == 0)
13398 strcat(str, "33MHz");
13399 else if (clock_ctrl == 2)
13400 strcat(str, "50MHz");
13401 else if (clock_ctrl == 4)
13402 strcat(str, "66MHz");
13403 else if (clock_ctrl == 6)
13404 strcat(str, "100MHz");
13405 } else {
13406 strcpy(str, "PCI:");
13407 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13408 strcat(str, "66MHz");
13409 else
13410 strcat(str, "33MHz");
13411 }
13412 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13413 strcat(str, ":32-bit");
13414 else
13415 strcat(str, ":64-bit");
13416 return str;
13417 }
13418
13419 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13420 {
13421 struct pci_dev *peer;
13422 unsigned int func, devnr = tp->pdev->devfn & ~7;
13423
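/* Walk the other PCI functions in our slot looking for the companion
 * port of a dual-port device.
 */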
13424 for (func = 0; func < 8; func++) {
13425 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13426 if (peer && peer != tp->pdev)
13427 break;
13428 pci_dev_put(peer);
13429 }
13430 /* The 5704 can be configured in single-port mode; set peer to
13431 * tp->pdev in that case.
13432 */
13433 if (!peer) {
13434 peer = tp->pdev;
13435 return peer;
13436 }
13437
13438 /*
13439 * We don't need to keep the refcount elevated; there's no way
13440 * to remove one half of this device without removing the other.
13441 */
13442 pci_dev_put(peer);
13443
13444 return peer;
13445 }
13446
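/* Seed the default interrupt coalescing parameters reported through
 * ethtool; the per-irq and statistics tick values are cleared for
 * 5705 and newer chips below.
 */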
13447 static void __devinit tg3_init_coal(struct tg3 *tp)
13448 {
13449 struct ethtool_coalesce *ec = &tp->coal;
13450
13451 memset(ec, 0, sizeof(*ec));
13452 ec->cmd = ETHTOOL_GCOALESCE;
13453 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13454 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13455 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13456 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13457 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13458 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13459 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13460 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13461 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13462
13463 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13464 HOSTCC_MODE_CLRTICK_TXBD)) {
13465 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13466 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13467 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13468 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13469 }
13470
13471 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13472 ec->rx_coalesce_usecs_irq = 0;
13473 ec->tx_coalesce_usecs_irq = 0;
13474 ec->stats_block_coalesce_usecs = 0;
13475 }
13476 }
13477
13478 static const struct net_device_ops tg3_netdev_ops = {
13479 .ndo_open = tg3_open,
13480 .ndo_stop = tg3_close,
13481 .ndo_start_xmit = tg3_start_xmit,
13482 .ndo_get_stats = tg3_get_stats,
13483 .ndo_validate_addr = eth_validate_addr,
13484 .ndo_set_multicast_list = tg3_set_rx_mode,
13485 .ndo_set_mac_address = tg3_set_mac_addr,
13486 .ndo_do_ioctl = tg3_ioctl,
13487 .ndo_tx_timeout = tg3_tx_timeout,
13488 .ndo_change_mtu = tg3_change_mtu,
13489 #if TG3_VLAN_TAG_USED
13490 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13491 #endif
13492 #ifdef CONFIG_NET_POLL_CONTROLLER
13493 .ndo_poll_controller = tg3_poll_controller,
13494 #endif
13495 };
13496
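/* Identical to tg3_netdev_ops except that ndo_start_xmit points at
 * tg3_start_xmit_dma_bug; tg3_init_one() selects between the two
 * based on chip revision.
 */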
13497 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
13498 .ndo_open = tg3_open,
13499 .ndo_stop = tg3_close,
13500 .ndo_start_xmit = tg3_start_xmit_dma_bug,
13501 .ndo_get_stats = tg3_get_stats,
13502 .ndo_validate_addr = eth_validate_addr,
13503 .ndo_set_multicast_list = tg3_set_rx_mode,
13504 .ndo_set_mac_address = tg3_set_mac_addr,
13505 .ndo_do_ioctl = tg3_ioctl,
13506 .ndo_tx_timeout = tg3_tx_timeout,
13507 .ndo_change_mtu = tg3_change_mtu,
13508 #if TG3_VLAN_TAG_USED
13509 .ndo_vlan_rx_register = tg3_vlan_rx_register,
13510 #endif
13511 #ifdef CONFIG_NET_POLL_CONTROLLER
13512 .ndo_poll_controller = tg3_poll_controller,
13513 #endif
13514 };
13515
13516 static int __devinit tg3_init_one(struct pci_dev *pdev,
13517 const struct pci_device_id *ent)
13518 {
13519 static int tg3_version_printed = 0;
13520 struct net_device *dev;
13521 struct tg3 *tp;
13522 int i, err, pm_cap;
13523 u32 sndmbx, rcvmbx, intmbx;
13524 char str[40];
13525 u64 dma_mask, persist_dma_mask;
13526
13527 if (tg3_version_printed++ == 0)
13528 printk(KERN_INFO "%s", version);
13529
13530 err = pci_enable_device(pdev);
13531 if (err) {
13532 printk(KERN_ERR PFX "Cannot enable PCI device, "
13533 "aborting.\n");
13534 return err;
13535 }
13536
13537 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13538 if (err) {
13539 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13540 "aborting.\n");
13541 goto err_out_disable_pdev;
13542 }
13543
13544 pci_set_master(pdev);
13545
13546 /* Find power-management capability. */
13547 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13548 if (pm_cap == 0) {
13549 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13550 "aborting.\n");
13551 err = -EIO;
13552 goto err_out_free_res;
13553 }
13554
13555 dev = alloc_etherdev(sizeof(*tp));
13556 if (!dev) {
13557 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13558 err = -ENOMEM;
13559 goto err_out_free_res;
13560 }
13561
13562 SET_NETDEV_DEV(dev, &pdev->dev);
13563
13564 #if TG3_VLAN_TAG_USED
13565 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13566 #endif
13567
13568 tp = netdev_priv(dev);
13569 tp->pdev = pdev;
13570 tp->dev = dev;
13571 tp->pm_cap = pm_cap;
13572 tp->rx_mode = TG3_DEF_RX_MODE;
13573 tp->tx_mode = TG3_DEF_TX_MODE;
13574
13575 if (tg3_debug > 0)
13576 tp->msg_enable = tg3_debug;
13577 else
13578 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13579
13580 /* The word/byte swap controls here control register access byte
13581 * swapping. DMA data byte swapping is controlled in the GRC_MODE
13582 * setting below.
13583 */
13584 tp->misc_host_ctrl =
13585 MISC_HOST_CTRL_MASK_PCI_INT |
13586 MISC_HOST_CTRL_WORD_SWAP |
13587 MISC_HOST_CTRL_INDIR_ACCESS |
13588 MISC_HOST_CTRL_PCISTATE_RW;
13589
13590 /* The NONFRM (non-frame) byte/word swap controls take effect
13591 * on descriptor entries, anything which isn't packet data.
13592 *
13593 * The StrongARM chips on the board (one for tx, one for rx)
13594 * are running in big-endian mode.
13595 */
13596 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13597 GRC_MODE_WSWAP_NONFRM_DATA);
13598 #ifdef __BIG_ENDIAN
13599 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13600 #endif
13601 spin_lock_init(&tp->lock);
13602 spin_lock_init(&tp->indirect_lock);
13603 INIT_WORK(&tp->reset_task, tg3_reset_task);
13604
13605 tp->regs = pci_ioremap_bar(pdev, BAR_0);
13606 if (!tp->regs) {
13607 printk(KERN_ERR PFX "Cannot map device registers, "
13608 "aborting.\n");
13609 err = -ENOMEM;
13610 goto err_out_free_dev;
13611 }
13612
13613 tg3_init_link_config(tp);
13614
13615 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13616 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13617
13618 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
13619 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
13620 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
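/* Set up one NAPI context per potential MSI-X vector and assign its
 * interrupt, receive-return consumer and send producer mailboxes.
 * The first four interrupt mailboxes are spaced 8 bytes apart, the
 * remaining ones 4 bytes apart.
 */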
13621 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
13622 struct tg3_napi *tnapi = &tp->napi[i];
13623
13624 tnapi->tp = tp;
13625 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
13626
13627 tnapi->int_mbox = intmbx;
13628 if (i < 4)
13629 intmbx += 0x8;
13630 else
13631 intmbx += 0x4;
13632
13633 tnapi->consmbox = rcvmbx;
13634 tnapi->prodmbox = sndmbx;
13635
13636 if (i)
13637 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
13638 else
13639 tnapi->coal_now = HOSTCC_MODE_NOW;
13640
13641 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
13642 break;
13643
13644 /*
13645 * If we support MSIX, we'll be using RSS. If we're using
13646 * RSS, the first vector only handles link interrupts and the
13647 * remaining vectors handle rx and tx interrupts. Reuse the
13648 * mailbox values for the next iteration. The values we set up
13649 * above are still useful for single-vector mode.
13650 */
13651 if (!i)
13652 continue;
13653
13654 rcvmbx += 0x8;
13655
13656 if (sndmbx & 0x4)
13657 sndmbx -= 0x4;
13658 else
13659 sndmbx += 0xc;
13660 }
13661
13662 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
13663 dev->ethtool_ops = &tg3_ethtool_ops;
13664 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13665 dev->irq = pdev->irq;
13666
13667 err = tg3_get_invariants(tp);
13668 if (err) {
13669 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13670 "aborting.\n");
13671 goto err_out_iounmap;
13672 }
13673
13674 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13676 dev->netdev_ops = &tg3_netdev_ops;
13677 else
13678 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
13679
13680
13681 /* The EPB bridge inside 5714, 5715, and 5780 and any
13682 * device behind the EPB cannot support DMA addresses > 40-bit.
13683 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13684 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13685 * do DMA address check in tg3_start_xmit().
13686 */
13687 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13688 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
13689 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13690 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
13691 #ifdef CONFIG_HIGHMEM
13692 dma_mask = DMA_BIT_MASK(64);
13693 #endif
13694 } else
13695 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
13696
13697 /* Configure DMA attributes. */
13698 if (dma_mask > DMA_BIT_MASK(32)) {
13699 err = pci_set_dma_mask(pdev, dma_mask);
13700 if (!err) {
13701 dev->features |= NETIF_F_HIGHDMA;
13702 err = pci_set_consistent_dma_mask(pdev,
13703 persist_dma_mask);
13704 if (err < 0) {
13705 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13706 "DMA for consistent allocations\n");
13707 goto err_out_iounmap;
13708 }
13709 }
13710 }
13711 if (err || dma_mask == DMA_BIT_MASK(32)) {
13712 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
13713 if (err) {
13714 printk(KERN_ERR PFX "No usable DMA configuration, "
13715 "aborting.\n");
13716 goto err_out_iounmap;
13717 }
13718 }
13719
13720 tg3_init_bufmgr_config(tp);
13721
13722 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13723 tp->fw_needed = FIRMWARE_TG3;
13724
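/* Chips with hardware TSO need no TSO firmware.  5700, 5701, 5705 A0,
 * 5906 and ASF-enabled parts get TSO capability cleared; everything
 * else uses firmware TSO and loads the 5705-specific or generic image.
 */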
13725 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13726 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13727 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13729 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13730 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13731 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13732 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13733 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13734 } else {
13735 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13736 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13737 tp->fw_needed = FIRMWARE_TG3TSO5;
13738 else
13739 tp->fw_needed = FIRMWARE_TG3TSO;
13740 }
13741
13742 /* TSO is on by default on chips that support hardware TSO.
13743 * Firmware TSO on older chips gives lower performance, so it
13744 * is off by default, but can be enabled using ethtool.
13745 */
13746 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13747 if (dev->features & NETIF_F_IP_CSUM)
13748 dev->features |= NETIF_F_TSO;
13749 if ((dev->features & NETIF_F_IPV6_CSUM) &&
13750 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
13751 dev->features |= NETIF_F_TSO6;
13752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13753 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13754 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13755 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13756 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13757 dev->features |= NETIF_F_TSO_ECN;
13758 }
13759
13760
13761 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13762 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13763 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13764 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13765 tp->rx_pending = 63;
13766 }
13767
13768 err = tg3_get_device_address(tp);
13769 if (err) {
13770 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13771 "aborting.\n");
13772 goto err_out_fw;
13773 }
13774
13775 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13776 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13777 if (!tp->aperegs) {
13778 printk(KERN_ERR PFX "Cannot map APE registers, "
13779 "aborting.\n");
13780 err = -ENOMEM;
13781 goto err_out_fw;
13782 }
13783
13784 tg3_ape_lock_init(tp);
13785
13786 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
13787 tg3_read_dash_ver(tp);
13788 }
13789
13790 /*
13791 * Reset the chip in case a UNDI or EFI driver did not shut it
13792 * down; otherwise the DMA self test will enable WDMAC and we'll
13793 * see (spurious) pending DMA on the PCI bus at that point.
13794 */
13795 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13796 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13797 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13798 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13799 }
13800
13801 err = tg3_test_dma(tp);
13802 if (err) {
13803 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13804 goto err_out_apeunmap;
13805 }
13806
13807 /* Flow control autonegotiation is the default behavior. */
13808 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13809 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13810
13811 tg3_init_coal(tp);
13812
13813 pci_set_drvdata(pdev, dev);
13814
13815 err = register_netdev(dev);
13816 if (err) {
13817 printk(KERN_ERR PFX "Cannot register net device, "
13818 "aborting.\n");
13819 goto err_out_apeunmap;
13820 }
13821
13822 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13823 dev->name,
13824 tp->board_part_number,
13825 tp->pci_chip_rev_id,
13826 tg3_bus_string(tp, str),
13827 dev->dev_addr);
13828
13829 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13830 printk(KERN_INFO
13831 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13832 tp->dev->name,
13833 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13834 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
13835 else
13836 printk(KERN_INFO
13837 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13838 tp->dev->name, tg3_phy_string(tp),
13839 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13840 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13841 "10/100/1000Base-T")),
13842 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13843
13844 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13845 dev->name,
13846 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13847 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13848 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13849 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13850 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13851 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13852 dev->name, tp->dma_rwctrl,
13853 (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
13854 (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
13855
13856 return 0;
13857
13858 err_out_apeunmap:
13859 if (tp->aperegs) {
13860 iounmap(tp->aperegs);
13861 tp->aperegs = NULL;
13862 }
13863
13864 err_out_fw:
13865 if (tp->fw)
13866 release_firmware(tp->fw);
13867
13868 err_out_iounmap:
13869 if (tp->regs) {
13870 iounmap(tp->regs);
13871 tp->regs = NULL;
13872 }
13873
13874 err_out_free_dev:
13875 free_netdev(dev);
13876
13877 err_out_free_res:
13878 pci_release_regions(pdev);
13879
13880 err_out_disable_pdev:
13881 pci_disable_device(pdev);
13882 pci_set_drvdata(pdev, NULL);
13883 return err;
13884 }
13885
13886 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13887 {
13888 struct net_device *dev = pci_get_drvdata(pdev);
13889
13890 if (dev) {
13891 struct tg3 *tp = netdev_priv(dev);
13892
13893 if (tp->fw)
13894 release_firmware(tp->fw);
13895
13896 flush_scheduled_work();
13897
13898 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13899 tg3_phy_fini(tp);
13900 tg3_mdio_fini(tp);
13901 }
13902
13903 unregister_netdev(dev);
13904 if (tp->aperegs) {
13905 iounmap(tp->aperegs);
13906 tp->aperegs = NULL;
13907 }
13908 if (tp->regs) {
13909 iounmap(tp->regs);
13910 tp->regs = NULL;
13911 }
13912 free_netdev(dev);
13913 pci_release_regions(pdev);
13914 pci_disable_device(pdev);
13915 pci_set_drvdata(pdev, NULL);
13916 }
13917 }
13918
13919 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13920 {
13921 struct net_device *dev = pci_get_drvdata(pdev);
13922 struct tg3 *tp = netdev_priv(dev);
13923 pci_power_t target_state;
13924 int err;
13925
13926 /* PCI register 4 needs to be saved whether netif_running() or not.
13927 * MSI address and data need to be saved if using MSI and
13928 * netif_running().
13929 */
13930 pci_save_state(pdev);
13931
13932 if (!netif_running(dev))
13933 return 0;
13934
13935 flush_scheduled_work();
13936 tg3_phy_stop(tp);
13937 tg3_netif_stop(tp);
13938
13939 del_timer_sync(&tp->timer);
13940
13941 tg3_full_lock(tp, 1);
13942 tg3_disable_ints(tp);
13943 tg3_full_unlock(tp);
13944
13945 netif_device_detach(dev);
13946
13947 tg3_full_lock(tp, 0);
13948 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13949 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13950 tg3_full_unlock(tp);
13951
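/* Choose the low-power state to enter: ask the PCI core for the
 * preferred target state when a PM capability exists, otherwise fall
 * back to D3hot.
 */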
13952 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13953
13954 err = tg3_set_power_state(tp, target_state);
13955 if (err) {
13956 int err2;
13957
13958 tg3_full_lock(tp, 0);
13959
13960 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13961 err2 = tg3_restart_hw(tp, 1);
13962 if (err2)
13963 goto out;
13964
13965 tp->timer.expires = jiffies + tp->timer_offset;
13966 add_timer(&tp->timer);
13967
13968 netif_device_attach(dev);
13969 tg3_netif_start(tp);
13970
13971 out:
13972 tg3_full_unlock(tp);
13973
13974 if (!err2)
13975 tg3_phy_start(tp);
13976 }
13977
13978 return err;
13979 }
13980
13981 static int tg3_resume(struct pci_dev *pdev)
13982 {
13983 struct net_device *dev = pci_get_drvdata(pdev);
13984 struct tg3 *tp = netdev_priv(dev);
13985 int err;
13986
13987 pci_restore_state(tp->pdev);
13988
13989 if (!netif_running(dev))
13990 return 0;
13991
13992 err = tg3_set_power_state(tp, PCI_D0);
13993 if (err)
13994 return err;
13995
13996 netif_device_attach(dev);
13997
13998 tg3_full_lock(tp, 0);
13999
14000 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14001 err = tg3_restart_hw(tp, 1);
14002 if (err)
14003 goto out;
14004
14005 tp->timer.expires = jiffies + tp->timer_offset;
14006 add_timer(&tp->timer);
14007
14008 tg3_netif_start(tp);
14009
14010 out:
14011 tg3_full_unlock(tp);
14012
14013 if (!err)
14014 tg3_phy_start(tp);
14015
14016 return err;
14017 }
14018
14019 static struct pci_driver tg3_driver = {
14020 .name = DRV_MODULE_NAME,
14021 .id_table = tg3_pci_tbl,
14022 .probe = tg3_init_one,
14023 .remove = __devexit_p(tg3_remove_one),
14024 .suspend = tg3_suspend,
14025 .resume = tg3_resume
14026 };
14027
14028 static int __init tg3_init(void)
14029 {
14030 return pci_register_driver(&tg3_driver);
14031 }
14032
14033 static void __exit tg3_cleanup(void)
14034 {
14035 pci_unregister_driver(&tg3_driver);
14036 }
14037
14038 module_init(tg3_init);
14039 module_exit(tg3_cleanup);