/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
61 #define TG3_TSO_SUPPORT 1
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.92"
#define DRV_MODULE_RELDATE	"May 2, 2008"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* Default netif_msg bitmap when the tg3_debug module param is -1.
 * NOTE(review): the NETIF_MSG_* continuation lines were dropped by the
 * extraction; restored to the conventional driver default set.
 */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6
135 static char version
[] __devinitdata
=
136 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
138 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140 MODULE_LICENSE("GPL");
141 MODULE_VERSION(DRV_MODULE_VERSION
);
143 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
144 module_param(tg3_debug
, int, 0);
145 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
147 static struct pci_device_id tg3_pci_tbl
[] = {
148 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
149 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
150 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
151 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5720
)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750
)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750M
)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787F
)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5784
)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5764
)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5723
)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761
)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761E
)},
206 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
207 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
210 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
211 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
212 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
216 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
218 static const struct {
219 const char string
[ETH_GSTRING_LEN
];
220 } ethtool_stats_keys
[TG3_NUM_STATS
] = {
223 { "rx_ucast_packets" },
224 { "rx_mcast_packets" },
225 { "rx_bcast_packets" },
227 { "rx_align_errors" },
228 { "rx_xon_pause_rcvd" },
229 { "rx_xoff_pause_rcvd" },
230 { "rx_mac_ctrl_rcvd" },
231 { "rx_xoff_entered" },
232 { "rx_frame_too_long_errors" },
234 { "rx_undersize_packets" },
235 { "rx_in_length_errors" },
236 { "rx_out_length_errors" },
237 { "rx_64_or_less_octet_packets" },
238 { "rx_65_to_127_octet_packets" },
239 { "rx_128_to_255_octet_packets" },
240 { "rx_256_to_511_octet_packets" },
241 { "rx_512_to_1023_octet_packets" },
242 { "rx_1024_to_1522_octet_packets" },
243 { "rx_1523_to_2047_octet_packets" },
244 { "rx_2048_to_4095_octet_packets" },
245 { "rx_4096_to_8191_octet_packets" },
246 { "rx_8192_to_9022_octet_packets" },
253 { "tx_flow_control" },
255 { "tx_single_collisions" },
256 { "tx_mult_collisions" },
258 { "tx_excessive_collisions" },
259 { "tx_late_collisions" },
260 { "tx_collide_2times" },
261 { "tx_collide_3times" },
262 { "tx_collide_4times" },
263 { "tx_collide_5times" },
264 { "tx_collide_6times" },
265 { "tx_collide_7times" },
266 { "tx_collide_8times" },
267 { "tx_collide_9times" },
268 { "tx_collide_10times" },
269 { "tx_collide_11times" },
270 { "tx_collide_12times" },
271 { "tx_collide_13times" },
272 { "tx_collide_14times" },
273 { "tx_collide_15times" },
274 { "tx_ucast_packets" },
275 { "tx_mcast_packets" },
276 { "tx_bcast_packets" },
277 { "tx_carrier_sense_errors" },
281 { "dma_writeq_full" },
282 { "dma_write_prioq_full" },
286 { "rx_threshold_hit" },
288 { "dma_readq_full" },
289 { "dma_read_prioq_full" },
290 { "tx_comp_queue_full" },
292 { "ring_set_send_prod_index" },
293 { "ring_status_update" },
295 { "nic_avoided_irqs" },
296 { "nic_tx_threshold_hit" }
299 static const struct {
300 const char string
[ETH_GSTRING_LEN
];
301 } ethtool_test_keys
[TG3_NUM_TEST
] = {
302 { "nvram test (online) " },
303 { "link test (online) " },
304 { "register test (offline)" },
305 { "memory test (offline)" },
306 { "loopback test (offline)" },
307 { "interrupt test (offline)" },
310 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
312 writel(val
, tp
->regs
+ off
);
315 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
317 return (readl(tp
->regs
+ off
));
320 static void tg3_ape_write32(struct tg3
*tp
, u32 off
, u32 val
)
322 writel(val
, tp
->aperegs
+ off
);
325 static u32
tg3_ape_read32(struct tg3
*tp
, u32 off
)
327 return (readl(tp
->aperegs
+ off
));
330 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
334 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
335 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
336 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
337 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
340 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
342 writel(val
, tp
->regs
+ off
);
343 readl(tp
->regs
+ off
);
346 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
351 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
352 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
353 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
354 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
358 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
362 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
363 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
364 TG3_64BIT_REG_LOW
, val
);
367 if (off
== (MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
)) {
368 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
369 TG3_64BIT_REG_LOW
, val
);
373 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
374 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
375 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
376 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
378 /* In indirect mode when disabling interrupts, we also need
379 * to clear the interrupt bit in the GRC local ctrl register.
381 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
383 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
384 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
388 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
393 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
394 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
395 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
396 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
400 /* usec_wait specifies the wait time in usec when writing to certain registers
401 * where it is unsafe to read back the register without some delay.
402 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
403 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
405 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
407 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) ||
408 (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
409 /* Non-posted methods */
410 tp
->write32(tp
, off
, val
);
413 tg3_write32(tp
, off
, val
);
418 /* Wait again after the read for the posted method to guarantee that
419 * the wait time is met.
425 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
427 tp
->write32_mbox(tp
, off
, val
);
428 if (!(tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) &&
429 !(tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
430 tp
->read32_mbox(tp
, off
);
433 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
435 void __iomem
*mbox
= tp
->regs
+ off
;
437 if (tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
)
439 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
443 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
445 return (readl(tp
->regs
+ off
+ GRCMBOX_BASE
));
448 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
450 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
/* Convenience wrappers around the per-chip register access method
 * pointers; all expect a local 'tp' in scope.
 */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
464 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
468 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) &&
469 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
472 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
473 if (tp
->tg3_flags
& TG3_FLAG_SRAM_USE_CONFIG
) {
474 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
475 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
477 /* Always leave this as zero. */
478 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
480 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
481 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
483 /* Always leave this as zero. */
484 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
486 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
489 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
493 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) &&
494 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
499 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
500 if (tp
->tg3_flags
& TG3_FLAG_SRAM_USE_CONFIG
) {
501 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
502 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
504 /* Always leave this as zero. */
505 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
507 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
508 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
510 /* Always leave this as zero. */
511 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
513 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
516 static void tg3_ape_lock_init(struct tg3
*tp
)
520 /* Make sure the driver hasn't any stale locks. */
521 for (i
= 0; i
< 8; i
++)
522 tg3_ape_write32(tp
, TG3_APE_LOCK_GRANT
+ 4 * i
,
523 APE_LOCK_GRANT_DRIVER
);
526 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
532 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
))
536 case TG3_APE_LOCK_MEM
:
544 tg3_ape_write32(tp
, TG3_APE_LOCK_REQ
+ off
, APE_LOCK_REQ_DRIVER
);
546 /* Wait for up to 1 millisecond to acquire lock. */
547 for (i
= 0; i
< 100; i
++) {
548 status
= tg3_ape_read32(tp
, TG3_APE_LOCK_GRANT
+ off
);
549 if (status
== APE_LOCK_GRANT_DRIVER
)
554 if (status
!= APE_LOCK_GRANT_DRIVER
) {
555 /* Revoke the lock request. */
556 tg3_ape_write32(tp
, TG3_APE_LOCK_GRANT
+ off
,
557 APE_LOCK_GRANT_DRIVER
);
565 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
569 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
))
573 case TG3_APE_LOCK_MEM
:
580 tg3_ape_write32(tp
, TG3_APE_LOCK_GRANT
+ off
, APE_LOCK_GRANT_DRIVER
);
583 static void tg3_disable_ints(struct tg3
*tp
)
585 tw32(TG3PCI_MISC_HOST_CTRL
,
586 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
587 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
590 static inline void tg3_cond_int(struct tg3
*tp
)
592 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
593 (tp
->hw_status
->status
& SD_STATUS_UPDATED
))
594 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
596 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
597 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
600 static void tg3_enable_ints(struct tg3
*tp
)
605 tw32(TG3PCI_MISC_HOST_CTRL
,
606 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
607 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
608 (tp
->last_tag
<< 24));
609 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
)
610 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
611 (tp
->last_tag
<< 24));
615 static inline unsigned int tg3_has_work(struct tg3
*tp
)
617 struct tg3_hw_status
*sblk
= tp
->hw_status
;
618 unsigned int work_exists
= 0;
620 /* check for phy events */
621 if (!(tp
->tg3_flags
&
622 (TG3_FLAG_USE_LINKCHG_REG
|
623 TG3_FLAG_POLL_SERDES
))) {
624 if (sblk
->status
& SD_STATUS_LINK_CHG
)
627 /* check for RX/TX work to do */
628 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
||
629 sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
)
636 * similar to tg3_enable_ints, but it accurately determines whether there
637 * is new work pending and can return without flushing the PIO write
638 * which reenables interrupts
640 static void tg3_restart_ints(struct tg3
*tp
)
642 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
646 /* When doing tagged status, this work check is unnecessary.
647 * The last_tag we write above tells the chip which piece of
648 * work we've completed.
650 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
652 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
653 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
656 static inline void tg3_netif_stop(struct tg3
*tp
)
658 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
659 napi_disable(&tp
->napi
);
660 netif_tx_disable(tp
->dev
);
663 static inline void tg3_netif_start(struct tg3
*tp
)
665 netif_wake_queue(tp
->dev
);
666 /* NOTE: unconditional netif_wake_queue is only appropriate
667 * so long as all callers are assured to have free tx slots
668 * (such as after tg3_init_hw)
670 napi_enable(&tp
->napi
);
671 tp
->hw_status
->status
|= SD_STATUS_UPDATED
;
675 static void tg3_switch_clocks(struct tg3
*tp
)
677 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
680 if ((tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
) ||
681 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
684 orig_clock_ctrl
= clock_ctrl
;
685 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
686 CLOCK_CTRL_CLKRUN_OENABLE
|
688 tp
->pci_clock_ctrl
= clock_ctrl
;
690 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
691 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
692 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
693 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
695 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
696 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
698 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
700 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
701 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
704 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
/* Max polls of MI_COM_BUSY before a PHY access is declared failed. */
#define PHY_BUSY_LOOPS	5000
709 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
715 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
717 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
723 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
724 MI_COM_PHY_ADDR_MASK
);
725 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
726 MI_COM_REG_ADDR_MASK
);
727 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
729 tw32_f(MAC_MI_COM
, frame_val
);
731 loops
= PHY_BUSY_LOOPS
;
734 frame_val
= tr32(MAC_MI_COM
);
736 if ((frame_val
& MI_COM_BUSY
) == 0) {
738 frame_val
= tr32(MAC_MI_COM
);
746 *val
= frame_val
& MI_COM_DATA_MASK
;
750 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
751 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
758 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
764 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
765 (reg
== MII_TG3_CTRL
|| reg
== MII_TG3_AUX_CTRL
))
768 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
770 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
774 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
775 MI_COM_PHY_ADDR_MASK
);
776 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
777 MI_COM_REG_ADDR_MASK
);
778 frame_val
|= (val
& MI_COM_DATA_MASK
);
779 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
781 tw32_f(MAC_MI_COM
, frame_val
);
783 loops
= PHY_BUSY_LOOPS
;
786 frame_val
= tr32(MAC_MI_COM
);
787 if ((frame_val
& MI_COM_BUSY
) == 0) {
789 frame_val
= tr32(MAC_MI_COM
);
799 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
800 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
807 static int tg3_bmcr_reset(struct tg3
*tp
)
812 /* OK, reset it, and poll the BMCR_RESET bit until it
813 * clears or we time out.
815 phy_control
= BMCR_RESET
;
816 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
822 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
826 if ((phy_control
& BMCR_RESET
) == 0) {
838 /* tp->lock is held. */
839 static void tg3_wait_for_event_ack(struct tg3
*tp
)
843 /* Wait for up to 2.5 milliseconds */
844 for (i
= 0; i
< 250000; i
++) {
845 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
851 /* tp->lock is held. */
852 static void tg3_ump_link_report(struct tg3
*tp
)
857 if (!(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) ||
858 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
861 tg3_wait_for_event_ack(tp
);
863 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
865 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
868 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
870 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
871 val
|= (reg
& 0xffff);
872 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, val
);
875 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
877 if (!tg3_readphy(tp
, MII_LPA
, ®
))
878 val
|= (reg
& 0xffff);
879 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 4, val
);
882 if (!(tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)) {
883 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
885 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
886 val
|= (reg
& 0xffff);
888 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 8, val
);
890 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
894 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 12, val
);
896 val
= tr32(GRC_RX_CPU_EVENT
);
897 val
|= GRC_RX_CPU_DRIVER_EVENT
;
898 tw32_f(GRC_RX_CPU_EVENT
, val
);
901 static void tg3_link_report(struct tg3
*tp
)
903 if (!netif_carrier_ok(tp
->dev
)) {
904 if (netif_msg_link(tp
))
905 printk(KERN_INFO PFX
"%s: Link is down.\n",
907 tg3_ump_link_report(tp
);
908 } else if (netif_msg_link(tp
)) {
909 printk(KERN_INFO PFX
"%s: Link is up at %d Mbps, %s duplex.\n",
911 (tp
->link_config
.active_speed
== SPEED_1000
?
913 (tp
->link_config
.active_speed
== SPEED_100
?
915 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
919 "%s: Flow control is %s for TX and %s for RX.\n",
921 (tp
->link_config
.active_flowctrl
& TG3_FLOW_CTRL_TX
) ?
923 (tp
->link_config
.active_flowctrl
& TG3_FLOW_CTRL_RX
) ?
925 tg3_ump_link_report(tp
);
929 static u16
tg3_advert_flowctrl_1000T(u8 flow_ctrl
)
933 if ((flow_ctrl
& TG3_FLOW_CTRL_TX
) && (flow_ctrl
& TG3_FLOW_CTRL_RX
))
934 miireg
= ADVERTISE_PAUSE_CAP
;
935 else if (flow_ctrl
& TG3_FLOW_CTRL_TX
)
936 miireg
= ADVERTISE_PAUSE_ASYM
;
937 else if (flow_ctrl
& TG3_FLOW_CTRL_RX
)
938 miireg
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
945 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
949 if ((flow_ctrl
& TG3_FLOW_CTRL_TX
) && (flow_ctrl
& TG3_FLOW_CTRL_RX
))
950 miireg
= ADVERTISE_1000XPAUSE
;
951 else if (flow_ctrl
& TG3_FLOW_CTRL_TX
)
952 miireg
= ADVERTISE_1000XPSE_ASYM
;
953 else if (flow_ctrl
& TG3_FLOW_CTRL_RX
)
954 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
961 static u8
tg3_resolve_flowctrl_1000T(u16 lcladv
, u16 rmtadv
)
965 if (lcladv
& ADVERTISE_PAUSE_CAP
) {
966 if (lcladv
& ADVERTISE_PAUSE_ASYM
) {
967 if (rmtadv
& LPA_PAUSE_CAP
)
968 cap
= TG3_FLOW_CTRL_TX
| TG3_FLOW_CTRL_RX
;
969 else if (rmtadv
& LPA_PAUSE_ASYM
)
970 cap
= TG3_FLOW_CTRL_RX
;
972 if (rmtadv
& LPA_PAUSE_CAP
)
973 cap
= TG3_FLOW_CTRL_TX
| TG3_FLOW_CTRL_RX
;
975 } else if (lcladv
& ADVERTISE_PAUSE_ASYM
) {
976 if ((rmtadv
& LPA_PAUSE_CAP
) && (rmtadv
& LPA_PAUSE_ASYM
))
977 cap
= TG3_FLOW_CTRL_TX
;
983 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
987 if (lcladv
& ADVERTISE_1000XPAUSE
) {
988 if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
989 if (rmtadv
& LPA_1000XPAUSE
)
990 cap
= TG3_FLOW_CTRL_TX
| TG3_FLOW_CTRL_RX
;
991 else if (rmtadv
& LPA_1000XPAUSE_ASYM
)
992 cap
= TG3_FLOW_CTRL_RX
;
994 if (rmtadv
& LPA_1000XPAUSE
)
995 cap
= TG3_FLOW_CTRL_TX
| TG3_FLOW_CTRL_RX
;
997 } else if (lcladv
& ADVERTISE_1000XPSE_ASYM
) {
998 if ((rmtadv
& LPA_1000XPAUSE
) && (rmtadv
& LPA_1000XPAUSE_ASYM
))
999 cap
= TG3_FLOW_CTRL_TX
;
1005 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1008 u32 old_rx_mode
= tp
->rx_mode
;
1009 u32 old_tx_mode
= tp
->tx_mode
;
1011 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
1012 (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
)) {
1013 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
1014 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1016 flowctrl
= tg3_resolve_flowctrl_1000T(lcladv
, rmtadv
);
1018 flowctrl
= tp
->link_config
.flowctrl
;
1020 tp
->link_config
.active_flowctrl
= flowctrl
;
1022 if (flowctrl
& TG3_FLOW_CTRL_RX
)
1023 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1025 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1027 if (old_rx_mode
!= tp
->rx_mode
)
1028 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1030 if (flowctrl
& TG3_FLOW_CTRL_TX
)
1031 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1033 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1035 if (old_tx_mode
!= tp
->tx_mode
)
1036 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1039 static void tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1041 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1042 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1045 static void tg3_phy_toggle_automdix(struct tg3
*tp
, int enable
)
1049 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) ||
1050 (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
1053 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1056 if (!tg3_readphy(tp
, MII_TG3_EPHY_TEST
, &ephy
)) {
1057 tg3_writephy(tp
, MII_TG3_EPHY_TEST
,
1058 ephy
| MII_TG3_EPHY_SHADOW_EN
);
1059 if (!tg3_readphy(tp
, MII_TG3_EPHYTST_MISCCTRL
, &phy
)) {
1061 phy
|= MII_TG3_EPHYTST_MISCCTRL_MDIX
;
1063 phy
&= ~MII_TG3_EPHYTST_MISCCTRL_MDIX
;
1064 tg3_writephy(tp
, MII_TG3_EPHYTST_MISCCTRL
, phy
);
1066 tg3_writephy(tp
, MII_TG3_EPHY_TEST
, ephy
);
1069 phy
= MII_TG3_AUXCTL_MISC_RDSEL_MISC
|
1070 MII_TG3_AUXCTL_SHDWSEL_MISC
;
1071 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
) &&
1072 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy
)) {
1074 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1076 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
1077 phy
|= MII_TG3_AUXCTL_MISC_WREN
;
1078 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
);
1083 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
1087 if (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
)
1090 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x7007) &&
1091 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
1092 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1093 (val
| (1 << 15) | (1 << 4)));
1096 static void tg3_phy_apply_otp(struct tg3
*tp
)
1105 /* Enable SM_DSP clock and tx 6dB coding. */
1106 phy
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
1107 MII_TG3_AUXCTL_ACTL_SMDSP_ENA
|
1108 MII_TG3_AUXCTL_ACTL_TX_6DB
;
1109 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
);
1111 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
1112 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
1113 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
1115 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
1116 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
1117 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
1119 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
1120 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
1121 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
1123 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
1124 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
1126 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
1127 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
1129 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
1130 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
1131 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
1133 /* Turn off SM_DSP clock. */
1134 phy
= MII_TG3_AUXCTL_SHDWSEL_AUXCTL
|
1135 MII_TG3_AUXCTL_ACTL_TX_6DB
;
1136 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy
);
1139 static int tg3_wait_macro_done(struct tg3
*tp
)
1146 if (!tg3_readphy(tp
, 0x16, &tmp32
)) {
1147 if ((tmp32
& 0x1000) == 0)
1157 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
1159 static const u32 test_pat
[4][6] = {
1160 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1161 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1162 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1163 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1167 for (chan
= 0; chan
< 4; chan
++) {
1170 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1171 (chan
* 0x2000) | 0x0200);
1172 tg3_writephy(tp
, 0x16, 0x0002);
1174 for (i
= 0; i
< 6; i
++)
1175 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
1178 tg3_writephy(tp
, 0x16, 0x0202);
1179 if (tg3_wait_macro_done(tp
)) {
1184 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1185 (chan
* 0x2000) | 0x0200);
1186 tg3_writephy(tp
, 0x16, 0x0082);
1187 if (tg3_wait_macro_done(tp
)) {
1192 tg3_writephy(tp
, 0x16, 0x0802);
1193 if (tg3_wait_macro_done(tp
)) {
1198 for (i
= 0; i
< 6; i
+= 2) {
1201 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
1202 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
1203 tg3_wait_macro_done(tp
)) {
1209 if (low
!= test_pat
[chan
][i
] ||
1210 high
!= test_pat
[chan
][i
+1]) {
1211 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
1212 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
1213 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
1223 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
1227 for (chan
= 0; chan
< 4; chan
++) {
1230 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
1231 (chan
* 0x2000) | 0x0200);
1232 tg3_writephy(tp
, 0x16, 0x0002);
1233 for (i
= 0; i
< 6; i
++)
1234 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
1235 tg3_writephy(tp
, 0x16, 0x0202);
1236 if (tg3_wait_macro_done(tp
))
1243 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
1245 u32 reg32
, phy9_orig
;
1246 int retries
, do_phy_reset
, err
;
1252 err
= tg3_bmcr_reset(tp
);
1258 /* Disable transmitter and interrupt. */
1259 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
1263 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
1265 /* Set full-duplex, 1000 mbps. */
1266 tg3_writephy(tp
, MII_BMCR
,
1267 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
1269 /* Set to master mode. */
1270 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
1273 tg3_writephy(tp
, MII_TG3_CTRL
,
1274 (MII_TG3_CTRL_AS_MASTER
|
1275 MII_TG3_CTRL_ENABLE_AS_MASTER
));
1277 /* Enable SM_DSP_CLOCK and 6dB. */
1278 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
1280 /* Block the PHY control access. */
1281 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
1282 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0800);
1284 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
1287 } while (--retries
);
1289 err
= tg3_phy_reset_chanpat(tp
);
1293 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
1294 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0000);
1296 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
1297 tg3_writephy(tp
, 0x16, 0x0000);
1299 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1300 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
1301 /* Set Extended packet length bit for jumbo frames */
1302 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4400);
1305 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1308 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
1310 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
1312 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
1319 /* This will reset the tigon3 PHY if there is no valid
1320 * link unless the FORCE argument is non-zero.
1322 static int tg3_phy_reset(struct tg3
*tp
)
1328 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1331 val
= tr32(GRC_MISC_CFG
);
1332 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
1335 err
= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
1336 err
|= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
1340 if (netif_running(tp
->dev
) && netif_carrier_ok(tp
->dev
)) {
1341 netif_carrier_off(tp
->dev
);
1342 tg3_link_report(tp
);
1345 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1346 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1347 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
1348 err
= tg3_phy_reset_5703_4_5(tp
);
1355 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
1356 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
1357 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
1358 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
1360 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
1363 err
= tg3_bmcr_reset(tp
);
1367 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
1370 phy
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
1371 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, phy
);
1373 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
1376 if (tp
->tg3_flags3
& TG3_FLG3_5761_5784_AX_FIXES
) {
1379 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
1380 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
1381 CPMU_LSPD_1000MB_MACCLK_12_5
) {
1382 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
1384 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
1387 /* Disable GPHY autopowerdown. */
1388 tg3_writephy(tp
, MII_TG3_MISC_SHDW
,
1389 MII_TG3_MISC_SHDW_WREN
|
1390 MII_TG3_MISC_SHDW_APD_SEL
|
1391 MII_TG3_MISC_SHDW_APD_WKTM_84MS
);
1394 tg3_phy_apply_otp(tp
);
1397 if (tp
->tg3_flags2
& TG3_FLG2_PHY_ADC_BUG
) {
1398 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
1399 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1400 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x2aaa);
1401 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
1402 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0323);
1403 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1405 if (tp
->tg3_flags2
& TG3_FLG2_PHY_5704_A0_BUG
) {
1406 tg3_writephy(tp
, 0x1c, 0x8d68);
1407 tg3_writephy(tp
, 0x1c, 0x8d68);
1409 if (tp
->tg3_flags2
& TG3_FLG2_PHY_BER_BUG
) {
1410 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
1411 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
1412 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x310b);
1413 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1414 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x9506);
1415 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x401f);
1416 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x14e2);
1417 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1419 else if (tp
->tg3_flags2
& TG3_FLG2_PHY_JITTER_BUG
) {
1420 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
1421 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
1422 if (tp
->tg3_flags2
& TG3_FLG2_PHY_ADJUST_TRIM
) {
1423 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
1424 tg3_writephy(tp
, MII_TG3_TEST1
,
1425 MII_TG3_TEST1_TRIM_EN
| 0x4);
1427 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
1428 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1430 /* Set Extended packet length bit (bit 14) on all chips that */
1431 /* support jumbo frames */
1432 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1433 /* Cannot do read-modify-write on 5401 */
1434 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1435 } else if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1438 /* Set bit 14 with read-modify-write to preserve other bits */
1439 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0007) &&
1440 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy_reg
))
1441 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy_reg
| 0x4000);
1444 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1445 * jumbo frames transmission.
1447 if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1450 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &phy_reg
))
1451 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1452 phy_reg
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
1455 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1456 /* adjust output voltage */
1457 tg3_writephy(tp
, MII_TG3_EPHY_PTEST
, 0x12);
1460 tg3_phy_toggle_automdix(tp
, 1);
1461 tg3_phy_set_wirespeed(tp
);
1465 static void tg3_frob_aux_power(struct tg3
*tp
)
1467 struct tg3
*tp_peer
= tp
;
1469 if ((tp
->tg3_flags2
& TG3_FLG2_IS_NIC
) == 0)
1472 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
1473 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
1474 struct net_device
*dev_peer
;
1476 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
1477 /* remove_one() may have been run on the peer. */
1481 tp_peer
= netdev_priv(dev_peer
);
1484 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1485 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0 ||
1486 (tp_peer
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1487 (tp_peer
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
1488 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1489 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1490 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1491 (GRC_LCLCTRL_GPIO_OE0
|
1492 GRC_LCLCTRL_GPIO_OE1
|
1493 GRC_LCLCTRL_GPIO_OE2
|
1494 GRC_LCLCTRL_GPIO_OUTPUT0
|
1495 GRC_LCLCTRL_GPIO_OUTPUT1
),
1499 u32 grc_local_ctrl
= 0;
1501 if (tp_peer
!= tp
&&
1502 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1505 /* Workaround to prevent overdrawing Amps. */
1506 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
1508 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
1509 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1510 grc_local_ctrl
, 100);
1513 /* On 5753 and variants, GPIO2 cannot be used. */
1514 no_gpio2
= tp
->nic_sram_data_cfg
&
1515 NIC_SRAM_DATA_CFG_NO_GPIO2
;
1517 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
1518 GRC_LCLCTRL_GPIO_OE1
|
1519 GRC_LCLCTRL_GPIO_OE2
|
1520 GRC_LCLCTRL_GPIO_OUTPUT1
|
1521 GRC_LCLCTRL_GPIO_OUTPUT2
;
1523 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
1524 GRC_LCLCTRL_GPIO_OUTPUT2
);
1526 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1527 grc_local_ctrl
, 100);
1529 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
1531 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1532 grc_local_ctrl
, 100);
1535 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
1536 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1537 grc_local_ctrl
, 100);
1541 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
1542 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
1543 if (tp_peer
!= tp
&&
1544 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1547 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1548 (GRC_LCLCTRL_GPIO_OE1
|
1549 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1551 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1552 GRC_LCLCTRL_GPIO_OE1
, 100);
1554 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1555 (GRC_LCLCTRL_GPIO_OE1
|
1556 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1561 static int tg3_5700_link_polarity(struct tg3
*tp
, u32 speed
)
1563 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
)
1565 else if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
) {
1566 if (speed
!= SPEED_10
)
1568 } else if (speed
== SPEED_10
)
1574 static int tg3_setup_phy(struct tg3
*, int);
1576 #define RESET_KIND_SHUTDOWN 0
1577 #define RESET_KIND_INIT 1
1578 #define RESET_KIND_SUSPEND 2
1580 static void tg3_write_sig_post_reset(struct tg3
*, int);
1581 static int tg3_halt_cpu(struct tg3
*, u32
);
1582 static int tg3_nvram_lock(struct tg3
*);
1583 static void tg3_nvram_unlock(struct tg3
*);
1585 static void tg3_power_down_phy(struct tg3
*tp
)
1589 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
1590 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
1591 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
1592 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
1595 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
1596 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
1597 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
1602 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1604 val
= tr32(GRC_MISC_CFG
);
1605 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
1609 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1610 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
1611 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x01b2);
1614 /* The PHY should not be powered down on some chips because
1617 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1618 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1619 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
&&
1620 (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)))
1623 if (tp
->tg3_flags3
& TG3_FLG3_5761_5784_AX_FIXES
) {
1624 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
1625 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
1626 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
1627 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
1630 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
1633 static int tg3_set_power_state(struct tg3
*tp
, pci_power_t state
)
1636 u16 power_control
, power_caps
;
1637 int pm
= tp
->pm_cap
;
1639 /* Make sure register accesses (indirect or otherwise)
1640 * will function correctly.
1642 pci_write_config_dword(tp
->pdev
,
1643 TG3PCI_MISC_HOST_CTRL
,
1644 tp
->misc_host_ctrl
);
1646 pci_read_config_word(tp
->pdev
,
1649 power_control
|= PCI_PM_CTRL_PME_STATUS
;
1650 power_control
&= ~(PCI_PM_CTRL_STATE_MASK
);
1654 pci_write_config_word(tp
->pdev
,
1657 udelay(100); /* Delay after power state change */
1659 /* Switch out of Vaux if it is a NIC */
1660 if (tp
->tg3_flags2
& TG3_FLG2_IS_NIC
)
1661 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
, 100);
1678 printk(KERN_WARNING PFX
"%s: Invalid power state (%d) "
1680 tp
->dev
->name
, state
);
1684 power_control
|= PCI_PM_CTRL_PME_ENABLE
;
1686 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
1687 tw32(TG3PCI_MISC_HOST_CTRL
,
1688 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
1690 if (tp
->link_config
.phy_is_low_power
== 0) {
1691 tp
->link_config
.phy_is_low_power
= 1;
1692 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
1693 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
1694 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
1697 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
1698 tp
->link_config
.speed
= SPEED_10
;
1699 tp
->link_config
.duplex
= DUPLEX_HALF
;
1700 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
1701 tg3_setup_phy(tp
, 0);
1704 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1707 val
= tr32(GRC_VCPU_EXT_CTRL
);
1708 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
1709 } else if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1713 for (i
= 0; i
< 200; i
++) {
1714 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
1715 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1720 if (tp
->tg3_flags
& TG3_FLAG_WOL_CAP
)
1721 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
1722 WOL_DRV_STATE_SHUTDOWN
|
1726 pci_read_config_word(tp
->pdev
, pm
+ PCI_PM_PMC
, &power_caps
);
1728 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) {
1731 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1732 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x5a);
1735 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
1736 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
1738 mac_mode
= MAC_MODE_PORT_MODE_MII
;
1740 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
1741 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
1743 u32 speed
= (tp
->tg3_flags
&
1744 TG3_FLAG_WOL_SPEED_100MB
) ?
1745 SPEED_100
: SPEED_10
;
1746 if (tg3_5700_link_polarity(tp
, speed
))
1747 mac_mode
|= MAC_MODE_LINK_POLARITY
;
1749 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
1752 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
1755 if (!(tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
1756 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
1758 if (((power_caps
& PCI_PM_CAP_PME_D3cold
) &&
1759 (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)))
1760 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
1762 tw32_f(MAC_MODE
, mac_mode
);
1765 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
1769 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
) &&
1770 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1771 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
1774 base_val
= tp
->pci_clock_ctrl
;
1775 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
1776 CLOCK_CTRL_TXCLK_DISABLE
);
1778 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
1779 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
1780 } else if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) ||
1781 (tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
) ||
1782 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)) {
1784 } else if (!((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
1785 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))) {
1786 u32 newbits1
, newbits2
;
1788 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1789 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1790 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
1791 CLOCK_CTRL_TXCLK_DISABLE
|
1793 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1794 } else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
1795 newbits1
= CLOCK_CTRL_625_CORE
;
1796 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
1798 newbits1
= CLOCK_CTRL_ALTCLK
;
1799 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1802 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
1805 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
1808 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
1811 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1812 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1813 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
1814 CLOCK_CTRL_TXCLK_DISABLE
|
1815 CLOCK_CTRL_44MHZ_CORE
);
1817 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
1820 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1821 tp
->pci_clock_ctrl
| newbits3
, 40);
1825 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) &&
1826 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) &&
1827 !(tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
))
1828 tg3_power_down_phy(tp
);
1830 tg3_frob_aux_power(tp
);
1832 /* Workaround for unstable PLL clock */
1833 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
1834 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
1835 u32 val
= tr32(0x7d00);
1837 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1839 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1842 err
= tg3_nvram_lock(tp
);
1843 tg3_halt_cpu(tp
, RX_CPU_BASE
);
1845 tg3_nvram_unlock(tp
);
1849 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
1851 /* Finally, set the new power state. */
1852 pci_write_config_word(tp
->pdev
, pm
+ PCI_PM_CTRL
, power_control
);
1853 udelay(100); /* Delay after power state change */
1858 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
1860 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
1861 case MII_TG3_AUX_STAT_10HALF
:
1863 *duplex
= DUPLEX_HALF
;
1866 case MII_TG3_AUX_STAT_10FULL
:
1868 *duplex
= DUPLEX_FULL
;
1871 case MII_TG3_AUX_STAT_100HALF
:
1873 *duplex
= DUPLEX_HALF
;
1876 case MII_TG3_AUX_STAT_100FULL
:
1878 *duplex
= DUPLEX_FULL
;
1881 case MII_TG3_AUX_STAT_1000HALF
:
1882 *speed
= SPEED_1000
;
1883 *duplex
= DUPLEX_HALF
;
1886 case MII_TG3_AUX_STAT_1000FULL
:
1887 *speed
= SPEED_1000
;
1888 *duplex
= DUPLEX_FULL
;
1892 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1893 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
1895 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
1899 *speed
= SPEED_INVALID
;
1900 *duplex
= DUPLEX_INVALID
;
1905 static void tg3_phy_copper_begin(struct tg3
*tp
)
1910 if (tp
->link_config
.phy_is_low_power
) {
1911 /* Entering low power mode. Disable gigabit and
1912 * 100baseT advertisements.
1914 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1916 new_adv
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1917 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1918 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
1919 new_adv
|= (ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1921 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1922 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
1923 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
1924 tp
->link_config
.advertising
&=
1925 ~(ADVERTISED_1000baseT_Half
|
1926 ADVERTISED_1000baseT_Full
);
1928 new_adv
= ADVERTISE_CSMA
;
1929 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Half
)
1930 new_adv
|= ADVERTISE_10HALF
;
1931 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Full
)
1932 new_adv
|= ADVERTISE_10FULL
;
1933 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Half
)
1934 new_adv
|= ADVERTISE_100HALF
;
1935 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Full
)
1936 new_adv
|= ADVERTISE_100FULL
;
1938 new_adv
|= tg3_advert_flowctrl_1000T(tp
->link_config
.flowctrl
);
1940 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1942 if (tp
->link_config
.advertising
&
1943 (ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
)) {
1945 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
1946 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
1947 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
1948 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
1949 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) &&
1950 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1951 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
))
1952 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1953 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1954 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1956 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1959 new_adv
= tg3_advert_flowctrl_1000T(tp
->link_config
.flowctrl
);
1960 new_adv
|= ADVERTISE_CSMA
;
1962 /* Asking for a specific link mode. */
1963 if (tp
->link_config
.speed
== SPEED_1000
) {
1964 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1966 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1967 new_adv
= MII_TG3_CTRL_ADV_1000_FULL
;
1969 new_adv
= MII_TG3_CTRL_ADV_1000_HALF
;
1970 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1971 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
1972 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1973 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1975 if (tp
->link_config
.speed
== SPEED_100
) {
1976 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1977 new_adv
|= ADVERTISE_100FULL
;
1979 new_adv
|= ADVERTISE_100HALF
;
1981 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1982 new_adv
|= ADVERTISE_10FULL
;
1984 new_adv
|= ADVERTISE_10HALF
;
1986 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1991 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1994 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
1995 tp
->link_config
.speed
!= SPEED_INVALID
) {
1996 u32 bmcr
, orig_bmcr
;
1998 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
1999 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
2002 switch (tp
->link_config
.speed
) {
2008 bmcr
|= BMCR_SPEED100
;
2012 bmcr
|= TG3_BMCR_SPEED1000
;
2016 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2017 bmcr
|= BMCR_FULLDPLX
;
2019 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
2020 (bmcr
!= orig_bmcr
)) {
2021 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
2022 for (i
= 0; i
< 1500; i
++) {
2026 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
2027 tg3_readphy(tp
, MII_BMSR
, &tmp
))
2029 if (!(tmp
& BMSR_LSTATUS
)) {
2034 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2038 tg3_writephy(tp
, MII_BMCR
,
2039 BMCR_ANENABLE
| BMCR_ANRESTART
);
2043 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
2047 /* Turn off tap power management. */
2048 /* Set Extended packet length bit */
2049 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
2051 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0012);
2052 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1804);
2054 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0013);
2055 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1204);
2057 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
2058 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0132);
2060 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
2061 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0232);
2063 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
2064 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0a20);
2071 static int tg3_copper_is_advertising_all(struct tg3
*tp
, u32 mask
)
2073 u32 adv_reg
, all_mask
= 0;
2075 if (mask
& ADVERTISED_10baseT_Half
)
2076 all_mask
|= ADVERTISE_10HALF
;
2077 if (mask
& ADVERTISED_10baseT_Full
)
2078 all_mask
|= ADVERTISE_10FULL
;
2079 if (mask
& ADVERTISED_100baseT_Half
)
2080 all_mask
|= ADVERTISE_100HALF
;
2081 if (mask
& ADVERTISED_100baseT_Full
)
2082 all_mask
|= ADVERTISE_100FULL
;
2084 if (tg3_readphy(tp
, MII_ADVERTISE
, &adv_reg
))
2087 if ((adv_reg
& all_mask
) != all_mask
)
2089 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
2093 if (mask
& ADVERTISED_1000baseT_Half
)
2094 all_mask
|= ADVERTISE_1000HALF
;
2095 if (mask
& ADVERTISED_1000baseT_Full
)
2096 all_mask
|= ADVERTISE_1000FULL
;
2098 if (tg3_readphy(tp
, MII_TG3_CTRL
, &tg3_ctrl
))
2101 if ((tg3_ctrl
& all_mask
) != all_mask
)
2107 static int tg3_adv_1000T_flowctrl_ok(struct tg3
*tp
, u32
*lcladv
, u32
*rmtadv
)
2111 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
2114 curadv
= *lcladv
& (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
2115 reqadv
= tg3_advert_flowctrl_1000T(tp
->link_config
.flowctrl
);
2117 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
2118 if (curadv
!= reqadv
)
2121 if (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
)
2122 tg3_readphy(tp
, MII_LPA
, rmtadv
);
2124 /* Reprogram the advertisement register, even if it
2125 * does not affect the current link. If the link
2126 * gets renegotiated in the future, we can save an
2127 * additional renegotiation cycle by advertising
2128 * it correctly in the first place.
2130 if (curadv
!= reqadv
) {
2131 *lcladv
&= ~(ADVERTISE_PAUSE_CAP
|
2132 ADVERTISE_PAUSE_ASYM
);
2133 tg3_writephy(tp
, MII_ADVERTISE
, *lcladv
| reqadv
);
2140 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
2142 int current_link_up
;
2144 u32 lcl_adv
, rmt_adv
;
2152 (MAC_STATUS_SYNC_CHANGED
|
2153 MAC_STATUS_CFG_CHANGED
|
2154 MAC_STATUS_MI_COMPLETION
|
2155 MAC_STATUS_LNKSTATE_CHANGED
));
2158 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
2160 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
2164 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x02);
2166 /* Some third-party PHYs need to be reset on link going
2169 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
2170 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2171 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
2172 netif_carrier_ok(tp
->dev
)) {
2173 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2174 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
2175 !(bmsr
& BMSR_LSTATUS
))
2181 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
2182 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2183 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
2184 !(tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
))
2187 if (!(bmsr
& BMSR_LSTATUS
)) {
2188 err
= tg3_init_5401phy_dsp(tp
);
2192 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2193 for (i
= 0; i
< 1000; i
++) {
2195 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
2196 (bmsr
& BMSR_LSTATUS
)) {
2202 if ((tp
->phy_id
& PHY_ID_REV_MASK
) == PHY_REV_BCM5401_B0
&&
2203 !(bmsr
& BMSR_LSTATUS
) &&
2204 tp
->link_config
.active_speed
== SPEED_1000
) {
2205 err
= tg3_phy_reset(tp
);
2207 err
= tg3_init_5401phy_dsp(tp
);
2212 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
2213 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
2214 /* 5701 {A0,B0} CRC bug workaround */
2215 tg3_writephy(tp
, 0x15, 0x0a75);
2216 tg3_writephy(tp
, 0x1c, 0x8c68);
2217 tg3_writephy(tp
, 0x1c, 0x8d68);
2218 tg3_writephy(tp
, 0x1c, 0x8c68);
2221 /* Clear pending interrupts... */
2222 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
2223 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
2225 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
)
2226 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
2227 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
)
2228 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
2230 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2231 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2232 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
2233 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2234 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
2236 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
2239 current_link_up
= 0;
2240 current_speed
= SPEED_INVALID
;
2241 current_duplex
= DUPLEX_INVALID
;
2243 if (tp
->tg3_flags2
& TG3_FLG2_CAPACITIVE_COUPLING
) {
2246 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4007);
2247 tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
);
2248 if (!(val
& (1 << 10))) {
2250 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
2256 for (i
= 0; i
< 100; i
++) {
2257 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2258 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
2259 (bmsr
& BMSR_LSTATUS
))
2264 if (bmsr
& BMSR_LSTATUS
) {
2267 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
2268 for (i
= 0; i
< 2000; i
++) {
2270 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
2275 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
2280 for (i
= 0; i
< 200; i
++) {
2281 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2282 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
2284 if (bmcr
&& bmcr
!= 0x7fff)
2292 tp
->link_config
.active_speed
= current_speed
;
2293 tp
->link_config
.active_duplex
= current_duplex
;
2295 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2296 if ((bmcr
& BMCR_ANENABLE
) &&
2297 tg3_copper_is_advertising_all(tp
,
2298 tp
->link_config
.advertising
)) {
2299 if (tg3_adv_1000T_flowctrl_ok(tp
, &lcl_adv
,
2301 current_link_up
= 1;
2304 if (!(bmcr
& BMCR_ANENABLE
) &&
2305 tp
->link_config
.speed
== current_speed
&&
2306 tp
->link_config
.duplex
== current_duplex
&&
2307 tp
->link_config
.flowctrl
==
2308 tp
->link_config
.active_flowctrl
) {
2309 current_link_up
= 1;
2313 if (current_link_up
== 1 &&
2314 tp
->link_config
.active_duplex
== DUPLEX_FULL
)
2315 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
2319 if (current_link_up
== 0 || tp
->link_config
.phy_is_low_power
) {
2322 tg3_phy_copper_begin(tp
);
2324 tg3_readphy(tp
, MII_BMSR
, &tmp
);
2325 if (!tg3_readphy(tp
, MII_BMSR
, &tmp
) &&
2326 (tmp
& BMSR_LSTATUS
))
2327 current_link_up
= 1;
2330 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
2331 if (current_link_up
== 1) {
2332 if (tp
->link_config
.active_speed
== SPEED_100
||
2333 tp
->link_config
.active_speed
== SPEED_10
)
2334 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
2336 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2338 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2340 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
2341 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2342 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2344 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
2345 if (current_link_up
== 1 &&
2346 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
2347 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
2349 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2352 /* ??? Without this setting Netgear GA302T PHY does not
2353 * ??? send/receive packets...
2355 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
&&
2356 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
2357 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
2358 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
2362 tw32_f(MAC_MODE
, tp
->mac_mode
);
2365 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
2366 /* Polled via timer. */
2367 tw32_f(MAC_EVENT
, 0);
2369 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2373 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
2374 current_link_up
== 1 &&
2375 tp
->link_config
.active_speed
== SPEED_1000
&&
2376 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) ||
2377 (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
))) {
2380 (MAC_STATUS_SYNC_CHANGED
|
2381 MAC_STATUS_CFG_CHANGED
));
2384 NIC_SRAM_FIRMWARE_MBOX
,
2385 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
2388 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2389 if (current_link_up
)
2390 netif_carrier_on(tp
->dev
);
2392 netif_carrier_off(tp
->dev
);
2393 tg3_link_report(tp
);
2399 struct tg3_fiber_aneginfo
{
2401 #define ANEG_STATE_UNKNOWN 0
2402 #define ANEG_STATE_AN_ENABLE 1
2403 #define ANEG_STATE_RESTART_INIT 2
2404 #define ANEG_STATE_RESTART 3
2405 #define ANEG_STATE_DISABLE_LINK_OK 4
2406 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2407 #define ANEG_STATE_ABILITY_DETECT 6
2408 #define ANEG_STATE_ACK_DETECT_INIT 7
2409 #define ANEG_STATE_ACK_DETECT 8
2410 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2411 #define ANEG_STATE_COMPLETE_ACK 10
2412 #define ANEG_STATE_IDLE_DETECT_INIT 11
2413 #define ANEG_STATE_IDLE_DETECT 12
2414 #define ANEG_STATE_LINK_OK 13
2415 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2416 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2419 #define MR_AN_ENABLE 0x00000001
2420 #define MR_RESTART_AN 0x00000002
2421 #define MR_AN_COMPLETE 0x00000004
2422 #define MR_PAGE_RX 0x00000008
2423 #define MR_NP_LOADED 0x00000010
2424 #define MR_TOGGLE_TX 0x00000020
2425 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2426 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2427 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2428 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2429 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2430 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2431 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2432 #define MR_TOGGLE_RX 0x00002000
2433 #define MR_NP_RX 0x00004000
2435 #define MR_LINK_OK 0x80000000
2437 unsigned long link_time
, cur_time
;
2439 u32 ability_match_cfg
;
2440 int ability_match_count
;
2442 char ability_match
, idle_match
, ack_match
;
2444 u32 txconfig
, rxconfig
;
2445 #define ANEG_CFG_NP 0x00000080
2446 #define ANEG_CFG_ACK 0x00000040
2447 #define ANEG_CFG_RF2 0x00000020
2448 #define ANEG_CFG_RF1 0x00000010
2449 #define ANEG_CFG_PS2 0x00000001
2450 #define ANEG_CFG_PS1 0x00008000
2451 #define ANEG_CFG_HD 0x00004000
2452 #define ANEG_CFG_FD 0x00002000
2453 #define ANEG_CFG_INVAL 0x00001f06
2458 #define ANEG_TIMER_ENAB 2
2459 #define ANEG_FAILED -1
2461 #define ANEG_STATE_SETTLE_TIME 10000
2463 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
2464 struct tg3_fiber_aneginfo
*ap
)
2467 unsigned long delta
;
2471 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
2475 ap
->ability_match_cfg
= 0;
2476 ap
->ability_match_count
= 0;
2477 ap
->ability_match
= 0;
2483 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
2484 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
2486 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
2487 ap
->ability_match_cfg
= rx_cfg_reg
;
2488 ap
->ability_match
= 0;
2489 ap
->ability_match_count
= 0;
2491 if (++ap
->ability_match_count
> 1) {
2492 ap
->ability_match
= 1;
2493 ap
->ability_match_cfg
= rx_cfg_reg
;
2496 if (rx_cfg_reg
& ANEG_CFG_ACK
)
2504 ap
->ability_match_cfg
= 0;
2505 ap
->ability_match_count
= 0;
2506 ap
->ability_match
= 0;
2512 ap
->rxconfig
= rx_cfg_reg
;
2516 case ANEG_STATE_UNKNOWN
:
2517 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
2518 ap
->state
= ANEG_STATE_AN_ENABLE
;
2521 case ANEG_STATE_AN_ENABLE
:
2522 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
2523 if (ap
->flags
& MR_AN_ENABLE
) {
2526 ap
->ability_match_cfg
= 0;
2527 ap
->ability_match_count
= 0;
2528 ap
->ability_match
= 0;
2532 ap
->state
= ANEG_STATE_RESTART_INIT
;
2534 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
2538 case ANEG_STATE_RESTART_INIT
:
2539 ap
->link_time
= ap
->cur_time
;
2540 ap
->flags
&= ~(MR_NP_LOADED
);
2542 tw32(MAC_TX_AUTO_NEG
, 0);
2543 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2544 tw32_f(MAC_MODE
, tp
->mac_mode
);
2547 ret
= ANEG_TIMER_ENAB
;
2548 ap
->state
= ANEG_STATE_RESTART
;
2551 case ANEG_STATE_RESTART
:
2552 delta
= ap
->cur_time
- ap
->link_time
;
2553 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2554 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
2556 ret
= ANEG_TIMER_ENAB
;
2560 case ANEG_STATE_DISABLE_LINK_OK
:
2564 case ANEG_STATE_ABILITY_DETECT_INIT
:
2565 ap
->flags
&= ~(MR_TOGGLE_TX
);
2566 ap
->txconfig
= ANEG_CFG_FD
;
2567 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
2568 if (flowctrl
& ADVERTISE_1000XPAUSE
)
2569 ap
->txconfig
|= ANEG_CFG_PS1
;
2570 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
2571 ap
->txconfig
|= ANEG_CFG_PS2
;
2572 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2573 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2574 tw32_f(MAC_MODE
, tp
->mac_mode
);
2577 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
2580 case ANEG_STATE_ABILITY_DETECT
:
2581 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0) {
2582 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
2586 case ANEG_STATE_ACK_DETECT_INIT
:
2587 ap
->txconfig
|= ANEG_CFG_ACK
;
2588 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2589 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2590 tw32_f(MAC_MODE
, tp
->mac_mode
);
2593 ap
->state
= ANEG_STATE_ACK_DETECT
;
2596 case ANEG_STATE_ACK_DETECT
:
2597 if (ap
->ack_match
!= 0) {
2598 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
2599 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
2600 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
2602 ap
->state
= ANEG_STATE_AN_ENABLE
;
2604 } else if (ap
->ability_match
!= 0 &&
2605 ap
->rxconfig
== 0) {
2606 ap
->state
= ANEG_STATE_AN_ENABLE
;
2610 case ANEG_STATE_COMPLETE_ACK_INIT
:
2611 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
2615 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
2616 MR_LP_ADV_HALF_DUPLEX
|
2617 MR_LP_ADV_SYM_PAUSE
|
2618 MR_LP_ADV_ASYM_PAUSE
|
2619 MR_LP_ADV_REMOTE_FAULT1
|
2620 MR_LP_ADV_REMOTE_FAULT2
|
2621 MR_LP_ADV_NEXT_PAGE
|
2624 if (ap
->rxconfig
& ANEG_CFG_FD
)
2625 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
2626 if (ap
->rxconfig
& ANEG_CFG_HD
)
2627 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
2628 if (ap
->rxconfig
& ANEG_CFG_PS1
)
2629 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
2630 if (ap
->rxconfig
& ANEG_CFG_PS2
)
2631 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
2632 if (ap
->rxconfig
& ANEG_CFG_RF1
)
2633 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
2634 if (ap
->rxconfig
& ANEG_CFG_RF2
)
2635 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
2636 if (ap
->rxconfig
& ANEG_CFG_NP
)
2637 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
2639 ap
->link_time
= ap
->cur_time
;
2641 ap
->flags
^= (MR_TOGGLE_TX
);
2642 if (ap
->rxconfig
& 0x0008)
2643 ap
->flags
|= MR_TOGGLE_RX
;
2644 if (ap
->rxconfig
& ANEG_CFG_NP
)
2645 ap
->flags
|= MR_NP_RX
;
2646 ap
->flags
|= MR_PAGE_RX
;
2648 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
2649 ret
= ANEG_TIMER_ENAB
;
2652 case ANEG_STATE_COMPLETE_ACK
:
2653 if (ap
->ability_match
!= 0 &&
2654 ap
->rxconfig
== 0) {
2655 ap
->state
= ANEG_STATE_AN_ENABLE
;
2658 delta
= ap
->cur_time
- ap
->link_time
;
2659 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2660 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
2661 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2663 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
2664 !(ap
->flags
& MR_NP_RX
)) {
2665 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2673 case ANEG_STATE_IDLE_DETECT_INIT
:
2674 ap
->link_time
= ap
->cur_time
;
2675 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2676 tw32_f(MAC_MODE
, tp
->mac_mode
);
2679 ap
->state
= ANEG_STATE_IDLE_DETECT
;
2680 ret
= ANEG_TIMER_ENAB
;
2683 case ANEG_STATE_IDLE_DETECT
:
2684 if (ap
->ability_match
!= 0 &&
2685 ap
->rxconfig
== 0) {
2686 ap
->state
= ANEG_STATE_AN_ENABLE
;
2689 delta
= ap
->cur_time
- ap
->link_time
;
2690 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2691 /* XXX another gem from the Broadcom driver :( */
2692 ap
->state
= ANEG_STATE_LINK_OK
;
2696 case ANEG_STATE_LINK_OK
:
2697 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
2701 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
2702 /* ??? unimplemented */
2705 case ANEG_STATE_NEXT_PAGE_WAIT
:
2706 /* ??? unimplemented */
2717 static int fiber_autoneg(struct tg3
*tp
, u32
*txflags
, u32
*rxflags
)
2720 struct tg3_fiber_aneginfo aninfo
;
2721 int status
= ANEG_FAILED
;
2725 tw32_f(MAC_TX_AUTO_NEG
, 0);
2727 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
2728 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
2731 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
2734 memset(&aninfo
, 0, sizeof(aninfo
));
2735 aninfo
.flags
|= MR_AN_ENABLE
;
2736 aninfo
.state
= ANEG_STATE_UNKNOWN
;
2737 aninfo
.cur_time
= 0;
2739 while (++tick
< 195000) {
2740 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
2741 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
2747 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2748 tw32_f(MAC_MODE
, tp
->mac_mode
);
2751 *txflags
= aninfo
.txconfig
;
2752 *rxflags
= aninfo
.flags
;
2754 if (status
== ANEG_DONE
&&
2755 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
2756 MR_LP_ADV_FULL_DUPLEX
)))
2762 static void tg3_init_bcm8002(struct tg3
*tp
)
2764 u32 mac_status
= tr32(MAC_STATUS
);
2767 /* Reset when initting first time or we have a link. */
2768 if ((tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) &&
2769 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
2772 /* Set PLL lock range. */
2773 tg3_writephy(tp
, 0x16, 0x8007);
2776 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
2778 /* Wait for reset to complete. */
2779 /* XXX schedule_timeout() ... */
2780 for (i
= 0; i
< 500; i
++)
2783 /* Config mode; select PMA/Ch 1 regs. */
2784 tg3_writephy(tp
, 0x10, 0x8411);
2786 /* Enable auto-lock and comdet, select txclk for tx. */
2787 tg3_writephy(tp
, 0x11, 0x0a10);
2789 tg3_writephy(tp
, 0x18, 0x00a0);
2790 tg3_writephy(tp
, 0x16, 0x41ff);
2792 /* Assert and deassert POR. */
2793 tg3_writephy(tp
, 0x13, 0x0400);
2795 tg3_writephy(tp
, 0x13, 0x0000);
2797 tg3_writephy(tp
, 0x11, 0x0a50);
2799 tg3_writephy(tp
, 0x11, 0x0a10);
2801 /* Wait for signal to stabilize */
2802 /* XXX schedule_timeout() ... */
2803 for (i
= 0; i
< 15000; i
++)
2806 /* Deselect the channel register so we can read the PHYID
2809 tg3_writephy(tp
, 0x10, 0x8011);
2812 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
2815 u32 sg_dig_ctrl
, sg_dig_status
;
2816 u32 serdes_cfg
, expected_sg_dig_ctrl
;
2817 int workaround
, port_a
;
2818 int current_link_up
;
2821 expected_sg_dig_ctrl
= 0;
2824 current_link_up
= 0;
2826 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
2827 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
2829 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
2832 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2833 /* preserve bits 20-23 for voltage regulator */
2834 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
2837 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2839 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
2840 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
2842 u32 val
= serdes_cfg
;
2848 tw32_f(MAC_SERDES_CFG
, val
);
2851 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
2853 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
2854 tg3_setup_flow_control(tp
, 0, 0);
2855 current_link_up
= 1;
2860 /* Want auto-negotiation. */
2861 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
2863 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
2864 if (flowctrl
& ADVERTISE_1000XPAUSE
)
2865 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
2866 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
2867 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
2869 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
2870 if ((tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
) &&
2871 tp
->serdes_counter
&&
2872 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
2873 MAC_STATUS_RCVD_CFG
)) ==
2874 MAC_STATUS_PCS_SYNCED
)) {
2875 tp
->serdes_counter
--;
2876 current_link_up
= 1;
2881 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
2882 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
2884 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
2886 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
2887 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2888 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
2889 MAC_STATUS_SIGNAL_DET
)) {
2890 sg_dig_status
= tr32(SG_DIG_STATUS
);
2891 mac_status
= tr32(MAC_STATUS
);
2893 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
2894 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2895 u32 local_adv
= 0, remote_adv
= 0;
2897 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
2898 local_adv
|= ADVERTISE_1000XPAUSE
;
2899 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
2900 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
2902 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
2903 remote_adv
|= LPA_1000XPAUSE
;
2904 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
2905 remote_adv
|= LPA_1000XPAUSE_ASYM
;
2907 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2908 current_link_up
= 1;
2909 tp
->serdes_counter
= 0;
2910 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2911 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
2912 if (tp
->serdes_counter
)
2913 tp
->serdes_counter
--;
2916 u32 val
= serdes_cfg
;
2923 tw32_f(MAC_SERDES_CFG
, val
);
2926 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
2929 /* Link parallel detection - link is up */
2930 /* only if we have PCS_SYNC and not */
2931 /* receiving config code words */
2932 mac_status
= tr32(MAC_STATUS
);
2933 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2934 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
2935 tg3_setup_flow_control(tp
, 0, 0);
2936 current_link_up
= 1;
2938 TG3_FLG2_PARALLEL_DETECT
;
2939 tp
->serdes_counter
=
2940 SERDES_PARALLEL_DET_TIMEOUT
;
2942 goto restart_autoneg
;
2946 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
2947 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2951 return current_link_up
;
2954 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
2956 int current_link_up
= 0;
2958 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
2961 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2962 u32 txflags
, rxflags
;
2965 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
2966 u32 local_adv
= 0, remote_adv
= 0;
2968 if (txflags
& ANEG_CFG_PS1
)
2969 local_adv
|= ADVERTISE_1000XPAUSE
;
2970 if (txflags
& ANEG_CFG_PS2
)
2971 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
2973 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
2974 remote_adv
|= LPA_1000XPAUSE
;
2975 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
2976 remote_adv
|= LPA_1000XPAUSE_ASYM
;
2978 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2980 current_link_up
= 1;
2982 for (i
= 0; i
< 30; i
++) {
2985 (MAC_STATUS_SYNC_CHANGED
|
2986 MAC_STATUS_CFG_CHANGED
));
2988 if ((tr32(MAC_STATUS
) &
2989 (MAC_STATUS_SYNC_CHANGED
|
2990 MAC_STATUS_CFG_CHANGED
)) == 0)
2994 mac_status
= tr32(MAC_STATUS
);
2995 if (current_link_up
== 0 &&
2996 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2997 !(mac_status
& MAC_STATUS_RCVD_CFG
))
2998 current_link_up
= 1;
3000 tg3_setup_flow_control(tp
, 0, 0);
3002 /* Forcing 1000FD link up. */
3003 current_link_up
= 1;
3005 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
3008 tw32_f(MAC_MODE
, tp
->mac_mode
);
3013 return current_link_up
;
3016 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
3019 u16 orig_active_speed
;
3020 u8 orig_active_duplex
;
3022 int current_link_up
;
3025 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
3026 orig_active_speed
= tp
->link_config
.active_speed
;
3027 orig_active_duplex
= tp
->link_config
.active_duplex
;
3029 if (!(tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
) &&
3030 netif_carrier_ok(tp
->dev
) &&
3031 (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)) {
3032 mac_status
= tr32(MAC_STATUS
);
3033 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
3034 MAC_STATUS_SIGNAL_DET
|
3035 MAC_STATUS_CFG_CHANGED
|
3036 MAC_STATUS_RCVD_CFG
);
3037 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
3038 MAC_STATUS_SIGNAL_DET
)) {
3039 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
3040 MAC_STATUS_CFG_CHANGED
));
3045 tw32_f(MAC_TX_AUTO_NEG
, 0);
3047 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
3048 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
3049 tw32_f(MAC_MODE
, tp
->mac_mode
);
3052 if (tp
->phy_id
== PHY_ID_BCM8002
)
3053 tg3_init_bcm8002(tp
);
3055 /* Enable link change event even when serdes polling. */
3056 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
3059 current_link_up
= 0;
3060 mac_status
= tr32(MAC_STATUS
);
3062 if (tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
)
3063 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
3065 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
3067 tp
->hw_status
->status
=
3068 (SD_STATUS_UPDATED
|
3069 (tp
->hw_status
->status
& ~SD_STATUS_LINK_CHG
));
3071 for (i
= 0; i
< 100; i
++) {
3072 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
3073 MAC_STATUS_CFG_CHANGED
));
3075 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
3076 MAC_STATUS_CFG_CHANGED
|
3077 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
3081 mac_status
= tr32(MAC_STATUS
);
3082 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
3083 current_link_up
= 0;
3084 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
3085 tp
->serdes_counter
== 0) {
3086 tw32_f(MAC_MODE
, (tp
->mac_mode
|
3087 MAC_MODE_SEND_CONFIGS
));
3089 tw32_f(MAC_MODE
, tp
->mac_mode
);
3093 if (current_link_up
== 1) {
3094 tp
->link_config
.active_speed
= SPEED_1000
;
3095 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
3096 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
3097 LED_CTRL_LNKLED_OVERRIDE
|
3098 LED_CTRL_1000MBPS_ON
));
3100 tp
->link_config
.active_speed
= SPEED_INVALID
;
3101 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
3102 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
3103 LED_CTRL_LNKLED_OVERRIDE
|
3104 LED_CTRL_TRAFFIC_OVERRIDE
));
3107 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
3108 if (current_link_up
)
3109 netif_carrier_on(tp
->dev
);
3111 netif_carrier_off(tp
->dev
);
3112 tg3_link_report(tp
);
3114 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
3115 if (orig_pause_cfg
!= now_pause_cfg
||
3116 orig_active_speed
!= tp
->link_config
.active_speed
||
3117 orig_active_duplex
!= tp
->link_config
.active_duplex
)
3118 tg3_link_report(tp
);
3124 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
3126 int current_link_up
, err
= 0;
3130 u32 local_adv
, remote_adv
;
3132 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
3133 tw32_f(MAC_MODE
, tp
->mac_mode
);
3139 (MAC_STATUS_SYNC_CHANGED
|
3140 MAC_STATUS_CFG_CHANGED
|
3141 MAC_STATUS_MI_COMPLETION
|
3142 MAC_STATUS_LNKSTATE_CHANGED
));
3148 current_link_up
= 0;
3149 current_speed
= SPEED_INVALID
;
3150 current_duplex
= DUPLEX_INVALID
;
3152 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3153 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3154 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
3155 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
3156 bmsr
|= BMSR_LSTATUS
;
3158 bmsr
&= ~BMSR_LSTATUS
;
3161 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
3163 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
3164 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
) &&
3165 tp
->link_config
.flowctrl
== tp
->link_config
.active_flowctrl
) {
3166 /* do nothing, just check for link up at the end */
3167 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
3170 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
3171 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
3172 ADVERTISE_1000XPAUSE
|
3173 ADVERTISE_1000XPSE_ASYM
|
3176 new_adv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
3178 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
3179 new_adv
|= ADVERTISE_1000XHALF
;
3180 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
3181 new_adv
|= ADVERTISE_1000XFULL
;
3183 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
3184 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
3185 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
3186 tg3_writephy(tp
, MII_BMCR
, bmcr
);
3188 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
3189 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
3190 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
3197 bmcr
&= ~BMCR_SPEED1000
;
3198 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
3200 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
3201 new_bmcr
|= BMCR_FULLDPLX
;
3203 if (new_bmcr
!= bmcr
) {
3204 /* BMCR_SPEED1000 is a reserved bit that needs
3205 * to be set on write.
3207 new_bmcr
|= BMCR_SPEED1000
;
3209 /* Force a linkdown */
3210 if (netif_carrier_ok(tp
->dev
)) {
3213 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
3214 adv
&= ~(ADVERTISE_1000XFULL
|
3215 ADVERTISE_1000XHALF
|
3217 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
3218 tg3_writephy(tp
, MII_BMCR
, bmcr
|
3222 netif_carrier_off(tp
->dev
);
3224 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
3226 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3227 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
3228 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
3230 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
3231 bmsr
|= BMSR_LSTATUS
;
3233 bmsr
&= ~BMSR_LSTATUS
;
3235 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
3239 if (bmsr
& BMSR_LSTATUS
) {
3240 current_speed
= SPEED_1000
;
3241 current_link_up
= 1;
3242 if (bmcr
& BMCR_FULLDPLX
)
3243 current_duplex
= DUPLEX_FULL
;
3245 current_duplex
= DUPLEX_HALF
;
3250 if (bmcr
& BMCR_ANENABLE
) {
3253 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
3254 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
3255 common
= local_adv
& remote_adv
;
3256 if (common
& (ADVERTISE_1000XHALF
|
3257 ADVERTISE_1000XFULL
)) {
3258 if (common
& ADVERTISE_1000XFULL
)
3259 current_duplex
= DUPLEX_FULL
;
3261 current_duplex
= DUPLEX_HALF
;
3264 current_link_up
= 0;
3268 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
3269 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
3271 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
3272 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
3273 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
3275 tw32_f(MAC_MODE
, tp
->mac_mode
);
3278 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
3280 tp
->link_config
.active_speed
= current_speed
;
3281 tp
->link_config
.active_duplex
= current_duplex
;
3283 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
3284 if (current_link_up
)
3285 netif_carrier_on(tp
->dev
);
3287 netif_carrier_off(tp
->dev
);
3288 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
3290 tg3_link_report(tp
);
3295 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
3297 if (tp
->serdes_counter
) {
3298 /* Give autoneg time to complete. */
3299 tp
->serdes_counter
--;
3302 if (!netif_carrier_ok(tp
->dev
) &&
3303 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
3306 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
3307 if (bmcr
& BMCR_ANENABLE
) {
3310 /* Select shadow register 0x1f */
3311 tg3_writephy(tp
, 0x1c, 0x7c00);
3312 tg3_readphy(tp
, 0x1c, &phy1
);
3314 /* Select expansion interrupt status register */
3315 tg3_writephy(tp
, 0x17, 0x0f01);
3316 tg3_readphy(tp
, 0x15, &phy2
);
3317 tg3_readphy(tp
, 0x15, &phy2
);
3319 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
3320 /* We have signal detect and not receiving
3321 * config code words, link is up by parallel
3325 bmcr
&= ~BMCR_ANENABLE
;
3326 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
3327 tg3_writephy(tp
, MII_BMCR
, bmcr
);
3328 tp
->tg3_flags2
|= TG3_FLG2_PARALLEL_DETECT
;
3332 else if (netif_carrier_ok(tp
->dev
) &&
3333 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
3334 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
3337 /* Select expansion interrupt status register */
3338 tg3_writephy(tp
, 0x17, 0x0f01);
3339 tg3_readphy(tp
, 0x15, &phy2
);
3343 /* Config code words received, turn on autoneg. */
3344 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
3345 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
3347 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
3353 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
3357 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
3358 err
= tg3_setup_fiber_phy(tp
, force_reset
);
3359 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
3360 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
3362 err
= tg3_setup_copper_phy(tp
, force_reset
);
3365 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5784_A0
||
3366 tp
->pci_chip_rev_id
== CHIPREV_ID_5784_A1
) {
3369 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
3370 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
3372 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
3377 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
3378 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
3379 tw32(GRC_MISC_CFG
, val
);
3382 if (tp
->link_config
.active_speed
== SPEED_1000
&&
3383 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
3384 tw32(MAC_TX_LENGTHS
,
3385 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
3386 (6 << TX_LENGTHS_IPG_SHIFT
) |
3387 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
3389 tw32(MAC_TX_LENGTHS
,
3390 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
3391 (6 << TX_LENGTHS_IPG_SHIFT
) |
3392 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
3394 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
3395 if (netif_carrier_ok(tp
->dev
)) {
3396 tw32(HOSTCC_STAT_COAL_TICKS
,
3397 tp
->coal
.stats_block_coalesce_usecs
);
3399 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
3403 if (tp
->tg3_flags
& TG3_FLAG_ASPM_WORKAROUND
) {
3404 u32 val
= tr32(PCIE_PWR_MGMT_THRESH
);
3405 if (!netif_carrier_ok(tp
->dev
))
3406 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
3409 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
3410 tw32(PCIE_PWR_MGMT_THRESH
, val
);
3416 /* This is called whenever we suspect that the system chipset is re-
3417 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3418 * is bogus tx completions. We try to recover by setting the
3419 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3422 static void tg3_tx_recover(struct tg3
*tp
)
3424 BUG_ON((tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) ||
3425 tp
->write32_tx_mbox
== tg3_write_indirect_mbox
);
3427 printk(KERN_WARNING PFX
"%s: The system may be re-ordering memory-"
3428 "mapped I/O cycles to the network device, attempting to "
3429 "recover. Please report the problem to the driver maintainer "
3430 "and include system chipset information.\n", tp
->dev
->name
);
3432 spin_lock(&tp
->lock
);
3433 tp
->tg3_flags
|= TG3_FLAG_TX_RECOVERY_PENDING
;
3434 spin_unlock(&tp
->lock
);
3437 static inline u32
tg3_tx_avail(struct tg3
*tp
)
3440 return (tp
->tx_pending
-
3441 ((tp
->tx_prod
- tp
->tx_cons
) & (TG3_TX_RING_SIZE
- 1)));
3444 /* Tigon3 never reports partial packet sends. So we do not
3445 * need special logic to handle SKBs that have not had all
3446 * of their frags sent yet, like SunGEM does.
3448 static void tg3_tx(struct tg3
*tp
)
3450 u32 hw_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
3451 u32 sw_idx
= tp
->tx_cons
;
3453 while (sw_idx
!= hw_idx
) {
3454 struct tx_ring_info
*ri
= &tp
->tx_buffers
[sw_idx
];
3455 struct sk_buff
*skb
= ri
->skb
;
3458 if (unlikely(skb
== NULL
)) {
3463 pci_unmap_single(tp
->pdev
,
3464 pci_unmap_addr(ri
, mapping
),
3470 sw_idx
= NEXT_TX(sw_idx
);
3472 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3473 ri
= &tp
->tx_buffers
[sw_idx
];
3474 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
3477 pci_unmap_page(tp
->pdev
,
3478 pci_unmap_addr(ri
, mapping
),
3479 skb_shinfo(skb
)->frags
[i
].size
,
3482 sw_idx
= NEXT_TX(sw_idx
);
3487 if (unlikely(tx_bug
)) {
3493 tp
->tx_cons
= sw_idx
;
3495 /* Need to make the tx_cons update visible to tg3_start_xmit()
3496 * before checking for netif_queue_stopped(). Without the
3497 * memory barrier, there is a small possibility that tg3_start_xmit()
3498 * will miss it and cause the queue to be stopped forever.
3502 if (unlikely(netif_queue_stopped(tp
->dev
) &&
3503 (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH(tp
)))) {
3504 netif_tx_lock(tp
->dev
);
3505 if (netif_queue_stopped(tp
->dev
) &&
3506 (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH(tp
)))
3507 netif_wake_queue(tp
->dev
);
3508 netif_tx_unlock(tp
->dev
);
3512 /* Returns size of skb allocated or < 0 on error.
3514 * We only need to fill in the address because the other members
3515 * of the RX descriptor are invariant, see tg3_init_rings.
3517 * Note the purposeful assymetry of cpu vs. chip accesses. For
3518 * posting buffers we only dirty the first cache line of the RX
3519 * descriptor (containing the address). Whereas for the RX status
3520 * buffers the cpu only reads the last cacheline of the RX descriptor
3521 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3523 static int tg3_alloc_rx_skb(struct tg3
*tp
, u32 opaque_key
,
3524 int src_idx
, u32 dest_idx_unmasked
)
3526 struct tg3_rx_buffer_desc
*desc
;
3527 struct ring_info
*map
, *src_map
;
3528 struct sk_buff
*skb
;
3530 int skb_size
, dest_idx
;
3533 switch (opaque_key
) {
3534 case RXD_OPAQUE_RING_STD
:
3535 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3536 desc
= &tp
->rx_std
[dest_idx
];
3537 map
= &tp
->rx_std_buffers
[dest_idx
];
3539 src_map
= &tp
->rx_std_buffers
[src_idx
];
3540 skb_size
= tp
->rx_pkt_buf_sz
;
3543 case RXD_OPAQUE_RING_JUMBO
:
3544 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3545 desc
= &tp
->rx_jumbo
[dest_idx
];
3546 map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3548 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3549 skb_size
= RX_JUMBO_PKT_BUF_SZ
;
3556 /* Do not overwrite any of the map or rp information
3557 * until we are sure we can commit to a new buffer.
3559 * Callers depend upon this behavior and assume that
3560 * we leave everything unchanged if we fail.
3562 skb
= netdev_alloc_skb(tp
->dev
, skb_size
);
3566 skb_reserve(skb
, tp
->rx_offset
);
3568 mapping
= pci_map_single(tp
->pdev
, skb
->data
,
3569 skb_size
- tp
->rx_offset
,
3570 PCI_DMA_FROMDEVICE
);
3573 pci_unmap_addr_set(map
, mapping
, mapping
);
3575 if (src_map
!= NULL
)
3576 src_map
->skb
= NULL
;
3578 desc
->addr_hi
= ((u64
)mapping
>> 32);
3579 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
3584 /* We only need to move over in the address because the other
3585 * members of the RX descriptor are invariant. See notes above
3586 * tg3_alloc_rx_skb for full details.
3588 static void tg3_recycle_rx(struct tg3
*tp
, u32 opaque_key
,
3589 int src_idx
, u32 dest_idx_unmasked
)
3591 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
3592 struct ring_info
*src_map
, *dest_map
;
3595 switch (opaque_key
) {
3596 case RXD_OPAQUE_RING_STD
:
3597 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3598 dest_desc
= &tp
->rx_std
[dest_idx
];
3599 dest_map
= &tp
->rx_std_buffers
[dest_idx
];
3600 src_desc
= &tp
->rx_std
[src_idx
];
3601 src_map
= &tp
->rx_std_buffers
[src_idx
];
3604 case RXD_OPAQUE_RING_JUMBO
:
3605 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3606 dest_desc
= &tp
->rx_jumbo
[dest_idx
];
3607 dest_map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3608 src_desc
= &tp
->rx_jumbo
[src_idx
];
3609 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3616 dest_map
->skb
= src_map
->skb
;
3617 pci_unmap_addr_set(dest_map
, mapping
,
3618 pci_unmap_addr(src_map
, mapping
));
3619 dest_desc
->addr_hi
= src_desc
->addr_hi
;
3620 dest_desc
->addr_lo
= src_desc
->addr_lo
;
3622 src_map
->skb
= NULL
;
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged skb to the stack via the hardware-accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3632 /* The RX ring scheme is composed of multiple rings which post fresh
3633 * buffers to the chip, and one special ring the chip uses to report
3634 * status back to the host.
3636 * The special ring reports the status of received packets to the
3637 * host. The chip does not write into the original descriptor the
3638 * RX buffer was obtained from. The chip simply takes the original
3639 * descriptor as provided by the host, updates the status and length
3640 * field, then writes this into the next status ring entry.
3642 * Each ring the host uses to post buffers to the chip is described
3643 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3644 * it is first placed into the on-chip ram. When the packet's length
3645 * is known, it walks down the TG3_BDINFO entries to select the ring.
3646 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3647 * which is within the range of the new packet's length is chosen.
3649 * The "separate ring for rx status" scheme may sound queer, but it makes
3650 * sense from a cache coherency perspective. If only the host writes
3651 * to the buffer post rings, and only the chip writes to the rx status
3652 * rings, then cache lines never move beyond shared-modified state.
3653 * If both the host and chip were to write into the same ring, cache line
3654 * eviction could occur since both entities want it in an exclusive state.
3656 static int tg3_rx(struct tg3
*tp
, int budget
)
3658 u32 work_mask
, rx_std_posted
= 0;
3659 u32 sw_idx
= tp
->rx_rcb_ptr
;
3663 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3665 * We need to order the read of hw_idx and the read of
3666 * the opaque cookie.
3671 while (sw_idx
!= hw_idx
&& budget
> 0) {
3672 struct tg3_rx_buffer_desc
*desc
= &tp
->rx_rcb
[sw_idx
];
3674 struct sk_buff
*skb
;
3675 dma_addr_t dma_addr
;
3676 u32 opaque_key
, desc_idx
, *post_ptr
;
3678 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
3679 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
3680 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
3681 dma_addr
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
],
3683 skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
3684 post_ptr
= &tp
->rx_std_ptr
;
3686 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
3687 dma_addr
= pci_unmap_addr(&tp
->rx_jumbo_buffers
[desc_idx
],
3689 skb
= tp
->rx_jumbo_buffers
[desc_idx
].skb
;
3690 post_ptr
= &tp
->rx_jumbo_ptr
;
3693 goto next_pkt_nopost
;
3696 work_mask
|= opaque_key
;
3698 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
3699 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
3701 tg3_recycle_rx(tp
, opaque_key
,
3702 desc_idx
, *post_ptr
);
3704 /* Other statistics kept track of by card. */
3705 tp
->net_stats
.rx_dropped
++;
3709 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4; /* omit crc */
3711 if (len
> RX_COPY_THRESHOLD
3712 && tp
->rx_offset
== 2
3713 /* rx_offset != 2 iff this is a 5701 card running
3714 * in PCI-X mode [see tg3_get_invariants()] */
3718 skb_size
= tg3_alloc_rx_skb(tp
, opaque_key
,
3719 desc_idx
, *post_ptr
);
3723 pci_unmap_single(tp
->pdev
, dma_addr
,
3724 skb_size
- tp
->rx_offset
,
3725 PCI_DMA_FROMDEVICE
);
3729 struct sk_buff
*copy_skb
;
3731 tg3_recycle_rx(tp
, opaque_key
,
3732 desc_idx
, *post_ptr
);
3734 copy_skb
= netdev_alloc_skb(tp
->dev
, len
+ 2);
3735 if (copy_skb
== NULL
)
3736 goto drop_it_no_recycle
;
3738 skb_reserve(copy_skb
, 2);
3739 skb_put(copy_skb
, len
);
3740 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3741 skb_copy_from_linear_data(skb
, copy_skb
->data
, len
);
3742 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3744 /* We'll reuse the original ring buffer. */
3748 if ((tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) &&
3749 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
3750 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
3751 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
3752 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3754 skb
->ip_summed
= CHECKSUM_NONE
;
3756 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
3757 #if TG3_VLAN_TAG_USED
3758 if (tp
->vlgrp
!= NULL
&&
3759 desc
->type_flags
& RXD_FLAG_VLAN
) {
3760 tg3_vlan_rx(tp
, skb
,
3761 desc
->err_vlan
& RXD_VLAN_MASK
);
3764 netif_receive_skb(skb
);
3766 tp
->dev
->last_rx
= jiffies
;
3773 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
3774 u32 idx
= *post_ptr
% TG3_RX_RING_SIZE
;
3776 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+
3777 TG3_64BIT_REG_LOW
, idx
);
3778 work_mask
&= ~RXD_OPAQUE_RING_STD
;
3783 sw_idx
&= (TG3_RX_RCB_RING_SIZE(tp
) - 1);
3785 /* Refresh hw_idx to see if there is new work */
3786 if (sw_idx
== hw_idx
) {
3787 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3792 /* ACK the status ring. */
3793 tp
->rx_rcb_ptr
= sw_idx
;
3794 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, sw_idx
);
3796 /* Refill RX ring(s). */
3797 if (work_mask
& RXD_OPAQUE_RING_STD
) {
3798 sw_idx
= tp
->rx_std_ptr
% TG3_RX_RING_SIZE
;
3799 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3802 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
3803 sw_idx
= tp
->rx_jumbo_ptr
% TG3_RX_JUMBO_RING_SIZE
;
3804 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3812 static int tg3_poll_work(struct tg3
*tp
, int work_done
, int budget
)
3814 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3816 /* handle link change and other phy events */
3817 if (!(tp
->tg3_flags
&
3818 (TG3_FLAG_USE_LINKCHG_REG
|
3819 TG3_FLAG_POLL_SERDES
))) {
3820 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
3821 sblk
->status
= SD_STATUS_UPDATED
|
3822 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
3823 spin_lock(&tp
->lock
);
3824 tg3_setup_phy(tp
, 0);
3825 spin_unlock(&tp
->lock
);
3829 /* run TX completion thread */
3830 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
) {
3832 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
))
3836 /* run RX thread, within the bounds set by NAPI.
3837 * All RX "locking" is done by ensuring outside
3838 * code synchronizes with tg3->napi.poll()
3840 if (sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
)
3841 work_done
+= tg3_rx(tp
, budget
- work_done
);
3846 static int tg3_poll(struct napi_struct
*napi
, int budget
)
3848 struct tg3
*tp
= container_of(napi
, struct tg3
, napi
);
3850 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3853 work_done
= tg3_poll_work(tp
, work_done
, budget
);
3855 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
))
3858 if (unlikely(work_done
>= budget
))
3861 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
3862 /* tp->last_tag is used in tg3_restart_ints() below
3863 * to tell the hw how much work has been processed,
3864 * so we must read it before checking for more work.
3866 tp
->last_tag
= sblk
->status_tag
;
3869 sblk
->status
&= ~SD_STATUS_UPDATED
;
3871 if (likely(!tg3_has_work(tp
))) {
3872 netif_rx_complete(tp
->dev
, napi
);
3873 tg3_restart_ints(tp
);
3881 /* work_done is guaranteed to be less than budget. */
3882 netif_rx_complete(tp
->dev
, napi
);
3883 schedule_work(&tp
->reset_task
);
3887 static void tg3_irq_quiesce(struct tg3
*tp
)
3889 BUG_ON(tp
->irq_sync
);
3894 synchronize_irq(tp
->pdev
->irq
);
3897 static inline int tg3_irq_sync(struct tg3
*tp
)
3899 return tp
->irq_sync
;
3902 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3903 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3904 * with as well. Most of the time, this is not necessary except when
3905 * shutting down the device.
3907 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
3909 spin_lock_bh(&tp
->lock
);
3911 tg3_irq_quiesce(tp
);
3914 static inline void tg3_full_unlock(struct tg3
*tp
)
3916 spin_unlock_bh(&tp
->lock
);
3919 /* One-shot MSI handler - Chip automatically disables interrupt
3920 * after sending MSI so driver doesn't have to do it.
3922 static irqreturn_t
tg3_msi_1shot(int irq
, void *dev_id
)
3924 struct net_device
*dev
= dev_id
;
3925 struct tg3
*tp
= netdev_priv(dev
);
3927 prefetch(tp
->hw_status
);
3928 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3930 if (likely(!tg3_irq_sync(tp
)))
3931 netif_rx_schedule(dev
, &tp
->napi
);
3936 /* MSI ISR - No need to check for interrupt sharing and no need to
3937 * flush status block and interrupt mailbox. PCI ordering rules
3938 * guarantee that MSI will arrive after the status block.
3940 static irqreturn_t
tg3_msi(int irq
, void *dev_id
)
3942 struct net_device
*dev
= dev_id
;
3943 struct tg3
*tp
= netdev_priv(dev
);
3945 prefetch(tp
->hw_status
);
3946 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3948 * Writing any value to intr-mbox-0 clears PCI INTA# and
3949 * chip-internal interrupt pending events.
3950 * Writing non-zero to intr-mbox-0 additional tells the
3951 * NIC to stop sending us irqs, engaging "in-intr-handler"
3954 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
3955 if (likely(!tg3_irq_sync(tp
)))
3956 netif_rx_schedule(dev
, &tp
->napi
);
3958 return IRQ_RETVAL(1);
3961 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
)
3963 struct net_device
*dev
= dev_id
;
3964 struct tg3
*tp
= netdev_priv(dev
);
3965 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3966 unsigned int handled
= 1;
3968 /* In INTx mode, it is possible for the interrupt to arrive at
3969 * the CPU before the status block posted prior to the interrupt.
3970 * Reading the PCI State register will confirm whether the
3971 * interrupt is ours and will flush the status block.
3973 if (unlikely(!(sblk
->status
& SD_STATUS_UPDATED
))) {
3974 if ((tp
->tg3_flags
& TG3_FLAG_CHIP_RESETTING
) ||
3975 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3982 * Writing any value to intr-mbox-0 clears PCI INTA# and
3983 * chip-internal interrupt pending events.
3984 * Writing non-zero to intr-mbox-0 additional tells the
3985 * NIC to stop sending us irqs, engaging "in-intr-handler"
3988 * Flush the mailbox to de-assert the IRQ immediately to prevent
3989 * spurious interrupts. The flush impacts performance but
3990 * excessive spurious interrupts can be worse in some cases.
3992 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
3993 if (tg3_irq_sync(tp
))
3995 sblk
->status
&= ~SD_STATUS_UPDATED
;
3996 if (likely(tg3_has_work(tp
))) {
3997 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3998 netif_rx_schedule(dev
, &tp
->napi
);
4000 /* No work, shared interrupt perhaps? re-enable
4001 * interrupts, and flush that PCI write
4003 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
4007 return IRQ_RETVAL(handled
);
4010 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
)
4012 struct net_device
*dev
= dev_id
;
4013 struct tg3
*tp
= netdev_priv(dev
);
4014 struct tg3_hw_status
*sblk
= tp
->hw_status
;
4015 unsigned int handled
= 1;
4017 /* In INTx mode, it is possible for the interrupt to arrive at
4018 * the CPU before the status block posted prior to the interrupt.
4019 * Reading the PCI State register will confirm whether the
4020 * interrupt is ours and will flush the status block.
4022 if (unlikely(sblk
->status_tag
== tp
->last_tag
)) {
4023 if ((tp
->tg3_flags
& TG3_FLAG_CHIP_RESETTING
) ||
4024 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
4031 * writing any value to intr-mbox-0 clears PCI INTA# and
4032 * chip-internal interrupt pending events.
4033 * writing non-zero to intr-mbox-0 additional tells the
4034 * NIC to stop sending us irqs, engaging "in-intr-handler"
4037 * Flush the mailbox to de-assert the IRQ immediately to prevent
4038 * spurious interrupts. The flush impacts performance but
4039 * excessive spurious interrupts can be worse in some cases.
4041 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
4042 if (tg3_irq_sync(tp
))
4044 if (netif_rx_schedule_prep(dev
, &tp
->napi
)) {
4045 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
4046 /* Update last_tag to mark that this status has been
4047 * seen. Because interrupt may be shared, we may be
4048 * racing with tg3_poll(), so only update last_tag
4049 * if tg3_poll() is not scheduled.
4051 tp
->last_tag
= sblk
->status_tag
;
4052 __netif_rx_schedule(dev
, &tp
->napi
);
4055 return IRQ_RETVAL(handled
);
4058 /* ISR for interrupt test */
4059 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
)
4061 struct net_device
*dev
= dev_id
;
4062 struct tg3
*tp
= netdev_priv(dev
);
4063 struct tg3_hw_status
*sblk
= tp
->hw_status
;
4065 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
4066 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
4067 tg3_disable_ints(tp
);
4068 return IRQ_RETVAL(1);
4070 return IRQ_RETVAL(0);
4073 static int tg3_init_hw(struct tg3
*, int);
4074 static int tg3_halt(struct tg3
*, int, int);
4076 /* Restart hardware after configuration changes, self-test, etc.
4077 * Invoked with tp->lock held.
4079 static int tg3_restart_hw(struct tg3
*tp
, int reset_phy
)
4080 __releases(tp
->lock
)
4081 __acquires(tp
->lock
)
4085 err
= tg3_init_hw(tp
, reset_phy
);
4087 printk(KERN_ERR PFX
"%s: Failed to re-initialize device, "
4088 "aborting.\n", tp
->dev
->name
);
4089 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
4090 tg3_full_unlock(tp
);
4091 del_timer_sync(&tp
->timer
);
4093 napi_enable(&tp
->napi
);
4095 tg3_full_lock(tp
, 0);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the INTx handler directly. */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
4109 static void tg3_reset_task(struct work_struct
*work
)
4111 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
4112 unsigned int restart_timer
;
4114 tg3_full_lock(tp
, 0);
4116 if (!netif_running(tp
->dev
)) {
4117 tg3_full_unlock(tp
);
4121 tg3_full_unlock(tp
);
4125 tg3_full_lock(tp
, 1);
4127 restart_timer
= tp
->tg3_flags2
& TG3_FLG2_RESTART_TIMER
;
4128 tp
->tg3_flags2
&= ~TG3_FLG2_RESTART_TIMER
;
4130 if (tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
) {
4131 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
4132 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
4133 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
4134 tp
->tg3_flags
&= ~TG3_FLAG_TX_RECOVERY_PENDING
;
4137 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
4138 if (tg3_init_hw(tp
, 1))
4141 tg3_netif_start(tp
);
4144 mod_timer(&tp
->timer
, jiffies
+ 1);
4147 tg3_full_unlock(tp
);
4150 static void tg3_dump_short_state(struct tg3
*tp
)
4152 printk(KERN_ERR PFX
"DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4153 tr32(MAC_TX_STATUS
), tr32(MAC_RX_STATUS
));
4154 printk(KERN_ERR PFX
"DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4155 tr32(RDMAC_STATUS
), tr32(WDMAC_STATUS
));
4158 static void tg3_tx_timeout(struct net_device
*dev
)
4160 struct tg3
*tp
= netdev_priv(dev
);
4162 if (netif_msg_tx_err(tp
)) {
4163 printk(KERN_ERR PFX
"%s: transmit timed out, resetting\n",
4165 tg3_dump_short_state(tp
);
4168 schedule_work(&tp
->reset_task
);
4171 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4172 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
4174 u32 base
= (u32
) mapping
& 0xffffffff;
4176 return ((base
> 0xffffdcc0) &&
4177 (base
+ len
+ 8 < base
));
4180 /* Test for DMA addresses > 40-bit */
4181 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
4184 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4185 if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
)
4186 return (((u64
) mapping
+ len
) > DMA_40BIT_MASK
);
4193 static void tg3_set_txd(struct tg3
*, int, dma_addr_t
, int, u32
, u32
);
4195 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4196 static int tigon3_dma_hwbug_workaround(struct tg3
*tp
, struct sk_buff
*skb
,
4197 u32 last_plus_one
, u32
*start
,
4198 u32 base_flags
, u32 mss
)
4200 struct sk_buff
*new_skb
;
4201 dma_addr_t new_addr
= 0;
4205 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
4206 new_skb
= skb_copy(skb
, GFP_ATOMIC
);
4208 int more_headroom
= 4 - ((unsigned long)skb
->data
& 3);
4210 new_skb
= skb_copy_expand(skb
,
4211 skb_headroom(skb
) + more_headroom
,
4212 skb_tailroom(skb
), GFP_ATOMIC
);
4218 /* New SKB is guaranteed to be linear. */
4220 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
4222 /* Make sure new skb does not cross any 4G boundaries.
4223 * Drop the packet if it does.
4225 if (tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
4227 dev_kfree_skb(new_skb
);
4230 tg3_set_txd(tp
, entry
, new_addr
, new_skb
->len
,
4231 base_flags
, 1 | (mss
<< 1));
4232 *start
= NEXT_TX(entry
);
4236 /* Now clean up the sw ring entries. */
4238 while (entry
!= last_plus_one
) {
4242 len
= skb_headlen(skb
);
4244 len
= skb_shinfo(skb
)->frags
[i
-1].size
;
4245 pci_unmap_single(tp
->pdev
,
4246 pci_unmap_addr(&tp
->tx_buffers
[entry
], mapping
),
4247 len
, PCI_DMA_TODEVICE
);
4249 tp
->tx_buffers
[entry
].skb
= new_skb
;
4250 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, new_addr
);
4252 tp
->tx_buffers
[entry
].skb
= NULL
;
4254 entry
= NEXT_TX(entry
);
4263 static void tg3_set_txd(struct tg3
*tp
, int entry
,
4264 dma_addr_t mapping
, int len
, u32 flags
,
4267 struct tg3_tx_buffer_desc
*txd
= &tp
->tx_ring
[entry
];
4268 int is_end
= (mss_and_is_end
& 0x1);
4269 u32 mss
= (mss_and_is_end
>> 1);
4273 flags
|= TXD_FLAG_END
;
4274 if (flags
& TXD_FLAG_VLAN
) {
4275 vlan_tag
= flags
>> 16;
4278 vlan_tag
|= (mss
<< TXD_MSS_SHIFT
);
4280 txd
->addr_hi
= ((u64
) mapping
>> 32);
4281 txd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
4282 txd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | flags
;
4283 txd
->vlan_tag
= vlan_tag
<< TXD_VLAN_TAG_SHIFT
;
4286 /* hard_start_xmit for devices that don't have any bugs and
4287 * support TG3_FLG2_HW_TSO_2 only.
4289 static int tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4291 struct tg3
*tp
= netdev_priv(dev
);
4293 u32 len
, entry
, base_flags
, mss
;
4295 len
= skb_headlen(skb
);
4297 /* We are running in BH disabled context with netif_tx_lock
4298 * and TX reclaim runs via tp->napi.poll inside of a software
4299 * interrupt. Furthermore, IRQ processing runs lockless so we have
4300 * no IRQ context deadlocks to worry about either. Rejoice!
4302 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
4303 if (!netif_queue_stopped(dev
)) {
4304 netif_stop_queue(dev
);
4306 /* This is a hard error, log it. */
4307 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
4308 "queue awake!\n", dev
->name
);
4310 return NETDEV_TX_BUSY
;
4313 entry
= tp
->tx_prod
;
4316 if ((mss
= skb_shinfo(skb
)->gso_size
) != 0) {
4317 int tcp_opt_len
, ip_tcp_len
;
4319 if (skb_header_cloned(skb
) &&
4320 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
4325 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
4326 mss
|= (skb_headlen(skb
) - ETH_HLEN
) << 9;
4328 struct iphdr
*iph
= ip_hdr(skb
);
4330 tcp_opt_len
= tcp_optlen(skb
);
4331 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
4334 iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
4335 mss
|= (ip_tcp_len
+ tcp_opt_len
) << 9;
4338 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
4339 TXD_FLAG_CPU_POST_DMA
);
4341 tcp_hdr(skb
)->check
= 0;
4344 else if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
4345 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
4346 #if TG3_VLAN_TAG_USED
4347 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
4348 base_flags
|= (TXD_FLAG_VLAN
|
4349 (vlan_tx_tag_get(skb
) << 16));
4352 /* Queue skb data, a.k.a. the main skb fragment. */
4353 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
4355 tp
->tx_buffers
[entry
].skb
= skb
;
4356 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4358 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
4359 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
4361 entry
= NEXT_TX(entry
);
4363 /* Now loop through additional data fragments, and queue them. */
4364 if (skb_shinfo(skb
)->nr_frags
> 0) {
4365 unsigned int i
, last
;
4367 last
= skb_shinfo(skb
)->nr_frags
- 1;
4368 for (i
= 0; i
<= last
; i
++) {
4369 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4372 mapping
= pci_map_page(tp
->pdev
,
4375 len
, PCI_DMA_TODEVICE
);
4377 tp
->tx_buffers
[entry
].skb
= NULL
;
4378 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4380 tg3_set_txd(tp
, entry
, mapping
, len
,
4381 base_flags
, (i
== last
) | (mss
<< 1));
4383 entry
= NEXT_TX(entry
);
4387 /* Packets are ready, update Tx producer idx local and on card. */
4388 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
4390 tp
->tx_prod
= entry
;
4391 if (unlikely(tg3_tx_avail(tp
) <= (MAX_SKB_FRAGS
+ 1))) {
4392 netif_stop_queue(dev
);
4393 if (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH(tp
))
4394 netif_wake_queue(tp
->dev
);
4400 dev
->trans_start
= jiffies
;
4402 return NETDEV_TX_OK
;
4405 static int tg3_start_xmit_dma_bug(struct sk_buff
*, struct net_device
*);
4407 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4408 * TSO header is greater than 80 bytes.
4410 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
4412 struct sk_buff
*segs
, *nskb
;
4414 /* Estimate the number of fragments in the worst case */
4415 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->gso_segs
* 3))) {
4416 netif_stop_queue(tp
->dev
);
4417 if (tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->gso_segs
* 3))
4418 return NETDEV_TX_BUSY
;
4420 netif_wake_queue(tp
->dev
);
4423 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
4425 goto tg3_tso_bug_end
;
4431 tg3_start_xmit_dma_bug(nskb
, tp
->dev
);
4437 return NETDEV_TX_OK
;
4440 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4441 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4443 static int tg3_start_xmit_dma_bug(struct sk_buff
*skb
, struct net_device
*dev
)
4445 struct tg3
*tp
= netdev_priv(dev
);
4447 u32 len
, entry
, base_flags
, mss
;
4448 int would_hit_hwbug
;
4450 len
= skb_headlen(skb
);
4452 /* We are running in BH disabled context with netif_tx_lock
4453 * and TX reclaim runs via tp->napi.poll inside of a software
4454 * interrupt. Furthermore, IRQ processing runs lockless so we have
4455 * no IRQ context deadlocks to worry about either. Rejoice!
4457 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
4458 if (!netif_queue_stopped(dev
)) {
4459 netif_stop_queue(dev
);
4461 /* This is a hard error, log it. */
4462 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
4463 "queue awake!\n", dev
->name
);
4465 return NETDEV_TX_BUSY
;
4468 entry
= tp
->tx_prod
;
4470 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
4471 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
4473 if ((mss
= skb_shinfo(skb
)->gso_size
) != 0) {
4475 int tcp_opt_len
, ip_tcp_len
, hdr_len
;
4477 if (skb_header_cloned(skb
) &&
4478 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
4483 tcp_opt_len
= tcp_optlen(skb
);
4484 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
4486 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
4487 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
4488 (tp
->tg3_flags2
& TG3_FLG2_TSO_BUG
))
4489 return (tg3_tso_bug(tp
, skb
));
4491 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
4492 TXD_FLAG_CPU_POST_DMA
);
4496 iph
->tot_len
= htons(mss
+ hdr_len
);
4497 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
4498 tcp_hdr(skb
)->check
= 0;
4499 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
4501 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
4506 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) ||
4507 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)) {
4508 if (tcp_opt_len
|| iph
->ihl
> 5) {
4511 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
4512 mss
|= (tsflags
<< 11);
4515 if (tcp_opt_len
|| iph
->ihl
> 5) {
4518 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
4519 base_flags
|= tsflags
<< 12;
4523 #if TG3_VLAN_TAG_USED
4524 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
4525 base_flags
|= (TXD_FLAG_VLAN
|
4526 (vlan_tx_tag_get(skb
) << 16));
4529 /* Queue skb data, a.k.a. the main skb fragment. */
4530 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
4532 tp
->tx_buffers
[entry
].skb
= skb
;
4533 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4535 would_hit_hwbug
= 0;
4537 if (tp
->tg3_flags3
& TG3_FLG3_5701_DMA_BUG
)
4538 would_hit_hwbug
= 1;
4539 else if (tg3_4g_overflow_test(mapping
, len
))
4540 would_hit_hwbug
= 1;
4542 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
4543 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
4545 entry
= NEXT_TX(entry
);
4547 /* Now loop through additional data fragments, and queue them. */
4548 if (skb_shinfo(skb
)->nr_frags
> 0) {
4549 unsigned int i
, last
;
4551 last
= skb_shinfo(skb
)->nr_frags
- 1;
4552 for (i
= 0; i
<= last
; i
++) {
4553 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4556 mapping
= pci_map_page(tp
->pdev
,
4559 len
, PCI_DMA_TODEVICE
);
4561 tp
->tx_buffers
[entry
].skb
= NULL
;
4562 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4564 if (tg3_4g_overflow_test(mapping
, len
))
4565 would_hit_hwbug
= 1;
4567 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
4568 would_hit_hwbug
= 1;
4570 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
4571 tg3_set_txd(tp
, entry
, mapping
, len
,
4572 base_flags
, (i
== last
)|(mss
<< 1));
4574 tg3_set_txd(tp
, entry
, mapping
, len
,
4575 base_flags
, (i
== last
));
4577 entry
= NEXT_TX(entry
);
4581 if (would_hit_hwbug
) {
4582 u32 last_plus_one
= entry
;
4585 start
= entry
- 1 - skb_shinfo(skb
)->nr_frags
;
4586 start
&= (TG3_TX_RING_SIZE
- 1);
4588 /* If the workaround fails due to memory/mapping
4589 * failure, silently drop this packet.
4591 if (tigon3_dma_hwbug_workaround(tp
, skb
, last_plus_one
,
4592 &start
, base_flags
, mss
))
4598 /* Packets are ready, update Tx producer idx local and on card. */
4599 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
4601 tp
->tx_prod
= entry
;
4602 if (unlikely(tg3_tx_avail(tp
) <= (MAX_SKB_FRAGS
+ 1))) {
4603 netif_stop_queue(dev
);
4604 if (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH(tp
))
4605 netif_wake_queue(tp
->dev
);
4611 dev
->trans_start
= jiffies
;
4613 return NETDEV_TX_OK
;
4616 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
4621 if (new_mtu
> ETH_DATA_LEN
) {
4622 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4623 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
4624 ethtool_op_set_tso(dev
, 0);
4627 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
4629 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
4630 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
4631 tp
->tg3_flags
&= ~TG3_FLAG_JUMBO_RING_ENABLE
;
4635 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
4637 struct tg3
*tp
= netdev_priv(dev
);
4640 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
4643 if (!netif_running(dev
)) {
4644 /* We'll just catch it later when the
4647 tg3_set_mtu(dev
, tp
, new_mtu
);
4653 tg3_full_lock(tp
, 1);
4655 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
4657 tg3_set_mtu(dev
, tp
, new_mtu
);
4659 err
= tg3_restart_hw(tp
, 0);
4662 tg3_netif_start(tp
);
4664 tg3_full_unlock(tp
);
4669 /* Free up pending packets in all rx/tx rings.
4671 * The chip has been shut down and the driver detached from
4672 * the networking, so no interrupts or new tx packets will
4673 * end up in the driver. tp->{tx,}lock is not held and we are not
4674 * in an interrupt context and thus may sleep.
4676 static void tg3_free_rings(struct tg3
*tp
)
4678 struct ring_info
*rxp
;
4681 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
4682 rxp
= &tp
->rx_std_buffers
[i
];
4684 if (rxp
->skb
== NULL
)
4686 pci_unmap_single(tp
->pdev
,
4687 pci_unmap_addr(rxp
, mapping
),
4688 tp
->rx_pkt_buf_sz
- tp
->rx_offset
,
4689 PCI_DMA_FROMDEVICE
);
4690 dev_kfree_skb_any(rxp
->skb
);
4694 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
4695 rxp
= &tp
->rx_jumbo_buffers
[i
];
4697 if (rxp
->skb
== NULL
)
4699 pci_unmap_single(tp
->pdev
,
4700 pci_unmap_addr(rxp
, mapping
),
4701 RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
,
4702 PCI_DMA_FROMDEVICE
);
4703 dev_kfree_skb_any(rxp
->skb
);
4707 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
4708 struct tx_ring_info
*txp
;
4709 struct sk_buff
*skb
;
4712 txp
= &tp
->tx_buffers
[i
];
4720 pci_unmap_single(tp
->pdev
,
4721 pci_unmap_addr(txp
, mapping
),
4728 for (j
= 0; j
< skb_shinfo(skb
)->nr_frags
; j
++) {
4729 txp
= &tp
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
4730 pci_unmap_page(tp
->pdev
,
4731 pci_unmap_addr(txp
, mapping
),
4732 skb_shinfo(skb
)->frags
[j
].size
,
4737 dev_kfree_skb_any(skb
);
4741 /* Initialize tx/rx rings for packet processing.
4743 * The chip has been shut down and the driver detached from
4744 * the networking, so no interrupts or new tx packets will
4745 * end up in the driver. tp->{tx,}lock are held and thus
4748 static int tg3_init_rings(struct tg3
*tp
)
4752 /* Free up all the SKBs. */
4755 /* Zero out all descriptors. */
4756 memset(tp
->rx_std
, 0, TG3_RX_RING_BYTES
);
4757 memset(tp
->rx_jumbo
, 0, TG3_RX_JUMBO_RING_BYTES
);
4758 memset(tp
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
4759 memset(tp
->tx_ring
, 0, TG3_TX_RING_BYTES
);
4761 tp
->rx_pkt_buf_sz
= RX_PKT_BUF_SZ
;
4762 if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) &&
4763 (tp
->dev
->mtu
> ETH_DATA_LEN
))
4764 tp
->rx_pkt_buf_sz
= RX_JUMBO_PKT_BUF_SZ
;
4766 /* Initialize invariants of the rings, we only set this
4767 * stuff once. This works because the card does not
4768 * write into the rx buffer posting rings.
4770 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
4771 struct tg3_rx_buffer_desc
*rxd
;
4773 rxd
= &tp
->rx_std
[i
];
4774 rxd
->idx_len
= (tp
->rx_pkt_buf_sz
- tp
->rx_offset
- 64)
4776 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
4777 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
4778 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4781 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4782 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
4783 struct tg3_rx_buffer_desc
*rxd
;
4785 rxd
= &tp
->rx_jumbo
[i
];
4786 rxd
->idx_len
= (RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
- 64)
4788 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
4790 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
4791 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4795 /* Now allocate fresh SKBs for each rx ring. */
4796 for (i
= 0; i
< tp
->rx_pending
; i
++) {
4797 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_STD
, -1, i
) < 0) {
4798 printk(KERN_WARNING PFX
4799 "%s: Using a smaller RX standard ring, "
4800 "only %d out of %d buffers were allocated "
4802 tp
->dev
->name
, i
, tp
->rx_pending
);
4810 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4811 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
4812 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_JUMBO
,
4814 printk(KERN_WARNING PFX
4815 "%s: Using a smaller RX jumbo ring, "
4816 "only %d out of %d buffers were "
4817 "allocated successfully.\n",
4818 tp
->dev
->name
, i
, tp
->rx_jumbo_pending
);
4823 tp
->rx_jumbo_pending
= i
;
4832 * Must not be invoked with interrupt sources disabled and
4833 * the hardware shutdown down.
4835 static void tg3_free_consistent(struct tg3
*tp
)
4837 kfree(tp
->rx_std_buffers
);
4838 tp
->rx_std_buffers
= NULL
;
4840 pci_free_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4841 tp
->rx_std
, tp
->rx_std_mapping
);
4845 pci_free_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4846 tp
->rx_jumbo
, tp
->rx_jumbo_mapping
);
4847 tp
->rx_jumbo
= NULL
;
4850 pci_free_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4851 tp
->rx_rcb
, tp
->rx_rcb_mapping
);
4855 pci_free_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4856 tp
->tx_ring
, tp
->tx_desc_mapping
);
4859 if (tp
->hw_status
) {
4860 pci_free_consistent(tp
->pdev
, TG3_HW_STATUS_SIZE
,
4861 tp
->hw_status
, tp
->status_mapping
);
4862 tp
->hw_status
= NULL
;
4865 pci_free_consistent(tp
->pdev
, sizeof(struct tg3_hw_stats
),
4866 tp
->hw_stats
, tp
->stats_mapping
);
4867 tp
->hw_stats
= NULL
;
4872 * Must not be invoked with interrupt sources disabled and
4873 * the hardware shutdown down. Can sleep.
4875 static int tg3_alloc_consistent(struct tg3
*tp
)
4877 tp
->rx_std_buffers
= kzalloc((sizeof(struct ring_info
) *
4879 TG3_RX_JUMBO_RING_SIZE
)) +
4880 (sizeof(struct tx_ring_info
) *
4883 if (!tp
->rx_std_buffers
)
4886 tp
->rx_jumbo_buffers
= &tp
->rx_std_buffers
[TG3_RX_RING_SIZE
];
4887 tp
->tx_buffers
= (struct tx_ring_info
*)
4888 &tp
->rx_jumbo_buffers
[TG3_RX_JUMBO_RING_SIZE
];
4890 tp
->rx_std
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4891 &tp
->rx_std_mapping
);
4895 tp
->rx_jumbo
= pci_alloc_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4896 &tp
->rx_jumbo_mapping
);
4901 tp
->rx_rcb
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4902 &tp
->rx_rcb_mapping
);
4906 tp
->tx_ring
= pci_alloc_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4907 &tp
->tx_desc_mapping
);
4911 tp
->hw_status
= pci_alloc_consistent(tp
->pdev
,
4913 &tp
->status_mapping
);
4917 tp
->hw_stats
= pci_alloc_consistent(tp
->pdev
,
4918 sizeof(struct tg3_hw_stats
),
4919 &tp
->stats_mapping
);
4923 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4924 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4929 tg3_free_consistent(tp
);
4933 #define MAX_WAIT_CNT 1000
4935 /* To stop a block, clear the enable bit and poll till it
4936 * clears. tp->lock is held.
4938 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
4943 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
4950 /* We can't enable/disable these bits of the
4951 * 5705/5750, just say success.
4964 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4967 if ((val
& enable_bit
) == 0)
4971 if (i
== MAX_WAIT_CNT
&& !silent
) {
4972 printk(KERN_ERR PFX
"tg3_stop_block timed out, "
4973 "ofs=%lx enable_bit=%x\n",
4981 /* tp->lock is held. */
4982 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
4986 tg3_disable_ints(tp
);
4988 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
4989 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
4992 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
4993 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
4994 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
4995 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
4996 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
4997 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
4999 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
5000 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
5001 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
5002 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
5003 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
5004 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
5005 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
5007 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
5008 tw32_f(MAC_MODE
, tp
->mac_mode
);
5011 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
5012 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
5014 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
5016 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
5019 if (i
>= MAX_WAIT_CNT
) {
5020 printk(KERN_ERR PFX
"tg3_abort_hw timed out for %s, "
5021 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5022 tp
->dev
->name
, tr32(MAC_TX_MODE
));
5026 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
5027 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
5028 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
5030 tw32(FTQ_RESET
, 0xffffffff);
5031 tw32(FTQ_RESET
, 0x00000000);
5033 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
5034 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
5037 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
5039 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
5044 /* tp->lock is held. */
5045 static int tg3_nvram_lock(struct tg3
*tp
)
5047 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
5050 if (tp
->nvram_lock_cnt
== 0) {
5051 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
5052 for (i
= 0; i
< 8000; i
++) {
5053 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
5058 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
5062 tp
->nvram_lock_cnt
++;
5067 /* tp->lock is held. */
5068 static void tg3_nvram_unlock(struct tg3
*tp
)
5070 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
5071 if (tp
->nvram_lock_cnt
> 0)
5072 tp
->nvram_lock_cnt
--;
5073 if (tp
->nvram_lock_cnt
== 0)
5074 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
5078 /* tp->lock is held. */
5079 static void tg3_enable_nvram_access(struct tg3
*tp
)
5081 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
5082 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
5083 u32 nvaccess
= tr32(NVRAM_ACCESS
);
5085 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
5089 /* tp->lock is held. */
5090 static void tg3_disable_nvram_access(struct tg3
*tp
)
5092 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
5093 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
5094 u32 nvaccess
= tr32(NVRAM_ACCESS
);
5096 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
5100 static void tg3_ape_send_event(struct tg3
*tp
, u32 event
)
5105 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
5106 if (apedata
!= APE_SEG_SIG_MAGIC
)
5109 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
5110 if (apedata
!= APE_FW_STATUS_READY
)
5113 /* Wait for up to 1 millisecond for APE to service previous event. */
5114 for (i
= 0; i
< 10; i
++) {
5115 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
5118 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
5120 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
5121 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
5122 event
| APE_EVENT_STATUS_EVENT_PENDING
);
5124 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
5126 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
5132 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
5133 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
5136 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
5141 if (!(tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
))
5145 case RESET_KIND_INIT
:
5146 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
5147 APE_HOST_SEG_SIG_MAGIC
);
5148 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
5149 APE_HOST_SEG_LEN_MAGIC
);
5150 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
5151 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
5152 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
5153 APE_HOST_DRIVER_ID_MAGIC
);
5154 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
5155 APE_HOST_BEHAV_NO_PHYLOCK
);
5157 event
= APE_EVENT_STATUS_STATE_START
;
5159 case RESET_KIND_SHUTDOWN
:
5160 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
5162 case RESET_KIND_SUSPEND
:
5163 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
5169 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
5171 tg3_ape_send_event(tp
, event
);
5174 /* tp->lock is held. */
5175 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
5177 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
5178 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
5180 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
5182 case RESET_KIND_INIT
:
5183 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5187 case RESET_KIND_SHUTDOWN
:
5188 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5192 case RESET_KIND_SUSPEND
:
5193 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5202 if (kind
== RESET_KIND_INIT
||
5203 kind
== RESET_KIND_SUSPEND
)
5204 tg3_ape_driver_state_change(tp
, kind
);
5207 /* tp->lock is held. */
5208 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
5210 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
5212 case RESET_KIND_INIT
:
5213 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5214 DRV_STATE_START_DONE
);
5217 case RESET_KIND_SHUTDOWN
:
5218 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5219 DRV_STATE_UNLOAD_DONE
);
5227 if (kind
== RESET_KIND_SHUTDOWN
)
5228 tg3_ape_driver_state_change(tp
, kind
);
5231 /* tp->lock is held. */
5232 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
5234 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
5236 case RESET_KIND_INIT
:
5237 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5241 case RESET_KIND_SHUTDOWN
:
5242 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5246 case RESET_KIND_SUSPEND
:
5247 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
5257 static int tg3_poll_fw(struct tg3
*tp
)
5262 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
5263 /* Wait up to 20ms for init done. */
5264 for (i
= 0; i
< 200; i
++) {
5265 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
5272 /* Wait for firmware initialization to complete. */
5273 for (i
= 0; i
< 100000; i
++) {
5274 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
5275 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
5280 /* Chip might not be fitted with firmware. Some Sun onboard
5281 * parts are configured like that. So don't signal the timeout
5282 * of the above loop as an error, but do report the lack of
5283 * running firmware once.
5286 !(tp
->tg3_flags2
& TG3_FLG2_NO_FWARE_REPORTED
)) {
5287 tp
->tg3_flags2
|= TG3_FLG2_NO_FWARE_REPORTED
;
5289 printk(KERN_INFO PFX
"%s: No firmware running.\n",
5296 /* Save PCI command register before chip reset */
5297 static void tg3_save_pci_state(struct tg3
*tp
)
5299 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &tp
->pci_cmd
);
5302 /* Restore PCI state after chip reset */
5303 static void tg3_restore_pci_state(struct tg3
*tp
)
5307 /* Re-enable indirect register accesses. */
5308 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
5309 tp
->misc_host_ctrl
);
5311 /* Set MAX PCI retry to zero. */
5312 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
5313 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
5314 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
))
5315 val
|= PCISTATE_RETRY_SAME_DMA
;
5316 /* Allow reads and writes to the APE register and memory space. */
5317 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
5318 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
5319 PCISTATE_ALLOW_APE_SHMEM_WR
;
5320 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
5322 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
5324 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
5325 pcie_set_readrq(tp
->pdev
, 4096);
5327 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
5328 tp
->pci_cacheline_sz
);
5329 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
5333 /* Make sure PCI-X relaxed ordering bit is clear. */
5337 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
5339 pcix_cmd
&= ~PCI_X_CMD_ERO
;
5340 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
5344 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
5346 /* Chip reset on 5780 will reset MSI enable bit,
5347 * so need to restore it.
5349 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
5352 pci_read_config_word(tp
->pdev
,
5353 tp
->msi_cap
+ PCI_MSI_FLAGS
,
5355 pci_write_config_word(tp
->pdev
,
5356 tp
->msi_cap
+ PCI_MSI_FLAGS
,
5357 ctrl
| PCI_MSI_FLAGS_ENABLE
);
5358 val
= tr32(MSGINT_MODE
);
5359 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
5364 static void tg3_stop_fw(struct tg3
*);
5366 /* tp->lock is held. */
5367 static int tg3_chip_reset(struct tg3
*tp
)
5370 void (*write_op
)(struct tg3
*, u32
, u32
);
5375 /* No matching tg3_nvram_unlock() after this because
5376 * chip reset below will undo the nvram lock.
5378 tp
->nvram_lock_cnt
= 0;
5380 /* GRC_MISC_CFG core clock reset will clear the memory
5381 * enable bit in PCI register 4 and the MSI enable bit
5382 * on some chips, so we save relevant registers here.
5384 tg3_save_pci_state(tp
);
5386 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
5387 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
5388 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
5389 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
5390 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
5391 tw32(GRC_FASTBOOT_PC
, 0);
5394 * We must avoid the readl() that normally takes place.
5395 * It locks machines, causes machine checks, and other
5396 * fun things. So, temporarily disable the 5701
5397 * hardware workaround, while we do the reset.
5399 write_op
= tp
->write32
;
5400 if (write_op
== tg3_write_flush_reg32
)
5401 tp
->write32
= tg3_write32
;
5403 /* Prevent the irq handler from reading or writing PCI registers
5404 * during chip reset when the memory enable bit in the PCI command
5405 * register may be cleared. The chip does not generate interrupt
5406 * at this time, but the irq handler may still be called due to irq
5407 * sharing or irqpoll.
5409 tp
->tg3_flags
|= TG3_FLAG_CHIP_RESETTING
;
5410 if (tp
->hw_status
) {
5411 tp
->hw_status
->status
= 0;
5412 tp
->hw_status
->status_tag
= 0;
5416 synchronize_irq(tp
->pdev
->irq
);
5419 val
= GRC_MISC_CFG_CORECLK_RESET
;
5421 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
5422 if (tr32(0x7e2c) == 0x60) {
5425 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
5426 tw32(GRC_MISC_CFG
, (1 << 29));
5431 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
5432 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
5433 tw32(GRC_VCPU_EXT_CTRL
,
5434 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
5437 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
5438 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
5439 tw32(GRC_MISC_CFG
, val
);
5441 /* restore 5701 hardware bug workaround write method */
5442 tp
->write32
= write_op
;
5444 /* Unfortunately, we have to delay before the PCI read back.
5445 * Some 575X chips even will not respond to a PCI cfg access
5446 * when the reset command is given to the chip.
5448 * How do these hardware designers expect things to work
5449 * properly if the PCI write is posted for a long period
5450 * of time? It is always necessary to have some method by
5451 * which a register read back can occur to push the write
5452 * out which does the reset.
5454 * For most tg3 variants the trick below was working.
5459 /* Flush PCI posted writes. The normal MMIO registers
5460 * are inaccessible at this time so this is the only
5461 * way to make this reliably (actually, this is no longer
5462 * the case, see above). I tried to use indirect
5463 * register read/write but this upset some 5701 variants.
5465 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
5469 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
5470 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
5474 /* Wait for link training to complete. */
5475 for (i
= 0; i
< 5000; i
++)
5478 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
5479 pci_write_config_dword(tp
->pdev
, 0xc4,
5480 cfg_val
| (1 << 15));
5482 /* Set PCIE max payload size and clear error status. */
5483 pci_write_config_dword(tp
->pdev
, 0xd8, 0xf5000);
5486 tg3_restore_pci_state(tp
);
5488 tp
->tg3_flags
&= ~TG3_FLAG_CHIP_RESETTING
;
5491 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
5492 val
= tr32(MEMARB_MODE
);
5493 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
5495 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
5497 tw32(0x5000, 0x400);
5500 tw32(GRC_MODE
, tp
->grc_mode
);
5502 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
5505 tw32(0xc4, val
| (1 << 15));
5508 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
5509 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5510 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
5511 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
5512 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
5513 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
5516 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
5517 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
5518 tw32_f(MAC_MODE
, tp
->mac_mode
);
5519 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
5520 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
5521 tw32_f(MAC_MODE
, tp
->mac_mode
);
5523 tw32_f(MAC_MODE
, 0);
5526 err
= tg3_poll_fw(tp
);
5530 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
5531 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
5534 tw32(0x7c00, val
| (1 << 25));
5537 /* Reprobe ASF enable state. */
5538 tp
->tg3_flags
&= ~TG3_FLAG_ENABLE_ASF
;
5539 tp
->tg3_flags2
&= ~TG3_FLG2_ASF_NEW_HANDSHAKE
;
5540 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
5541 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
5544 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
5545 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
5546 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
5547 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
5548 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
5555 /* tp->lock is held. */
5556 static void tg3_stop_fw(struct tg3
*tp
)
5558 if ((tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) &&
5559 !(tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)) {
5562 /* Wait for RX cpu to ACK the previous event. */
5563 tg3_wait_for_event_ack(tp
);
5565 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
5566 val
= tr32(GRC_RX_CPU_EVENT
);
5567 val
|= GRC_RX_CPU_DRIVER_EVENT
;
5568 tw32(GRC_RX_CPU_EVENT
, val
);
5570 /* Wait for RX cpu to ACK this event. */
5571 tg3_wait_for_event_ack(tp
);
/* tp->lock is held.
 * Stop the firmware, signal the pre-reset state, abort the hardware
 * and perform a chip reset, then signal the post-reset state.
 * Returns the tg3_chip_reset() result.
 *
 * NOTE(review): mangled source; the tg3_stop_fw() call and the error
 * return were elided and restored by convention - verify against the
 * original file.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
/* Layout of the built-in 5701 A0 RX-CPU patch firmware image. */
#define TG3_FW_RELEASE_MAJOR	0x0
#define TG3_FW_RELASE_MINOR	0x0	/* (sic) historical spelling kept */
#define TG3_FW_RELEASE_FIX	0x0
#define TG3_FW_START_ADDR	0x08000000
#define TG3_FW_TEXT_ADDR	0x08000000
#define TG3_FW_TEXT_LEN		0x9c0
#define TG3_FW_RODATA_ADDR	0x080009c0
#define TG3_FW_RODATA_LEN	0x60
#define TG3_FW_DATA_ADDR	0x08000a40
#define TG3_FW_DATA_LEN		0x20
#define TG3_FW_SBSS_ADDR	0x08000a60
#define TG3_FW_SBSS_LEN		0xc
#define TG3_FW_BSS_ADDR		0x08000a70
#define TG3_FW_BSS_LEN		0x10
5611 static const u32 tg3FwText
[(TG3_FW_TEXT_LEN
/ sizeof(u32
)) + 1] = {
5612 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5613 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5614 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5615 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5616 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5617 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5618 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5619 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5620 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5621 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5622 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5623 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5624 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5625 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5626 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5627 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5628 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5629 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5630 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5631 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5632 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5633 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5634 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5635 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5636 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5638 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5639 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5640 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5641 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5642 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5643 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5644 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5645 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5646 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5647 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5648 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5649 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5650 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5651 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5652 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5653 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5654 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5655 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5656 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5657 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5658 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5659 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5660 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5661 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5662 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5663 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5664 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5665 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5666 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5667 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5668 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5669 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5670 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5671 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5672 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5673 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5674 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5675 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5676 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5677 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5678 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5679 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5680 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5681 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5682 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5683 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5684 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5685 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5686 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5687 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5688 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5689 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5690 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5691 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5692 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5693 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5694 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5695 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5696 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5697 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5698 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5699 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5700 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5701 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5702 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5705 static const u32 tg3FwRodata
[(TG3_FW_RODATA_LEN
/ sizeof(u32
)) + 1] = {
5706 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5707 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5708 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5709 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5713 #if 0 /* All zeros, don't eat up space with it. */
5714 u32 tg3FwData
[(TG3_FW_DATA_LEN
/ sizeof(u32
)) + 1] = {
5715 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5716 0x00000000, 0x00000000, 0x00000000, 0x00000000
/* On-chip scratch memory windows used when loading CPU firmware. */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5725 /* tp->lock is held. */
5726 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
5730 BUG_ON(offset
== TX_CPU_BASE
&&
5731 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
));
5733 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
5734 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
5736 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
5739 if (offset
== RX_CPU_BASE
) {
5740 for (i
= 0; i
< 10000; i
++) {
5741 tw32(offset
+ CPU_STATE
, 0xffffffff);
5742 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
5743 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
5747 tw32(offset
+ CPU_STATE
, 0xffffffff);
5748 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
5751 for (i
= 0; i
< 10000; i
++) {
5752 tw32(offset
+ CPU_STATE
, 0xffffffff);
5753 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
5754 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
5760 printk(KERN_ERR PFX
"tg3_reset_cpu timed out for %s, "
5763 (offset
== RX_CPU_BASE
? "RX" : "TX"));
5767 /* Clear firmware's nvram arbitration. */
5768 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
5769 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
5774 unsigned int text_base
;
5775 unsigned int text_len
;
5776 const u32
*text_data
;
5777 unsigned int rodata_base
;
5778 unsigned int rodata_len
;
5779 const u32
*rodata_data
;
5780 unsigned int data_base
;
5781 unsigned int data_len
;
5782 const u32
*data_data
;
5785 /* tp->lock is held. */
5786 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
, u32 cpu_scratch_base
,
5787 int cpu_scratch_size
, struct fw_info
*info
)
5789 int err
, lock_err
, i
;
5790 void (*write_op
)(struct tg3
*, u32
, u32
);
5792 if (cpu_base
== TX_CPU_BASE
&&
5793 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5794 printk(KERN_ERR PFX
"tg3_load_firmware_cpu: Trying to load "
5795 "TX cpu firmware on %s which is 5705.\n",
5800 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
5801 write_op
= tg3_write_mem
;
5803 write_op
= tg3_write_indirect_reg32
;
5805 /* It is possible that bootcode is still loading at this point.
5806 * Get the nvram lock first before halting the cpu.
5808 lock_err
= tg3_nvram_lock(tp
);
5809 err
= tg3_halt_cpu(tp
, cpu_base
);
5811 tg3_nvram_unlock(tp
);
5815 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
5816 write_op(tp
, cpu_scratch_base
+ i
, 0);
5817 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5818 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
5819 for (i
= 0; i
< (info
->text_len
/ sizeof(u32
)); i
++)
5820 write_op(tp
, (cpu_scratch_base
+
5821 (info
->text_base
& 0xffff) +
5824 info
->text_data
[i
] : 0));
5825 for (i
= 0; i
< (info
->rodata_len
/ sizeof(u32
)); i
++)
5826 write_op(tp
, (cpu_scratch_base
+
5827 (info
->rodata_base
& 0xffff) +
5829 (info
->rodata_data
?
5830 info
->rodata_data
[i
] : 0));
5831 for (i
= 0; i
< (info
->data_len
/ sizeof(u32
)); i
++)
5832 write_op(tp
, (cpu_scratch_base
+
5833 (info
->data_base
& 0xffff) +
5836 info
->data_data
[i
] : 0));
5844 /* tp->lock is held. */
5845 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
5847 struct fw_info info
;
5850 info
.text_base
= TG3_FW_TEXT_ADDR
;
5851 info
.text_len
= TG3_FW_TEXT_LEN
;
5852 info
.text_data
= &tg3FwText
[0];
5853 info
.rodata_base
= TG3_FW_RODATA_ADDR
;
5854 info
.rodata_len
= TG3_FW_RODATA_LEN
;
5855 info
.rodata_data
= &tg3FwRodata
[0];
5856 info
.data_base
= TG3_FW_DATA_ADDR
;
5857 info
.data_len
= TG3_FW_DATA_LEN
;
5858 info
.data_data
= NULL
;
5860 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
5861 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
5866 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
5867 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
5872 /* Now startup only the RX cpu. */
5873 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
5874 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
5876 for (i
= 0; i
< 5; i
++) {
5877 if (tr32(RX_CPU_BASE
+ CPU_PC
) == TG3_FW_TEXT_ADDR
)
5879 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
5880 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
5881 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
5885 printk(KERN_ERR PFX
"tg3_load_firmware fails for %s "
5886 "to set RX CPU PC, is %08x should be %08x\n",
5887 tp
->dev
->name
, tr32(RX_CPU_BASE
+ CPU_PC
),
5891 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
5892 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
/* Layout of the built-in TSO firmware image. */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6	/* (sic) historical spelling kept */
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
5913 static const u32 tg3TsoFwText
[(TG3_TSO_FW_TEXT_LEN
/ 4) + 1] = {
5914 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5915 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5916 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5917 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5918 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5919 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5920 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5921 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5922 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5923 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5924 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5925 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5926 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5927 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5928 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5929 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5930 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5931 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5932 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5933 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5934 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5935 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5936 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5937 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5938 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5939 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5940 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5941 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5942 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5943 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5944 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5945 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5946 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5947 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5948 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5949 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5950 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5951 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5952 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5953 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5954 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5955 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5956 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5957 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5958 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5959 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5960 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5961 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5962 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5963 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5964 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5965 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5966 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5967 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5968 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5969 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5970 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5971 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5972 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5973 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5974 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5975 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5976 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5977 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5978 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5979 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5980 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5981 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5982 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5983 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5984 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5985 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5986 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5987 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5988 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5989 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5990 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5991 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5992 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5993 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5994 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5995 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5996 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5997 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5998 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5999 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6000 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6001 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6002 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6003 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6004 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6005 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6006 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6007 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6008 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6009 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6010 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6011 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6012 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6013 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6014 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6015 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6016 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6017 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6018 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6019 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6020 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6021 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6022 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6023 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6024 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6025 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6026 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6027 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6028 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6029 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6030 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6031 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6032 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6033 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6034 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6035 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6036 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6037 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6038 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6039 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6040 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6041 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6042 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6043 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6044 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6045 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6046 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6047 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6048 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6049 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6050 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6051 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6052 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6053 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6054 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6055 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6056 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6057 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6058 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6059 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6060 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6061 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6062 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6063 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6064 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6065 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6066 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6067 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6068 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6069 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6070 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6071 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6072 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6073 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6074 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6075 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6076 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6077 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6078 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6079 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6080 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6081 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6082 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6083 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6084 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6085 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6086 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6087 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6088 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6089 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6090 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6091 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6092 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6093 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6094 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6095 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6096 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6097 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6098 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6099 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6100 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6101 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6102 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6103 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6104 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6105 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6106 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6107 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6108 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6109 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6110 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6111 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6112 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6113 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6114 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6115 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6116 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6117 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6118 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6119 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6120 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6121 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6122 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6123 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6124 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6125 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6126 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6127 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6128 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6129 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6130 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6131 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6132 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6133 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6134 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6135 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6136 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6137 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6138 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6139 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6140 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6141 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6142 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6143 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6144 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6145 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6146 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6147 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6148 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6149 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6150 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6151 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6152 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6153 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6154 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6155 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6156 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6157 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6158 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6159 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6160 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6161 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6162 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6163 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6164 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6165 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6166 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6167 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6168 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6169 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6170 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6171 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6172 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6173 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6174 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6175 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6176 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6177 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6178 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6179 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6180 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6181 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6182 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6183 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6184 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6185 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6186 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6187 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6188 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6189 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6190 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6191 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6192 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6193 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6194 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6195 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6196 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6197 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6200 static const u32 tg3TsoFwRodata
[] = {
6201 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6202 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6203 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6204 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6208 static const u32 tg3TsoFwData
[] = {
6209 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6210 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* 5705 needs a special version of the TSO firmware.  The layout below
 * describes that image: text, rodata and data segments are contiguous
 * in NIC SRAM starting at 0x00010000, followed by the sbss/bss areas
 * the firmware uses at run time.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2	/* NOTE(review): historical misspelling ("RELASE") kept — renaming could break references elsewhere in the file */
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
6230 static const u32 tg3Tso5FwText
[(TG3_TSO5_FW_TEXT_LEN
/ 4) + 1] = {
6231 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6232 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6233 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6234 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6235 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6236 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6237 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6238 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6239 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6240 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6241 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6242 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6243 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6244 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6245 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6246 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6247 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6248 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6249 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6250 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6251 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6252 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6253 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6254 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6255 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6256 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6257 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6258 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6259 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6260 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6261 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6262 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6263 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6264 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6265 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6266 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6267 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6268 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6269 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6270 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6271 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6272 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6273 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6274 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6275 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6276 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6277 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6278 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6279 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6280 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6281 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6282 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6283 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6284 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6285 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6286 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6287 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6288 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6289 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6290 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6291 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6292 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6293 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6294 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6295 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6296 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6297 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6298 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6299 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6300 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6301 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6302 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6303 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6304 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6305 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6306 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6307 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6308 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6309 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6310 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6311 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6312 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6313 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6314 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6315 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6316 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6317 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6318 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6319 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6320 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6321 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6322 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6323 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6324 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6325 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6326 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6327 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6328 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6329 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6330 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6331 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6332 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6333 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6334 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6335 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6336 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6337 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6338 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6339 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6340 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6341 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6342 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6343 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6344 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6345 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6346 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6347 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6348 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6349 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6350 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6351 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6352 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6353 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6354 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6355 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6356 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6357 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6358 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6359 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6360 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6361 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6362 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6363 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6364 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6365 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6366 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6367 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6368 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6369 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6370 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6371 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6372 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6373 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6374 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6375 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6376 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6377 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6378 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6379 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6380 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6381 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6382 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6383 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6384 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6385 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6386 0x00000000, 0x00000000, 0x00000000,
6389 static const u32 tg3Tso5FwRodata
[(TG3_TSO5_FW_RODATA_LEN
/ 4) + 1] = {
6390 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6391 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6392 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6393 0x00000000, 0x00000000, 0x00000000,
6396 static const u32 tg3Tso5FwData
[(TG3_TSO5_FW_DATA_LEN
/ 4) + 1] = {
6397 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6398 0x00000000, 0x00000000, 0x00000000,
6401 /* tp->lock is held. */
6402 static int tg3_load_tso_firmware(struct tg3
*tp
)
6404 struct fw_info info
;
6405 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
6408 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6411 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
6412 info
.text_base
= TG3_TSO5_FW_TEXT_ADDR
;
6413 info
.text_len
= TG3_TSO5_FW_TEXT_LEN
;
6414 info
.text_data
= &tg3Tso5FwText
[0];
6415 info
.rodata_base
= TG3_TSO5_FW_RODATA_ADDR
;
6416 info
.rodata_len
= TG3_TSO5_FW_RODATA_LEN
;
6417 info
.rodata_data
= &tg3Tso5FwRodata
[0];
6418 info
.data_base
= TG3_TSO5_FW_DATA_ADDR
;
6419 info
.data_len
= TG3_TSO5_FW_DATA_LEN
;
6420 info
.data_data
= &tg3Tso5FwData
[0];
6421 cpu_base
= RX_CPU_BASE
;
6422 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
6423 cpu_scratch_size
= (info
.text_len
+
6426 TG3_TSO5_FW_SBSS_LEN
+
6427 TG3_TSO5_FW_BSS_LEN
);
6429 info
.text_base
= TG3_TSO_FW_TEXT_ADDR
;
6430 info
.text_len
= TG3_TSO_FW_TEXT_LEN
;
6431 info
.text_data
= &tg3TsoFwText
[0];
6432 info
.rodata_base
= TG3_TSO_FW_RODATA_ADDR
;
6433 info
.rodata_len
= TG3_TSO_FW_RODATA_LEN
;
6434 info
.rodata_data
= &tg3TsoFwRodata
[0];
6435 info
.data_base
= TG3_TSO_FW_DATA_ADDR
;
6436 info
.data_len
= TG3_TSO_FW_DATA_LEN
;
6437 info
.data_data
= &tg3TsoFwData
[0];
6438 cpu_base
= TX_CPU_BASE
;
6439 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
6440 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
6443 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
6444 cpu_scratch_base
, cpu_scratch_size
,
6449 /* Now startup the cpu. */
6450 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
6451 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
6453 for (i
= 0; i
< 5; i
++) {
6454 if (tr32(cpu_base
+ CPU_PC
) == info
.text_base
)
6456 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
6457 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
6458 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
6462 printk(KERN_ERR PFX
"tg3_load_tso_firmware fails for %s "
6463 "to set CPU PC, is %08x should be %08x\n",
6464 tp
->dev
->name
, tr32(cpu_base
+ CPU_PC
),
6468 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
6469 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
6474 /* tp->lock is held. */
6475 static void __tg3_set_mac_addr(struct tg3
*tp
, int skip_mac_1
)
6477 u32 addr_high
, addr_low
;
6480 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
6481 tp
->dev
->dev_addr
[1]);
6482 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
6483 (tp
->dev
->dev_addr
[3] << 16) |
6484 (tp
->dev
->dev_addr
[4] << 8) |
6485 (tp
->dev
->dev_addr
[5] << 0));
6486 for (i
= 0; i
< 4; i
++) {
6487 if (i
== 1 && skip_mac_1
)
6489 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
6490 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
6493 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
6494 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
6495 for (i
= 0; i
< 12; i
++) {
6496 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
6497 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
6501 addr_high
= (tp
->dev
->dev_addr
[0] +
6502 tp
->dev
->dev_addr
[1] +
6503 tp
->dev
->dev_addr
[2] +
6504 tp
->dev
->dev_addr
[3] +
6505 tp
->dev
->dev_addr
[4] +
6506 tp
->dev
->dev_addr
[5]) &
6507 TX_BACKOFF_SEED_MASK
;
6508 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
6511 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
6513 struct tg3
*tp
= netdev_priv(dev
);
6514 struct sockaddr
*addr
= p
;
6515 int err
= 0, skip_mac_1
= 0;
6517 if (!is_valid_ether_addr(addr
->sa_data
))
6520 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
6522 if (!netif_running(dev
))
6525 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
6526 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
6528 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
6529 addr0_low
= tr32(MAC_ADDR_0_LOW
);
6530 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
6531 addr1_low
= tr32(MAC_ADDR_1_LOW
);
6533 /* Skip MAC addr 1 if ASF is using it. */
6534 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
6535 !(addr1_high
== 0 && addr1_low
== 0))
6538 spin_lock_bh(&tp
->lock
);
6539 __tg3_set_mac_addr(tp
, skip_mac_1
);
6540 spin_unlock_bh(&tp
->lock
);
6545 /* tp->lock is held. */
6546 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
6547 dma_addr_t mapping
, u32 maxlen_flags
,
6551 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
6552 ((u64
) mapping
>> 32));
6554 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
6555 ((u64
) mapping
& 0xffffffff));
6557 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
6560 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6562 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
6566 static void __tg3_set_rx_mode(struct net_device
*);
6567 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
6569 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
6570 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
6571 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
6572 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
6573 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6574 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
6575 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
6577 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
6578 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
6579 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6580 u32 val
= ec
->stats_block_coalesce_usecs
;
6582 if (!netif_carrier_ok(tp
->dev
))
6585 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
6589 /* tp->lock is held. */
6590 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
6592 u32 val
, rdmac_mode
;
6595 tg3_disable_ints(tp
);
6599 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
6601 if (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) {
6602 tg3_abort_hw(tp
, 1);
6608 err
= tg3_chip_reset(tp
);
6612 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
6614 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5784_A0
||
6615 tp
->pci_chip_rev_id
== CHIPREV_ID_5784_A1
) {
6616 val
= tr32(TG3_CPMU_CTRL
);
6617 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
6618 tw32(TG3_CPMU_CTRL
, val
);
6620 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
6621 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
6622 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
6623 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
6625 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
6626 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
6627 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
6628 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
6630 val
= tr32(TG3_CPMU_HST_ACC
);
6631 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
6632 val
|= CPMU_HST_ACC_MACCLK_6_25
;
6633 tw32(TG3_CPMU_HST_ACC
, val
);
6636 /* This works around an issue with Athlon chipsets on
6637 * B3 tigon3 silicon. This bit has no effect on any
6638 * other revision. But do not set this on PCI Express
6639 * chips and don't even touch the clocks if the CPMU is present.
6641 if (!(tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
)) {
6642 if (!(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
6643 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
6644 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
6647 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
6648 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
6649 val
= tr32(TG3PCI_PCISTATE
);
6650 val
|= PCISTATE_RETRY_SAME_DMA
;
6651 tw32(TG3PCI_PCISTATE
, val
);
6654 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
) {
6655 /* Allow reads and writes to the
6656 * APE register and memory space.
6658 val
= tr32(TG3PCI_PCISTATE
);
6659 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
6660 PCISTATE_ALLOW_APE_SHMEM_WR
;
6661 tw32(TG3PCI_PCISTATE
, val
);
6664 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
6665 /* Enable some hw fixes. */
6666 val
= tr32(TG3PCI_MSI_DATA
);
6667 val
|= (1 << 26) | (1 << 28) | (1 << 29);
6668 tw32(TG3PCI_MSI_DATA
, val
);
6671 /* Descriptor ring init may make accesses to the
6672 * NIC SRAM area to setup the TX descriptors, so we
6673 * can only do this after the hardware has been
6674 * successfully reset.
6676 err
= tg3_init_rings(tp
);
6680 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
&&
6681 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) {
6682 /* This value is determined during the probe time DMA
6683 * engine test, tg3_test_dma.
6685 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
6688 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
6689 GRC_MODE_4X_NIC_SEND_RINGS
|
6690 GRC_MODE_NO_TX_PHDR_CSUM
|
6691 GRC_MODE_NO_RX_PHDR_CSUM
);
6692 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
6694 /* Pseudo-header checksum is done by hardware logic and not
6695 * the offload processers, so make the chip do the pseudo-
6696 * header checksums on receive. For transmit it is more
6697 * convenient to do the pseudo-header checksum in software
6698 * as Linux does that on transmit for us in all cases.
6700 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
6704 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
6706 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6707 val
= tr32(GRC_MISC_CFG
);
6709 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
6710 tw32(GRC_MISC_CFG
, val
);
6712 /* Initialize MBUF/DESC pool. */
6713 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
6715 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
6716 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
6717 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
6718 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
6720 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
6721 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
6722 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
6724 else if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6727 fw_len
= (TG3_TSO5_FW_TEXT_LEN
+
6728 TG3_TSO5_FW_RODATA_LEN
+
6729 TG3_TSO5_FW_DATA_LEN
+
6730 TG3_TSO5_FW_SBSS_LEN
+
6731 TG3_TSO5_FW_BSS_LEN
);
6732 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
6733 tw32(BUFMGR_MB_POOL_ADDR
,
6734 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
6735 tw32(BUFMGR_MB_POOL_SIZE
,
6736 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
6739 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
6740 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
6741 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
6742 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
6743 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
6744 tw32(BUFMGR_MB_HIGH_WATER
,
6745 tp
->bufmgr_config
.mbuf_high_water
);
6747 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
6748 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
6749 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
6750 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
6751 tw32(BUFMGR_MB_HIGH_WATER
,
6752 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
6754 tw32(BUFMGR_DMA_LOW_WATER
,
6755 tp
->bufmgr_config
.dma_low_water
);
6756 tw32(BUFMGR_DMA_HIGH_WATER
,
6757 tp
->bufmgr_config
.dma_high_water
);
6759 tw32(BUFMGR_MODE
, BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
);
6760 for (i
= 0; i
< 2000; i
++) {
6761 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
6766 printk(KERN_ERR PFX
"tg3_reset_hw cannot enable BUFMGR for %s.\n",
6771 /* Setup replenish threshold. */
6772 val
= tp
->rx_pending
/ 8;
6775 else if (val
> tp
->rx_std_max_post
)
6776 val
= tp
->rx_std_max_post
;
6777 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
6778 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
6779 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
6781 if (val
> (TG3_RX_INTERNAL_RING_SZ_5906
/ 2))
6782 val
= TG3_RX_INTERNAL_RING_SZ_5906
/ 2;
6785 tw32(RCVBDI_STD_THRESH
, val
);
6787 /* Initialize TG3_BDINFO's at:
6788 * RCVDBDI_STD_BD: standard eth size rx ring
6789 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6790 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6793 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6794 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6795 * ring attribute flags
6796 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6798 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6799 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6801 * The size of each ring is fixed in the firmware, but the location is
6804 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6805 ((u64
) tp
->rx_std_mapping
>> 32));
6806 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6807 ((u64
) tp
->rx_std_mapping
& 0xffffffff));
6808 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
6809 NIC_SRAM_RX_BUFFER_DESC
);
6811 /* Don't even try to program the JUMBO/MINI buffer descriptor
6814 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
6815 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6816 RX_STD_MAX_SIZE_5705
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6818 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6819 RX_STD_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6821 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6822 BDINFO_FLAGS_DISABLED
);
6824 /* Setup replenish threshold. */
6825 tw32(RCVBDI_JUMBO_THRESH
, tp
->rx_jumbo_pending
/ 8);
6827 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
6828 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6829 ((u64
) tp
->rx_jumbo_mapping
>> 32));
6830 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6831 ((u64
) tp
->rx_jumbo_mapping
& 0xffffffff));
6832 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6833 RX_JUMBO_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6834 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
6835 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
6837 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6838 BDINFO_FLAGS_DISABLED
);
6843 /* There is only one send ring on 5705/5750, no need to explicitly
6844 * disable the others.
6846 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6847 /* Clear out send RCB ring in SRAM. */
6848 for (i
= NIC_SRAM_SEND_RCB
; i
< NIC_SRAM_RCV_RET_RCB
; i
+= TG3_BDINFO_SIZE
)
6849 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
6850 BDINFO_FLAGS_DISABLED
);
6855 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6856 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6858 tg3_set_bdinfo(tp
, NIC_SRAM_SEND_RCB
,
6859 tp
->tx_desc_mapping
,
6860 (TG3_TX_RING_SIZE
<<
6861 BDINFO_FLAGS_MAXLEN_SHIFT
),
6862 NIC_SRAM_TX_BUFFER_DESC
);
6864 /* There is only one receive return ring on 5705/5750, no need
6865 * to explicitly disable the others.
6867 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6868 for (i
= NIC_SRAM_RCV_RET_RCB
; i
< NIC_SRAM_STATS_BLK
;
6869 i
+= TG3_BDINFO_SIZE
) {
6870 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
6871 BDINFO_FLAGS_DISABLED
);
6876 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6878 tg3_set_bdinfo(tp
, NIC_SRAM_RCV_RET_RCB
,
6880 (TG3_RX_RCB_RING_SIZE(tp
) <<
6881 BDINFO_FLAGS_MAXLEN_SHIFT
),
6884 tp
->rx_std_ptr
= tp
->rx_pending
;
6885 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
6888 tp
->rx_jumbo_ptr
= (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) ?
6889 tp
->rx_jumbo_pending
: 0;
6890 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
6893 /* Initialize MAC address and backoff seed. */
6894 __tg3_set_mac_addr(tp
, 0);
6896 /* MTU + ethernet header + FCS + optional VLAN tag */
6897 tw32(MAC_RX_MTU_SIZE
, tp
->dev
->mtu
+ ETH_HLEN
+ 8);
6899 /* The slot time is changed by tg3_setup_phy if we
6900 * run at gigabit with half duplex.
6902 tw32(MAC_TX_LENGTHS
,
6903 (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
6904 (6 << TX_LENGTHS_IPG_SHIFT
) |
6905 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
6907 /* Receive rules. */
6908 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
6909 tw32(RCVLPC_CONFIG
, 0x0181);
6911 /* Calculate RDMAC_MODE setting early, we need it to determine
6912 * the RCVLPC_STATE_ENABLE mask.
6914 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
6915 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
6916 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
6917 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
6918 RDMAC_MODE_LNGREAD_ENAB
);
6920 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
)
6921 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
6922 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
6923 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
6925 /* If statement applies to 5705 and 5750 PCI devices only */
6926 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6927 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6928 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)) {
6929 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
&&
6930 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
6931 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
6932 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6933 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
6934 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
6938 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
6939 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
6941 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6942 rdmac_mode
|= (1 << 27);
6944 /* Receive/send statistics. */
6945 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
6946 val
= tr32(RCVLPC_STATS_ENABLE
);
6947 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
6948 tw32(RCVLPC_STATS_ENABLE
, val
);
6949 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
6950 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
6951 val
= tr32(RCVLPC_STATS_ENABLE
);
6952 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
6953 tw32(RCVLPC_STATS_ENABLE
, val
);
6955 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
6957 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
6958 tw32(SNDDATAI_STATSENAB
, 0xffffff);
6959 tw32(SNDDATAI_STATSCTRL
,
6960 (SNDDATAI_SCTRL_ENABLE
|
6961 SNDDATAI_SCTRL_FASTUPD
));
6963 /* Setup host coalescing engine. */
6964 tw32(HOSTCC_MODE
, 0);
6965 for (i
= 0; i
< 2000; i
++) {
6966 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
6971 __tg3_set_coalesce(tp
, &tp
->coal
);
6973 /* set status block DMA address */
6974 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6975 ((u64
) tp
->status_mapping
>> 32));
6976 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6977 ((u64
) tp
->status_mapping
& 0xffffffff));
6979 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6980 /* Status/statistics block address. See tg3_timer,
6981 * the tg3_periodic_fetch_stats call there, and
6982 * tg3_get_stats to see how this works for 5705/5750 chips.
6984 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6985 ((u64
) tp
->stats_mapping
>> 32));
6986 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6987 ((u64
) tp
->stats_mapping
& 0xffffffff));
6988 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
6989 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
6992 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
6994 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
6995 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
6996 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6997 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
6999 /* Clear statistics/status block in chip, and status block in ram. */
7000 for (i
= NIC_SRAM_STATS_BLK
;
7001 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
7003 tg3_write_mem(tp
, i
, 0);
7006 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7008 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
7009 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
7010 /* reset to prevent losing 1st rx packet intermittently */
7011 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
7015 tp
->mac_mode
= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
7016 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
7017 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
7018 !(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) &&
7019 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
7020 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
7021 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
7024 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7025 * If TG3_FLG2_IS_NIC is zero, we should read the
7026 * register to preserve the GPIO settings for LOMs. The GPIOs,
7027 * whether used as inputs or outputs, are set by boot code after
7030 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_NIC
)) {
7033 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
7034 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
7035 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
7037 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
7038 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
7039 GRC_LCLCTRL_GPIO_OUTPUT3
;
7041 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
7042 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
7044 tp
->grc_local_ctrl
&= ~gpio_mask
;
7045 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
7047 /* GPIO1 must be driven high for eeprom write protect */
7048 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
)
7049 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
7050 GRC_LCLCTRL_GPIO_OUTPUT1
);
7052 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
7055 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0);
7058 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
7059 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
7063 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
7064 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
7065 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
7066 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
7067 WDMAC_MODE_LNGREAD_ENAB
);
7069 /* If statement applies to 5705 and 5750 PCI devices only */
7070 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
7071 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
7072 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) {
7073 if ((tp
->tg3_flags
& TG3_FLG2_TSO_CAPABLE
) &&
7074 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
7075 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
7077 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
7078 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
7079 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
7080 val
|= WDMAC_MODE_RX_ACCEL
;
7084 /* Enable host coalescing bug fix */
7085 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
) ||
7086 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
) ||
7087 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
) ||
7088 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
))
7089 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
7091 tw32_f(WDMAC_MODE
, val
);
7094 if (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) {
7097 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
7099 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
7100 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
7101 pcix_cmd
|= PCI_X_CMD_READ_2K
;
7102 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
7103 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
7104 pcix_cmd
|= PCI_X_CMD_READ_2K
;
7106 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
7110 tw32_f(RDMAC_MODE
, rdmac_mode
);
7113 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
7114 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
7115 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
7117 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
7119 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
7121 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
7123 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
7124 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
7125 tw32(RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
);
7126 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
7127 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
7128 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
7129 tw32(SNDBDI_MODE
, SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
);
7130 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
7132 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
7133 err
= tg3_load_5701_a0_firmware_fix(tp
);
7138 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
7139 err
= tg3_load_tso_firmware(tp
);
7144 tp
->tx_mode
= TX_MODE_ENABLE
;
7145 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
7148 tp
->rx_mode
= RX_MODE_ENABLE
;
7149 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
7150 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
7151 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
7153 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
7156 if (tp
->link_config
.phy_is_low_power
) {
7157 tp
->link_config
.phy_is_low_power
= 0;
7158 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
7159 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
7160 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
7163 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
7164 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
7167 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
7169 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
7170 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
7171 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
7174 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
7177 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
7178 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
7179 !(tp
->tg3_flags2
& TG3_FLG2_SERDES_PREEMPHASIS
)) {
7180 /* Set drive transmission level to 1.2V */
7181 /* only if the signal pre-emphasis bit is not set */
7182 val
= tr32(MAC_SERDES_CFG
);
7185 tw32(MAC_SERDES_CFG
, val
);
7187 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
7188 tw32(MAC_SERDES_CFG
, 0x616000);
7191 /* Prevent chip from dropping frames when flow control
7194 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, 2);
7196 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
7197 (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
7198 /* Use hardware link auto-negotiation */
7199 tp
->tg3_flags2
|= TG3_FLG2_HW_AUTONEG
;
7202 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) &&
7203 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
7206 tmp
= tr32(SERDES_RX_CTRL
);
7207 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
7208 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
7209 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
7210 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
7213 err
= tg3_setup_phy(tp
, 0);
7217 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) &&
7218 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
) {
7221 /* Clear CRC stats. */
7222 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
7223 tg3_writephy(tp
, MII_TG3_TEST1
,
7224 tmp
| MII_TG3_TEST1_CRC_EN
);
7225 tg3_readphy(tp
, 0x14, &tmp
);
7229 __tg3_set_rx_mode(tp
->dev
);
7231 /* Initialize receive rules. */
7232 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
7233 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
7234 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
7235 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
7237 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
7238 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
7242 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)
7246 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
7248 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
7250 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
7252 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
7254 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
7256 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
7258 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
7260 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
7262 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
7264 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
7266 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
7268 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
7270 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7272 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7280 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)
7281 /* Write our heartbeat update interval to APE. */
7282 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
7283 APE_HOST_HEARTBEAT_INT_DISABLE
);
7285 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
7290 /* Called at device open time to get the chip ready for
7291 * packet processing. Invoked with tp->lock held.
7293 static int tg3_init_hw(struct tg3
*tp
, int reset_phy
)
7297 /* Force the chip into D0. */
7298 err
= tg3_set_power_state(tp
, PCI_D0
);
7302 tg3_switch_clocks(tp
);
7304 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
7306 err
= tg3_reset_hw(tp
, reset_phy
);
/* Accumulate a 32-bit hardware counter register into a 64-bit software
 * counter (tg3 "low"/"high" pair), carrying into "high" on wrap of "low".
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7319 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
7321 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
7323 if (!netif_carrier_ok(tp
->dev
))
7326 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
7327 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
7328 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
7329 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
7330 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
7331 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
7332 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
7333 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
7334 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
7335 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
7336 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
7337 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
7338 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
7340 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
7341 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
7342 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
7343 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
7344 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
7345 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
7346 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
7347 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
7348 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
7349 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
7350 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
7351 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
7352 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
7353 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
7355 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
7356 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
7357 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
7360 static void tg3_timer(unsigned long __opaque
)
7362 struct tg3
*tp
= (struct tg3
*) __opaque
;
7367 spin_lock(&tp
->lock
);
7369 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
7370 /* All of this garbage is because when using non-tagged
7371 * IRQ status the mailbox/status_block protocol the chip
7372 * uses with the cpu is race prone.
7374 if (tp
->hw_status
->status
& SD_STATUS_UPDATED
) {
7375 tw32(GRC_LOCAL_CTRL
,
7376 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
7378 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
7379 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
7382 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
7383 tp
->tg3_flags2
|= TG3_FLG2_RESTART_TIMER
;
7384 spin_unlock(&tp
->lock
);
7385 schedule_work(&tp
->reset_task
);
7390 /* This part only runs once per second. */
7391 if (!--tp
->timer_counter
) {
7392 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
7393 tg3_periodic_fetch_stats(tp
);
7395 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
7399 mac_stat
= tr32(MAC_STATUS
);
7402 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) {
7403 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
7405 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
7409 tg3_setup_phy(tp
, 0);
7410 } else if (tp
->tg3_flags
& TG3_FLAG_POLL_SERDES
) {
7411 u32 mac_stat
= tr32(MAC_STATUS
);
7414 if (netif_carrier_ok(tp
->dev
) &&
7415 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
7418 if (! netif_carrier_ok(tp
->dev
) &&
7419 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
7420 MAC_STATUS_SIGNAL_DET
))) {
7424 if (!tp
->serdes_counter
) {
7427 ~MAC_MODE_PORT_MODE_MASK
));
7429 tw32_f(MAC_MODE
, tp
->mac_mode
);
7432 tg3_setup_phy(tp
, 0);
7434 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
7435 tg3_serdes_parallel_detect(tp
);
7437 tp
->timer_counter
= tp
->timer_multiplier
;
7440 /* Heartbeat is only sent once every 2 seconds.
7442 * The heartbeat is to tell the ASF firmware that the host
7443 * driver is still alive. In the event that the OS crashes,
7444 * ASF needs to reset the hardware to free up the FIFO space
7445 * that may be filled with rx packets destined for the host.
7446 * If the FIFO is full, ASF will no longer function properly.
7448 * Unintended resets have been reported on real time kernels
7449 * where the timer doesn't run on time. Netpoll will also have
7452 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7453 * to check the ring condition when the heartbeat is expiring
7454 * before doing the reset. This will prevent most unintended
7457 if (!--tp
->asf_counter
) {
7458 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
7461 tg3_wait_for_event_ack(tp
);
7463 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
7464 FWCMD_NICDRV_ALIVE3
);
7465 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
7466 /* 5 seconds timeout */
7467 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, 5);
7468 val
= tr32(GRC_RX_CPU_EVENT
);
7469 val
|= GRC_RX_CPU_DRIVER_EVENT
;
7470 tw32_f(GRC_RX_CPU_EVENT
, val
);
7472 tp
->asf_counter
= tp
->asf_multiplier
;
7475 spin_unlock(&tp
->lock
);
7478 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
7479 add_timer(&tp
->timer
);
7482 static int tg3_request_irq(struct tg3
*tp
)
7485 unsigned long flags
;
7486 struct net_device
*dev
= tp
->dev
;
7488 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7490 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
)
7492 flags
= IRQF_SAMPLE_RANDOM
;
7495 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
7496 fn
= tg3_interrupt_tagged
;
7497 flags
= IRQF_SHARED
| IRQF_SAMPLE_RANDOM
;
7499 return (request_irq(tp
->pdev
->irq
, fn
, flags
, dev
->name
, dev
));
7502 static int tg3_test_interrupt(struct tg3
*tp
)
7504 struct net_device
*dev
= tp
->dev
;
7505 int err
, i
, intr_ok
= 0;
7507 if (!netif_running(dev
))
7510 tg3_disable_ints(tp
);
7512 free_irq(tp
->pdev
->irq
, dev
);
7514 err
= request_irq(tp
->pdev
->irq
, tg3_test_isr
,
7515 IRQF_SHARED
| IRQF_SAMPLE_RANDOM
, dev
->name
, dev
);
7519 tp
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
7520 tg3_enable_ints(tp
);
7522 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
7525 for (i
= 0; i
< 5; i
++) {
7526 u32 int_mbox
, misc_host_ctrl
;
7528 int_mbox
= tr32_mailbox(MAILBOX_INTERRUPT_0
+
7530 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
7532 if ((int_mbox
!= 0) ||
7533 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
7541 tg3_disable_ints(tp
);
7543 free_irq(tp
->pdev
->irq
, dev
);
7545 err
= tg3_request_irq(tp
);
7556 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
7557 * successfully restored
7559 static int tg3_test_msi(struct tg3
*tp
)
7561 struct net_device
*dev
= tp
->dev
;
7565 if (!(tp
->tg3_flags2
& TG3_FLG2_USING_MSI
))
7568 /* Turn off SERR reporting in case MSI terminates with Master
7571 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
7572 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
7573 pci_cmd
& ~PCI_COMMAND_SERR
);
7575 err
= tg3_test_interrupt(tp
);
7577 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
7582 /* other failures */
7586 /* MSI test failed, go back to INTx mode */
7587 printk(KERN_WARNING PFX
"%s: No interrupt was generated using MSI, "
7588 "switching to INTx mode. Please report this failure to "
7589 "the PCI maintainer and include system chipset information.\n",
7592 free_irq(tp
->pdev
->irq
, dev
);
7593 pci_disable_msi(tp
->pdev
);
7595 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
7597 err
= tg3_request_irq(tp
);
7601 /* Need to reset the chip because the MSI cycle may have terminated
7602 * with Master Abort.
7604 tg3_full_lock(tp
, 1);
7606 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7607 err
= tg3_init_hw(tp
, 1);
7609 tg3_full_unlock(tp
);
7612 free_irq(tp
->pdev
->irq
, dev
);
7617 static int tg3_open(struct net_device
*dev
)
7619 struct tg3
*tp
= netdev_priv(dev
);
7622 netif_carrier_off(tp
->dev
);
7624 tg3_full_lock(tp
, 0);
7626 err
= tg3_set_power_state(tp
, PCI_D0
);
7628 tg3_full_unlock(tp
);
7632 tg3_disable_ints(tp
);
7633 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
7635 tg3_full_unlock(tp
);
7637 /* The placement of this call is tied
7638 * to the setup and use of Host TX descriptors.
7640 err
= tg3_alloc_consistent(tp
);
7644 if (tp
->tg3_flags
& TG3_FLAG_SUPPORT_MSI
) {
7645 /* All MSI supporting chips should support tagged
7646 * status. Assert that this is the case.
7648 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
7649 printk(KERN_WARNING PFX
"%s: MSI without TAGGED? "
7650 "Not using MSI.\n", tp
->dev
->name
);
7651 } else if (pci_enable_msi(tp
->pdev
) == 0) {
7654 msi_mode
= tr32(MSGINT_MODE
);
7655 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
7656 tp
->tg3_flags2
|= TG3_FLG2_USING_MSI
;
7659 err
= tg3_request_irq(tp
);
7662 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7663 pci_disable_msi(tp
->pdev
);
7664 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
7666 tg3_free_consistent(tp
);
7670 napi_enable(&tp
->napi
);
7672 tg3_full_lock(tp
, 0);
7674 err
= tg3_init_hw(tp
, 1);
7676 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7679 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
7680 tp
->timer_offset
= HZ
;
7682 tp
->timer_offset
= HZ
/ 10;
7684 BUG_ON(tp
->timer_offset
> HZ
);
7685 tp
->timer_counter
= tp
->timer_multiplier
=
7686 (HZ
/ tp
->timer_offset
);
7687 tp
->asf_counter
= tp
->asf_multiplier
=
7688 ((HZ
/ tp
->timer_offset
) * 2);
7690 init_timer(&tp
->timer
);
7691 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
7692 tp
->timer
.data
= (unsigned long) tp
;
7693 tp
->timer
.function
= tg3_timer
;
7696 tg3_full_unlock(tp
);
7699 napi_disable(&tp
->napi
);
7700 free_irq(tp
->pdev
->irq
, dev
);
7701 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7702 pci_disable_msi(tp
->pdev
);
7703 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
7705 tg3_free_consistent(tp
);
7709 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7710 err
= tg3_test_msi(tp
);
7713 tg3_full_lock(tp
, 0);
7715 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7716 pci_disable_msi(tp
->pdev
);
7717 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
7719 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7721 tg3_free_consistent(tp
);
7723 tg3_full_unlock(tp
);
7725 napi_disable(&tp
->napi
);
7730 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7731 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
) {
7732 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
7734 tw32(PCIE_TRANSACTION_CFG
,
7735 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
7740 tg3_full_lock(tp
, 0);
7742 add_timer(&tp
->timer
);
7743 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
7744 tg3_enable_ints(tp
);
7746 tg3_full_unlock(tp
);
7748 netif_start_queue(dev
);
7754 /*static*/ void tg3_dump_state(struct tg3
*tp
)
7756 u32 val32
, val32_2
, val32_3
, val32_4
, val32_5
;
7760 pci_read_config_word(tp
->pdev
, PCI_STATUS
, &val16
);
7761 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, &val32
);
7762 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7766 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7767 tr32(MAC_MODE
), tr32(MAC_STATUS
));
7768 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7769 tr32(MAC_EVENT
), tr32(MAC_LED_CTRL
));
7770 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7771 tr32(MAC_TX_MODE
), tr32(MAC_TX_STATUS
));
7772 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7773 tr32(MAC_RX_MODE
), tr32(MAC_RX_STATUS
));
7775 /* Send data initiator control block */
7776 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7777 tr32(SNDDATAI_MODE
), tr32(SNDDATAI_STATUS
));
7778 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7779 tr32(SNDDATAI_STATSCTRL
));
7781 /* Send data completion control block */
7782 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE
));
7784 /* Send BD ring selector block */
7785 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7786 tr32(SNDBDS_MODE
), tr32(SNDBDS_STATUS
));
7788 /* Send BD initiator control block */
7789 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7790 tr32(SNDBDI_MODE
), tr32(SNDBDI_STATUS
));
7792 /* Send BD completion control block */
7793 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE
));
7795 /* Receive list placement control block */
7796 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7797 tr32(RCVLPC_MODE
), tr32(RCVLPC_STATUS
));
7798 printk(" RCVLPC_STATSCTRL[%08x]\n",
7799 tr32(RCVLPC_STATSCTRL
));
7801 /* Receive data and receive BD initiator control block */
7802 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7803 tr32(RCVDBDI_MODE
), tr32(RCVDBDI_STATUS
));
7805 /* Receive data completion control block */
7806 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7809 /* Receive BD initiator control block */
7810 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7811 tr32(RCVBDI_MODE
), tr32(RCVBDI_STATUS
));
7813 /* Receive BD completion control block */
7814 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7815 tr32(RCVCC_MODE
), tr32(RCVCC_STATUS
));
7817 /* Receive list selector control block */
7818 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7819 tr32(RCVLSC_MODE
), tr32(RCVLSC_STATUS
));
7821 /* Mbuf cluster free block */
7822 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7823 tr32(MBFREE_MODE
), tr32(MBFREE_STATUS
));
7825 /* Host coalescing control block */
7826 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7827 tr32(HOSTCC_MODE
), tr32(HOSTCC_STATUS
));
7828 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7829 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
7830 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
7831 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7832 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
7833 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
7834 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7835 tr32(HOSTCC_STATS_BLK_NIC_ADDR
));
7836 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7837 tr32(HOSTCC_STATUS_BLK_NIC_ADDR
));
7839 /* Memory arbiter control block */
7840 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7841 tr32(MEMARB_MODE
), tr32(MEMARB_STATUS
));
7843 /* Buffer manager control block */
7844 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7845 tr32(BUFMGR_MODE
), tr32(BUFMGR_STATUS
));
7846 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7847 tr32(BUFMGR_MB_POOL_ADDR
), tr32(BUFMGR_MB_POOL_SIZE
));
7848 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7849 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7850 tr32(BUFMGR_DMA_DESC_POOL_ADDR
),
7851 tr32(BUFMGR_DMA_DESC_POOL_SIZE
));
7853 /* Read DMA control block */
7854 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7855 tr32(RDMAC_MODE
), tr32(RDMAC_STATUS
));
7857 /* Write DMA control block */
7858 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7859 tr32(WDMAC_MODE
), tr32(WDMAC_STATUS
));
7861 /* DMA completion block */
7862 printk("DEBUG: DMAC_MODE[%08x]\n",
7866 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7867 tr32(GRC_MODE
), tr32(GRC_MISC_CFG
));
7868 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7869 tr32(GRC_LOCAL_CTRL
));
7872 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7873 tr32(RCVDBDI_JUMBO_BD
+ 0x0),
7874 tr32(RCVDBDI_JUMBO_BD
+ 0x4),
7875 tr32(RCVDBDI_JUMBO_BD
+ 0x8),
7876 tr32(RCVDBDI_JUMBO_BD
+ 0xc));
7877 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7878 tr32(RCVDBDI_STD_BD
+ 0x0),
7879 tr32(RCVDBDI_STD_BD
+ 0x4),
7880 tr32(RCVDBDI_STD_BD
+ 0x8),
7881 tr32(RCVDBDI_STD_BD
+ 0xc));
7882 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7883 tr32(RCVDBDI_MINI_BD
+ 0x0),
7884 tr32(RCVDBDI_MINI_BD
+ 0x4),
7885 tr32(RCVDBDI_MINI_BD
+ 0x8),
7886 tr32(RCVDBDI_MINI_BD
+ 0xc));
7888 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x0, &val32
);
7889 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x4, &val32_2
);
7890 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x8, &val32_3
);
7891 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0xc, &val32_4
);
7892 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7893 val32
, val32_2
, val32_3
, val32_4
);
7895 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x0, &val32
);
7896 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x4, &val32_2
);
7897 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x8, &val32_3
);
7898 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0xc, &val32_4
);
7899 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7900 val32
, val32_2
, val32_3
, val32_4
);
7902 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x0, &val32
);
7903 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x4, &val32_2
);
7904 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x8, &val32_3
);
7905 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0xc, &val32_4
);
7906 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x10, &val32_5
);
7907 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7908 val32
, val32_2
, val32_3
, val32_4
, val32_5
);
7910 /* SW status block */
7911 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7912 tp
->hw_status
->status
,
7913 tp
->hw_status
->status_tag
,
7914 tp
->hw_status
->rx_jumbo_consumer
,
7915 tp
->hw_status
->rx_consumer
,
7916 tp
->hw_status
->rx_mini_consumer
,
7917 tp
->hw_status
->idx
[0].rx_producer
,
7918 tp
->hw_status
->idx
[0].tx_consumer
);
7920 /* SW statistics block */
7921 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7922 ((u32
*)tp
->hw_stats
)[0],
7923 ((u32
*)tp
->hw_stats
)[1],
7924 ((u32
*)tp
->hw_stats
)[2],
7925 ((u32
*)tp
->hw_stats
)[3]);
7928 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7929 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x0),
7930 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x4),
7931 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x0),
7932 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x4));
7934 /* NIC side send descriptors. */
7935 for (i
= 0; i
< 6; i
++) {
7938 txd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_TX_BUFFER_DESC
7939 + (i
* sizeof(struct tg3_tx_buffer_desc
));
7940 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7942 readl(txd
+ 0x0), readl(txd
+ 0x4),
7943 readl(txd
+ 0x8), readl(txd
+ 0xc));
7946 /* NIC side RX descriptors. */
7947 for (i
= 0; i
< 6; i
++) {
7950 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_BUFFER_DESC
7951 + (i
* sizeof(struct tg3_rx_buffer_desc
));
7952 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7954 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7955 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7956 rxd
+= (4 * sizeof(u32
));
7957 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7959 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7960 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7963 for (i
= 0; i
< 6; i
++) {
7966 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC
7967 + (i
* sizeof(struct tg3_rx_buffer_desc
));
7968 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7970 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7971 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7972 rxd
+= (4 * sizeof(u32
));
7973 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7975 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7976 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7981 static struct net_device_stats
*tg3_get_stats(struct net_device
*);
7982 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*);
7984 static int tg3_close(struct net_device
*dev
)
7986 struct tg3
*tp
= netdev_priv(dev
);
7988 napi_disable(&tp
->napi
);
7989 cancel_work_sync(&tp
->reset_task
);
7991 netif_stop_queue(dev
);
7993 del_timer_sync(&tp
->timer
);
7995 tg3_full_lock(tp
, 1);
8000 tg3_disable_ints(tp
);
8002 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8004 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
8006 tg3_full_unlock(tp
);
8008 free_irq(tp
->pdev
->irq
, dev
);
8009 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
8010 pci_disable_msi(tp
->pdev
);
8011 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
8014 memcpy(&tp
->net_stats_prev
, tg3_get_stats(tp
->dev
),
8015 sizeof(tp
->net_stats_prev
));
8016 memcpy(&tp
->estats_prev
, tg3_get_estats(tp
),
8017 sizeof(tp
->estats_prev
));
8019 tg3_free_consistent(tp
);
8021 tg3_set_power_state(tp
, PCI_D3hot
);
8023 netif_carrier_off(tp
->dev
);
8028 static inline unsigned long get_stat64(tg3_stat64_t
*val
)
8032 #if (BITS_PER_LONG == 32)
8035 ret
= ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
8040 static unsigned long calc_crc_errors(struct tg3
*tp
)
8042 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
8044 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) &&
8045 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
8046 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
8049 spin_lock_bh(&tp
->lock
);
8050 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &val
)) {
8051 tg3_writephy(tp
, MII_TG3_TEST1
,
8052 val
| MII_TG3_TEST1_CRC_EN
);
8053 tg3_readphy(tp
, 0x14, &val
);
8056 spin_unlock_bh(&tp
->lock
);
8058 tp
->phy_crc_errors
+= val
;
8060 return tp
->phy_crc_errors
;
8063 return get_stat64(&hw_stats
->rx_fcs_errors
);
8066 #define ESTAT_ADD(member) \
8067 estats->member = old_estats->member + \
8068 get_stat64(&hw_stats->member)
8070 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*tp
)
8072 struct tg3_ethtool_stats
*estats
= &tp
->estats
;
8073 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
8074 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
8079 ESTAT_ADD(rx_octets
);
8080 ESTAT_ADD(rx_fragments
);
8081 ESTAT_ADD(rx_ucast_packets
);
8082 ESTAT_ADD(rx_mcast_packets
);
8083 ESTAT_ADD(rx_bcast_packets
);
8084 ESTAT_ADD(rx_fcs_errors
);
8085 ESTAT_ADD(rx_align_errors
);
8086 ESTAT_ADD(rx_xon_pause_rcvd
);
8087 ESTAT_ADD(rx_xoff_pause_rcvd
);
8088 ESTAT_ADD(rx_mac_ctrl_rcvd
);
8089 ESTAT_ADD(rx_xoff_entered
);
8090 ESTAT_ADD(rx_frame_too_long_errors
);
8091 ESTAT_ADD(rx_jabbers
);
8092 ESTAT_ADD(rx_undersize_packets
);
8093 ESTAT_ADD(rx_in_length_errors
);
8094 ESTAT_ADD(rx_out_length_errors
);
8095 ESTAT_ADD(rx_64_or_less_octet_packets
);
8096 ESTAT_ADD(rx_65_to_127_octet_packets
);
8097 ESTAT_ADD(rx_128_to_255_octet_packets
);
8098 ESTAT_ADD(rx_256_to_511_octet_packets
);
8099 ESTAT_ADD(rx_512_to_1023_octet_packets
);
8100 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
8101 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
8102 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
8103 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
8104 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
8106 ESTAT_ADD(tx_octets
);
8107 ESTAT_ADD(tx_collisions
);
8108 ESTAT_ADD(tx_xon_sent
);
8109 ESTAT_ADD(tx_xoff_sent
);
8110 ESTAT_ADD(tx_flow_control
);
8111 ESTAT_ADD(tx_mac_errors
);
8112 ESTAT_ADD(tx_single_collisions
);
8113 ESTAT_ADD(tx_mult_collisions
);
8114 ESTAT_ADD(tx_deferred
);
8115 ESTAT_ADD(tx_excessive_collisions
);
8116 ESTAT_ADD(tx_late_collisions
);
8117 ESTAT_ADD(tx_collide_2times
);
8118 ESTAT_ADD(tx_collide_3times
);
8119 ESTAT_ADD(tx_collide_4times
);
8120 ESTAT_ADD(tx_collide_5times
);
8121 ESTAT_ADD(tx_collide_6times
);
8122 ESTAT_ADD(tx_collide_7times
);
8123 ESTAT_ADD(tx_collide_8times
);
8124 ESTAT_ADD(tx_collide_9times
);
8125 ESTAT_ADD(tx_collide_10times
);
8126 ESTAT_ADD(tx_collide_11times
);
8127 ESTAT_ADD(tx_collide_12times
);
8128 ESTAT_ADD(tx_collide_13times
);
8129 ESTAT_ADD(tx_collide_14times
);
8130 ESTAT_ADD(tx_collide_15times
);
8131 ESTAT_ADD(tx_ucast_packets
);
8132 ESTAT_ADD(tx_mcast_packets
);
8133 ESTAT_ADD(tx_bcast_packets
);
8134 ESTAT_ADD(tx_carrier_sense_errors
);
8135 ESTAT_ADD(tx_discards
);
8136 ESTAT_ADD(tx_errors
);
8138 ESTAT_ADD(dma_writeq_full
);
8139 ESTAT_ADD(dma_write_prioq_full
);
8140 ESTAT_ADD(rxbds_empty
);
8141 ESTAT_ADD(rx_discards
);
8142 ESTAT_ADD(rx_errors
);
8143 ESTAT_ADD(rx_threshold_hit
);
8145 ESTAT_ADD(dma_readq_full
);
8146 ESTAT_ADD(dma_read_prioq_full
);
8147 ESTAT_ADD(tx_comp_queue_full
);
8149 ESTAT_ADD(ring_set_send_prod_index
);
8150 ESTAT_ADD(ring_status_update
);
8151 ESTAT_ADD(nic_irqs
);
8152 ESTAT_ADD(nic_avoided_irqs
);
8153 ESTAT_ADD(nic_tx_threshold_hit
);
8158 static struct net_device_stats
*tg3_get_stats(struct net_device
*dev
)
8160 struct tg3
*tp
= netdev_priv(dev
);
8161 struct net_device_stats
*stats
= &tp
->net_stats
;
8162 struct net_device_stats
*old_stats
= &tp
->net_stats_prev
;
8163 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
8168 stats
->rx_packets
= old_stats
->rx_packets
+
8169 get_stat64(&hw_stats
->rx_ucast_packets
) +
8170 get_stat64(&hw_stats
->rx_mcast_packets
) +
8171 get_stat64(&hw_stats
->rx_bcast_packets
);
8173 stats
->tx_packets
= old_stats
->tx_packets
+
8174 get_stat64(&hw_stats
->tx_ucast_packets
) +
8175 get_stat64(&hw_stats
->tx_mcast_packets
) +
8176 get_stat64(&hw_stats
->tx_bcast_packets
);
8178 stats
->rx_bytes
= old_stats
->rx_bytes
+
8179 get_stat64(&hw_stats
->rx_octets
);
8180 stats
->tx_bytes
= old_stats
->tx_bytes
+
8181 get_stat64(&hw_stats
->tx_octets
);
8183 stats
->rx_errors
= old_stats
->rx_errors
+
8184 get_stat64(&hw_stats
->rx_errors
);
8185 stats
->tx_errors
= old_stats
->tx_errors
+
8186 get_stat64(&hw_stats
->tx_errors
) +
8187 get_stat64(&hw_stats
->tx_mac_errors
) +
8188 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
8189 get_stat64(&hw_stats
->tx_discards
);
8191 stats
->multicast
= old_stats
->multicast
+
8192 get_stat64(&hw_stats
->rx_mcast_packets
);
8193 stats
->collisions
= old_stats
->collisions
+
8194 get_stat64(&hw_stats
->tx_collisions
);
8196 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
8197 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
8198 get_stat64(&hw_stats
->rx_undersize_packets
);
8200 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
8201 get_stat64(&hw_stats
->rxbds_empty
);
8202 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
8203 get_stat64(&hw_stats
->rx_align_errors
);
8204 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
8205 get_stat64(&hw_stats
->tx_discards
);
8206 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
8207 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
8209 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
8210 calc_crc_errors(tp
);
8212 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
8213 get_stat64(&hw_stats
->rx_discards
);
8218 static inline u32
calc_crc(unsigned char *buf
, int len
)
8226 for (j
= 0; j
< len
; j
++) {
8229 for (k
= 0; k
< 8; k
++) {
8243 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
8245 /* accept or reject all multicast frames */
8246 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
8247 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
8248 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
8249 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
8252 static void __tg3_set_rx_mode(struct net_device
*dev
)
8254 struct tg3
*tp
= netdev_priv(dev
);
8257 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
8258 RX_MODE_KEEP_VLAN_TAG
);
8260 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8263 #if TG3_VLAN_TAG_USED
8265 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
8266 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
8268 /* By definition, VLAN is disabled always in this
8271 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
8272 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
8275 if (dev
->flags
& IFF_PROMISC
) {
8276 /* Promiscuous mode. */
8277 rx_mode
|= RX_MODE_PROMISC
;
8278 } else if (dev
->flags
& IFF_ALLMULTI
) {
8279 /* Accept all multicast. */
8280 tg3_set_multi (tp
, 1);
8281 } else if (dev
->mc_count
< 1) {
8282 /* Reject all multicast. */
8283 tg3_set_multi (tp
, 0);
8285 /* Accept one or more multicast(s). */
8286 struct dev_mc_list
*mclist
;
8288 u32 mc_filter
[4] = { 0, };
8293 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
8294 i
++, mclist
= mclist
->next
) {
8296 crc
= calc_crc (mclist
->dmi_addr
, ETH_ALEN
);
8298 regidx
= (bit
& 0x60) >> 5;
8300 mc_filter
[regidx
] |= (1 << bit
);
8303 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
8304 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
8305 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
8306 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
8309 if (rx_mode
!= tp
->rx_mode
) {
8310 tp
->rx_mode
= rx_mode
;
8311 tw32_f(MAC_RX_MODE
, rx_mode
);
/* net_device set_rx_mode hook: take the tg3 lock and apply filtering. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
/* Size of the ethtool register dump buffer. */
#define TG3_REGDUMP_LEN		(32 * 1024)

static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8335 static void tg3_get_regs(struct net_device
*dev
,
8336 struct ethtool_regs
*regs
, void *_p
)
8339 struct tg3
*tp
= netdev_priv(dev
);
8345 memset(p
, 0, TG3_REGDUMP_LEN
);
8347 if (tp
->link_config
.phy_is_low_power
)
8350 tg3_full_lock(tp
, 0);
8352 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8353 #define GET_REG32_LOOP(base,len) \
8354 do { p = (u32 *)(orig_p + (base)); \
8355 for (i = 0; i < len; i += 4) \
8356 __GET_REG32((base) + i); \
8358 #define GET_REG32_1(reg) \
8359 do { p = (u32 *)(orig_p + (reg)); \
8360 __GET_REG32((reg)); \
8363 GET_REG32_LOOP(TG3PCI_VENDOR
, 0xb0);
8364 GET_REG32_LOOP(MAILBOX_INTERRUPT_0
, 0x200);
8365 GET_REG32_LOOP(MAC_MODE
, 0x4f0);
8366 GET_REG32_LOOP(SNDDATAI_MODE
, 0xe0);
8367 GET_REG32_1(SNDDATAC_MODE
);
8368 GET_REG32_LOOP(SNDBDS_MODE
, 0x80);
8369 GET_REG32_LOOP(SNDBDI_MODE
, 0x48);
8370 GET_REG32_1(SNDBDC_MODE
);
8371 GET_REG32_LOOP(RCVLPC_MODE
, 0x20);
8372 GET_REG32_LOOP(RCVLPC_SELLST_BASE
, 0x15c);
8373 GET_REG32_LOOP(RCVDBDI_MODE
, 0x0c);
8374 GET_REG32_LOOP(RCVDBDI_JUMBO_BD
, 0x3c);
8375 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0
, 0x44);
8376 GET_REG32_1(RCVDCC_MODE
);
8377 GET_REG32_LOOP(RCVBDI_MODE
, 0x20);
8378 GET_REG32_LOOP(RCVCC_MODE
, 0x14);
8379 GET_REG32_LOOP(RCVLSC_MODE
, 0x08);
8380 GET_REG32_1(MBFREE_MODE
);
8381 GET_REG32_LOOP(HOSTCC_MODE
, 0x100);
8382 GET_REG32_LOOP(MEMARB_MODE
, 0x10);
8383 GET_REG32_LOOP(BUFMGR_MODE
, 0x58);
8384 GET_REG32_LOOP(RDMAC_MODE
, 0x08);
8385 GET_REG32_LOOP(WDMAC_MODE
, 0x08);
8386 GET_REG32_1(RX_CPU_MODE
);
8387 GET_REG32_1(RX_CPU_STATE
);
8388 GET_REG32_1(RX_CPU_PGMCTR
);
8389 GET_REG32_1(RX_CPU_HWBKPT
);
8390 GET_REG32_1(TX_CPU_MODE
);
8391 GET_REG32_1(TX_CPU_STATE
);
8392 GET_REG32_1(TX_CPU_PGMCTR
);
8393 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0
, 0x110);
8394 GET_REG32_LOOP(FTQ_RESET
, 0x120);
8395 GET_REG32_LOOP(MSGINT_MODE
, 0x0c);
8396 GET_REG32_1(DMAC_MODE
);
8397 GET_REG32_LOOP(GRC_MODE
, 0x4c);
8398 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
8399 GET_REG32_LOOP(NVRAM_CMD
, 0x24);
8402 #undef GET_REG32_LOOP
8405 tg3_full_unlock(tp
);
8408 static int tg3_get_eeprom_len(struct net_device
*dev
)
8410 struct tg3
*tp
= netdev_priv(dev
);
8412 return tp
->nvram_size
;
8415 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
);
8416 static int tg3_nvram_read_le(struct tg3
*tp
, u32 offset
, __le32
*val
);
8417 static int tg3_nvram_read_swab(struct tg3
*tp
, u32 offset
, u32
*val
);
8419 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
8421 struct tg3
*tp
= netdev_priv(dev
);
8424 u32 i
, offset
, len
, b_offset
, b_count
;
8427 if (tp
->link_config
.phy_is_low_power
)
8430 offset
= eeprom
->offset
;
8434 eeprom
->magic
= TG3_EEPROM_MAGIC
;
8437 /* adjustments to start on required 4 byte boundary */
8438 b_offset
= offset
& 3;
8439 b_count
= 4 - b_offset
;
8440 if (b_count
> len
) {
8441 /* i.e. offset=1 len=2 */
8444 ret
= tg3_nvram_read_le(tp
, offset
-b_offset
, &val
);
8447 memcpy(data
, ((char*)&val
) + b_offset
, b_count
);
8450 eeprom
->len
+= b_count
;
8453 /* read bytes upto the last 4 byte boundary */
8454 pd
= &data
[eeprom
->len
];
8455 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
8456 ret
= tg3_nvram_read_le(tp
, offset
+ i
, &val
);
8461 memcpy(pd
+ i
, &val
, 4);
8466 /* read last bytes not ending on 4 byte boundary */
8467 pd
= &data
[eeprom
->len
];
8469 b_offset
= offset
+ len
- b_count
;
8470 ret
= tg3_nvram_read_le(tp
, b_offset
, &val
);
8473 memcpy(pd
, &val
, b_count
);
8474 eeprom
->len
+= b_count
;
8479 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
8481 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
8483 struct tg3
*tp
= netdev_priv(dev
);
8485 u32 offset
, len
, b_offset
, odd_len
;
8489 if (tp
->link_config
.phy_is_low_power
)
8492 if (eeprom
->magic
!= TG3_EEPROM_MAGIC
)
8495 offset
= eeprom
->offset
;
8498 if ((b_offset
= (offset
& 3))) {
8499 /* adjustments to start on required 4 byte boundary */
8500 ret
= tg3_nvram_read_le(tp
, offset
-b_offset
, &start
);
8511 /* adjustments to end on required 4 byte boundary */
8513 len
= (len
+ 3) & ~3;
8514 ret
= tg3_nvram_read_le(tp
, offset
+len
-4, &end
);
8520 if (b_offset
|| odd_len
) {
8521 buf
= kmalloc(len
, GFP_KERNEL
);
8525 memcpy(buf
, &start
, 4);
8527 memcpy(buf
+len
-4, &end
, 4);
8528 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
8531 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
8539 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8541 struct tg3
*tp
= netdev_priv(dev
);
8543 cmd
->supported
= (SUPPORTED_Autoneg
);
8545 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
8546 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
8547 SUPPORTED_1000baseT_Full
);
8549 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
8550 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
8551 SUPPORTED_100baseT_Full
|
8552 SUPPORTED_10baseT_Half
|
8553 SUPPORTED_10baseT_Full
|
8555 cmd
->port
= PORT_TP
;
8557 cmd
->supported
|= SUPPORTED_FIBRE
;
8558 cmd
->port
= PORT_FIBRE
;
8561 cmd
->advertising
= tp
->link_config
.advertising
;
8562 if (netif_running(dev
)) {
8563 cmd
->speed
= tp
->link_config
.active_speed
;
8564 cmd
->duplex
= tp
->link_config
.active_duplex
;
8566 cmd
->phy_address
= PHY_ADDR
;
8567 cmd
->transceiver
= 0;
8568 cmd
->autoneg
= tp
->link_config
.autoneg
;
8574 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
8576 struct tg3
*tp
= netdev_priv(dev
);
8578 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) {
8579 /* These are the only valid advertisement bits allowed. */
8580 if (cmd
->autoneg
== AUTONEG_ENABLE
&&
8581 (cmd
->advertising
& ~(ADVERTISED_1000baseT_Half
|
8582 ADVERTISED_1000baseT_Full
|
8583 ADVERTISED_Autoneg
|
8586 /* Fiber can only do SPEED_1000. */
8587 else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
8588 (cmd
->speed
!= SPEED_1000
))
8590 /* Copper cannot force SPEED_1000. */
8591 } else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
8592 (cmd
->speed
== SPEED_1000
))
8594 else if ((cmd
->speed
== SPEED_1000
) &&
8595 (tp
->tg3_flags2
& TG3_FLAG_10_100_ONLY
))
8598 tg3_full_lock(tp
, 0);
8600 tp
->link_config
.autoneg
= cmd
->autoneg
;
8601 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
8602 tp
->link_config
.advertising
= (cmd
->advertising
|
8603 ADVERTISED_Autoneg
);
8604 tp
->link_config
.speed
= SPEED_INVALID
;
8605 tp
->link_config
.duplex
= DUPLEX_INVALID
;
8607 tp
->link_config
.advertising
= 0;
8608 tp
->link_config
.speed
= cmd
->speed
;
8609 tp
->link_config
.duplex
= cmd
->duplex
;
8612 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
8613 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
8614 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
8616 if (netif_running(dev
))
8617 tg3_setup_phy(tp
, 1);
8619 tg3_full_unlock(tp
);
8624 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
8626 struct tg3
*tp
= netdev_priv(dev
);
8628 strcpy(info
->driver
, DRV_MODULE_NAME
);
8629 strcpy(info
->version
, DRV_MODULE_VERSION
);
8630 strcpy(info
->fw_version
, tp
->fw_ver
);
8631 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
8634 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
8636 struct tg3
*tp
= netdev_priv(dev
);
8638 if (tp
->tg3_flags
& TG3_FLAG_WOL_CAP
)
8639 wol
->supported
= WAKE_MAGIC
;
8643 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)
8644 wol
->wolopts
= WAKE_MAGIC
;
8645 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
8648 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
8650 struct tg3
*tp
= netdev_priv(dev
);
8652 if (wol
->wolopts
& ~WAKE_MAGIC
)
8654 if ((wol
->wolopts
& WAKE_MAGIC
) &&
8655 !(tp
->tg3_flags
& TG3_FLAG_WOL_CAP
))
8658 spin_lock_bh(&tp
->lock
);
8659 if (wol
->wolopts
& WAKE_MAGIC
)
8660 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
8662 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
8663 spin_unlock_bh(&tp
->lock
);
8668 static u32
tg3_get_msglevel(struct net_device
*dev
)
8670 struct tg3
*tp
= netdev_priv(dev
);
8671 return tp
->msg_enable
;
8674 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
8676 struct tg3
*tp
= netdev_priv(dev
);
8677 tp
->msg_enable
= value
;
8680 static int tg3_set_tso(struct net_device
*dev
, u32 value
)
8682 struct tg3
*tp
= netdev_priv(dev
);
8684 if (!(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
8689 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
) &&
8690 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
)) {
8692 dev
->features
|= NETIF_F_TSO6
;
8693 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
8694 dev
->features
|= NETIF_F_TSO_ECN
;
8696 dev
->features
&= ~(NETIF_F_TSO6
| NETIF_F_TSO_ECN
);
8698 return ethtool_op_set_tso(dev
, value
);
8701 static int tg3_nway_reset(struct net_device
*dev
)
8703 struct tg3
*tp
= netdev_priv(dev
);
8707 if (!netif_running(dev
))
8710 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8713 spin_lock_bh(&tp
->lock
);
8715 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
8716 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
8717 ((bmcr
& BMCR_ANENABLE
) ||
8718 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
))) {
8719 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
8723 spin_unlock_bh(&tp
->lock
);
8728 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
8730 struct tg3
*tp
= netdev_priv(dev
);
8732 ering
->rx_max_pending
= TG3_RX_RING_SIZE
- 1;
8733 ering
->rx_mini_max_pending
= 0;
8734 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
8735 ering
->rx_jumbo_max_pending
= TG3_RX_JUMBO_RING_SIZE
- 1;
8737 ering
->rx_jumbo_max_pending
= 0;
8739 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
8741 ering
->rx_pending
= tp
->rx_pending
;
8742 ering
->rx_mini_pending
= 0;
8743 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
8744 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
8746 ering
->rx_jumbo_pending
= 0;
8748 ering
->tx_pending
= tp
->tx_pending
;
8751 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
8753 struct tg3
*tp
= netdev_priv(dev
);
8754 int irq_sync
= 0, err
= 0;
8756 if ((ering
->rx_pending
> TG3_RX_RING_SIZE
- 1) ||
8757 (ering
->rx_jumbo_pending
> TG3_RX_JUMBO_RING_SIZE
- 1) ||
8758 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
8759 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
8760 ((tp
->tg3_flags2
& TG3_FLG2_TSO_BUG
) &&
8761 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
8764 if (netif_running(dev
)) {
8769 tg3_full_lock(tp
, irq_sync
);
8771 tp
->rx_pending
= ering
->rx_pending
;
8773 if ((tp
->tg3_flags2
& TG3_FLG2_MAX_RXPEND_64
) &&
8774 tp
->rx_pending
> 63)
8775 tp
->rx_pending
= 63;
8776 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
8777 tp
->tx_pending
= ering
->tx_pending
;
8779 if (netif_running(dev
)) {
8780 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8781 err
= tg3_restart_hw(tp
, 1);
8783 tg3_netif_start(tp
);
8786 tg3_full_unlock(tp
);
8791 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
8793 struct tg3
*tp
= netdev_priv(dev
);
8795 epause
->autoneg
= (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) != 0;
8797 if (tp
->link_config
.active_flowctrl
& TG3_FLOW_CTRL_RX
)
8798 epause
->rx_pause
= 1;
8800 epause
->rx_pause
= 0;
8802 if (tp
->link_config
.active_flowctrl
& TG3_FLOW_CTRL_TX
)
8803 epause
->tx_pause
= 1;
8805 epause
->tx_pause
= 0;
8808 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
8810 struct tg3
*tp
= netdev_priv(dev
);
8811 int irq_sync
= 0, err
= 0;
8813 if (netif_running(dev
)) {
8818 tg3_full_lock(tp
, irq_sync
);
8820 if (epause
->autoneg
)
8821 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
8823 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
8824 if (epause
->rx_pause
)
8825 tp
->link_config
.flowctrl
|= TG3_FLOW_CTRL_RX
;
8827 tp
->link_config
.flowctrl
&= ~TG3_FLOW_CTRL_RX
;
8828 if (epause
->tx_pause
)
8829 tp
->link_config
.flowctrl
|= TG3_FLOW_CTRL_TX
;
8831 tp
->link_config
.flowctrl
&= ~TG3_FLOW_CTRL_TX
;
8833 if (netif_running(dev
)) {
8834 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8835 err
= tg3_restart_hw(tp
, 1);
8837 tg3_netif_start(tp
);
8840 tg3_full_unlock(tp
);
8845 static u32
tg3_get_rx_csum(struct net_device
*dev
)
8847 struct tg3
*tp
= netdev_priv(dev
);
8848 return (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0;
8851 static int tg3_set_rx_csum(struct net_device
*dev
, u32 data
)
8853 struct tg3
*tp
= netdev_priv(dev
);
8855 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
8861 spin_lock_bh(&tp
->lock
);
8863 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
8865 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
8866 spin_unlock_bh(&tp
->lock
);
8871 static int tg3_set_tx_csum(struct net_device
*dev
, u32 data
)
8873 struct tg3
*tp
= netdev_priv(dev
);
8875 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
8881 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8882 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
8883 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
8884 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
8885 ethtool_op_set_tx_ipv6_csum(dev
, data
);
8887 ethtool_op_set_tx_csum(dev
, data
);
8892 static int tg3_get_sset_count (struct net_device
*dev
, int sset
)
8896 return TG3_NUM_TEST
;
8898 return TG3_NUM_STATS
;
8904 static void tg3_get_strings (struct net_device
*dev
, u32 stringset
, u8
*buf
)
8906 switch (stringset
) {
8908 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
8911 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
8914 WARN_ON(1); /* we need a WARN() */
8919 static int tg3_phys_id(struct net_device
*dev
, u32 data
)
8921 struct tg3
*tp
= netdev_priv(dev
);
8924 if (!netif_running(tp
->dev
))
8928 data
= UINT_MAX
/ 2;
8930 for (i
= 0; i
< (data
* 2); i
++) {
8932 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
8933 LED_CTRL_1000MBPS_ON
|
8934 LED_CTRL_100MBPS_ON
|
8935 LED_CTRL_10MBPS_ON
|
8936 LED_CTRL_TRAFFIC_OVERRIDE
|
8937 LED_CTRL_TRAFFIC_BLINK
|
8938 LED_CTRL_TRAFFIC_LED
);
8941 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
8942 LED_CTRL_TRAFFIC_OVERRIDE
);
8944 if (msleep_interruptible(500))
8947 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8951 static void tg3_get_ethtool_stats (struct net_device
*dev
,
8952 struct ethtool_stats
*estats
, u64
*tmp_stats
)
8954 struct tg3
*tp
= netdev_priv(dev
);
8955 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
8958 #define NVRAM_TEST_SIZE 0x100
8959 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8960 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8961 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
8962 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8963 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8965 static int tg3_test_nvram(struct tg3
*tp
)
8969 int i
, j
, k
, err
= 0, size
;
8971 if (tg3_nvram_read_swab(tp
, 0, &magic
) != 0)
8974 if (magic
== TG3_EEPROM_MAGIC
)
8975 size
= NVRAM_TEST_SIZE
;
8976 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
8977 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
8978 TG3_EEPROM_SB_FORMAT_1
) {
8979 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
8980 case TG3_EEPROM_SB_REVISION_0
:
8981 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
8983 case TG3_EEPROM_SB_REVISION_2
:
8984 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
8986 case TG3_EEPROM_SB_REVISION_3
:
8987 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
8994 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
8995 size
= NVRAM_SELFBOOT_HW_SIZE
;
8999 buf
= kmalloc(size
, GFP_KERNEL
);
9004 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
9005 if ((err
= tg3_nvram_read_le(tp
, i
, &buf
[j
])) != 0)
9011 /* Selfboot format */
9012 magic
= swab32(le32_to_cpu(buf
[0]));
9013 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
9014 TG3_EEPROM_MAGIC_FW
) {
9015 u8
*buf8
= (u8
*) buf
, csum8
= 0;
9017 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
9018 TG3_EEPROM_SB_REVISION_2
) {
9019 /* For rev 2, the csum doesn't include the MBA. */
9020 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
9022 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
9025 for (i
= 0; i
< size
; i
++)
9038 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
9039 TG3_EEPROM_MAGIC_HW
) {
9040 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
9041 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
9042 u8
*buf8
= (u8
*) buf
;
9044 /* Separate the parity bits and the data bytes. */
9045 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
9046 if ((i
== 0) || (i
== 8)) {
9050 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
9051 parity
[k
++] = buf8
[i
] & msk
;
9058 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
9059 parity
[k
++] = buf8
[i
] & msk
;
9062 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
9063 parity
[k
++] = buf8
[i
] & msk
;
9066 data
[j
++] = buf8
[i
];
9070 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
9071 u8 hw8
= hweight8(data
[i
]);
9073 if ((hw8
& 0x1) && parity
[i
])
9075 else if (!(hw8
& 0x1) && !parity
[i
])
9082 /* Bootstrap checksum at offset 0x10 */
9083 csum
= calc_crc((unsigned char *) buf
, 0x10);
9084 if(csum
!= le32_to_cpu(buf
[0x10/4]))
9087 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9088 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
9089 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
9099 #define TG3_SERDES_TIMEOUT_SEC 2
9100 #define TG3_COPPER_TIMEOUT_SEC 6
9102 static int tg3_test_link(struct tg3
*tp
)
9106 if (!netif_running(tp
->dev
))
9109 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
9110 max
= TG3_SERDES_TIMEOUT_SEC
;
9112 max
= TG3_COPPER_TIMEOUT_SEC
;
9114 for (i
= 0; i
< max
; i
++) {
9115 if (netif_carrier_ok(tp
->dev
))
9118 if (msleep_interruptible(1000))
9125 /* Only test the commonly used registers */
9126 static int tg3_test_registers(struct tg3
*tp
)
9128 int i
, is_5705
, is_5750
;
9129 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
9133 #define TG3_FL_5705 0x1
9134 #define TG3_FL_NOT_5705 0x2
9135 #define TG3_FL_NOT_5788 0x4
9136 #define TG3_FL_NOT_5750 0x8
9140 /* MAC Control Registers */
9141 { MAC_MODE
, TG3_FL_NOT_5705
,
9142 0x00000000, 0x00ef6f8c },
9143 { MAC_MODE
, TG3_FL_5705
,
9144 0x00000000, 0x01ef6b8c },
9145 { MAC_STATUS
, TG3_FL_NOT_5705
,
9146 0x03800107, 0x00000000 },
9147 { MAC_STATUS
, TG3_FL_5705
,
9148 0x03800100, 0x00000000 },
9149 { MAC_ADDR_0_HIGH
, 0x0000,
9150 0x00000000, 0x0000ffff },
9151 { MAC_ADDR_0_LOW
, 0x0000,
9152 0x00000000, 0xffffffff },
9153 { MAC_RX_MTU_SIZE
, 0x0000,
9154 0x00000000, 0x0000ffff },
9155 { MAC_TX_MODE
, 0x0000,
9156 0x00000000, 0x00000070 },
9157 { MAC_TX_LENGTHS
, 0x0000,
9158 0x00000000, 0x00003fff },
9159 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
9160 0x00000000, 0x000007fc },
9161 { MAC_RX_MODE
, TG3_FL_5705
,
9162 0x00000000, 0x000007dc },
9163 { MAC_HASH_REG_0
, 0x0000,
9164 0x00000000, 0xffffffff },
9165 { MAC_HASH_REG_1
, 0x0000,
9166 0x00000000, 0xffffffff },
9167 { MAC_HASH_REG_2
, 0x0000,
9168 0x00000000, 0xffffffff },
9169 { MAC_HASH_REG_3
, 0x0000,
9170 0x00000000, 0xffffffff },
9172 /* Receive Data and Receive BD Initiator Control Registers. */
9173 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
9174 0x00000000, 0xffffffff },
9175 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
9176 0x00000000, 0xffffffff },
9177 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
9178 0x00000000, 0x00000003 },
9179 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
9180 0x00000000, 0xffffffff },
9181 { RCVDBDI_STD_BD
+0, 0x0000,
9182 0x00000000, 0xffffffff },
9183 { RCVDBDI_STD_BD
+4, 0x0000,
9184 0x00000000, 0xffffffff },
9185 { RCVDBDI_STD_BD
+8, 0x0000,
9186 0x00000000, 0xffff0002 },
9187 { RCVDBDI_STD_BD
+0xc, 0x0000,
9188 0x00000000, 0xffffffff },
9190 /* Receive BD Initiator Control Registers. */
9191 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
9192 0x00000000, 0xffffffff },
9193 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
9194 0x00000000, 0x000003ff },
9195 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
9196 0x00000000, 0xffffffff },
9198 /* Host Coalescing Control Registers. */
9199 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
9200 0x00000000, 0x00000004 },
9201 { HOSTCC_MODE
, TG3_FL_5705
,
9202 0x00000000, 0x000000f6 },
9203 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
9204 0x00000000, 0xffffffff },
9205 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
9206 0x00000000, 0x000003ff },
9207 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
9208 0x00000000, 0xffffffff },
9209 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
9210 0x00000000, 0x000003ff },
9211 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
9212 0x00000000, 0xffffffff },
9213 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
9214 0x00000000, 0x000000ff },
9215 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
9216 0x00000000, 0xffffffff },
9217 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
9218 0x00000000, 0x000000ff },
9219 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
9220 0x00000000, 0xffffffff },
9221 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
9222 0x00000000, 0xffffffff },
9223 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
9224 0x00000000, 0xffffffff },
9225 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
9226 0x00000000, 0x000000ff },
9227 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
9228 0x00000000, 0xffffffff },
9229 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
9230 0x00000000, 0x000000ff },
9231 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
9232 0x00000000, 0xffffffff },
9233 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
9234 0x00000000, 0xffffffff },
9235 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
9236 0x00000000, 0xffffffff },
9237 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
9238 0x00000000, 0xffffffff },
9239 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
9240 0x00000000, 0xffffffff },
9241 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
9242 0xffffffff, 0x00000000 },
9243 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
9244 0xffffffff, 0x00000000 },
9246 /* Buffer Manager Control Registers. */
9247 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
9248 0x00000000, 0x007fff80 },
9249 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
9250 0x00000000, 0x007fffff },
9251 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
9252 0x00000000, 0x0000003f },
9253 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
9254 0x00000000, 0x000001ff },
9255 { BUFMGR_MB_HIGH_WATER
, 0x0000,
9256 0x00000000, 0x000001ff },
9257 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
9258 0xffffffff, 0x00000000 },
9259 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
9260 0xffffffff, 0x00000000 },
9262 /* Mailbox Registers */
9263 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
9264 0x00000000, 0x000001ff },
9265 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
9266 0x00000000, 0x000001ff },
9267 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
9268 0x00000000, 0x000007ff },
9269 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
9270 0x00000000, 0x000001ff },
9272 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9275 is_5705
= is_5750
= 0;
9276 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
9278 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9282 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
9283 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
9286 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
9289 if ((tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
9290 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
9293 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
9296 offset
= (u32
) reg_tbl
[i
].offset
;
9297 read_mask
= reg_tbl
[i
].read_mask
;
9298 write_mask
= reg_tbl
[i
].write_mask
;
9300 /* Save the original register content */
9301 save_val
= tr32(offset
);
9303 /* Determine the read-only value. */
9304 read_val
= save_val
& read_mask
;
9306 /* Write zero to the register, then make sure the read-only bits
9307 * are not changed and the read/write bits are all zeros.
9313 /* Test the read-only and read/write bits. */
9314 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
9317 /* Write ones to all the bits defined by RdMask and WrMask, then
9318 * make sure the read-only bits are not changed and the
9319 * read/write bits are all ones.
9321 tw32(offset
, read_mask
| write_mask
);
9325 /* Test the read-only bits. */
9326 if ((val
& read_mask
) != read_val
)
9329 /* Test the read/write bits. */
9330 if ((val
& write_mask
) != write_mask
)
9333 tw32(offset
, save_val
);
9339 if (netif_msg_hw(tp
))
9340 printk(KERN_ERR PFX
"Register test failed at offset %x\n",
9342 tw32(offset
, save_val
);
9346 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
9348 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9352 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
9353 for (j
= 0; j
< len
; j
+= 4) {
9356 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
9357 tg3_read_mem(tp
, offset
+ j
, &val
);
9358 if (val
!= test_pattern
[i
])
9365 static int tg3_test_memory(struct tg3
*tp
)
9367 static struct mem_entry
{
9370 } mem_tbl_570x
[] = {
9371 { 0x00000000, 0x00b50},
9372 { 0x00002000, 0x1c000},
9373 { 0xffffffff, 0x00000}
9374 }, mem_tbl_5705
[] = {
9375 { 0x00000100, 0x0000c},
9376 { 0x00000200, 0x00008},
9377 { 0x00004000, 0x00800},
9378 { 0x00006000, 0x01000},
9379 { 0x00008000, 0x02000},
9380 { 0x00010000, 0x0e000},
9381 { 0xffffffff, 0x00000}
9382 }, mem_tbl_5755
[] = {
9383 { 0x00000200, 0x00008},
9384 { 0x00004000, 0x00800},
9385 { 0x00006000, 0x00800},
9386 { 0x00008000, 0x02000},
9387 { 0x00010000, 0x0c000},
9388 { 0xffffffff, 0x00000}
9389 }, mem_tbl_5906
[] = {
9390 { 0x00000200, 0x00008},
9391 { 0x00004000, 0x00400},
9392 { 0x00006000, 0x00400},
9393 { 0x00008000, 0x01000},
9394 { 0x00010000, 0x01000},
9395 { 0xffffffff, 0x00000}
9397 struct mem_entry
*mem_tbl
;
9401 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
9402 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
9403 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
9404 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9405 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
9406 mem_tbl
= mem_tbl_5755
;
9407 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
9408 mem_tbl
= mem_tbl_5906
;
9410 mem_tbl
= mem_tbl_5705
;
9412 mem_tbl
= mem_tbl_570x
;
9414 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
9415 if ((err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
,
9416 mem_tbl
[i
].len
)) != 0)
9423 #define TG3_MAC_LOOPBACK 0
9424 #define TG3_PHY_LOOPBACK 1
9426 static int tg3_run_loopback(struct tg3
*tp
, int loopback_mode
)
9428 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
9430 struct sk_buff
*skb
, *rx_skb
;
9433 int num_pkts
, tx_len
, rx_len
, i
, err
;
9434 struct tg3_rx_buffer_desc
*desc
;
9436 if (loopback_mode
== TG3_MAC_LOOPBACK
) {
9437 /* HW errata - mac loopback fails in some cases on 5780.
9438 * Normal traffic and PHY loopback are not affected by
9441 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
)
9444 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
9445 MAC_MODE_PORT_INT_LPBACK
;
9446 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
9447 mac_mode
|= MAC_MODE_LINK_POLARITY
;
9448 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
9449 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
9451 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
9452 tw32(MAC_MODE
, mac_mode
);
9453 } else if (loopback_mode
== TG3_PHY_LOOPBACK
) {
9456 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
9459 if (!tg3_readphy(tp
, MII_TG3_EPHY_TEST
, &phytest
)) {
9462 tg3_writephy(tp
, MII_TG3_EPHY_TEST
,
9463 phytest
| MII_TG3_EPHY_SHADOW_EN
);
9464 if (!tg3_readphy(tp
, 0x1b, &phy
))
9465 tg3_writephy(tp
, 0x1b, phy
& ~0x20);
9466 tg3_writephy(tp
, MII_TG3_EPHY_TEST
, phytest
);
9468 val
= BMCR_LOOPBACK
| BMCR_FULLDPLX
| BMCR_SPEED100
;
9470 val
= BMCR_LOOPBACK
| BMCR_FULLDPLX
| BMCR_SPEED1000
;
9472 tg3_phy_toggle_automdix(tp
, 0);
9474 tg3_writephy(tp
, MII_BMCR
, val
);
9477 mac_mode
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
9478 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
9479 tg3_writephy(tp
, MII_TG3_EPHY_PTEST
, 0x1800);
9480 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
9482 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
9484 /* reset to prevent losing 1st rx packet intermittently */
9485 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
9486 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9488 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9490 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
9491 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)
9492 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
9493 else if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
)
9494 mac_mode
|= MAC_MODE_LINK_POLARITY
;
9495 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
9496 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
9498 tw32(MAC_MODE
, mac_mode
);
9506 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
9510 tx_data
= skb_put(skb
, tx_len
);
9511 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
9512 memset(tx_data
+ 6, 0x0, 8);
9514 tw32(MAC_RX_MTU_SIZE
, tx_len
+ 4);
9516 for (i
= 14; i
< tx_len
; i
++)
9517 tx_data
[i
] = (u8
) (i
& 0xff);
9519 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
9521 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
9526 rx_start_idx
= tp
->hw_status
->idx
[0].rx_producer
;
9530 tg3_set_txd(tp
, tp
->tx_prod
, map
, tx_len
, 0, 1);
9535 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
,
9537 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
);
9541 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9542 for (i
= 0; i
< 25; i
++) {
9543 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
9548 tx_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
9549 rx_idx
= tp
->hw_status
->idx
[0].rx_producer
;
9550 if ((tx_idx
== tp
->tx_prod
) &&
9551 (rx_idx
== (rx_start_idx
+ num_pkts
)))
9555 pci_unmap_single(tp
->pdev
, map
, tx_len
, PCI_DMA_TODEVICE
);
9558 if (tx_idx
!= tp
->tx_prod
)
9561 if (rx_idx
!= rx_start_idx
+ num_pkts
)
9564 desc
= &tp
->rx_rcb
[rx_start_idx
];
9565 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
9566 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
9567 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
9570 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
9571 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
9574 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4;
9575 if (rx_len
!= tx_len
)
9578 rx_skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
9580 map
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
], mapping
);
9581 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
, PCI_DMA_FROMDEVICE
);
9583 for (i
= 14; i
< tx_len
; i
++) {
9584 if (*(rx_skb
->data
+ i
) != (u8
) (i
& 0xff))
9589 /* tg3_free_rings will unmap and free the rx_skb */
9594 #define TG3_MAC_LOOPBACK_FAILED 1
9595 #define TG3_PHY_LOOPBACK_FAILED 2
9596 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9597 TG3_PHY_LOOPBACK_FAILED)
9599 static int tg3_test_loopback(struct tg3
*tp
)
9604 if (!netif_running(tp
->dev
))
9605 return TG3_LOOPBACK_FAILED
;
9607 err
= tg3_reset_hw(tp
, 1);
9609 return TG3_LOOPBACK_FAILED
;
9611 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9612 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
9616 tw32(TG3_CPMU_MUTEX_REQ
, CPMU_MUTEX_REQ_DRIVER
);
9618 /* Wait for up to 40 microseconds to acquire lock. */
9619 for (i
= 0; i
< 4; i
++) {
9620 status
= tr32(TG3_CPMU_MUTEX_GNT
);
9621 if (status
== CPMU_MUTEX_GNT_DRIVER
)
9626 if (status
!= CPMU_MUTEX_GNT_DRIVER
)
9627 return TG3_LOOPBACK_FAILED
;
9629 /* Turn off link-based power management. */
9630 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
9632 cpmuctrl
& ~(CPMU_CTRL_LINK_SPEED_MODE
|
9633 CPMU_CTRL_LINK_AWARE_MODE
));
9636 if (tg3_run_loopback(tp
, TG3_MAC_LOOPBACK
))
9637 err
|= TG3_MAC_LOOPBACK_FAILED
;
9639 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9640 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
9641 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
9643 /* Release the mutex */
9644 tw32(TG3_CPMU_MUTEX_GNT
, CPMU_MUTEX_GNT_DRIVER
);
9647 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
9648 if (tg3_run_loopback(tp
, TG3_PHY_LOOPBACK
))
9649 err
|= TG3_PHY_LOOPBACK_FAILED
;
9655 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
9658 struct tg3
*tp
= netdev_priv(dev
);
9660 if (tp
->link_config
.phy_is_low_power
)
9661 tg3_set_power_state(tp
, PCI_D0
);
9663 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
9665 if (tg3_test_nvram(tp
) != 0) {
9666 etest
->flags
|= ETH_TEST_FL_FAILED
;
9669 if (tg3_test_link(tp
) != 0) {
9670 etest
->flags
|= ETH_TEST_FL_FAILED
;
9673 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
9674 int err
, irq_sync
= 0;
9676 if (netif_running(dev
)) {
9681 tg3_full_lock(tp
, irq_sync
);
9683 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
9684 err
= tg3_nvram_lock(tp
);
9685 tg3_halt_cpu(tp
, RX_CPU_BASE
);
9686 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
9687 tg3_halt_cpu(tp
, TX_CPU_BASE
);
9689 tg3_nvram_unlock(tp
);
9691 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
9694 if (tg3_test_registers(tp
) != 0) {
9695 etest
->flags
|= ETH_TEST_FL_FAILED
;
9698 if (tg3_test_memory(tp
) != 0) {
9699 etest
->flags
|= ETH_TEST_FL_FAILED
;
9702 if ((data
[4] = tg3_test_loopback(tp
)) != 0)
9703 etest
->flags
|= ETH_TEST_FL_FAILED
;
9705 tg3_full_unlock(tp
);
9707 if (tg3_test_interrupt(tp
) != 0) {
9708 etest
->flags
|= ETH_TEST_FL_FAILED
;
9712 tg3_full_lock(tp
, 0);
9714 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
9715 if (netif_running(dev
)) {
9716 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
9717 if (!tg3_restart_hw(tp
, 1))
9718 tg3_netif_start(tp
);
9721 tg3_full_unlock(tp
);
9723 if (tp
->link_config
.phy_is_low_power
)
9724 tg3_set_power_state(tp
, PCI_D3hot
);
9728 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
9730 struct mii_ioctl_data
*data
= if_mii(ifr
);
9731 struct tg3
*tp
= netdev_priv(dev
);
9736 data
->phy_id
= PHY_ADDR
;
9742 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
9743 break; /* We have no PHY */
9745 if (tp
->link_config
.phy_is_low_power
)
9748 spin_lock_bh(&tp
->lock
);
9749 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
9750 spin_unlock_bh(&tp
->lock
);
9752 data
->val_out
= mii_regval
;
9758 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
9759 break; /* We have no PHY */
9761 if (!capable(CAP_NET_ADMIN
))
9764 if (tp
->link_config
.phy_is_low_power
)
9767 spin_lock_bh(&tp
->lock
);
9768 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
9769 spin_unlock_bh(&tp
->lock
);
9780 #if TG3_VLAN_TAG_USED
9781 static void tg3_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*grp
)
9783 struct tg3
*tp
= netdev_priv(dev
);
9785 if (netif_running(dev
))
9788 tg3_full_lock(tp
, 0);
9792 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9793 __tg3_set_rx_mode(dev
);
9795 if (netif_running(dev
))
9796 tg3_netif_start(tp
);
9798 tg3_full_unlock(tp
);
9802 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
9804 struct tg3
*tp
= netdev_priv(dev
);
9806 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
9810 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
9812 struct tg3
*tp
= netdev_priv(dev
);
9813 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
9814 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
9816 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
9817 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
9818 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
9819 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
9820 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
9823 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
9824 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
9825 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
9826 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
9827 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
9828 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
9829 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
9830 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
9831 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
9832 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
9835 /* No rx interrupts will be generated if both are zero */
9836 if ((ec
->rx_coalesce_usecs
== 0) &&
9837 (ec
->rx_max_coalesced_frames
== 0))
9840 /* No tx interrupts will be generated if both are zero */
9841 if ((ec
->tx_coalesce_usecs
== 0) &&
9842 (ec
->tx_max_coalesced_frames
== 0))
9845 /* Only copy relevant parameters, ignore all others. */
9846 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
9847 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
9848 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
9849 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
9850 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
9851 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
9852 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
9853 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
9854 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
9856 if (netif_running(dev
)) {
9857 tg3_full_lock(tp
, 0);
9858 __tg3_set_coalesce(tp
, &tp
->coal
);
9859 tg3_full_unlock(tp
);
9864 static const struct ethtool_ops tg3_ethtool_ops
= {
9865 .get_settings
= tg3_get_settings
,
9866 .set_settings
= tg3_set_settings
,
9867 .get_drvinfo
= tg3_get_drvinfo
,
9868 .get_regs_len
= tg3_get_regs_len
,
9869 .get_regs
= tg3_get_regs
,
9870 .get_wol
= tg3_get_wol
,
9871 .set_wol
= tg3_set_wol
,
9872 .get_msglevel
= tg3_get_msglevel
,
9873 .set_msglevel
= tg3_set_msglevel
,
9874 .nway_reset
= tg3_nway_reset
,
9875 .get_link
= ethtool_op_get_link
,
9876 .get_eeprom_len
= tg3_get_eeprom_len
,
9877 .get_eeprom
= tg3_get_eeprom
,
9878 .set_eeprom
= tg3_set_eeprom
,
9879 .get_ringparam
= tg3_get_ringparam
,
9880 .set_ringparam
= tg3_set_ringparam
,
9881 .get_pauseparam
= tg3_get_pauseparam
,
9882 .set_pauseparam
= tg3_set_pauseparam
,
9883 .get_rx_csum
= tg3_get_rx_csum
,
9884 .set_rx_csum
= tg3_set_rx_csum
,
9885 .set_tx_csum
= tg3_set_tx_csum
,
9886 .set_sg
= ethtool_op_set_sg
,
9887 .set_tso
= tg3_set_tso
,
9888 .self_test
= tg3_self_test
,
9889 .get_strings
= tg3_get_strings
,
9890 .phys_id
= tg3_phys_id
,
9891 .get_ethtool_stats
= tg3_get_ethtool_stats
,
9892 .get_coalesce
= tg3_get_coalesce
,
9893 .set_coalesce
= tg3_set_coalesce
,
9894 .get_sset_count
= tg3_get_sset_count
,
9897 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
9899 u32 cursize
, val
, magic
;
9901 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
9903 if (tg3_nvram_read_swab(tp
, 0, &magic
) != 0)
9906 if ((magic
!= TG3_EEPROM_MAGIC
) &&
9907 ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
) &&
9908 ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) != TG3_EEPROM_MAGIC_HW
))
9912 * Size the chip by reading offsets at increasing powers of two.
9913 * When we encounter our validation signature, we know the addressing
9914 * has wrapped around, and thus have our chip size.
9918 while (cursize
< tp
->nvram_size
) {
9919 if (tg3_nvram_read_swab(tp
, cursize
, &val
) != 0)
9928 tp
->nvram_size
= cursize
;
9931 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
9935 if (tg3_nvram_read_swab(tp
, 0, &val
) != 0)
9938 /* Selfboot format */
9939 if (val
!= TG3_EEPROM_MAGIC
) {
9940 tg3_get_eeprom_size(tp
);
9944 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
9946 tp
->nvram_size
= (val
>> 16) * 1024;
9950 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
9953 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
9957 nvcfg1
= tr32(NVRAM_CFG1
);
9958 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
9959 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9962 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9963 tw32(NVRAM_CFG1
, nvcfg1
);
9966 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) ||
9967 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
9968 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
9969 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
9970 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9971 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
9972 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9974 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
9975 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9976 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
9978 case FLASH_VENDOR_ATMEL_EEPROM
:
9979 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9980 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9981 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9983 case FLASH_VENDOR_ST
:
9984 tp
->nvram_jedecnum
= JEDEC_ST
;
9985 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
9986 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9988 case FLASH_VENDOR_SAIFUN
:
9989 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
9990 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
9992 case FLASH_VENDOR_SST_SMALL
:
9993 case FLASH_VENDOR_SST_LARGE
:
9994 tp
->nvram_jedecnum
= JEDEC_SST
;
9995 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
10000 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10001 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
10002 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10006 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
10010 nvcfg1
= tr32(NVRAM_CFG1
);
10012 /* NVRAM protection for TPM */
10013 if (nvcfg1
& (1 << 27))
10014 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
10016 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
10017 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
10018 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
10019 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10020 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10022 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
10023 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10024 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10025 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10027 case FLASH_5752VENDOR_ST_M45PE10
:
10028 case FLASH_5752VENDOR_ST_M45PE20
:
10029 case FLASH_5752VENDOR_ST_M45PE40
:
10030 tp
->nvram_jedecnum
= JEDEC_ST
;
10031 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10032 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10036 if (tp
->tg3_flags2
& TG3_FLG2_FLASH
) {
10037 switch (nvcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
10038 case FLASH_5752PAGE_SIZE_256
:
10039 tp
->nvram_pagesize
= 256;
10041 case FLASH_5752PAGE_SIZE_512
:
10042 tp
->nvram_pagesize
= 512;
10044 case FLASH_5752PAGE_SIZE_1K
:
10045 tp
->nvram_pagesize
= 1024;
10047 case FLASH_5752PAGE_SIZE_2K
:
10048 tp
->nvram_pagesize
= 2048;
10050 case FLASH_5752PAGE_SIZE_4K
:
10051 tp
->nvram_pagesize
= 4096;
10053 case FLASH_5752PAGE_SIZE_264
:
10054 tp
->nvram_pagesize
= 264;
10059 /* For eeprom, set pagesize to maximum eeprom size */
10060 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
10062 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
10063 tw32(NVRAM_CFG1
, nvcfg1
);
10067 static void __devinit
tg3_get_5755_nvram_info(struct tg3
*tp
)
10069 u32 nvcfg1
, protect
= 0;
10071 nvcfg1
= tr32(NVRAM_CFG1
);
10073 /* NVRAM protection for TPM */
10074 if (nvcfg1
& (1 << 27)) {
10075 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
10079 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
10081 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
10082 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
10083 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
10084 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
10085 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10086 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10087 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10088 tp
->nvram_pagesize
= 264;
10089 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
10090 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
10091 tp
->nvram_size
= (protect
? 0x3e200 :
10092 TG3_NVRAM_SIZE_512KB
);
10093 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
10094 tp
->nvram_size
= (protect
? 0x1f200 :
10095 TG3_NVRAM_SIZE_256KB
);
10097 tp
->nvram_size
= (protect
? 0x1f200 :
10098 TG3_NVRAM_SIZE_128KB
);
10100 case FLASH_5752VENDOR_ST_M45PE10
:
10101 case FLASH_5752VENDOR_ST_M45PE20
:
10102 case FLASH_5752VENDOR_ST_M45PE40
:
10103 tp
->nvram_jedecnum
= JEDEC_ST
;
10104 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10105 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10106 tp
->nvram_pagesize
= 256;
10107 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
10108 tp
->nvram_size
= (protect
?
10109 TG3_NVRAM_SIZE_64KB
:
10110 TG3_NVRAM_SIZE_128KB
);
10111 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
10112 tp
->nvram_size
= (protect
?
10113 TG3_NVRAM_SIZE_64KB
:
10114 TG3_NVRAM_SIZE_256KB
);
10116 tp
->nvram_size
= (protect
?
10117 TG3_NVRAM_SIZE_128KB
:
10118 TG3_NVRAM_SIZE_512KB
);
10123 static void __devinit
tg3_get_5787_nvram_info(struct tg3
*tp
)
10127 nvcfg1
= tr32(NVRAM_CFG1
);
10129 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
10130 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
10131 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
10132 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
10133 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
10134 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10135 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10136 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
10138 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
10139 tw32(NVRAM_CFG1
, nvcfg1
);
10141 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
10142 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
10143 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
10144 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
10145 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10146 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10147 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10148 tp
->nvram_pagesize
= 264;
10150 case FLASH_5752VENDOR_ST_M45PE10
:
10151 case FLASH_5752VENDOR_ST_M45PE20
:
10152 case FLASH_5752VENDOR_ST_M45PE40
:
10153 tp
->nvram_jedecnum
= JEDEC_ST
;
10154 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10155 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10156 tp
->nvram_pagesize
= 256;
10161 static void __devinit
tg3_get_5761_nvram_info(struct tg3
*tp
)
10163 u32 nvcfg1
, protect
= 0;
10165 nvcfg1
= tr32(NVRAM_CFG1
);
10167 /* NVRAM protection for TPM */
10168 if (nvcfg1
& (1 << 27)) {
10169 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
10173 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
10175 case FLASH_5761VENDOR_ATMEL_ADB021D
:
10176 case FLASH_5761VENDOR_ATMEL_ADB041D
:
10177 case FLASH_5761VENDOR_ATMEL_ADB081D
:
10178 case FLASH_5761VENDOR_ATMEL_ADB161D
:
10179 case FLASH_5761VENDOR_ATMEL_MDB021D
:
10180 case FLASH_5761VENDOR_ATMEL_MDB041D
:
10181 case FLASH_5761VENDOR_ATMEL_MDB081D
:
10182 case FLASH_5761VENDOR_ATMEL_MDB161D
:
10183 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10184 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10185 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10186 tp
->tg3_flags3
|= TG3_FLG3_NO_NVRAM_ADDR_TRANS
;
10187 tp
->nvram_pagesize
= 256;
10189 case FLASH_5761VENDOR_ST_A_M45PE20
:
10190 case FLASH_5761VENDOR_ST_A_M45PE40
:
10191 case FLASH_5761VENDOR_ST_A_M45PE80
:
10192 case FLASH_5761VENDOR_ST_A_M45PE16
:
10193 case FLASH_5761VENDOR_ST_M_M45PE20
:
10194 case FLASH_5761VENDOR_ST_M_M45PE40
:
10195 case FLASH_5761VENDOR_ST_M_M45PE80
:
10196 case FLASH_5761VENDOR_ST_M_M45PE16
:
10197 tp
->nvram_jedecnum
= JEDEC_ST
;
10198 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10199 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
10200 tp
->nvram_pagesize
= 256;
10205 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
10208 case FLASH_5761VENDOR_ATMEL_ADB161D
:
10209 case FLASH_5761VENDOR_ATMEL_MDB161D
:
10210 case FLASH_5761VENDOR_ST_A_M45PE16
:
10211 case FLASH_5761VENDOR_ST_M_M45PE16
:
10212 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
10214 case FLASH_5761VENDOR_ATMEL_ADB081D
:
10215 case FLASH_5761VENDOR_ATMEL_MDB081D
:
10216 case FLASH_5761VENDOR_ST_A_M45PE80
:
10217 case FLASH_5761VENDOR_ST_M_M45PE80
:
10218 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
10220 case FLASH_5761VENDOR_ATMEL_ADB041D
:
10221 case FLASH_5761VENDOR_ATMEL_MDB041D
:
10222 case FLASH_5761VENDOR_ST_A_M45PE40
:
10223 case FLASH_5761VENDOR_ST_M_M45PE40
:
10224 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
10226 case FLASH_5761VENDOR_ATMEL_ADB021D
:
10227 case FLASH_5761VENDOR_ATMEL_MDB021D
:
10228 case FLASH_5761VENDOR_ST_A_M45PE20
:
10229 case FLASH_5761VENDOR_ST_M_M45PE20
:
10230 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
10236 static void __devinit
tg3_get_5906_nvram_info(struct tg3
*tp
)
10238 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
10239 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
10240 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
10243 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10244 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
10246 tw32_f(GRC_EEPROM_ADDR
,
10247 (EEPROM_ADDR_FSM_RESET
|
10248 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
10249 EEPROM_ADDR_CLKPERD_SHIFT
)));
10253 /* Enable seeprom accesses. */
10254 tw32_f(GRC_LOCAL_CTRL
,
10255 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
10258 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10259 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
10260 tp
->tg3_flags
|= TG3_FLAG_NVRAM
;
10262 if (tg3_nvram_lock(tp
)) {
10263 printk(KERN_WARNING PFX
"%s: Cannot get nvarm lock, "
10264 "tg3_nvram_init failed.\n", tp
->dev
->name
);
10267 tg3_enable_nvram_access(tp
);
10269 tp
->nvram_size
= 0;
10271 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
10272 tg3_get_5752_nvram_info(tp
);
10273 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
10274 tg3_get_5755_nvram_info(tp
);
10275 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
10276 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
)
10277 tg3_get_5787_nvram_info(tp
);
10278 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
10279 tg3_get_5761_nvram_info(tp
);
10280 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10281 tg3_get_5906_nvram_info(tp
);
10283 tg3_get_nvram_info(tp
);
10285 if (tp
->nvram_size
== 0)
10286 tg3_get_nvram_size(tp
);
10288 tg3_disable_nvram_access(tp
);
10289 tg3_nvram_unlock(tp
);
10292 tp
->tg3_flags
&= ~(TG3_FLAG_NVRAM
| TG3_FLAG_NVRAM_BUFFERED
);
10294 tg3_get_eeprom_size(tp
);
10298 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
10299 u32 offset
, u32
*val
)
10304 if (offset
> EEPROM_ADDR_ADDR_MASK
||
10308 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
10309 EEPROM_ADDR_DEVID_MASK
|
10311 tw32(GRC_EEPROM_ADDR
,
10313 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
10314 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
10315 EEPROM_ADDR_ADDR_MASK
) |
10316 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
10318 for (i
= 0; i
< 1000; i
++) {
10319 tmp
= tr32(GRC_EEPROM_ADDR
);
10321 if (tmp
& EEPROM_ADDR_COMPLETE
)
10325 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
10328 *val
= tr32(GRC_EEPROM_DATA
);
10332 #define NVRAM_CMD_TIMEOUT 10000
10334 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
10338 tw32(NVRAM_CMD
, nvram_cmd
);
10339 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
10341 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
10346 if (i
== NVRAM_CMD_TIMEOUT
) {
10352 static u32
tg3_nvram_phys_addr(struct tg3
*tp
, u32 addr
)
10354 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM
) &&
10355 (tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
10356 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
10357 !(tp
->tg3_flags3
& TG3_FLG3_NO_NVRAM_ADDR_TRANS
) &&
10358 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
10360 addr
= ((addr
/ tp
->nvram_pagesize
) <<
10361 ATMEL_AT45DB0X1B_PAGE_POS
) +
10362 (addr
% tp
->nvram_pagesize
);
10367 static u32
tg3_nvram_logical_addr(struct tg3
*tp
, u32 addr
)
10369 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM
) &&
10370 (tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
10371 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
10372 !(tp
->tg3_flags3
& TG3_FLG3_NO_NVRAM_ADDR_TRANS
) &&
10373 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
10375 addr
= ((addr
>> ATMEL_AT45DB0X1B_PAGE_POS
) *
10376 tp
->nvram_pagesize
) +
10377 (addr
& ((1 << ATMEL_AT45DB0X1B_PAGE_POS
) - 1));
10382 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
10386 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
))
10387 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
10389 offset
= tg3_nvram_phys_addr(tp
, offset
);
10391 if (offset
> NVRAM_ADDR_MSK
)
10394 ret
= tg3_nvram_lock(tp
);
10398 tg3_enable_nvram_access(tp
);
10400 tw32(NVRAM_ADDR
, offset
);
10401 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
10402 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
10405 *val
= swab32(tr32(NVRAM_RDDATA
));
10407 tg3_disable_nvram_access(tp
);
10409 tg3_nvram_unlock(tp
);
10414 static int tg3_nvram_read_le(struct tg3
*tp
, u32 offset
, __le32
*val
)
10417 int res
= tg3_nvram_read(tp
, offset
, &v
);
10419 *val
= cpu_to_le32(v
);
10423 static int tg3_nvram_read_swab(struct tg3
*tp
, u32 offset
, u32
*val
)
10428 err
= tg3_nvram_read(tp
, offset
, &tmp
);
10429 *val
= swab32(tmp
);
10433 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
10434 u32 offset
, u32 len
, u8
*buf
)
10439 for (i
= 0; i
< len
; i
+= 4) {
10445 memcpy(&data
, buf
+ i
, 4);
10447 tw32(GRC_EEPROM_DATA
, le32_to_cpu(data
));
10449 val
= tr32(GRC_EEPROM_ADDR
);
10450 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
10452 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
10454 tw32(GRC_EEPROM_ADDR
, val
|
10455 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
10456 (addr
& EEPROM_ADDR_ADDR_MASK
) |
10457 EEPROM_ADDR_START
|
10458 EEPROM_ADDR_WRITE
);
10460 for (j
= 0; j
< 1000; j
++) {
10461 val
= tr32(GRC_EEPROM_ADDR
);
10463 if (val
& EEPROM_ADDR_COMPLETE
)
10467 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
10476 /* offset and length are dword aligned */
10477 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
10481 u32 pagesize
= tp
->nvram_pagesize
;
10482 u32 pagemask
= pagesize
- 1;
10486 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
10492 u32 phy_addr
, page_off
, size
;
10494 phy_addr
= offset
& ~pagemask
;
10496 for (j
= 0; j
< pagesize
; j
+= 4) {
10497 if ((ret
= tg3_nvram_read_le(tp
, phy_addr
+ j
,
10498 (__le32
*) (tmp
+ j
))))
10504 page_off
= offset
& pagemask
;
10511 memcpy(tmp
+ page_off
, buf
, size
);
10513 offset
= offset
+ (pagesize
- page_off
);
10515 tg3_enable_nvram_access(tp
);
10518 * Before we can erase the flash page, we need
10519 * to issue a special "write enable" command.
10521 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
10523 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
10526 /* Erase the target page */
10527 tw32(NVRAM_ADDR
, phy_addr
);
10529 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
10530 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
10532 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
10535 /* Issue another write enable to start the write. */
10536 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
10538 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
10541 for (j
= 0; j
< pagesize
; j
+= 4) {
10544 data
= *((__be32
*) (tmp
+ j
));
10545 /* swab32(le32_to_cpu(data)), actually */
10546 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
10548 tw32(NVRAM_ADDR
, phy_addr
+ j
);
10550 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
10554 nvram_cmd
|= NVRAM_CMD_FIRST
;
10555 else if (j
== (pagesize
- 4))
10556 nvram_cmd
|= NVRAM_CMD_LAST
;
10558 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
10565 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
10566 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
10573 /* offset and length are dword aligned */
10574 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
10579 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
10580 u32 page_off
, phy_addr
, nvram_cmd
;
10583 memcpy(&data
, buf
+ i
, 4);
10584 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
10586 page_off
= offset
% tp
->nvram_pagesize
;
10588 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
10590 tw32(NVRAM_ADDR
, phy_addr
);
10592 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
10594 if ((page_off
== 0) || (i
== 0))
10595 nvram_cmd
|= NVRAM_CMD_FIRST
;
10596 if (page_off
== (tp
->nvram_pagesize
- 4))
10597 nvram_cmd
|= NVRAM_CMD_LAST
;
10599 if (i
== (len
- 4))
10600 nvram_cmd
|= NVRAM_CMD_LAST
;
10602 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
) &&
10603 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5755
) &&
10604 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5787
) &&
10605 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
) &&
10606 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) &&
10607 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
10608 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
10610 if ((ret
= tg3_nvram_exec_cmd(tp
,
10611 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
10616 if (!(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
10617 /* We always do complete word writes to eeprom. */
10618 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
10621 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
10627 /* offset and length are dword aligned */
10628 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
10632 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
10633 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
10634 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
10638 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
)) {
10639 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
10644 ret
= tg3_nvram_lock(tp
);
10648 tg3_enable_nvram_access(tp
);
10649 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
10650 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
))
10651 tw32(NVRAM_WRITE1
, 0x406);
10653 grc_mode
= tr32(GRC_MODE
);
10654 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
10656 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) ||
10657 !(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
10659 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
10663 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
10667 grc_mode
= tr32(GRC_MODE
);
10668 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
10670 tg3_disable_nvram_access(tp
);
10671 tg3_nvram_unlock(tp
);
10674 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
10675 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
10682 struct subsys_tbl_ent
{
10683 u16 subsys_vendor
, subsys_devid
;
10687 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
10688 /* Broadcom boards. */
10689 { PCI_VENDOR_ID_BROADCOM
, 0x1644, PHY_ID_BCM5401
}, /* BCM95700A6 */
10690 { PCI_VENDOR_ID_BROADCOM
, 0x0001, PHY_ID_BCM5701
}, /* BCM95701A5 */
10691 { PCI_VENDOR_ID_BROADCOM
, 0x0002, PHY_ID_BCM8002
}, /* BCM95700T6 */
10692 { PCI_VENDOR_ID_BROADCOM
, 0x0003, 0 }, /* BCM95700A9 */
10693 { PCI_VENDOR_ID_BROADCOM
, 0x0005, PHY_ID_BCM5701
}, /* BCM95701T1 */
10694 { PCI_VENDOR_ID_BROADCOM
, 0x0006, PHY_ID_BCM5701
}, /* BCM95701T8 */
10695 { PCI_VENDOR_ID_BROADCOM
, 0x0007, 0 }, /* BCM95701A7 */
10696 { PCI_VENDOR_ID_BROADCOM
, 0x0008, PHY_ID_BCM5701
}, /* BCM95701A10 */
10697 { PCI_VENDOR_ID_BROADCOM
, 0x8008, PHY_ID_BCM5701
}, /* BCM95701A12 */
10698 { PCI_VENDOR_ID_BROADCOM
, 0x0009, PHY_ID_BCM5703
}, /* BCM95703Ax1 */
10699 { PCI_VENDOR_ID_BROADCOM
, 0x8009, PHY_ID_BCM5703
}, /* BCM95703Ax2 */
10702 { PCI_VENDOR_ID_3COM
, 0x1000, PHY_ID_BCM5401
}, /* 3C996T */
10703 { PCI_VENDOR_ID_3COM
, 0x1006, PHY_ID_BCM5701
}, /* 3C996BT */
10704 { PCI_VENDOR_ID_3COM
, 0x1004, 0 }, /* 3C996SX */
10705 { PCI_VENDOR_ID_3COM
, 0x1007, PHY_ID_BCM5701
}, /* 3C1000T */
10706 { PCI_VENDOR_ID_3COM
, 0x1008, PHY_ID_BCM5701
}, /* 3C940BR01 */
10709 { PCI_VENDOR_ID_DELL
, 0x00d1, PHY_ID_BCM5401
}, /* VIPER */
10710 { PCI_VENDOR_ID_DELL
, 0x0106, PHY_ID_BCM5401
}, /* JAGUAR */
10711 { PCI_VENDOR_ID_DELL
, 0x0109, PHY_ID_BCM5411
}, /* MERLOT */
10712 { PCI_VENDOR_ID_DELL
, 0x010a, PHY_ID_BCM5411
}, /* SLIM_MERLOT */
10714 /* Compaq boards. */
10715 { PCI_VENDOR_ID_COMPAQ
, 0x007c, PHY_ID_BCM5701
}, /* BANSHEE */
10716 { PCI_VENDOR_ID_COMPAQ
, 0x009a, PHY_ID_BCM5701
}, /* BANSHEE_2 */
10717 { PCI_VENDOR_ID_COMPAQ
, 0x007d, 0 }, /* CHANGELING */
10718 { PCI_VENDOR_ID_COMPAQ
, 0x0085, PHY_ID_BCM5701
}, /* NC7780 */
10719 { PCI_VENDOR_ID_COMPAQ
, 0x0099, PHY_ID_BCM5701
}, /* NC7780_2 */
10722 { PCI_VENDOR_ID_IBM
, 0x0281, 0 } /* IBM??? */
10725 static inline struct subsys_tbl_ent
*lookup_by_subsys(struct tg3
*tp
)
10729 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
10730 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
10731 tp
->pdev
->subsystem_vendor
) &&
10732 (subsys_id_to_phy_id
[i
].subsys_devid
==
10733 tp
->pdev
->subsystem_device
))
10734 return &subsys_id_to_phy_id
[i
];
10739 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
10744 /* On some early chips the SRAM cannot be accessed in D3hot state,
10745 * so need make sure we're in D0.
10747 pci_read_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
10748 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
10749 pci_write_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, pmcsr
);
10752 /* Make sure register accesses (indirect or otherwise)
10753 * will function correctly.
10755 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10756 tp
->misc_host_ctrl
);
10758 /* The memory arbiter has to be enabled in order for SRAM accesses
10759 * to succeed. Normally on powerup the tg3 chip firmware will make
10760 * sure it is enabled, but other entities such as system netboot
10761 * code might disable it.
10763 val
= tr32(MEMARB_MODE
);
10764 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
10766 tp
->phy_id
= PHY_ID_INVALID
;
10767 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
10769 /* Assume an onboard device and WOL capable by default. */
10770 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
| TG3_FLAG_WOL_CAP
;
10772 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
10773 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
10774 tp
->tg3_flags
&= ~TG3_FLAG_EEPROM_WRITE_PROT
;
10775 tp
->tg3_flags2
|= TG3_FLG2_IS_NIC
;
10777 val
= tr32(VCPU_CFGSHDW
);
10778 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
10779 tp
->tg3_flags
|= TG3_FLAG_ASPM_WORKAROUND
;
10780 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
10781 (val
& VCPU_CFGSHDW_WOL_MAGPKT
))
10782 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
10786 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
10787 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
10788 u32 nic_cfg
, led_cfg
;
10789 u32 nic_phy_id
, ver
, cfg2
= 0, eeprom_phy_id
;
10790 int eeprom_phy_serdes
= 0;
10792 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
10793 tp
->nic_sram_data_cfg
= nic_cfg
;
10795 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
10796 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
10797 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
10798 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
10799 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
) &&
10800 (ver
> 0) && (ver
< 0x100))
10801 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
10803 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
10804 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
10805 eeprom_phy_serdes
= 1;
10807 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
10808 if (nic_phy_id
!= 0) {
10809 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
10810 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
10812 eeprom_phy_id
= (id1
>> 16) << 10;
10813 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
10814 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
10818 tp
->phy_id
= eeprom_phy_id
;
10819 if (eeprom_phy_serdes
) {
10820 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
10821 tp
->tg3_flags2
|= TG3_FLG2_MII_SERDES
;
10823 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
10826 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
10827 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
10828 SHASTA_EXT_LED_MODE_MASK
);
10830 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
10834 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
10835 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
10838 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
10839 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
10842 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
10843 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
10845 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
10846 * read on some older 5700/5701 bootcode.
10848 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
10850 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
10852 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
10856 case SHASTA_EXT_LED_SHARED
:
10857 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
10858 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
10859 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
10860 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
10861 LED_CTRL_MODE_PHY_2
);
10864 case SHASTA_EXT_LED_MAC
:
10865 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
10868 case SHASTA_EXT_LED_COMBO
:
10869 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
10870 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
10871 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
10872 LED_CTRL_MODE_PHY_2
);
10877 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10878 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
10879 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
10880 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
10882 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
)
10883 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
10885 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
10886 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
10887 if ((tp
->pdev
->subsystem_vendor
==
10888 PCI_VENDOR_ID_ARIMA
) &&
10889 (tp
->pdev
->subsystem_device
== 0x205a ||
10890 tp
->pdev
->subsystem_device
== 0x2063))
10891 tp
->tg3_flags
&= ~TG3_FLAG_EEPROM_WRITE_PROT
;
10893 tp
->tg3_flags
&= ~TG3_FLAG_EEPROM_WRITE_PROT
;
10894 tp
->tg3_flags2
|= TG3_FLG2_IS_NIC
;
10897 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
10898 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
10899 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
10900 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
10902 if (nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
)
10903 tp
->tg3_flags3
|= TG3_FLG3_ENABLE_APE
;
10904 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
&&
10905 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
10906 tp
->tg3_flags
&= ~TG3_FLAG_WOL_CAP
;
10908 if (tp
->tg3_flags
& TG3_FLAG_WOL_CAP
&&
10909 nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)
10910 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
10912 if (cfg2
& (1 << 17))
10913 tp
->tg3_flags2
|= TG3_FLG2_CAPACITIVE_COUPLING
;
10915 /* serdes signal pre-emphasis in register 0x590 set by */
10916 /* bootcode if bit 18 is set */
10917 if (cfg2
& (1 << 18))
10918 tp
->tg3_flags2
|= TG3_FLG2_SERDES_PREEMPHASIS
;
10920 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10923 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
10924 if (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
)
10925 tp
->tg3_flags
|= TG3_FLAG_ASPM_WORKAROUND
;
10930 static int __devinit
tg3_issue_otp_command(struct tg3
*tp
, u32 cmd
)
10935 tw32(OTP_CTRL
, cmd
| OTP_CTRL_OTP_CMD_START
);
10936 tw32(OTP_CTRL
, cmd
);
10938 /* Wait for up to 1 ms for command to execute. */
10939 for (i
= 0; i
< 100; i
++) {
10940 val
= tr32(OTP_STATUS
);
10941 if (val
& OTP_STATUS_CMD_DONE
)
10946 return (val
& OTP_STATUS_CMD_DONE
) ? 0 : -EBUSY
;
10949 /* Read the gphy configuration from the OTP region of the chip. The gphy
10950 * configuration is a 32-bit value that straddles the alignment boundary.
10951 * We do two 32-bit reads and then shift and merge the results.
10953 static u32 __devinit
tg3_read_otp_phycfg(struct tg3
*tp
)
10955 u32 bhalf_otp
, thalf_otp
;
10957 tw32(OTP_MODE
, OTP_MODE_OTP_THRU_GRC
);
10959 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_INIT
))
10962 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC1
);
10964 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
10967 thalf_otp
= tr32(OTP_READ_DATA
);
10969 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC2
);
10971 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
10974 bhalf_otp
= tr32(OTP_READ_DATA
);
10976 return ((thalf_otp
& 0x0000ffff) << 16) | (bhalf_otp
>> 16);
10979 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
10981 u32 hw_phy_id_1
, hw_phy_id_2
;
10982 u32 hw_phy_id
, hw_phy_id_masked
;
10985 /* Reading the PHY ID register can conflict with ASF
10986 * firwmare access to the PHY hardware.
10989 if ((tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) ||
10990 (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
)) {
10991 hw_phy_id
= hw_phy_id_masked
= PHY_ID_INVALID
;
10993 /* Now read the physical PHY_ID from the chip and verify
10994 * that it is sane. If it doesn't look good, we fall back
10995 * to either the hard-coded table based PHY_ID and failing
10996 * that the value found in the eeprom area.
10998 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
10999 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
11001 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
11002 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
11003 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
11005 hw_phy_id_masked
= hw_phy_id
& PHY_ID_MASK
;
11008 if (!err
&& KNOWN_PHY_ID(hw_phy_id_masked
)) {
11009 tp
->phy_id
= hw_phy_id
;
11010 if (hw_phy_id_masked
== PHY_ID_BCM8002
)
11011 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
11013 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_SERDES
;
11015 if (tp
->phy_id
!= PHY_ID_INVALID
) {
11016 /* Do nothing, phy ID already set up in
11017 * tg3_get_eeprom_hw_cfg().
11020 struct subsys_tbl_ent
*p
;
11022 /* No eeprom signature? Try the hardcoded
11023 * subsys device table.
11025 p
= lookup_by_subsys(tp
);
11029 tp
->phy_id
= p
->phy_id
;
11031 tp
->phy_id
== PHY_ID_BCM8002
)
11032 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
11036 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) &&
11037 !(tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
) &&
11038 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
11039 u32 bmsr
, adv_reg
, tg3_ctrl
, mask
;
11041 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
11042 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
11043 (bmsr
& BMSR_LSTATUS
))
11044 goto skip_phy_reset
;
11046 err
= tg3_phy_reset(tp
);
11050 adv_reg
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
11051 ADVERTISE_100HALF
| ADVERTISE_100FULL
|
11052 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
11054 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
11055 tg3_ctrl
= (MII_TG3_CTRL_ADV_1000_HALF
|
11056 MII_TG3_CTRL_ADV_1000_FULL
);
11057 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
11058 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
11059 tg3_ctrl
|= (MII_TG3_CTRL_AS_MASTER
|
11060 MII_TG3_CTRL_ENABLE_AS_MASTER
);
11063 mask
= (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
11064 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
11065 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
);
11066 if (!tg3_copper_is_advertising_all(tp
, mask
)) {
11067 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
11069 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
11070 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
11072 tg3_writephy(tp
, MII_BMCR
,
11073 BMCR_ANENABLE
| BMCR_ANRESTART
);
11075 tg3_phy_set_wirespeed(tp
);
11077 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
11078 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
11079 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
11083 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
11084 err
= tg3_init_5401phy_dsp(tp
);
11089 if (!err
&& ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)) {
11090 err
= tg3_init_5401phy_dsp(tp
);
11093 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
11094 tp
->link_config
.advertising
=
11095 (ADVERTISED_1000baseT_Half
|
11096 ADVERTISED_1000baseT_Full
|
11097 ADVERTISED_Autoneg
|
11099 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
11100 tp
->link_config
.advertising
&=
11101 ~(ADVERTISED_1000baseT_Half
|
11102 ADVERTISED_1000baseT_Full
);
11107 static void __devinit
tg3_read_partno(struct tg3
*tp
)
11109 unsigned char vpd_data
[256];
11113 if (tg3_nvram_read_swab(tp
, 0x0, &magic
))
11114 goto out_not_found
;
11116 if (magic
== TG3_EEPROM_MAGIC
) {
11117 for (i
= 0; i
< 256; i
+= 4) {
11120 if (tg3_nvram_read(tp
, 0x100 + i
, &tmp
))
11121 goto out_not_found
;
11123 vpd_data
[i
+ 0] = ((tmp
>> 0) & 0xff);
11124 vpd_data
[i
+ 1] = ((tmp
>> 8) & 0xff);
11125 vpd_data
[i
+ 2] = ((tmp
>> 16) & 0xff);
11126 vpd_data
[i
+ 3] = ((tmp
>> 24) & 0xff);
11131 vpd_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_VPD
);
11132 for (i
= 0; i
< 256; i
+= 4) {
11137 pci_write_config_word(tp
->pdev
, vpd_cap
+ PCI_VPD_ADDR
,
11139 while (j
++ < 100) {
11140 pci_read_config_word(tp
->pdev
, vpd_cap
+
11141 PCI_VPD_ADDR
, &tmp16
);
11142 if (tmp16
& 0x8000)
11146 if (!(tmp16
& 0x8000))
11147 goto out_not_found
;
11149 pci_read_config_dword(tp
->pdev
, vpd_cap
+ PCI_VPD_DATA
,
11151 v
= cpu_to_le32(tmp
);
11152 memcpy(&vpd_data
[i
], &v
, 4);
11156 /* Now parse and find the part number. */
11157 for (i
= 0; i
< 254; ) {
11158 unsigned char val
= vpd_data
[i
];
11159 unsigned int block_end
;
11161 if (val
== 0x82 || val
== 0x91) {
11164 (vpd_data
[i
+ 2] << 8)));
11169 goto out_not_found
;
11171 block_end
= (i
+ 3 +
11173 (vpd_data
[i
+ 2] << 8)));
11176 if (block_end
> 256)
11177 goto out_not_found
;
11179 while (i
< (block_end
- 2)) {
11180 if (vpd_data
[i
+ 0] == 'P' &&
11181 vpd_data
[i
+ 1] == 'N') {
11182 int partno_len
= vpd_data
[i
+ 2];
11185 if (partno_len
> 24 || (partno_len
+ i
) > 256)
11186 goto out_not_found
;
11188 memcpy(tp
->board_part_number
,
11189 &vpd_data
[i
], partno_len
);
11194 i
+= 3 + vpd_data
[i
+ 2];
11197 /* Part number not found. */
11198 goto out_not_found
;
11202 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
11203 strcpy(tp
->board_part_number
, "BCM95906");
11205 strcpy(tp
->board_part_number
, "none");
11208 static int __devinit
tg3_fw_img_is_valid(struct tg3
*tp
, u32 offset
)
11212 if (tg3_nvram_read_swab(tp
, offset
, &val
) ||
11213 (val
& 0xfc000000) != 0x0c000000 ||
11214 tg3_nvram_read_swab(tp
, offset
+ 4, &val
) ||
11221 static void __devinit
tg3_read_fw_ver(struct tg3
*tp
)
11223 u32 val
, offset
, start
;
11227 if (tg3_nvram_read_swab(tp
, 0, &val
))
11230 if (val
!= TG3_EEPROM_MAGIC
)
11233 if (tg3_nvram_read_swab(tp
, 0xc, &offset
) ||
11234 tg3_nvram_read_swab(tp
, 0x4, &start
))
11237 offset
= tg3_nvram_logical_addr(tp
, offset
);
11239 if (!tg3_fw_img_is_valid(tp
, offset
) ||
11240 tg3_nvram_read_swab(tp
, offset
+ 8, &ver_offset
))
11243 offset
= offset
+ ver_offset
- start
;
11244 for (i
= 0; i
< 16; i
+= 4) {
11246 if (tg3_nvram_read_le(tp
, offset
+ i
, &v
))
11249 memcpy(tp
->fw_ver
+ i
, &v
, 4);
11252 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) ||
11253 (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
))
11256 for (offset
= TG3_NVM_DIR_START
;
11257 offset
< TG3_NVM_DIR_END
;
11258 offset
+= TG3_NVM_DIRENT_SIZE
) {
11259 if (tg3_nvram_read_swab(tp
, offset
, &val
))
11262 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) == TG3_NVM_DIRTYPE_ASFINI
)
11266 if (offset
== TG3_NVM_DIR_END
)
11269 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
11270 start
= 0x08000000;
11271 else if (tg3_nvram_read_swab(tp
, offset
- 4, &start
))
11274 if (tg3_nvram_read_swab(tp
, offset
+ 4, &offset
) ||
11275 !tg3_fw_img_is_valid(tp
, offset
) ||
11276 tg3_nvram_read_swab(tp
, offset
+ 8, &val
))
11279 offset
+= val
- start
;
11281 bcnt
= strlen(tp
->fw_ver
);
11283 tp
->fw_ver
[bcnt
++] = ',';
11284 tp
->fw_ver
[bcnt
++] = ' ';
11286 for (i
= 0; i
< 4; i
++) {
11288 if (tg3_nvram_read_le(tp
, offset
, &v
))
11291 offset
+= sizeof(v
);
11293 if (bcnt
> TG3_VER_SIZE
- sizeof(v
)) {
11294 memcpy(&tp
->fw_ver
[bcnt
], &v
, TG3_VER_SIZE
- bcnt
);
11298 memcpy(&tp
->fw_ver
[bcnt
], &v
, sizeof(v
));
11302 tp
->fw_ver
[TG3_VER_SIZE
- 1] = 0;
11305 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*);
11307 static int __devinit
tg3_get_invariants(struct tg3
*tp
)
11309 static struct pci_device_id write_reorder_chipsets
[] = {
11310 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
11311 PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
11312 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
11313 PCI_DEVICE_ID_AMD_8131_BRIDGE
) },
11314 { PCI_DEVICE(PCI_VENDOR_ID_VIA
,
11315 PCI_DEVICE_ID_VIA_8385_0
) },
11319 u32 cacheline_sz_reg
;
11320 u32 pci_state_reg
, grc_misc_cfg
;
11325 /* Force memory write invalidate off. If we leave it on,
11326 * then on 5700_BX chips we have to enable a workaround.
11327 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11328 * to match the cacheline size. The Broadcom driver have this
11329 * workaround but turns MWI off all the times so never uses
11330 * it. This seems to suggest that the workaround is insufficient.
11332 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
11333 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
11334 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
11336 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11337 * has the register indirect write enable bit set before
11338 * we try to access any of the MMIO registers. It is also
11339 * critical that the PCI-X hw workaround situation is decided
11340 * before that as well.
11342 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
11345 tp
->pci_chip_rev_id
= (misc_ctrl_reg
>>
11346 MISC_HOST_CTRL_CHIPREV_SHIFT
);
11347 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_USE_PROD_ID_REG
) {
11348 u32 prod_id_asic_rev
;
11350 pci_read_config_dword(tp
->pdev
, TG3PCI_PRODID_ASICREV
,
11351 &prod_id_asic_rev
);
11352 tp
->pci_chip_rev_id
= prod_id_asic_rev
& PROD_ID_ASIC_REV_MASK
;
11355 /* Wrong chip ID in 5752 A0. This code can be removed later
11356 * as A0 is not in production.
11358 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
11359 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
11361 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11362 * we need to disable memory and use config. cycles
11363 * only to access all registers. The 5702/03 chips
11364 * can mistakenly decode the special cycles from the
11365 * ICH chipsets as memory write cycles, causing corruption
11366 * of register and memory space. Only certain ICH bridges
11367 * will drive special cycles with non-zero data during the
11368 * address phase which can fall within the 5703's address
11369 * range. This is not an ICH bug as the PCI spec allows
11370 * non-zero address during special cycles. However, only
11371 * these ICH bridges are known to drive non-zero addresses
11372 * during special cycles.
11374 * Since special cycles do not cross PCI bridges, we only
11375 * enable this workaround if the 5703 is on the secondary
11376 * bus of these ICH bridges.
11378 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
11379 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
11380 static struct tg3_dev_id
{
11384 } ich_chipsets
[] = {
11385 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
11387 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
11389 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
11391 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
11395 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
11396 struct pci_dev
*bridge
= NULL
;
11398 while (pci_id
->vendor
!= 0) {
11399 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
11405 if (pci_id
->rev
!= PCI_ANY_ID
) {
11406 if (bridge
->revision
> pci_id
->rev
)
11409 if (bridge
->subordinate
&&
11410 (bridge
->subordinate
->number
==
11411 tp
->pdev
->bus
->number
)) {
11413 tp
->tg3_flags2
|= TG3_FLG2_ICH_WORKAROUND
;
11414 pci_dev_put(bridge
);
11420 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
11421 static struct tg3_dev_id
{
11424 } bridge_chipsets
[] = {
11425 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
11426 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
11429 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
11430 struct pci_dev
*bridge
= NULL
;
11432 while (pci_id
->vendor
!= 0) {
11433 bridge
= pci_get_device(pci_id
->vendor
,
11440 if (bridge
->subordinate
&&
11441 (bridge
->subordinate
->number
<=
11442 tp
->pdev
->bus
->number
) &&
11443 (bridge
->subordinate
->subordinate
>=
11444 tp
->pdev
->bus
->number
)) {
11445 tp
->tg3_flags3
|= TG3_FLG3_5701_DMA_BUG
;
11446 pci_dev_put(bridge
);
11452 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11453 * DMA addresses > 40-bit. This bridge may have other additional
11454 * 57xx devices behind it in some 4-port NIC designs for example.
11455 * Any tg3 device found behind the bridge will also need the 40-bit
11458 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
11459 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
11460 tp
->tg3_flags2
|= TG3_FLG2_5780_CLASS
;
11461 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
11462 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
11465 struct pci_dev
*bridge
= NULL
;
11468 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
11469 PCI_DEVICE_ID_SERVERWORKS_EPB
,
11471 if (bridge
&& bridge
->subordinate
&&
11472 (bridge
->subordinate
->number
<=
11473 tp
->pdev
->bus
->number
) &&
11474 (bridge
->subordinate
->subordinate
>=
11475 tp
->pdev
->bus
->number
)) {
11476 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
11477 pci_dev_put(bridge
);
11483 /* Initialize misc host control in PCI block. */
11484 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
11485 MISC_HOST_CTRL_CHIPREV
);
11486 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
11487 tp
->misc_host_ctrl
);
11489 pci_read_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
11490 &cacheline_sz_reg
);
11492 tp
->pci_cacheline_sz
= (cacheline_sz_reg
>> 0) & 0xff;
11493 tp
->pci_lat_timer
= (cacheline_sz_reg
>> 8) & 0xff;
11494 tp
->pci_hdr_type
= (cacheline_sz_reg
>> 16) & 0xff;
11495 tp
->pci_bist
= (cacheline_sz_reg
>> 24) & 0xff;
11497 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
11498 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
))
11499 tp
->pdev_peer
= tg3_find_peer(tp
);
11501 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
11502 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
11503 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
11504 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
11505 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
11506 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
11507 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
||
11508 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
11509 tp
->tg3_flags2
|= TG3_FLG2_5750_PLUS
;
11511 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) ||
11512 (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
11513 tp
->tg3_flags2
|= TG3_FLG2_5705_PLUS
;
11515 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
11516 tp
->tg3_flags
|= TG3_FLAG_SUPPORT_MSI
;
11517 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
||
11518 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
||
11519 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
&&
11520 tp
->pci_chip_rev_id
<= CHIPREV_ID_5714_A2
&&
11521 tp
->pdev_peer
== tp
->pdev
))
11522 tp
->tg3_flags
&= ~TG3_FLAG_SUPPORT_MSI
;
11524 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
11525 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
11526 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
11527 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
11528 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
11529 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO_2
;
11530 tp
->tg3_flags2
|= TG3_FLG2_1SHOT_MSI
;
11532 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO_1
| TG3_FLG2_TSO_BUG
;
11533 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
11535 tp
->pci_chip_rev_id
>= CHIPREV_ID_5750_C2
)
11536 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_BUG
;
11540 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) ||
11541 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
11542 tp
->tg3_flags2
|= TG3_FLG2_JUMBO_CAPABLE
;
11544 pcie_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
);
11545 if (pcie_cap
!= 0) {
11546 tp
->tg3_flags2
|= TG3_FLG2_PCI_EXPRESS
;
11548 pcie_set_readrq(tp
->pdev
, 4096);
11550 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
11553 pci_read_config_word(tp
->pdev
,
11554 pcie_cap
+ PCI_EXP_LNKCTL
,
11556 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
)
11557 tp
->tg3_flags2
&= ~TG3_FLG2_HW_TSO_2
;
11561 /* If we have an AMD 762 or VIA K8T800 chipset, write
11562 * reordering to the mailbox registers done by the host
11563 * controller can cause major troubles. We read back from
11564 * every mailbox register write to force the writes to be
11565 * posted to the chip in order.
11567 if (pci_dev_present(write_reorder_chipsets
) &&
11568 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
11569 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
11571 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
11572 tp
->pci_lat_timer
< 64) {
11573 tp
->pci_lat_timer
= 64;
11575 cacheline_sz_reg
= ((tp
->pci_cacheline_sz
& 0xff) << 0);
11576 cacheline_sz_reg
|= ((tp
->pci_lat_timer
& 0xff) << 8);
11577 cacheline_sz_reg
|= ((tp
->pci_hdr_type
& 0xff) << 16);
11578 cacheline_sz_reg
|= ((tp
->pci_bist
& 0xff) << 24);
11580 pci_write_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
11584 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) ||
11585 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
11586 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
11587 if (!tp
->pcix_cap
) {
11588 printk(KERN_ERR PFX
"Cannot find PCI-X "
11589 "capability, aborting.\n");
11594 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
11597 if (tp
->pcix_cap
&& (pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0) {
11598 tp
->tg3_flags
|= TG3_FLAG_PCIX_MODE
;
11600 /* If this is a 5700 BX chipset, and we are in PCI-X
11601 * mode, enable register write workaround.
11603 * The workaround is to use indirect register accesses
11604 * for all chip writes not to mailbox registers.
11606 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
11609 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
11611 /* The chip can have it's power management PCI config
11612 * space registers clobbered due to this bug.
11613 * So explicitly force the chip into D0 here.
11615 pci_read_config_dword(tp
->pdev
,
11616 tp
->pm_cap
+ PCI_PM_CTRL
,
11618 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
11619 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
11620 pci_write_config_dword(tp
->pdev
,
11621 tp
->pm_cap
+ PCI_PM_CTRL
,
11624 /* Also, force SERR#/PERR# in PCI command. */
11625 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
11626 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
11627 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
11631 /* 5700 BX chips need to have their TX producer index mailboxes
11632 * written twice to workaround a bug.
11634 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
)
11635 tp
->tg3_flags
|= TG3_FLAG_TXD_MBOX_HWBUG
;
11637 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
11638 tp
->tg3_flags
|= TG3_FLAG_PCI_HIGH_SPEED
;
11639 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
11640 tp
->tg3_flags
|= TG3_FLAG_PCI_32BIT
;
11642 /* Chip-specific fixup from Broadcom driver */
11643 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
11644 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
11645 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
11646 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
11649 /* Default fast path register access methods */
11650 tp
->read32
= tg3_read32
;
11651 tp
->write32
= tg3_write32
;
11652 tp
->read32_mbox
= tg3_read32
;
11653 tp
->write32_mbox
= tg3_write32
;
11654 tp
->write32_tx_mbox
= tg3_write32
;
11655 tp
->write32_rx_mbox
= tg3_write32
;
11657 /* Various workaround register access methods */
11658 if (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
)
11659 tp
->write32
= tg3_write_indirect_reg32
;
11660 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
11661 ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
11662 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
)) {
11664 * Back to back register writes can cause problems on these
11665 * chips, the workaround is to read back all reg writes
11666 * except those to mailbox regs.
11668 * See tg3_write_indirect_reg32().
11670 tp
->write32
= tg3_write_flush_reg32
;
11674 if ((tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
) ||
11675 (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)) {
11676 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
11677 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
11678 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
11681 if (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
) {
11682 tp
->read32
= tg3_read_indirect_reg32
;
11683 tp
->write32
= tg3_write_indirect_reg32
;
11684 tp
->read32_mbox
= tg3_read_indirect_mbox
;
11685 tp
->write32_mbox
= tg3_write_indirect_mbox
;
11686 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
11687 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
11692 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
11693 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
11694 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
11696 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
11697 tp
->read32_mbox
= tg3_read32_mbox_5906
;
11698 tp
->write32_mbox
= tg3_write32_mbox_5906
;
11699 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
11700 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
11703 if (tp
->write32
== tg3_write_indirect_reg32
||
11704 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
11705 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
11706 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)))
11707 tp
->tg3_flags
|= TG3_FLAG_SRAM_USE_CONFIG
;
11709 /* Get eeprom hw config before calling tg3_set_power_state().
11710 * In particular, the TG3_FLG2_IS_NIC flag must be
11711 * determined before calling tg3_set_power_state() so that
11712 * we know whether or not to switch out of Vaux power.
11713 * When the flag is set, it means that GPIO1 is used for eeprom
11714 * write protect and also implies that it is a LOM where GPIOs
11715 * are not used to switch power.
11717 tg3_get_eeprom_hw_cfg(tp
);
11719 if (tp
->tg3_flags3
& TG3_FLG3_ENABLE_APE
) {
11720 /* Allow reads and writes to the
11721 * APE register and memory space.
11723 pci_state_reg
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
11724 PCISTATE_ALLOW_APE_SHMEM_WR
;
11725 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
11729 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
11730 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
11731 tp
->tg3_flags
|= TG3_FLAG_CPMU_PRESENT
;
11733 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5784_A0
||
11734 tp
->pci_chip_rev_id
== CHIPREV_ID_5784_A1
||
11735 tp
->pci_chip_rev_id
== CHIPREV_ID_5761_A0
||
11736 tp
->pci_chip_rev_id
== CHIPREV_ID_5761_A1
)
11737 tp
->tg3_flags3
|= TG3_FLG3_5761_5784_AX_FIXES
;
11740 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11741 * GPIO1 driven high will bring 5700's external PHY out of reset.
11742 * It is also used as eeprom write protect on LOMs.
11744 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
11745 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
11746 (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
11747 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
11748 GRC_LCLCTRL_GPIO_OUTPUT1
);
11749 /* Unused GPIO3 must be driven as output on 5752 because there
11750 * are no pull-up resistors on unused GPIO pins.
11752 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
11753 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
11755 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
11756 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
11758 /* Force the chip into D0. */
11759 err
= tg3_set_power_state(tp
, PCI_D0
);
11761 printk(KERN_ERR PFX
"(%s) transition to D0 failed\n",
11762 pci_name(tp
->pdev
));
11766 /* 5700 B0 chips do not support checksumming correctly due
11767 * to hardware bugs.
11769 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5700_B0
)
11770 tp
->tg3_flags
|= TG3_FLAG_BROKEN_CHECKSUMS
;
11772 /* Derive initial jumbo mode from MTU assigned in
11773 * ether_setup() via the alloc_etherdev() call
11775 if (tp
->dev
->mtu
> ETH_DATA_LEN
&&
11776 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
11777 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
11779 /* Determine WakeOnLan speed to use. */
11780 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
11781 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
11782 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
11783 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
11784 tp
->tg3_flags
&= ~(TG3_FLAG_WOL_SPEED_100MB
);
11786 tp
->tg3_flags
|= TG3_FLAG_WOL_SPEED_100MB
;
11789 /* A few boards don't want Ethernet@WireSpeed phy feature */
11790 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
11791 ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
11792 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
11793 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
11794 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) ||
11795 (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
11796 tp
->tg3_flags2
|= TG3_FLG2_NO_ETH_WIRE_SPEED
;
11798 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
11799 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
11800 tp
->tg3_flags2
|= TG3_FLG2_PHY_ADC_BUG
;
11801 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
11802 tp
->tg3_flags2
|= TG3_FLG2_PHY_5704_A0_BUG
;
11804 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
11805 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
11806 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
11807 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
11808 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
11809 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
11810 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
11811 tp
->tg3_flags2
|= TG3_FLG2_PHY_JITTER_BUG
;
11812 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
11813 tp
->tg3_flags2
|= TG3_FLG2_PHY_ADJUST_TRIM
;
11814 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
)
11815 tp
->tg3_flags2
|= TG3_FLG2_PHY_BER_BUG
;
11818 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
11819 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
11820 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
11821 if (tp
->phy_otp
== 0)
11822 tp
->phy_otp
= TG3_OTP_DEFAULT
;
11825 if (tp
->tg3_flags
& TG3_FLAG_CPMU_PRESENT
)
11826 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
11828 tp
->mi_mode
= MAC_MI_MODE_BASE
;
11830 tp
->coalesce_mode
= 0;
11831 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
11832 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
11833 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
11835 /* Initialize MAC MI mode, polling disabled. */
11836 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
11839 /* Initialize data/descriptor byte/word swapping. */
11840 val
= tr32(GRC_MODE
);
11841 val
&= GRC_MODE_HOST_STACKUP
;
11842 tw32(GRC_MODE
, val
| tp
->grc_mode
);
11844 tg3_switch_clocks(tp
);
11846 /* Clear this out for sanity. */
11847 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
11849 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
11851 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
11852 (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) == 0) {
11853 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
11855 if (chiprevid
== CHIPREV_ID_5701_A0
||
11856 chiprevid
== CHIPREV_ID_5701_B0
||
11857 chiprevid
== CHIPREV_ID_5701_B2
||
11858 chiprevid
== CHIPREV_ID_5701_B5
) {
11859 void __iomem
*sram_base
;
11861 /* Write some dummy words into the SRAM status block
11862 * area, see if it reads back correctly. If the return
11863 * value is bad, force enable the PCIX workaround.
11865 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
11867 writel(0x00000000, sram_base
);
11868 writel(0x00000000, sram_base
+ 4);
11869 writel(0xffffffff, sram_base
+ 4);
11870 if (readl(sram_base
) != 0x00000000)
11871 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
11876 tg3_nvram_init(tp
);
11878 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
11879 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
11881 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
11882 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
11883 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
11884 tp
->tg3_flags2
|= TG3_FLG2_IS_5788
;
11886 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
11887 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
))
11888 tp
->tg3_flags
|= TG3_FLAG_TAGGED_STATUS
;
11889 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
11890 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
11891 HOSTCC_MODE_CLRTICK_TXBD
);
11893 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
11894 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
11895 tp
->misc_host_ctrl
);
11898 /* these are limited to 10/100 only */
11899 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
11900 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
11901 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
11902 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
11903 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
11904 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
11905 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
11906 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
11907 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
11908 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
||
11909 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5787F
)) ||
11910 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
11911 tp
->tg3_flags
|= TG3_FLAG_10_100_ONLY
;
11913 err
= tg3_phy_probe(tp
);
11915 printk(KERN_ERR PFX
"(%s) phy probe failed, err %d\n",
11916 pci_name(tp
->pdev
), err
);
11917 /* ... but do not return immediately ... */
11920 tg3_read_partno(tp
);
11921 tg3_read_fw_ver(tp
);
11923 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
11924 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
11926 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
11927 tp
->tg3_flags
|= TG3_FLAG_USE_MI_INTERRUPT
;
11929 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
11932 /* 5700 {AX,BX} chips have a broken status block link
11933 * change bit implementation, so we must use the
11934 * status register in those cases.
11936 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
11937 tp
->tg3_flags
|= TG3_FLAG_USE_LINKCHG_REG
;
11939 tp
->tg3_flags
&= ~TG3_FLAG_USE_LINKCHG_REG
;
11941 /* The led_ctrl is set during tg3_phy_probe, here we might
11942 * have to force the link status polling mechanism based
11943 * upon subsystem IDs.
11945 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
11946 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
11947 !(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
11948 tp
->tg3_flags
|= (TG3_FLAG_USE_MI_INTERRUPT
|
11949 TG3_FLAG_USE_LINKCHG_REG
);
11952 /* For all SERDES we poll the MAC status register. */
11953 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
11954 tp
->tg3_flags
|= TG3_FLAG_POLL_SERDES
;
11956 tp
->tg3_flags
&= ~TG3_FLAG_POLL_SERDES
;
11958 /* All chips before 5787 can get confused if TX buffers
11959 * straddle the 4GB address boundary in some cases.
11961 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
11962 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
11963 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
11964 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
11965 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
11966 tp
->dev
->hard_start_xmit
= tg3_start_xmit
;
11968 tp
->dev
->hard_start_xmit
= tg3_start_xmit_dma_bug
;
11971 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
11972 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0)
11975 tp
->rx_std_max_post
= TG3_RX_RING_SIZE
;
11977 /* Increment the rx prod index on the rx std ring by at most
11978 * 8 for these chips to workaround hw errata.
11980 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
11981 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
11982 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
11983 tp
->rx_std_max_post
= 8;
11985 if (tp
->tg3_flags
& TG3_FLAG_ASPM_WORKAROUND
)
11986 tp
->pwrmgmt_thresh
= tr32(PCIE_PWR_MGMT_THRESH
) &
11987 PCIE_PWR_MGMT_L1_THRESH_MSK
;
11992 #ifdef CONFIG_SPARC
11993 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
11995 struct net_device
*dev
= tp
->dev
;
11996 struct pci_dev
*pdev
= tp
->pdev
;
11997 struct device_node
*dp
= pci_device_to_OF_node(pdev
);
11998 const unsigned char *addr
;
12001 addr
= of_get_property(dp
, "local-mac-address", &len
);
12002 if (addr
&& len
== 6) {
12003 memcpy(dev
->dev_addr
, addr
, 6);
12004 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
12010 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
12012 struct net_device
*dev
= tp
->dev
;
12014 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
12015 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
12020 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
12022 struct net_device
*dev
= tp
->dev
;
12023 u32 hi
, lo
, mac_offset
;
12026 #ifdef CONFIG_SPARC
12027 if (!tg3_get_macaddr_sparc(tp
))
12032 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
12033 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
12034 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
12036 if (tg3_nvram_lock(tp
))
12037 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
12039 tg3_nvram_unlock(tp
);
12041 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
12044 /* First try to get it from MAC address mailbox. */
12045 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
12046 if ((hi
>> 16) == 0x484b) {
12047 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
12048 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
12050 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
12051 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
12052 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
12053 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
12054 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
12056 /* Some old bootcode may report a 0 MAC address in SRAM */
12057 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
12060 /* Next, try NVRAM. */
12061 if (!tg3_nvram_read(tp
, mac_offset
+ 0, &hi
) &&
12062 !tg3_nvram_read(tp
, mac_offset
+ 4, &lo
)) {
12063 dev
->dev_addr
[0] = ((hi
>> 16) & 0xff);
12064 dev
->dev_addr
[1] = ((hi
>> 24) & 0xff);
12065 dev
->dev_addr
[2] = ((lo
>> 0) & 0xff);
12066 dev
->dev_addr
[3] = ((lo
>> 8) & 0xff);
12067 dev
->dev_addr
[4] = ((lo
>> 16) & 0xff);
12068 dev
->dev_addr
[5] = ((lo
>> 24) & 0xff);
12070 /* Finally just fetch it out of the MAC control regs. */
12072 hi
= tr32(MAC_ADDR_0_HIGH
);
12073 lo
= tr32(MAC_ADDR_0_LOW
);
12075 dev
->dev_addr
[5] = lo
& 0xff;
12076 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
12077 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
12078 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
12079 dev
->dev_addr
[1] = hi
& 0xff;
12080 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
12084 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
12085 #ifdef CONFIG_SPARC
12086 if (!tg3_get_default_macaddr_sparc(tp
))
12091 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
12095 #define BOUNDARY_SINGLE_CACHELINE 1
12096 #define BOUNDARY_MULTI_CACHELINE 2
12098 static u32 __devinit
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
12100 int cacheline_size
;
12104 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
12106 cacheline_size
= 1024;
12108 cacheline_size
= (int) byte
* 4;
12110 /* On 5703 and later chips, the boundary bits have no
12113 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
12114 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
12115 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
12118 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12119 goal
= BOUNDARY_MULTI_CACHELINE
;
12121 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12122 goal
= BOUNDARY_SINGLE_CACHELINE
;
12131 /* PCI controllers on most RISC systems tend to disconnect
12132 * when a device tries to burst across a cache-line boundary.
12133 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12135 * Unfortunately, for PCI-E there are only limited
12136 * write-side controls for this, and thus for reads
12137 * we will still get the disconnects. We'll also waste
12138 * these PCI cycles for both read and write for chips
12139 * other than 5700 and 5701 which do not implement the
12142 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
12143 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
12144 switch (cacheline_size
) {
12149 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
12150 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
12151 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
12153 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
12154 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
12159 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
12160 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
12164 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
12165 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
12168 } else if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
12169 switch (cacheline_size
) {
12173 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
12174 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
12175 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
12181 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
12182 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
12186 switch (cacheline_size
) {
12188 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
12189 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
12190 DMA_RWCTRL_WRITE_BNDRY_16
);
12195 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
12196 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
12197 DMA_RWCTRL_WRITE_BNDRY_32
);
12202 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
12203 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
12204 DMA_RWCTRL_WRITE_BNDRY_64
);
12209 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
12210 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
12211 DMA_RWCTRL_WRITE_BNDRY_128
);
12216 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
12217 DMA_RWCTRL_WRITE_BNDRY_256
);
12220 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
12221 DMA_RWCTRL_WRITE_BNDRY_512
);
12225 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
12226 DMA_RWCTRL_WRITE_BNDRY_1024
);
/* Run one DMA transaction between host memory and NIC SRAM using a
 * hand-built internal buffer descriptor, to probe the DMA engine.
 *
 * @tp:        device instance
 * @buf:       host-side test buffer (coherent DMA memory)
 * @buf_dma:   bus address of @buf
 * @size:      number of bytes to move
 * @to_device: nonzero = host->NIC (read DMA), zero = NIC->host (write DMA)
 *
 * Returns 0 when the completion FIFO reports the descriptor back within
 * the polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and both DMA engines before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the descriptor by hand: host buffer address, a fixed
	 * NIC-side mbuf location (0x2100), and the transfer length.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion queue 13 / send queue 2 for the read DMA engine. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Completion queue 16 / send queue 7 for the write DMA engine. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word-by-word through the
	 * indirect memory window in PCI config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick the appropriate DMA engine by enqueueing the descriptor. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll (up to 40 * 100us) for the descriptor to show up on the
	 * completion FIFO, which signals the transfer finished.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000

/* Compute and program the DMA read/write control register, then (on
 * 5700/5701 only) run a write/read DMA loop-back test through NIC SRAM
 * to detect the write-DMA boundary bug and fall back to a 16-byte
 * write boundary when corruption is observed.
 *
 * Returns 0 on success or a negative errno (e.g. -ENOMEM, -ENODEV).
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command codes for the DMA engines. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Bus-type specific watermark / workaround bits.  The magic
	 * constants below are chip-errata values from Broadcom.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				0x00800000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 exhibit the write-DMA bug; everyone else is done. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known ramp pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify the ramp survived the round trip; on the first
		 * corruption, retry once with the 16-byte write boundary
		 * workaround before declaring the device broken.
		 */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
12505 static void __devinit
tg3_init_link_config(struct tg3
*tp
)
12507 tp
->link_config
.advertising
=
12508 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
12509 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
12510 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
12511 ADVERTISED_Autoneg
| ADVERTISED_MII
);
12512 tp
->link_config
.speed
= SPEED_INVALID
;
12513 tp
->link_config
.duplex
= DUPLEX_INVALID
;
12514 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
12515 tp
->link_config
.active_speed
= SPEED_INVALID
;
12516 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
12517 tp
->link_config
.phy_is_low_power
= 0;
12518 tp
->link_config
.orig_speed
= SPEED_INVALID
;
12519 tp
->link_config
.orig_duplex
= DUPLEX_INVALID
;
12520 tp
->link_config
.orig_autoneg
= AUTONEG_INVALID
;
/* Program the buffer-manager watermark defaults.  5705-and-newer chips
 * use a smaller on-chip mbuf pool and therefore different thresholds
 * than the original 5700-class parts; the 5906 further reduces them.
 * The DEFAULT_MB_* values are chip-specific constants from tg3.h.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 has even less mbuf memory; override two thresholds. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are the same on all chips. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
12565 static char * __devinit
tg3_phy_string(struct tg3
*tp
)
12567 switch (tp
->phy_id
& PHY_ID_MASK
) {
12568 case PHY_ID_BCM5400
: return "5400";
12569 case PHY_ID_BCM5401
: return "5401";
12570 case PHY_ID_BCM5411
: return "5411";
12571 case PHY_ID_BCM5701
: return "5701";
12572 case PHY_ID_BCM5703
: return "5703";
12573 case PHY_ID_BCM5704
: return "5704";
12574 case PHY_ID_BCM5705
: return "5705";
12575 case PHY_ID_BCM5750
: return "5750";
12576 case PHY_ID_BCM5752
: return "5752";
12577 case PHY_ID_BCM5714
: return "5714";
12578 case PHY_ID_BCM5780
: return "5780";
12579 case PHY_ID_BCM5755
: return "5755";
12580 case PHY_ID_BCM5787
: return "5787";
12581 case PHY_ID_BCM5784
: return "5784";
12582 case PHY_ID_BCM5756
: return "5722/5756";
12583 case PHY_ID_BCM5906
: return "5906";
12584 case PHY_ID_BCM5761
: return "5761";
12585 case PHY_ID_BCM8002
: return "8002/serdes";
12586 case 0: return "serdes";
12587 default: return "unknown";
/* Format a description of the host bus (type, clock, width) into the
 * caller-supplied buffer @str and return it.  Used only for the probe
 * banner printk.  NOTE(review): @str must be large enough for the
 * longest combination (e.g. "PCIX:133MHz:64-bit") — the caller passes
 * a local char[40].
 */
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		/* Decode the PCI-X bus clock from the low clock-control bits. */
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	/* Bus width applies to conventional PCI and PCI-X alike. */
	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
/* Locate the other PCI function of a dual-port 5704-class device by
 * scanning the remaining functions in the same slot.  Returns the peer
 * pci_dev, or tp->pdev itself when the device is configured single-port.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference on ourselves / empty slots. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
12655 static void __devinit
tg3_init_coal(struct tg3
*tp
)
12657 struct ethtool_coalesce
*ec
= &tp
->coal
;
12659 memset(ec
, 0, sizeof(*ec
));
12660 ec
->cmd
= ETHTOOL_GCOALESCE
;
12661 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
12662 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
12663 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
12664 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
12665 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
12666 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
12667 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
12668 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
12669 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
12671 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
12672 HOSTCC_MODE_CLRTICK_TXBD
)) {
12673 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
12674 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
12675 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
12676 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
12679 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
12680 ec
->rx_coalesce_usecs_irq
= 0;
12681 ec
->tx_coalesce_usecs_irq
= 0;
12682 ec
->stats_block_coalesce_usecs
= 0;
/* PCI probe entry point: enable and map the device, discover chip
 * capabilities, configure DMA masks and offload features, run the DMA
 * self-test, and register the net_device.  Errors unwind through the
 * goto-cleanup chain at the bottom in reverse acquisition order.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	resource_size_t tg3reg_base;
	unsigned long tg3reg_len;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner once, on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	tg3reg_base = pci_resource_start(pdev, 0);
	tg3reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = tg3_vlan_rx_register;
#endif

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->mac_mode = TG3_DEF_MAC_MODE;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* The module parameter overrides the compiled-in default. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	/* Wire up the net_device operations (pre-net_device_ops era). */
	dev->open = tg3_open;
	dev->stop = tg3_close;
	dev->get_stats = tg3_get_stats;
	dev->set_multicast_list = tg3_set_rx_mode;
	dev->set_mac_address = tg3_set_mac_addr;
	dev->do_ioctl = tg3_ioctl;
	dev->tx_timeout = tg3_tx_timeout;
	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->change_mtu = tg3_change_mtu;
	dev->irq = pdev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = tg3_poll_controller;
#endif

	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_64BIT_MASK;
#endif
	} else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (dma_mask > DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	if (err || dma_mask == DMA_32BIT_MASK) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	/* Decide TSO capability: hardware TSO chips always have it;
	 * the oldest chips, 5705_A0, 5906 and ASF-enabled boards must
	 * not use firmware TSO; everyone else gets firmware TSO with
	 * the known-bug workaround flag.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		dev->features |= NETIF_F_TSO;
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_TSO_ECN;
	}

	/* 5705_A1 on a slow bus without TSO needs a smaller RX ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* Map the APE (Application Processing Engine) register BAR on
	 * chips that have one.
	 */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
			printk(KERN_ERR PFX "Cannot find proper PCI device "
			       "base address for APE, aborting.\n");
			err = -ENODEV;
			goto err_out_iounmap;
		}

		tg3reg_base = pci_resource_start(pdev, 2);
		tg3reg_len = pci_resource_len(pdev, 2);

		tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}

		tg3_ape_lock_init(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* Tigon3 can do ipv4 only... and some chips have buggy
	 * checksumming.
	 */
	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			dev->features |= NETIF_F_IPV6_CSUM;

		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	} else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
	       "(%s) %s Ethernet %s\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_phy_string(tp),
	       tg3_bus_string(tp, str),
	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
		 "10/100/1000Base-T")),
	       print_mac(mac, dev->dev_addr));

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/* PCI remove entry point: unregister the net_device and release all
 * resources acquired in tg3_init_one, in reverse order.
 */
static void __devexit tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		/* Make sure the deferred reset_task is not running. */
		flush_scheduled_work();
		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* Legacy PM suspend hook: quiesce NAPI/timer/interrupts, halt the chip
 * and drop into the requested low-power state.  If the power transition
 * fails, the device is restarted so the system stays usable.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip with the interface marked down. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: bring the hardware back up so
		 * the interface keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

	return err;
}
/* Legacy PM resume hook: restore PCI config state, return to D0, and
 * restart the hardware/timer/NAPI if the interface was running.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return err;
}
/* PCI driver glue: binds the probe/remove and power-management
 * callbacks above to the device IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
/* Module init: register the PCI driver; probe runs per matching device. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
/* Module exit: unregister the driver, detaching all bound devices. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);