2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/hwmon.h>
48 #include <linux/hwmon-sysfs.h>
50 #include <net/checksum.h>
54 #include <asm/byteorder.h>
55 #include <linux/uaccess.h>
57 #include <uapi/linux/net_tstamp.h>
58 #include <linux/ptp_clock_kernel.h>
61 #include <asm/idprom.h>
70 /* Functions & macros to verify TG3_FLAGS types */
72 static inline int _tg3_flag(enum TG3_FLAGS flag
, unsigned long *bits
)
74 return test_bit(flag
, bits
);
77 static inline void _tg3_flag_set(enum TG3_FLAGS flag
, unsigned long *bits
)
82 static inline void _tg3_flag_clear(enum TG3_FLAGS flag
, unsigned long *bits
)
84 clear_bit(flag
, bits
);
87 #define tg3_flag(tp, flag) \
88 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define tg3_flag_set(tp, flag) \
90 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
91 #define tg3_flag_clear(tp, flag) \
92 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define DRV_MODULE_NAME "tg3"
96 #define TG3_MIN_NUM 128
97 #define DRV_MODULE_VERSION \
98 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
99 #define DRV_MODULE_RELDATE "December 03, 2012"
101 #define RESET_KIND_SHUTDOWN 0
102 #define RESET_KIND_INIT 1
103 #define RESET_KIND_SUSPEND 2
105 #define TG3_DEF_RX_MODE 0
106 #define TG3_DEF_TX_MODE 0
107 #define TG3_DEF_MSG_ENABLE \
117 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119 /* length of time before we decide the hardware is borked,
120 * and dev->tx_timeout() should be called to fix the problem
123 #define TG3_TX_TIMEOUT (5 * HZ)
125 /* hardware minimum and maximum for a single frame's data payload */
126 #define TG3_MIN_MTU 60
127 #define TG3_MAX_MTU(tp) \
128 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 /* These numbers seem to be hard coded in the NIC firmware somehow.
131 * You can't change the ring sizes, but you can change where you place
132 * them in the NIC onboard memory.
134 #define TG3_RX_STD_RING_SIZE(tp) \
135 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
137 #define TG3_DEF_RX_RING_PENDING 200
138 #define TG3_RX_JMB_RING_SIZE(tp) \
139 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
141 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
143 /* Do not place this n-ring entries value into the tp struct itself,
144 * we really want to expose these constants to GCC so that modulo et
145 * al. operations are done with shifts and masks instead of with
146 * hw multiply/modulo instructions. Another solution would be to
147 * replace things like '% foo' with '& (foo - 1)'.
150 #define TG3_TX_RING_SIZE 512
151 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153 #define TG3_RX_STD_RING_BYTES(tp) \
154 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
155 #define TG3_RX_JMB_RING_BYTES(tp) \
156 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
157 #define TG3_RX_RCB_RING_BYTES(tp) \
158 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
159 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
163 #define TG3_DMA_BYTE_ENAB 64
165 #define TG3_RX_STD_DMA_SZ 1536
166 #define TG3_RX_JMB_DMA_SZ 9046
168 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
171 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
174 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
177 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
180 * that are at least dword aligned when used in PCIX mode. The driver
181 * works around this bug by double copying the packet. This workaround
182 * is built into the normal double copy length check for efficiency.
184 * However, the double copy is only necessary on those architectures
185 * where unaligned memory accesses are inefficient. For those architectures
186 * where unaligned memory accesses incur little penalty, we can reintegrate
187 * the 5701 in the normal rx path. Doing so saves a device structure
188 * dereference by hardcoding the double copy threshold in place.
190 #define TG3_RX_COPY_THRESHOLD 256
191 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
192 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
197 #if (NET_IP_ALIGN != 0)
198 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
203 /* minimum number of free TX descriptors required to wake up TX process */
204 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
205 #define TG3_TX_BD_DMA_MAX_2K 2048
206 #define TG3_TX_BD_DMA_MAX_4K 4096
208 #define TG3_RAW_IP_ALIGN 2
210 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
211 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 #define FIRMWARE_TG3 "tigon/tg3.bin"
214 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
215 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
217 static char version
[] =
218 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")";
220 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
221 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
222 MODULE_LICENSE("GPL");
223 MODULE_VERSION(DRV_MODULE_VERSION
);
224 MODULE_FIRMWARE(FIRMWARE_TG3
);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO
);
226 MODULE_FIRMWARE(FIRMWARE_TG3TSO5
);
228 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
229 module_param(tg3_debug
, int, 0);
230 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
232 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
233 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
235 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl
) = {
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
),
255 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
|
256 TG3_DRV_DATA_FLAG_5705_10_100
},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
),
258 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
|
259 TG3_DRV_DATA_FLAG_5705_10_100
},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
),
262 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
|
263 TG3_DRV_DATA_FLAG_5705_10_100
},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
),
269 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
),
275 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
283 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5787M
,
284 PCI_VENDOR_ID_LENOVO
,
285 TG3PCI_SUBDEVICE_ID_LENOVO_5787M
),
286 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787F
),
289 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5784
)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5764
)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5723
)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761
)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761E
)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761S
)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761SE
)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_G
)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_F
)},
308 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
,
309 PCI_VENDOR_ID_AI
, TG3PCI_SUBDEVICE_ID_ACER_57780_A
),
310 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
,
312 PCI_VENDOR_ID_AI
, TG3PCI_SUBDEVICE_ID_ACER_57780_B
),
313 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
314 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
)},
315 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57760
)},
316 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57790
),
317 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57788
)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717
)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717_C
)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5718
)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57781
)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57785
)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57761
)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57765
)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57791
),
327 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57795
),
329 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5719
)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5720
)},
332 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57762
)},
333 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
334 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
335 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
336 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
337 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
338 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
339 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
340 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
344 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
346 static const struct {
347 const char string
[ETH_GSTRING_LEN
];
348 } ethtool_stats_keys
[] = {
351 { "rx_ucast_packets" },
352 { "rx_mcast_packets" },
353 { "rx_bcast_packets" },
355 { "rx_align_errors" },
356 { "rx_xon_pause_rcvd" },
357 { "rx_xoff_pause_rcvd" },
358 { "rx_mac_ctrl_rcvd" },
359 { "rx_xoff_entered" },
360 { "rx_frame_too_long_errors" },
362 { "rx_undersize_packets" },
363 { "rx_in_length_errors" },
364 { "rx_out_length_errors" },
365 { "rx_64_or_less_octet_packets" },
366 { "rx_65_to_127_octet_packets" },
367 { "rx_128_to_255_octet_packets" },
368 { "rx_256_to_511_octet_packets" },
369 { "rx_512_to_1023_octet_packets" },
370 { "rx_1024_to_1522_octet_packets" },
371 { "rx_1523_to_2047_octet_packets" },
372 { "rx_2048_to_4095_octet_packets" },
373 { "rx_4096_to_8191_octet_packets" },
374 { "rx_8192_to_9022_octet_packets" },
381 { "tx_flow_control" },
383 { "tx_single_collisions" },
384 { "tx_mult_collisions" },
386 { "tx_excessive_collisions" },
387 { "tx_late_collisions" },
388 { "tx_collide_2times" },
389 { "tx_collide_3times" },
390 { "tx_collide_4times" },
391 { "tx_collide_5times" },
392 { "tx_collide_6times" },
393 { "tx_collide_7times" },
394 { "tx_collide_8times" },
395 { "tx_collide_9times" },
396 { "tx_collide_10times" },
397 { "tx_collide_11times" },
398 { "tx_collide_12times" },
399 { "tx_collide_13times" },
400 { "tx_collide_14times" },
401 { "tx_collide_15times" },
402 { "tx_ucast_packets" },
403 { "tx_mcast_packets" },
404 { "tx_bcast_packets" },
405 { "tx_carrier_sense_errors" },
409 { "dma_writeq_full" },
410 { "dma_write_prioq_full" },
414 { "rx_threshold_hit" },
416 { "dma_readq_full" },
417 { "dma_read_prioq_full" },
418 { "tx_comp_queue_full" },
420 { "ring_set_send_prod_index" },
421 { "ring_status_update" },
423 { "nic_avoided_irqs" },
424 { "nic_tx_threshold_hit" },
426 { "mbuf_lwm_thresh_hit" },
429 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
430 #define TG3_NVRAM_TEST 0
431 #define TG3_LINK_TEST 1
432 #define TG3_REGISTER_TEST 2
433 #define TG3_MEMORY_TEST 3
434 #define TG3_MAC_LOOPB_TEST 4
435 #define TG3_PHY_LOOPB_TEST 5
436 #define TG3_EXT_LOOPB_TEST 6
437 #define TG3_INTERRUPT_TEST 7
440 static const struct {
441 const char string
[ETH_GSTRING_LEN
];
442 } ethtool_test_keys
[] = {
443 [TG3_NVRAM_TEST
] = { "nvram test (online) " },
444 [TG3_LINK_TEST
] = { "link test (online) " },
445 [TG3_REGISTER_TEST
] = { "register test (offline)" },
446 [TG3_MEMORY_TEST
] = { "memory test (offline)" },
447 [TG3_MAC_LOOPB_TEST
] = { "mac loopback test (offline)" },
448 [TG3_PHY_LOOPB_TEST
] = { "phy loopback test (offline)" },
449 [TG3_EXT_LOOPB_TEST
] = { "ext loopback test (offline)" },
450 [TG3_INTERRUPT_TEST
] = { "interrupt test (offline)" },
453 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
456 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
458 writel(val
, tp
->regs
+ off
);
461 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
463 return readl(tp
->regs
+ off
);
466 static void tg3_ape_write32(struct tg3
*tp
, u32 off
, u32 val
)
468 writel(val
, tp
->aperegs
+ off
);
471 static u32
tg3_ape_read32(struct tg3
*tp
, u32 off
)
473 return readl(tp
->aperegs
+ off
);
476 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
480 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
481 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
482 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
483 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
486 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
488 writel(val
, tp
->regs
+ off
);
489 readl(tp
->regs
+ off
);
492 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
497 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
498 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
499 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
500 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
504 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
508 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
509 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
510 TG3_64BIT_REG_LOW
, val
);
513 if (off
== TG3_RX_STD_PROD_IDX_REG
) {
514 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
515 TG3_64BIT_REG_LOW
, val
);
519 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
520 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
521 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
522 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
524 /* In indirect mode when disabling interrupts, we also need
525 * to clear the interrupt bit in the GRC local ctrl register.
527 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
529 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
530 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
534 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
539 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
540 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
541 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
542 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
546 /* usec_wait specifies the wait time in usec when writing to certain registers
547 * where it is unsafe to read back the register without some delay.
548 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
549 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
551 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
553 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
) || tg3_flag(tp
, ICH_WORKAROUND
))
554 /* Non-posted methods */
555 tp
->write32(tp
, off
, val
);
558 tg3_write32(tp
, off
, val
);
563 /* Wait again after the read for the posted method to guarantee that
564 * the wait time is met.
570 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
572 tp
->write32_mbox(tp
, off
, val
);
573 if (!tg3_flag(tp
, MBOX_WRITE_REORDER
) && !tg3_flag(tp
, ICH_WORKAROUND
))
574 tp
->read32_mbox(tp
, off
);
577 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
579 void __iomem
*mbox
= tp
->regs
+ off
;
581 if (tg3_flag(tp
, TXD_MBOX_HWBUG
))
583 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
587 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
589 return readl(tp
->regs
+ off
+ GRCMBOX_BASE
);
592 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
594 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
597 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
598 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
599 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
600 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
601 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
603 #define tw32(reg, val) tp->write32(tp, reg, val)
604 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
605 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
606 #define tr32(reg) tp->read32(tp, reg)
608 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
612 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
613 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
616 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
617 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
618 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
619 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
621 /* Always leave this as zero. */
622 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
624 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
625 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
627 /* Always leave this as zero. */
628 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
630 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
633 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
637 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
638 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
643 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
644 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
645 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
646 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
648 /* Always leave this as zero. */
649 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
651 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
652 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
654 /* Always leave this as zero. */
655 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
657 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
660 static void tg3_ape_lock_init(struct tg3
*tp
)
665 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
666 regbase
= TG3_APE_LOCK_GRANT
;
668 regbase
= TG3_APE_PER_LOCK_GRANT
;
670 /* Make sure the driver hasn't any stale locks. */
671 for (i
= TG3_APE_LOCK_PHY0
; i
<= TG3_APE_LOCK_GPIO
; i
++) {
673 case TG3_APE_LOCK_PHY0
:
674 case TG3_APE_LOCK_PHY1
:
675 case TG3_APE_LOCK_PHY2
:
676 case TG3_APE_LOCK_PHY3
:
677 bit
= APE_LOCK_GRANT_DRIVER
;
681 bit
= APE_LOCK_GRANT_DRIVER
;
683 bit
= 1 << tp
->pci_fn
;
685 tg3_ape_write32(tp
, regbase
+ 4 * i
, bit
);
690 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
694 u32 status
, req
, gnt
, bit
;
696 if (!tg3_flag(tp
, ENABLE_APE
))
700 case TG3_APE_LOCK_GPIO
:
701 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
703 case TG3_APE_LOCK_GRC
:
704 case TG3_APE_LOCK_MEM
:
706 bit
= APE_LOCK_REQ_DRIVER
;
708 bit
= 1 << tp
->pci_fn
;
710 case TG3_APE_LOCK_PHY0
:
711 case TG3_APE_LOCK_PHY1
:
712 case TG3_APE_LOCK_PHY2
:
713 case TG3_APE_LOCK_PHY3
:
714 bit
= APE_LOCK_REQ_DRIVER
;
720 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
721 req
= TG3_APE_LOCK_REQ
;
722 gnt
= TG3_APE_LOCK_GRANT
;
724 req
= TG3_APE_PER_LOCK_REQ
;
725 gnt
= TG3_APE_PER_LOCK_GRANT
;
730 tg3_ape_write32(tp
, req
+ off
, bit
);
732 /* Wait for up to 1 millisecond to acquire lock. */
733 for (i
= 0; i
< 100; i
++) {
734 status
= tg3_ape_read32(tp
, gnt
+ off
);
741 /* Revoke the lock request. */
742 tg3_ape_write32(tp
, gnt
+ off
, bit
);
749 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
753 if (!tg3_flag(tp
, ENABLE_APE
))
757 case TG3_APE_LOCK_GPIO
:
758 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
760 case TG3_APE_LOCK_GRC
:
761 case TG3_APE_LOCK_MEM
:
763 bit
= APE_LOCK_GRANT_DRIVER
;
765 bit
= 1 << tp
->pci_fn
;
767 case TG3_APE_LOCK_PHY0
:
768 case TG3_APE_LOCK_PHY1
:
769 case TG3_APE_LOCK_PHY2
:
770 case TG3_APE_LOCK_PHY3
:
771 bit
= APE_LOCK_GRANT_DRIVER
;
777 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
778 gnt
= TG3_APE_LOCK_GRANT
;
780 gnt
= TG3_APE_PER_LOCK_GRANT
;
782 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, bit
);
785 static int tg3_ape_event_lock(struct tg3
*tp
, u32 timeout_us
)
790 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
793 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
794 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
797 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
800 timeout_us
-= (timeout_us
> 10) ? 10 : timeout_us
;
803 return timeout_us
? 0 : -EBUSY
;
806 static int tg3_ape_wait_for_event(struct tg3
*tp
, u32 timeout_us
)
810 for (i
= 0; i
< timeout_us
/ 10; i
++) {
811 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
813 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
819 return i
== timeout_us
/ 10;
822 static int tg3_ape_scratchpad_read(struct tg3
*tp
, u32
*data
, u32 base_off
,
826 u32 i
, bufoff
, msgoff
, maxlen
, apedata
;
828 if (!tg3_flag(tp
, APE_HAS_NCSI
))
831 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
832 if (apedata
!= APE_SEG_SIG_MAGIC
)
835 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
836 if (!(apedata
& APE_FW_STATUS_READY
))
839 bufoff
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_OFF
) +
841 msgoff
= bufoff
+ 2 * sizeof(u32
);
842 maxlen
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_LEN
);
847 /* Cap xfer sizes to scratchpad limits. */
848 length
= (len
> maxlen
) ? maxlen
: len
;
851 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
852 if (!(apedata
& APE_FW_STATUS_READY
))
855 /* Wait for up to 1 msec for APE to service previous event. */
856 err
= tg3_ape_event_lock(tp
, 1000);
860 apedata
= APE_EVENT_STATUS_DRIVER_EVNT
|
861 APE_EVENT_STATUS_SCRTCHPD_READ
|
862 APE_EVENT_STATUS_EVENT_PENDING
;
863 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
, apedata
);
865 tg3_ape_write32(tp
, bufoff
, base_off
);
866 tg3_ape_write32(tp
, bufoff
+ sizeof(u32
), length
);
868 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
869 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
873 if (tg3_ape_wait_for_event(tp
, 30000))
876 for (i
= 0; length
; i
+= 4, length
-= 4) {
877 u32 val
= tg3_ape_read32(tp
, msgoff
+ i
);
878 memcpy(data
, &val
, sizeof(u32
));
886 static int tg3_ape_send_event(struct tg3
*tp
, u32 event
)
891 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
892 if (apedata
!= APE_SEG_SIG_MAGIC
)
895 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
896 if (!(apedata
& APE_FW_STATUS_READY
))
899 /* Wait for up to 1 millisecond for APE to service previous event. */
900 err
= tg3_ape_event_lock(tp
, 1000);
904 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
905 event
| APE_EVENT_STATUS_EVENT_PENDING
);
907 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
908 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
913 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
918 if (!tg3_flag(tp
, ENABLE_APE
))
922 case RESET_KIND_INIT
:
923 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
924 APE_HOST_SEG_SIG_MAGIC
);
925 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
926 APE_HOST_SEG_LEN_MAGIC
);
927 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
928 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
929 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
930 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
931 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
932 APE_HOST_BEHAV_NO_PHYLOCK
);
933 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
934 TG3_APE_HOST_DRVR_STATE_START
);
936 event
= APE_EVENT_STATUS_STATE_START
;
938 case RESET_KIND_SHUTDOWN
:
939 /* With the interface we are currently using,
940 * APE does not track driver state. Wiping
941 * out the HOST SEGMENT SIGNATURE forces
942 * the APE to assume OS absent status.
944 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
946 if (device_may_wakeup(&tp
->pdev
->dev
) &&
947 tg3_flag(tp
, WOL_ENABLE
)) {
948 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
949 TG3_APE_HOST_WOL_SPEED_AUTO
);
950 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
952 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
954 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
956 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
958 case RESET_KIND_SUSPEND
:
959 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
965 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
967 tg3_ape_send_event(tp
, event
);
970 static void tg3_disable_ints(struct tg3
*tp
)
974 tw32(TG3PCI_MISC_HOST_CTRL
,
975 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
976 for (i
= 0; i
< tp
->irq_max
; i
++)
977 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
980 static void tg3_enable_ints(struct tg3
*tp
)
987 tw32(TG3PCI_MISC_HOST_CTRL
,
988 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
990 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
991 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
992 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
994 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
995 if (tg3_flag(tp
, 1SHOT_MSI
))
996 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
998 tp
->coal_now
|= tnapi
->coal_now
;
1001 /* Force an initial interrupt */
1002 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
1003 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
1004 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
1006 tw32(HOSTCC_MODE
, tp
->coal_now
);
1008 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
1011 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
1013 struct tg3
*tp
= tnapi
->tp
;
1014 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
1015 unsigned int work_exists
= 0;
1017 /* check for phy events */
1018 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
1019 if (sblk
->status
& SD_STATUS_LINK_CHG
)
1023 /* check for TX work to do */
1024 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
)
1027 /* check for RX work to do */
1028 if (tnapi
->rx_rcb_prod_idx
&&
1029 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
1036 * similar to tg3_enable_ints, but it accurately determines whether there
1037 * is new work pending and can return without flushing the PIO write
1038 * which reenables interrupts
1040 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
1042 struct tg3
*tp
= tnapi
->tp
;
1044 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1047 /* When doing tagged status, this work check is unnecessary.
1048 * The last_tag we write above tells the chip which piece of
1049 * work we've completed.
1051 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
1052 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
1053 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
1056 static void tg3_switch_clocks(struct tg3
*tp
)
1059 u32 orig_clock_ctrl
;
1061 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
1064 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
1066 orig_clock_ctrl
= clock_ctrl
;
1067 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
1068 CLOCK_CTRL_CLKRUN_OENABLE
|
1070 tp
->pci_clock_ctrl
= clock_ctrl
;
1072 if (tg3_flag(tp
, 5705_PLUS
)) {
1073 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
1074 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1075 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
1077 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
1078 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1080 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
1082 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1083 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
1086 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
1089 #define PHY_BUSY_LOOPS 5000
1091 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
1097 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1099 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1103 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1107 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1108 MI_COM_PHY_ADDR_MASK
);
1109 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1110 MI_COM_REG_ADDR_MASK
);
1111 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
1113 tw32_f(MAC_MI_COM
, frame_val
);
1115 loops
= PHY_BUSY_LOOPS
;
1116 while (loops
!= 0) {
1118 frame_val
= tr32(MAC_MI_COM
);
1120 if ((frame_val
& MI_COM_BUSY
) == 0) {
1122 frame_val
= tr32(MAC_MI_COM
);
1130 *val
= frame_val
& MI_COM_DATA_MASK
;
1134 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1135 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1139 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1144 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1150 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1151 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1154 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1156 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1160 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1162 frame_val
= ((tp
->phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1163 MI_COM_PHY_ADDR_MASK
);
1164 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1165 MI_COM_REG_ADDR_MASK
);
1166 frame_val
|= (val
& MI_COM_DATA_MASK
);
1167 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1169 tw32_f(MAC_MI_COM
, frame_val
);
1171 loops
= PHY_BUSY_LOOPS
;
1172 while (loops
!= 0) {
1174 frame_val
= tr32(MAC_MI_COM
);
1175 if ((frame_val
& MI_COM_BUSY
) == 0) {
1177 frame_val
= tr32(MAC_MI_COM
);
1187 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1188 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1192 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1197 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1201 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1205 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1209 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1210 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1214 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1220 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1224 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1228 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1232 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1233 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1237 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1243 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1247 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1249 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1254 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1258 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1260 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1265 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1269 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1270 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1271 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1273 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1278 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1280 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1281 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1283 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1286 static int tg3_phy_toggle_auxctl_smdsp(struct tg3
*tp
, bool enable
)
1291 err
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
1297 val
|= MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1299 val
&= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1301 err
= tg3_phy_auxctl_write((tp
), MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
1302 val
| MII_TG3_AUXCTL_ACTL_TX_6DB
);
1307 static int tg3_bmcr_reset(struct tg3
*tp
)
1312 /* OK, reset it, and poll the BMCR_RESET bit until it
1313 * clears or we time out.
1315 phy_control
= BMCR_RESET
;
1316 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1322 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1326 if ((phy_control
& BMCR_RESET
) == 0) {
1338 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1340 struct tg3
*tp
= bp
->priv
;
1343 spin_lock_bh(&tp
->lock
);
1345 if (tg3_readphy(tp
, reg
, &val
))
1348 spin_unlock_bh(&tp
->lock
);
1353 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1355 struct tg3
*tp
= bp
->priv
;
1358 spin_lock_bh(&tp
->lock
);
1360 if (tg3_writephy(tp
, reg
, val
))
1363 spin_unlock_bh(&tp
->lock
);
/* phylib mii_bus ->reset callback: nothing to do for this hardware. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1373 static void tg3_mdio_config_5785(struct tg3
*tp
)
1376 struct phy_device
*phydev
;
1378 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1379 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1380 case PHY_ID_BCM50610
:
1381 case PHY_ID_BCM50610M
:
1382 val
= MAC_PHYCFG2_50610_LED_MODES
;
1384 case PHY_ID_BCMAC131
:
1385 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1387 case PHY_ID_RTL8211C
:
1388 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1390 case PHY_ID_RTL8201E
:
1391 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1397 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1398 tw32(MAC_PHYCFG2
, val
);
1400 val
= tr32(MAC_PHYCFG1
);
1401 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1402 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1403 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1404 tw32(MAC_PHYCFG1
, val
);
1409 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1410 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1411 MAC_PHYCFG2_FMODE_MASK_MASK
|
1412 MAC_PHYCFG2_GMODE_MASK_MASK
|
1413 MAC_PHYCFG2_ACT_MASK_MASK
|
1414 MAC_PHYCFG2_QUAL_MASK_MASK
|
1415 MAC_PHYCFG2_INBAND_ENABLE
;
1417 tw32(MAC_PHYCFG2
, val
);
1419 val
= tr32(MAC_PHYCFG1
);
1420 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1421 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1422 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1423 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1424 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1425 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1426 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1428 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1429 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1430 tw32(MAC_PHYCFG1
, val
);
1432 val
= tr32(MAC_EXT_RGMII_MODE
);
1433 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1434 MAC_RGMII_MODE_RX_QUALITY
|
1435 MAC_RGMII_MODE_RX_ACTIVITY
|
1436 MAC_RGMII_MODE_RX_ENG_DET
|
1437 MAC_RGMII_MODE_TX_ENABLE
|
1438 MAC_RGMII_MODE_TX_LOWPWR
|
1439 MAC_RGMII_MODE_TX_RESET
);
1440 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1441 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1442 val
|= MAC_RGMII_MODE_RX_INT_B
|
1443 MAC_RGMII_MODE_RX_QUALITY
|
1444 MAC_RGMII_MODE_RX_ACTIVITY
|
1445 MAC_RGMII_MODE_RX_ENG_DET
;
1446 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1447 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1448 MAC_RGMII_MODE_TX_LOWPWR
|
1449 MAC_RGMII_MODE_TX_RESET
;
1451 tw32(MAC_EXT_RGMII_MODE
, val
);
1454 static void tg3_mdio_start(struct tg3
*tp
)
1456 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1457 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1460 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1461 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1462 tg3_mdio_config_5785(tp
);
1465 static int tg3_mdio_init(struct tg3
*tp
)
1469 struct phy_device
*phydev
;
1471 if (tg3_flag(tp
, 5717_PLUS
)) {
1474 tp
->phy_addr
= tp
->pci_fn
+ 1;
1476 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
)
1477 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1479 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1480 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1484 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1488 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1491 tp
->mdio_bus
= mdiobus_alloc();
1492 if (tp
->mdio_bus
== NULL
)
1495 tp
->mdio_bus
->name
= "tg3 mdio bus";
1496 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1497 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1498 tp
->mdio_bus
->priv
= tp
;
1499 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1500 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1501 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1502 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1503 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1504 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1506 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1507 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1509 /* The bus registration will look for all the PHYs on the mdio bus.
1510 * Unfortunately, it does not ensure the PHY is powered up before
1511 * accessing the PHY ID registers. A chip reset is the
1512 * quickest way to bring the device back to an operational state..
1514 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1517 i
= mdiobus_register(tp
->mdio_bus
);
1519 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1520 mdiobus_free(tp
->mdio_bus
);
1524 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1526 if (!phydev
|| !phydev
->drv
) {
1527 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1528 mdiobus_unregister(tp
->mdio_bus
);
1529 mdiobus_free(tp
->mdio_bus
);
1533 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1534 case PHY_ID_BCM57780
:
1535 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1536 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1538 case PHY_ID_BCM50610
:
1539 case PHY_ID_BCM50610M
:
1540 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1541 PHY_BRCM_RX_REFCLK_UNUSED
|
1542 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1543 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1544 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1545 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1546 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1547 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1548 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1549 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1551 case PHY_ID_RTL8211C
:
1552 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1554 case PHY_ID_RTL8201E
:
1555 case PHY_ID_BCMAC131
:
1556 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1557 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1558 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1562 tg3_flag_set(tp
, MDIOBUS_INITED
);
1564 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
1565 tg3_mdio_config_5785(tp
);
1570 static void tg3_mdio_fini(struct tg3
*tp
)
1572 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1573 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1574 mdiobus_unregister(tp
->mdio_bus
);
1575 mdiobus_free(tp
->mdio_bus
);
1579 /* tp->lock is held. */
1580 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1584 val
= tr32(GRC_RX_CPU_EVENT
);
1585 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1586 tw32_f(GRC_RX_CPU_EVENT
, val
);
1588 tp
->last_event_jiffies
= jiffies
;
1591 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1593 /* tp->lock is held. */
1594 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1597 unsigned int delay_cnt
;
1600 /* If enough time has passed, no wait is necessary. */
1601 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1602 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1604 if (time_remain
< 0)
1607 /* Check if we can shorten the wait time. */
1608 delay_cnt
= jiffies_to_usecs(time_remain
);
1609 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1610 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1611 delay_cnt
= (delay_cnt
>> 3) + 1;
1613 for (i
= 0; i
< delay_cnt
; i
++) {
1614 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1620 /* tp->lock is held. */
1621 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1626 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1628 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1629 val
|= (reg
& 0xffff);
1633 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1635 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1636 val
|= (reg
& 0xffff);
1640 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1641 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1643 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1644 val
|= (reg
& 0xffff);
1648 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1655 /* tp->lock is held. */
1656 static void tg3_ump_link_report(struct tg3
*tp
)
1660 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1663 tg3_phy_gather_ump_data(tp
, data
);
1665 tg3_wait_for_event_ack(tp
);
1667 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1668 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1669 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1670 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1671 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1672 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1674 tg3_generate_fw_event(tp
);
1677 /* tp->lock is held. */
1678 static void tg3_stop_fw(struct tg3
*tp
)
1680 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1681 /* Wait for RX cpu to ACK the previous event. */
1682 tg3_wait_for_event_ack(tp
);
1684 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1686 tg3_generate_fw_event(tp
);
1688 /* Wait for RX cpu to ACK this event. */
1689 tg3_wait_for_event_ack(tp
);
1693 /* tp->lock is held. */
1694 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1696 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1697 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1699 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1701 case RESET_KIND_INIT
:
1702 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1706 case RESET_KIND_SHUTDOWN
:
1707 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1711 case RESET_KIND_SUSPEND
:
1712 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1721 if (kind
== RESET_KIND_INIT
||
1722 kind
== RESET_KIND_SUSPEND
)
1723 tg3_ape_driver_state_change(tp
, kind
);
1726 /* tp->lock is held. */
1727 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1729 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1731 case RESET_KIND_INIT
:
1732 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1733 DRV_STATE_START_DONE
);
1736 case RESET_KIND_SHUTDOWN
:
1737 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1738 DRV_STATE_UNLOAD_DONE
);
1746 if (kind
== RESET_KIND_SHUTDOWN
)
1747 tg3_ape_driver_state_change(tp
, kind
);
1750 /* tp->lock is held. */
1751 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1753 if (tg3_flag(tp
, ENABLE_ASF
)) {
1755 case RESET_KIND_INIT
:
1756 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1760 case RESET_KIND_SHUTDOWN
:
1761 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1765 case RESET_KIND_SUSPEND
:
1766 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1776 static int tg3_poll_fw(struct tg3
*tp
)
1781 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1782 /* Wait up to 20ms for init done. */
1783 for (i
= 0; i
< 200; i
++) {
1784 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1791 /* Wait for firmware initialization to complete. */
1792 for (i
= 0; i
< 100000; i
++) {
1793 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1794 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1799 /* Chip might not be fitted with firmware. Some Sun onboard
1800 * parts are configured like that. So don't signal the timeout
1801 * of the above loop as an error, but do report the lack of
1802 * running firmware once.
1804 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1805 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1807 netdev_info(tp
->dev
, "No firmware running\n");
1810 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
1811 /* The 57765 A0 needs a little more
1812 * time to do some important work.
1820 static void tg3_link_report(struct tg3
*tp
)
1822 if (!netif_carrier_ok(tp
->dev
)) {
1823 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1824 tg3_ump_link_report(tp
);
1825 } else if (netif_msg_link(tp
)) {
1826 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1827 (tp
->link_config
.active_speed
== SPEED_1000
?
1829 (tp
->link_config
.active_speed
== SPEED_100
?
1831 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1834 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1835 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1837 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1840 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1841 netdev_info(tp
->dev
, "EEE is %s\n",
1842 tp
->setlpicnt
? "enabled" : "disabled");
1844 tg3_ump_link_report(tp
);
1848 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1852 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1853 miireg
= ADVERTISE_1000XPAUSE
;
1854 else if (flow_ctrl
& FLOW_CTRL_TX
)
1855 miireg
= ADVERTISE_1000XPSE_ASYM
;
1856 else if (flow_ctrl
& FLOW_CTRL_RX
)
1857 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1864 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1868 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1869 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1870 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1871 if (lcladv
& ADVERTISE_1000XPAUSE
)
1873 if (rmtadv
& ADVERTISE_1000XPAUSE
)
1880 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1884 u32 old_rx_mode
= tp
->rx_mode
;
1885 u32 old_tx_mode
= tp
->tx_mode
;
1887 if (tg3_flag(tp
, USE_PHYLIB
))
1888 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1890 autoneg
= tp
->link_config
.autoneg
;
1892 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1893 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1894 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1896 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1898 flowctrl
= tp
->link_config
.flowctrl
;
1900 tp
->link_config
.active_flowctrl
= flowctrl
;
1902 if (flowctrl
& FLOW_CTRL_RX
)
1903 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1905 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1907 if (old_rx_mode
!= tp
->rx_mode
)
1908 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1910 if (flowctrl
& FLOW_CTRL_TX
)
1911 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1913 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1915 if (old_tx_mode
!= tp
->tx_mode
)
1916 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1919 static void tg3_adjust_link(struct net_device
*dev
)
1921 u8 oldflowctrl
, linkmesg
= 0;
1922 u32 mac_mode
, lcl_adv
, rmt_adv
;
1923 struct tg3
*tp
= netdev_priv(dev
);
1924 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1926 spin_lock_bh(&tp
->lock
);
1928 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
1929 MAC_MODE_HALF_DUPLEX
);
1931 oldflowctrl
= tp
->link_config
.active_flowctrl
;
1937 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
1938 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1939 else if (phydev
->speed
== SPEED_1000
||
1940 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
)
1941 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1943 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1945 if (phydev
->duplex
== DUPLEX_HALF
)
1946 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1948 lcl_adv
= mii_advertise_flowctrl(
1949 tp
->link_config
.flowctrl
);
1952 rmt_adv
= LPA_PAUSE_CAP
;
1953 if (phydev
->asym_pause
)
1954 rmt_adv
|= LPA_PAUSE_ASYM
;
1957 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
1959 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1961 if (mac_mode
!= tp
->mac_mode
) {
1962 tp
->mac_mode
= mac_mode
;
1963 tw32_f(MAC_MODE
, tp
->mac_mode
);
1967 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
1968 if (phydev
->speed
== SPEED_10
)
1970 MAC_MI_STAT_10MBPS_MODE
|
1971 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1973 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
1976 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
1977 tw32(MAC_TX_LENGTHS
,
1978 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1979 (6 << TX_LENGTHS_IPG_SHIFT
) |
1980 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1982 tw32(MAC_TX_LENGTHS
,
1983 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
1984 (6 << TX_LENGTHS_IPG_SHIFT
) |
1985 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
1987 if (phydev
->link
!= tp
->old_link
||
1988 phydev
->speed
!= tp
->link_config
.active_speed
||
1989 phydev
->duplex
!= tp
->link_config
.active_duplex
||
1990 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
1993 tp
->old_link
= phydev
->link
;
1994 tp
->link_config
.active_speed
= phydev
->speed
;
1995 tp
->link_config
.active_duplex
= phydev
->duplex
;
1997 spin_unlock_bh(&tp
->lock
);
2000 tg3_link_report(tp
);
2003 static int tg3_phy_init(struct tg3
*tp
)
2005 struct phy_device
*phydev
;
2007 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
2010 /* Bring the PHY back to a known state. */
2013 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2015 /* Attach the MAC to the PHY. */
2016 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
), tg3_adjust_link
,
2017 phydev
->dev_flags
, phydev
->interface
);
2018 if (IS_ERR(phydev
)) {
2019 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
2020 return PTR_ERR(phydev
);
2023 /* Mask with MAC supported features. */
2024 switch (phydev
->interface
) {
2025 case PHY_INTERFACE_MODE_GMII
:
2026 case PHY_INTERFACE_MODE_RGMII
:
2027 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
2028 phydev
->supported
&= (PHY_GBIT_FEATURES
|
2030 SUPPORTED_Asym_Pause
);
2034 case PHY_INTERFACE_MODE_MII
:
2035 phydev
->supported
&= (PHY_BASIC_FEATURES
|
2037 SUPPORTED_Asym_Pause
);
2040 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2044 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
2046 phydev
->advertising
= phydev
->supported
;
2051 static void tg3_phy_start(struct tg3
*tp
)
2053 struct phy_device
*phydev
;
2055 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2058 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2060 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2061 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2062 phydev
->speed
= tp
->link_config
.speed
;
2063 phydev
->duplex
= tp
->link_config
.duplex
;
2064 phydev
->autoneg
= tp
->link_config
.autoneg
;
2065 phydev
->advertising
= tp
->link_config
.advertising
;
2070 phy_start_aneg(phydev
);
2073 static void tg3_phy_stop(struct tg3
*tp
)
2075 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2078 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2081 static void tg3_phy_fini(struct tg3
*tp
)
2083 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
2084 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2085 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
2089 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2094 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2097 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2098 /* Cannot do read-modify-write on 5401 */
2099 err
= tg3_phy_auxctl_write(tp
,
2100 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2101 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2106 err
= tg3_phy_auxctl_read(tp
,
2107 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2111 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2112 err
= tg3_phy_auxctl_write(tp
,
2113 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2119 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2123 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2126 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2127 phytest
| MII_TG3_FET_SHADOW_EN
);
2128 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2130 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2132 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2133 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2135 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2139 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
2143 if (!tg3_flag(tp
, 5705_PLUS
) ||
2144 (tg3_flag(tp
, 5717_PLUS
) &&
2145 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
2148 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2149 tg3_phy_fet_toggle_apd(tp
, enable
);
2153 reg
= MII_TG3_MISC_SHDW_WREN
|
2154 MII_TG3_MISC_SHDW_SCR5_SEL
|
2155 MII_TG3_MISC_SHDW_SCR5_LPED
|
2156 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
2157 MII_TG3_MISC_SHDW_SCR5_SDTL
|
2158 MII_TG3_MISC_SHDW_SCR5_C125OE
;
2159 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
|| !enable
)
2160 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
2162 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
2165 reg
= MII_TG3_MISC_SHDW_WREN
|
2166 MII_TG3_MISC_SHDW_APD_SEL
|
2167 MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
2169 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
2171 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
2174 static void tg3_phy_toggle_automdix(struct tg3
*tp
, int enable
)
2178 if (!tg3_flag(tp
, 5705_PLUS
) ||
2179 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
2182 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2185 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
2186 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
2188 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2189 ephy
| MII_TG3_FET_SHADOW_EN
);
2190 if (!tg3_readphy(tp
, reg
, &phy
)) {
2192 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2194 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2195 tg3_writephy(tp
, reg
, phy
);
2197 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
2202 ret
= tg3_phy_auxctl_read(tp
,
2203 MII_TG3_AUXCTL_SHDWSEL_MISC
, &phy
);
2206 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2208 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2209 tg3_phy_auxctl_write(tp
,
2210 MII_TG3_AUXCTL_SHDWSEL_MISC
, phy
);
2215 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
2220 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
2223 ret
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
, &val
);
2225 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
,
2226 val
| MII_TG3_AUXCTL_MISC_WIRESPD_EN
);
2229 static void tg3_phy_apply_otp(struct tg3
*tp
)
2238 if (tg3_phy_toggle_auxctl_smdsp(tp
, true))
2241 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
2242 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
2243 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
2245 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
2246 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
2247 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
2249 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
2250 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
2251 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
2253 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
2254 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
2256 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
2257 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
2259 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
2260 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
2261 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
2263 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2266 static void tg3_phy_eee_adjust(struct tg3
*tp
, u32 current_link_up
)
2270 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2275 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
2276 current_link_up
== 1 &&
2277 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
2278 (tp
->link_config
.active_speed
== SPEED_100
||
2279 tp
->link_config
.active_speed
== SPEED_1000
)) {
2282 if (tp
->link_config
.active_speed
== SPEED_1000
)
2283 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
2285 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
2287 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
2289 tg3_phy_cl45_read(tp
, MDIO_MMD_AN
,
2290 TG3_CL45_D7_EEERES_STAT
, &val
);
2292 if (val
== TG3_CL45_D7_EEERES_STAT_LP_1000T
||
2293 val
== TG3_CL45_D7_EEERES_STAT_LP_100TX
)
2297 if (!tp
->setlpicnt
) {
2298 if (current_link_up
== 1 &&
2299 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2300 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0000);
2301 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2304 val
= tr32(TG3_CPMU_EEE_MODE
);
2305 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
2309 static void tg3_phy_eee_enable(struct tg3
*tp
)
2313 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2314 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
2315 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
2316 tg3_flag(tp
, 57765_CLASS
)) &&
2317 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2318 val
= MII_TG3_DSP_TAP26_ALNOKO
|
2319 MII_TG3_DSP_TAP26_RMRXSTO
;
2320 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
2321 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2324 val
= tr32(TG3_CPMU_EEE_MODE
);
2325 tw32(TG3_CPMU_EEE_MODE
, val
| TG3_CPMU_EEEMD_LPI_ENABLE
);
2328 static int tg3_wait_macro_done(struct tg3
*tp
)
2335 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
2336 if ((tmp32
& 0x1000) == 0)
2346 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
2348 static const u32 test_pat
[4][6] = {
2349 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2350 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2351 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2352 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2356 for (chan
= 0; chan
< 4; chan
++) {
2359 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2360 (chan
* 0x2000) | 0x0200);
2361 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2363 for (i
= 0; i
< 6; i
++)
2364 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
2367 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2368 if (tg3_wait_macro_done(tp
)) {
2373 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2374 (chan
* 0x2000) | 0x0200);
2375 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
2376 if (tg3_wait_macro_done(tp
)) {
2381 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
2382 if (tg3_wait_macro_done(tp
)) {
2387 for (i
= 0; i
< 6; i
+= 2) {
2390 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
2391 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
2392 tg3_wait_macro_done(tp
)) {
2398 if (low
!= test_pat
[chan
][i
] ||
2399 high
!= test_pat
[chan
][i
+1]) {
2400 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
2401 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
2402 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
2412 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
2416 for (chan
= 0; chan
< 4; chan
++) {
2419 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2420 (chan
* 0x2000) | 0x0200);
2421 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2422 for (i
= 0; i
< 6; i
++)
2423 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
2424 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2425 if (tg3_wait_macro_done(tp
))
2432 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
2434 u32 reg32
, phy9_orig
;
2435 int retries
, do_phy_reset
, err
;
2441 err
= tg3_bmcr_reset(tp
);
2447 /* Disable transmitter and interrupt. */
2448 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
2452 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2454 /* Set full-duplex, 1000 mbps. */
2455 tg3_writephy(tp
, MII_BMCR
,
2456 BMCR_FULLDPLX
| BMCR_SPEED1000
);
2458 /* Set to master mode. */
2459 if (tg3_readphy(tp
, MII_CTRL1000
, &phy9_orig
))
2462 tg3_writephy(tp
, MII_CTRL1000
,
2463 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
2465 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
2469 /* Block the PHY control access. */
2470 tg3_phydsp_write(tp
, 0x8005, 0x0800);
2472 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
2475 } while (--retries
);
2477 err
= tg3_phy_reset_chanpat(tp
);
2481 tg3_phydsp_write(tp
, 0x8005, 0x0000);
2483 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
2484 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
2486 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2488 tg3_writephy(tp
, MII_CTRL1000
, phy9_orig
);
2490 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
2492 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2499 static void tg3_carrier_on(struct tg3
*tp
)
2501 netif_carrier_on(tp
->dev
);
2505 static void tg3_carrier_off(struct tg3
*tp
)
2507 netif_carrier_off(tp
->dev
);
2508 tp
->link_up
= false;
2511 /* This will reset the tigon3 PHY if there is no valid
2512 * link unless the FORCE argument is non-zero.
2514 static int tg3_phy_reset(struct tg3
*tp
)
2519 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2520 val
= tr32(GRC_MISC_CFG
);
2521 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
2524 err
= tg3_readphy(tp
, MII_BMSR
, &val
);
2525 err
|= tg3_readphy(tp
, MII_BMSR
, &val
);
2529 if (netif_running(tp
->dev
) && tp
->link_up
) {
2530 tg3_carrier_off(tp
);
2531 tg3_link_report(tp
);
2534 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
2535 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2536 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
2537 err
= tg3_phy_reset_5703_4_5(tp
);
2544 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
2545 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
2546 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
2547 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
2549 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
2552 err
= tg3_bmcr_reset(tp
);
2556 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
2557 val
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
2558 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, val
);
2560 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
2563 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
||
2564 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5761_AX
) {
2565 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2566 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
2567 CPMU_LSPD_1000MB_MACCLK_12_5
) {
2568 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2570 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2574 if (tg3_flag(tp
, 5717_PLUS
) &&
2575 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
))
2578 tg3_phy_apply_otp(tp
);
2580 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
2581 tg3_phy_toggle_apd(tp
, true);
2583 tg3_phy_toggle_apd(tp
, false);
2586 if ((tp
->phy_flags
& TG3_PHYFLG_ADC_BUG
) &&
2587 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2588 tg3_phydsp_write(tp
, 0x201f, 0x2aaa);
2589 tg3_phydsp_write(tp
, 0x000a, 0x0323);
2590 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2593 if (tp
->phy_flags
& TG3_PHYFLG_5704_A0_BUG
) {
2594 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2595 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2598 if (tp
->phy_flags
& TG3_PHYFLG_BER_BUG
) {
2599 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2600 tg3_phydsp_write(tp
, 0x000a, 0x310b);
2601 tg3_phydsp_write(tp
, 0x201f, 0x9506);
2602 tg3_phydsp_write(tp
, 0x401f, 0x14e2);
2603 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2605 } else if (tp
->phy_flags
& TG3_PHYFLG_JITTER_BUG
) {
2606 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2607 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
2608 if (tp
->phy_flags
& TG3_PHYFLG_ADJUST_TRIM
) {
2609 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
2610 tg3_writephy(tp
, MII_TG3_TEST1
,
2611 MII_TG3_TEST1_TRIM_EN
| 0x4);
2613 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
2615 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2619 /* Set Extended packet length bit (bit 14) on all chips that */
2620 /* support jumbo frames */
2621 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2622 /* Cannot do read-modify-write on 5401 */
2623 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
2624 } else if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2625 /* Set bit 14 with read-modify-write to preserve other bits */
2626 err
= tg3_phy_auxctl_read(tp
,
2627 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2629 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2630 val
| MII_TG3_AUXCTL_ACTL_EXTPKTLEN
);
2633 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2634 * jumbo frames transmission.
2636 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2637 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &val
))
2638 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2639 val
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
2642 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2643 /* adjust output voltage */
2644 tg3_writephy(tp
, MII_TG3_FET_PTEST
, 0x12);
2647 tg3_phy_toggle_automdix(tp
, 1);
2648 tg3_phy_set_wirespeed(tp
);
2652 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2653 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2654 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2655 TG3_GPIO_MSG_NEED_VAUX)
2656 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2657 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2658 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2659 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2660 (TG3_GPIO_MSG_DRVR_PRES << 12))
2662 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2663 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2664 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2665 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2666 (TG3_GPIO_MSG_NEED_VAUX << 12))
2668 static inline u32
tg3_set_function_status(struct tg3
*tp
, u32 newstat
)
2672 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
2673 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
2674 status
= tg3_ape_read32(tp
, TG3_APE_GPIO_MSG
);
2676 status
= tr32(TG3_CPMU_DRV_STATUS
);
2678 shift
= TG3_APE_GPIO_MSG_SHIFT
+ 4 * tp
->pci_fn
;
2679 status
&= ~(TG3_GPIO_MSG_MASK
<< shift
);
2680 status
|= (newstat
<< shift
);
2682 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
2683 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
2684 tg3_ape_write32(tp
, TG3_APE_GPIO_MSG
, status
);
2686 tw32(TG3_CPMU_DRV_STATUS
, status
);
2688 return status
>> TG3_APE_GPIO_MSG_SHIFT
;
2691 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3
*tp
)
2693 if (!tg3_flag(tp
, IS_NIC
))
2696 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
2697 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
2698 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
2699 if (tg3_ape_lock(tp
, TG3_APE_LOCK_GPIO
))
2702 tg3_set_function_status(tp
, TG3_GPIO_MSG_DRVR_PRES
);
2704 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
,
2705 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2707 tg3_ape_unlock(tp
, TG3_APE_LOCK_GPIO
);
2709 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
,
2710 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2716 static void tg3_pwrsrc_die_with_vmain(struct tg3
*tp
)
2720 if (!tg3_flag(tp
, IS_NIC
) ||
2721 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2722 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)
2725 grc_local_ctrl
= tp
->grc_local_ctrl
| GRC_LCLCTRL_GPIO_OE1
;
2727 tw32_wait_f(GRC_LOCAL_CTRL
,
2728 grc_local_ctrl
| GRC_LCLCTRL_GPIO_OUTPUT1
,
2729 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2731 tw32_wait_f(GRC_LOCAL_CTRL
,
2733 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2735 tw32_wait_f(GRC_LOCAL_CTRL
,
2736 grc_local_ctrl
| GRC_LCLCTRL_GPIO_OUTPUT1
,
2737 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2740 static void tg3_pwrsrc_switch_to_vaux(struct tg3
*tp
)
2742 if (!tg3_flag(tp
, IS_NIC
))
2745 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2746 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
2747 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2748 (GRC_LCLCTRL_GPIO_OE0
|
2749 GRC_LCLCTRL_GPIO_OE1
|
2750 GRC_LCLCTRL_GPIO_OE2
|
2751 GRC_LCLCTRL_GPIO_OUTPUT0
|
2752 GRC_LCLCTRL_GPIO_OUTPUT1
),
2753 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2754 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2755 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2756 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2757 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2758 GRC_LCLCTRL_GPIO_OE1
|
2759 GRC_LCLCTRL_GPIO_OE2
|
2760 GRC_LCLCTRL_GPIO_OUTPUT0
|
2761 GRC_LCLCTRL_GPIO_OUTPUT1
|
2763 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2764 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2766 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2767 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2768 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2770 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2771 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2772 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2775 u32 grc_local_ctrl
= 0;
2777 /* Workaround to prevent overdrawing Amps. */
2778 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
2779 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2780 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2782 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2785 /* On 5753 and variants, GPIO2 cannot be used. */
2786 no_gpio2
= tp
->nic_sram_data_cfg
&
2787 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2789 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2790 GRC_LCLCTRL_GPIO_OE1
|
2791 GRC_LCLCTRL_GPIO_OE2
|
2792 GRC_LCLCTRL_GPIO_OUTPUT1
|
2793 GRC_LCLCTRL_GPIO_OUTPUT2
;
2795 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2796 GRC_LCLCTRL_GPIO_OUTPUT2
);
2798 tw32_wait_f(GRC_LOCAL_CTRL
,
2799 tp
->grc_local_ctrl
| grc_local_ctrl
,
2800 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2802 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2804 tw32_wait_f(GRC_LOCAL_CTRL
,
2805 tp
->grc_local_ctrl
| grc_local_ctrl
,
2806 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2809 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2810 tw32_wait_f(GRC_LOCAL_CTRL
,
2811 tp
->grc_local_ctrl
| grc_local_ctrl
,
2812 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2817 static void tg3_frob_aux_power_5717(struct tg3
*tp
, bool wol_enable
)
2821 /* Serialize power state transitions */
2822 if (tg3_ape_lock(tp
, TG3_APE_LOCK_GPIO
))
2825 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
) || wol_enable
)
2826 msg
= TG3_GPIO_MSG_NEED_VAUX
;
2828 msg
= tg3_set_function_status(tp
, msg
);
2830 if (msg
& TG3_GPIO_MSG_ALL_DRVR_PRES_MASK
)
2833 if (msg
& TG3_GPIO_MSG_ALL_NEED_VAUX_MASK
)
2834 tg3_pwrsrc_switch_to_vaux(tp
);
2836 tg3_pwrsrc_die_with_vmain(tp
);
2839 tg3_ape_unlock(tp
, TG3_APE_LOCK_GPIO
);
2842 static void tg3_frob_aux_power(struct tg3
*tp
, bool include_wol
)
2844 bool need_vaux
= false;
2846 /* The GPIOs do something completely different on 57765. */
2847 if (!tg3_flag(tp
, IS_NIC
) || tg3_flag(tp
, 57765_CLASS
))
2850 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
2851 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
2852 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
2853 tg3_frob_aux_power_5717(tp
, include_wol
?
2854 tg3_flag(tp
, WOL_ENABLE
) != 0 : 0);
2858 if (tp
->pdev_peer
&& tp
->pdev_peer
!= tp
->pdev
) {
2859 struct net_device
*dev_peer
;
2861 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2863 /* remove_one() may have been run on the peer. */
2865 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2867 if (tg3_flag(tp_peer
, INIT_COMPLETE
))
2870 if ((include_wol
&& tg3_flag(tp_peer
, WOL_ENABLE
)) ||
2871 tg3_flag(tp_peer
, ENABLE_ASF
))
2876 if ((include_wol
&& tg3_flag(tp
, WOL_ENABLE
)) ||
2877 tg3_flag(tp
, ENABLE_ASF
))
2881 tg3_pwrsrc_switch_to_vaux(tp
);
2883 tg3_pwrsrc_die_with_vmain(tp
);
2886 static int tg3_5700_link_polarity(struct tg3
*tp
, u32 speed
)
2888 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
)
2890 else if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
) {
2891 if (speed
!= SPEED_10
)
2893 } else if (speed
== SPEED_10
)
2899 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
2903 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
2904 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
2905 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2906 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
2909 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
2910 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
2911 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
2916 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
2918 val
= tr32(GRC_MISC_CFG
);
2919 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
2922 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2924 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2927 tg3_writephy(tp
, MII_ADVERTISE
, 0);
2928 tg3_writephy(tp
, MII_BMCR
,
2929 BMCR_ANENABLE
| BMCR_ANRESTART
);
2931 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2932 phytest
| MII_TG3_FET_SHADOW_EN
);
2933 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
2934 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
2936 MII_TG3_FET_SHDW_AUXMODE4
,
2939 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2942 } else if (do_low_power
) {
2943 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2944 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
2946 val
= MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
2947 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
2948 MII_TG3_AUXCTL_PCTL_VREG_11V
;
2949 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, val
);
2952 /* The PHY should not be powered down on some chips because
2955 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
2956 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
2957 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
&&
2958 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) ||
2959 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
&&
2963 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
||
2964 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5761_AX
) {
2965 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2966 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2967 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
2968 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2971 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
2974 /* tp->lock is held. */
2975 static int tg3_nvram_lock(struct tg3
*tp
)
2977 if (tg3_flag(tp
, NVRAM
)) {
2980 if (tp
->nvram_lock_cnt
== 0) {
2981 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
2982 for (i
= 0; i
< 8000; i
++) {
2983 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
2988 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
2992 tp
->nvram_lock_cnt
++;
2997 /* tp->lock is held. */
2998 static void tg3_nvram_unlock(struct tg3
*tp
)
3000 if (tg3_flag(tp
, NVRAM
)) {
3001 if (tp
->nvram_lock_cnt
> 0)
3002 tp
->nvram_lock_cnt
--;
3003 if (tp
->nvram_lock_cnt
== 0)
3004 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
3008 /* tp->lock is held. */
3009 static void tg3_enable_nvram_access(struct tg3
*tp
)
3011 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
)) {
3012 u32 nvaccess
= tr32(NVRAM_ACCESS
);
3014 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
3018 /* tp->lock is held. */
3019 static void tg3_disable_nvram_access(struct tg3
*tp
)
3021 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
)) {
3022 u32 nvaccess
= tr32(NVRAM_ACCESS
);
3024 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
3028 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
3029 u32 offset
, u32
*val
)
3034 if (offset
> EEPROM_ADDR_ADDR_MASK
|| (offset
% 4) != 0)
3037 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
3038 EEPROM_ADDR_DEVID_MASK
|
3040 tw32(GRC_EEPROM_ADDR
,
3042 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3043 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
3044 EEPROM_ADDR_ADDR_MASK
) |
3045 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
3047 for (i
= 0; i
< 1000; i
++) {
3048 tmp
= tr32(GRC_EEPROM_ADDR
);
3050 if (tmp
& EEPROM_ADDR_COMPLETE
)
3054 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
3057 tmp
= tr32(GRC_EEPROM_DATA
);
3060 * The data will always be opposite the native endian
3061 * format. Perform a blind byteswap to compensate.
3068 #define NVRAM_CMD_TIMEOUT 10000
3070 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
3074 tw32(NVRAM_CMD
, nvram_cmd
);
3075 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
3077 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
3083 if (i
== NVRAM_CMD_TIMEOUT
)
3089 static u32
tg3_nvram_phys_addr(struct tg3
*tp
, u32 addr
)
3091 if (tg3_flag(tp
, NVRAM
) &&
3092 tg3_flag(tp
, NVRAM_BUFFERED
) &&
3093 tg3_flag(tp
, FLASH
) &&
3094 !tg3_flag(tp
, NO_NVRAM_ADDR_TRANS
) &&
3095 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
3097 addr
= ((addr
/ tp
->nvram_pagesize
) <<
3098 ATMEL_AT45DB0X1B_PAGE_POS
) +
3099 (addr
% tp
->nvram_pagesize
);
3104 static u32
tg3_nvram_logical_addr(struct tg3
*tp
, u32 addr
)
3106 if (tg3_flag(tp
, NVRAM
) &&
3107 tg3_flag(tp
, NVRAM_BUFFERED
) &&
3108 tg3_flag(tp
, FLASH
) &&
3109 !tg3_flag(tp
, NO_NVRAM_ADDR_TRANS
) &&
3110 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
3112 addr
= ((addr
>> ATMEL_AT45DB0X1B_PAGE_POS
) *
3113 tp
->nvram_pagesize
) +
3114 (addr
& ((1 << ATMEL_AT45DB0X1B_PAGE_POS
) - 1));
3119 /* NOTE: Data read in from NVRAM is byteswapped according to
3120 * the byteswapping settings for all other register accesses.
3121 * tg3 devices are BE devices, so on a BE machine, the data
3122 * returned will be exactly as it is seen in NVRAM. On a LE
3123 * machine, the 32-bit value will be byteswapped.
3125 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
3129 if (!tg3_flag(tp
, NVRAM
))
3130 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
3132 offset
= tg3_nvram_phys_addr(tp
, offset
);
3134 if (offset
> NVRAM_ADDR_MSK
)
3137 ret
= tg3_nvram_lock(tp
);
3141 tg3_enable_nvram_access(tp
);
3143 tw32(NVRAM_ADDR
, offset
);
3144 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
3145 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
3148 *val
= tr32(NVRAM_RDDATA
);
3150 tg3_disable_nvram_access(tp
);
3152 tg3_nvram_unlock(tp
);
3157 /* Ensures NVRAM data is in bytestream format. */
3158 static int tg3_nvram_read_be32(struct tg3
*tp
, u32 offset
, __be32
*val
)
3161 int res
= tg3_nvram_read(tp
, offset
, &v
);
3163 *val
= cpu_to_be32(v
);
3167 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
3168 u32 offset
, u32 len
, u8
*buf
)
3173 for (i
= 0; i
< len
; i
+= 4) {
3179 memcpy(&data
, buf
+ i
, 4);
3182 * The SEEPROM interface expects the data to always be opposite
3183 * the native endian format. We accomplish this by reversing
3184 * all the operations that would have been performed on the
3185 * data from a call to tg3_nvram_read_be32().
3187 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
3189 val
= tr32(GRC_EEPROM_ADDR
);
3190 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
3192 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
3194 tw32(GRC_EEPROM_ADDR
, val
|
3195 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3196 (addr
& EEPROM_ADDR_ADDR_MASK
) |
3200 for (j
= 0; j
< 1000; j
++) {
3201 val
= tr32(GRC_EEPROM_ADDR
);
3203 if (val
& EEPROM_ADDR_COMPLETE
)
3207 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
3216 /* offset and length are dword aligned */
3217 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
3221 u32 pagesize
= tp
->nvram_pagesize
;
3222 u32 pagemask
= pagesize
- 1;
3226 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
3232 u32 phy_addr
, page_off
, size
;
3234 phy_addr
= offset
& ~pagemask
;
3236 for (j
= 0; j
< pagesize
; j
+= 4) {
3237 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
3238 (__be32
*) (tmp
+ j
));
3245 page_off
= offset
& pagemask
;
3252 memcpy(tmp
+ page_off
, buf
, size
);
3254 offset
= offset
+ (pagesize
- page_off
);
3256 tg3_enable_nvram_access(tp
);
3259 * Before we can erase the flash page, we need
3260 * to issue a special "write enable" command.
3262 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3264 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3267 /* Erase the target page */
3268 tw32(NVRAM_ADDR
, phy_addr
);
3270 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
3271 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
3273 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3276 /* Issue another write enable to start the write. */
3277 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3279 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3282 for (j
= 0; j
< pagesize
; j
+= 4) {
3285 data
= *((__be32
*) (tmp
+ j
));
3287 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3289 tw32(NVRAM_ADDR
, phy_addr
+ j
);
3291 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
3295 nvram_cmd
|= NVRAM_CMD_FIRST
;
3296 else if (j
== (pagesize
- 4))
3297 nvram_cmd
|= NVRAM_CMD_LAST
;
3299 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3307 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3308 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3315 /* offset and length are dword aligned */
3316 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
3321 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
3322 u32 page_off
, phy_addr
, nvram_cmd
;
3325 memcpy(&data
, buf
+ i
, 4);
3326 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3328 page_off
= offset
% tp
->nvram_pagesize
;
3330 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
3332 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
3334 if (page_off
== 0 || i
== 0)
3335 nvram_cmd
|= NVRAM_CMD_FIRST
;
3336 if (page_off
== (tp
->nvram_pagesize
- 4))
3337 nvram_cmd
|= NVRAM_CMD_LAST
;
3340 nvram_cmd
|= NVRAM_CMD_LAST
;
3342 if ((nvram_cmd
& NVRAM_CMD_FIRST
) ||
3343 !tg3_flag(tp
, FLASH
) ||
3344 !tg3_flag(tp
, 57765_PLUS
))
3345 tw32(NVRAM_ADDR
, phy_addr
);
3347 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
&&
3348 !tg3_flag(tp
, 5755_PLUS
) &&
3349 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
3350 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
3353 cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3354 ret
= tg3_nvram_exec_cmd(tp
, cmd
);
3358 if (!tg3_flag(tp
, FLASH
)) {
3359 /* We always do complete word writes to eeprom. */
3360 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
3363 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
3375 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3376 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
3377 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
3381 if (!tg3_flag(tp
, NVRAM
)) {
3382 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
3386 ret
= tg3_nvram_lock(tp
);
3390 tg3_enable_nvram_access(tp
);
3391 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
))
3392 tw32(NVRAM_WRITE1
, 0x406);
3394 grc_mode
= tr32(GRC_MODE
);
3395 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
3397 if (tg3_flag(tp
, NVRAM_BUFFERED
) || !tg3_flag(tp
, FLASH
)) {
3398 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
3401 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
3405 grc_mode
= tr32(GRC_MODE
);
3406 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
3408 tg3_disable_nvram_access(tp
);
3409 tg3_nvram_unlock(tp
);
3412 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3413 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
3420 #define RX_CPU_SCRATCH_BASE 0x30000
3421 #define RX_CPU_SCRATCH_SIZE 0x04000
3422 #define TX_CPU_SCRATCH_BASE 0x34000
3423 #define TX_CPU_SCRATCH_SIZE 0x04000
3425 /* tp->lock is held. */
3426 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
3430 BUG_ON(offset
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
));
3432 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
3433 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
3435 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
3438 if (offset
== RX_CPU_BASE
) {
3439 for (i
= 0; i
< 10000; i
++) {
3440 tw32(offset
+ CPU_STATE
, 0xffffffff);
3441 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
3442 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
3446 tw32(offset
+ CPU_STATE
, 0xffffffff);
3447 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
3450 for (i
= 0; i
< 10000; i
++) {
3451 tw32(offset
+ CPU_STATE
, 0xffffffff);
3452 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
3453 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
3459 netdev_err(tp
->dev
, "%s timed out, %s CPU\n",
3460 __func__
, offset
== RX_CPU_BASE
? "RX" : "TX");
3464 /* Clear firmware's nvram arbitration. */
3465 if (tg3_flag(tp
, NVRAM
))
3466 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
3471 unsigned int fw_base
;
3472 unsigned int fw_len
;
3473 const __be32
*fw_data
;
3476 /* tp->lock is held. */
3477 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
,
3478 u32 cpu_scratch_base
, int cpu_scratch_size
,
3479 struct fw_info
*info
)
3481 int err
, lock_err
, i
;
3482 void (*write_op
)(struct tg3
*, u32
, u32
);
3484 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
3486 "%s: Trying to load TX cpu firmware which is 5705\n",
3491 if (tg3_flag(tp
, 5705_PLUS
))
3492 write_op
= tg3_write_mem
;
3494 write_op
= tg3_write_indirect_reg32
;
3496 /* It is possible that bootcode is still loading at this point.
3497 * Get the nvram lock first before halting the cpu.
3499 lock_err
= tg3_nvram_lock(tp
);
3500 err
= tg3_halt_cpu(tp
, cpu_base
);
3502 tg3_nvram_unlock(tp
);
3506 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
3507 write_op(tp
, cpu_scratch_base
+ i
, 0);
3508 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3509 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
3510 for (i
= 0; i
< (info
->fw_len
/ sizeof(u32
)); i
++)
3511 write_op(tp
, (cpu_scratch_base
+
3512 (info
->fw_base
& 0xffff) +
3514 be32_to_cpu(info
->fw_data
[i
]));
3522 /* tp->lock is held. */
3523 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
3525 struct fw_info info
;
3526 const __be32
*fw_data
;
3529 fw_data
= (void *)tp
->fw
->data
;
3531 /* Firmware blob starts with version numbers, followed by
3532 start address and length. We are setting complete length.
3533 length = end_address_of_bss - start_address_of_text.
3534 Remainder is the blob to be loaded contiguously
3535 from start address. */
3537 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3538 info
.fw_len
= tp
->fw
->size
- 12;
3539 info
.fw_data
= &fw_data
[3];
3541 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
3542 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
3547 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
3548 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
3553 /* Now startup only the RX cpu. */
3554 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3555 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
3557 for (i
= 0; i
< 5; i
++) {
3558 if (tr32(RX_CPU_BASE
+ CPU_PC
) == info
.fw_base
)
3560 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3561 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
3562 tw32_f(RX_CPU_BASE
+ CPU_PC
, info
.fw_base
);
3566 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
3567 "should be %08x\n", __func__
,
3568 tr32(RX_CPU_BASE
+ CPU_PC
), info
.fw_base
);
3571 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3572 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
3577 /* tp->lock is held. */
3578 static int tg3_load_tso_firmware(struct tg3
*tp
)
3580 struct fw_info info
;
3581 const __be32
*fw_data
;
3582 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
3585 if (tg3_flag(tp
, HW_TSO_1
) ||
3586 tg3_flag(tp
, HW_TSO_2
) ||
3587 tg3_flag(tp
, HW_TSO_3
))
3590 fw_data
= (void *)tp
->fw
->data
;
3592 /* Firmware blob starts with version numbers, followed by
3593 start address and length. We are setting complete length.
3594 length = end_address_of_bss - start_address_of_text.
3595 Remainder is the blob to be loaded contiguously
3596 from start address. */
3598 info
.fw_base
= be32_to_cpu(fw_data
[1]);
3599 cpu_scratch_size
= tp
->fw_len
;
3600 info
.fw_len
= tp
->fw
->size
- 12;
3601 info
.fw_data
= &fw_data
[3];
3603 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
3604 cpu_base
= RX_CPU_BASE
;
3605 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
3607 cpu_base
= TX_CPU_BASE
;
3608 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
3609 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
3612 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
3613 cpu_scratch_base
, cpu_scratch_size
,
3618 /* Now startup the cpu. */
3619 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3620 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
3622 for (i
= 0; i
< 5; i
++) {
3623 if (tr32(cpu_base
+ CPU_PC
) == info
.fw_base
)
3625 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3626 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
3627 tw32_f(cpu_base
+ CPU_PC
, info
.fw_base
);
3632 "%s fails to set CPU PC, is %08x should be %08x\n",
3633 __func__
, tr32(cpu_base
+ CPU_PC
), info
.fw_base
);
3636 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3637 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
3642 /* tp->lock is held. */
3643 static void __tg3_set_mac_addr(struct tg3
*tp
, int skip_mac_1
)
3645 u32 addr_high
, addr_low
;
3648 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
3649 tp
->dev
->dev_addr
[1]);
3650 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
3651 (tp
->dev
->dev_addr
[3] << 16) |
3652 (tp
->dev
->dev_addr
[4] << 8) |
3653 (tp
->dev
->dev_addr
[5] << 0));
3654 for (i
= 0; i
< 4; i
++) {
3655 if (i
== 1 && skip_mac_1
)
3657 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
3658 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
3661 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
3662 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
3663 for (i
= 0; i
< 12; i
++) {
3664 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
3665 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
3669 addr_high
= (tp
->dev
->dev_addr
[0] +
3670 tp
->dev
->dev_addr
[1] +
3671 tp
->dev
->dev_addr
[2] +
3672 tp
->dev
->dev_addr
[3] +
3673 tp
->dev
->dev_addr
[4] +
3674 tp
->dev
->dev_addr
[5]) &
3675 TX_BACKOFF_SEED_MASK
;
3676 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
3679 static void tg3_enable_register_access(struct tg3
*tp
)
3682 * Make sure register accesses (indirect or otherwise) will function
3685 pci_write_config_dword(tp
->pdev
,
3686 TG3PCI_MISC_HOST_CTRL
, tp
->misc_host_ctrl
);
3689 static int tg3_power_up(struct tg3
*tp
)
3693 tg3_enable_register_access(tp
);
3695 err
= pci_set_power_state(tp
->pdev
, PCI_D0
);
3697 /* Switch out of Vaux if it is a NIC */
3698 tg3_pwrsrc_switch_to_vmain(tp
);
3700 netdev_err(tp
->dev
, "Transition to D0 failed\n");
3706 static int tg3_setup_phy(struct tg3
*, int);
3708 static int tg3_power_down_prepare(struct tg3
*tp
)
3711 bool device_should_wake
, do_low_power
;
3713 tg3_enable_register_access(tp
);
3715 /* Restore the CLKREQ setting. */
3716 if (tg3_flag(tp
, CLKREQ_BUG
))
3717 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
3718 PCI_EXP_LNKCTL_CLKREQ_EN
);
3720 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
3721 tw32(TG3PCI_MISC_HOST_CTRL
,
3722 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
3724 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
3725 tg3_flag(tp
, WOL_ENABLE
);
3727 if (tg3_flag(tp
, USE_PHYLIB
)) {
3728 do_low_power
= false;
3729 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
3730 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3731 struct phy_device
*phydev
;
3732 u32 phyid
, advertising
;
3734 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
3736 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3738 tp
->link_config
.speed
= phydev
->speed
;
3739 tp
->link_config
.duplex
= phydev
->duplex
;
3740 tp
->link_config
.autoneg
= phydev
->autoneg
;
3741 tp
->link_config
.advertising
= phydev
->advertising
;
3743 advertising
= ADVERTISED_TP
|
3745 ADVERTISED_Autoneg
|
3746 ADVERTISED_10baseT_Half
;
3748 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
3749 if (tg3_flag(tp
, WOL_SPEED_100MB
))
3751 ADVERTISED_100baseT_Half
|
3752 ADVERTISED_100baseT_Full
|
3753 ADVERTISED_10baseT_Full
;
3755 advertising
|= ADVERTISED_10baseT_Full
;
3758 phydev
->advertising
= advertising
;
3760 phy_start_aneg(phydev
);
3762 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
3763 if (phyid
!= PHY_ID_BCMAC131
) {
3764 phyid
&= PHY_BCM_OUI_MASK
;
3765 if (phyid
== PHY_BCM_OUI_1
||
3766 phyid
== PHY_BCM_OUI_2
||
3767 phyid
== PHY_BCM_OUI_3
)
3768 do_low_power
= true;
3772 do_low_power
= true;
3774 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
))
3775 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3777 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
3778 tg3_setup_phy(tp
, 0);
3781 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
3784 val
= tr32(GRC_VCPU_EXT_CTRL
);
3785 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
3786 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
3790 for (i
= 0; i
< 200; i
++) {
3791 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
3792 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
3797 if (tg3_flag(tp
, WOL_CAP
))
3798 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
3799 WOL_DRV_STATE_SHUTDOWN
|
3803 if (device_should_wake
) {
3806 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
3808 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
3809 tg3_phy_auxctl_write(tp
,
3810 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
3811 MII_TG3_AUXCTL_PCTL_WOL_EN
|
3812 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
3813 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
3817 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
3818 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
3820 mac_mode
= MAC_MODE_PORT_MODE_MII
;
3822 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
3823 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
3825 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
3826 SPEED_100
: SPEED_10
;
3827 if (tg3_5700_link_polarity(tp
, speed
))
3828 mac_mode
|= MAC_MODE_LINK_POLARITY
;
3830 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
3833 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
3836 if (!tg3_flag(tp
, 5750_PLUS
))
3837 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
3839 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
3840 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
3841 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
3842 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
3844 if (tg3_flag(tp
, ENABLE_APE
))
3845 mac_mode
|= MAC_MODE_APE_TX_EN
|
3846 MAC_MODE_APE_RX_EN
|
3847 MAC_MODE_TDE_ENABLE
;
3849 tw32_f(MAC_MODE
, mac_mode
);
3852 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
3856 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
3857 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
3858 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
3861 base_val
= tp
->pci_clock_ctrl
;
3862 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
3863 CLOCK_CTRL_TXCLK_DISABLE
);
3865 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
3866 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
3867 } else if (tg3_flag(tp
, 5780_CLASS
) ||
3868 tg3_flag(tp
, CPMU_PRESENT
) ||
3869 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
3871 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
3872 u32 newbits1
, newbits2
;
3874 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
3875 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
3876 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
3877 CLOCK_CTRL_TXCLK_DISABLE
|
3879 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
3880 } else if (tg3_flag(tp
, 5705_PLUS
)) {
3881 newbits1
= CLOCK_CTRL_625_CORE
;
3882 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
3884 newbits1
= CLOCK_CTRL_ALTCLK
;
3885 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
3888 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
3891 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
3894 if (!tg3_flag(tp
, 5705_PLUS
)) {
3897 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
3898 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
3899 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
3900 CLOCK_CTRL_TXCLK_DISABLE
|
3901 CLOCK_CTRL_44MHZ_CORE
);
3903 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
3906 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
3907 tp
->pci_clock_ctrl
| newbits3
, 40);
3911 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
3912 tg3_power_down_phy(tp
, do_low_power
);
3914 tg3_frob_aux_power(tp
, true);
3916 /* Workaround for unstable PLL clock */
3917 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
3918 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
3919 u32 val
= tr32(0x7d00);
3921 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3923 if (!tg3_flag(tp
, ENABLE_ASF
)) {
3926 err
= tg3_nvram_lock(tp
);
3927 tg3_halt_cpu(tp
, RX_CPU_BASE
);
3929 tg3_nvram_unlock(tp
);
3933 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
3938 static void tg3_power_down(struct tg3
*tp
)
3940 tg3_power_down_prepare(tp
);
3942 pci_wake_from_d3(tp
->pdev
, tg3_flag(tp
, WOL_ENABLE
));
3943 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
3946 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
3948 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
3949 case MII_TG3_AUX_STAT_10HALF
:
3951 *duplex
= DUPLEX_HALF
;
3954 case MII_TG3_AUX_STAT_10FULL
:
3956 *duplex
= DUPLEX_FULL
;
3959 case MII_TG3_AUX_STAT_100HALF
:
3961 *duplex
= DUPLEX_HALF
;
3964 case MII_TG3_AUX_STAT_100FULL
:
3966 *duplex
= DUPLEX_FULL
;
3969 case MII_TG3_AUX_STAT_1000HALF
:
3970 *speed
= SPEED_1000
;
3971 *duplex
= DUPLEX_HALF
;
3974 case MII_TG3_AUX_STAT_1000FULL
:
3975 *speed
= SPEED_1000
;
3976 *duplex
= DUPLEX_FULL
;
3980 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
3981 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
3983 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
3987 *speed
= SPEED_UNKNOWN
;
3988 *duplex
= DUPLEX_UNKNOWN
;
3993 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
3998 new_adv
= ADVERTISE_CSMA
;
3999 new_adv
|= ethtool_adv_to_mii_adv_t(advertise
) & ADVERTISE_ALL
;
4000 new_adv
|= mii_advertise_flowctrl(flowctrl
);
4002 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4006 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4007 new_adv
= ethtool_adv_to_mii_ctrl1000_t(advertise
);
4009 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
4010 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
4011 new_adv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4013 err
= tg3_writephy(tp
, MII_CTRL1000
, new_adv
);
4018 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4021 tw32(TG3_CPMU_EEE_MODE
,
4022 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
4024 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
4029 /* Advertise 100-BaseTX EEE ability */
4030 if (advertise
& ADVERTISED_100baseT_Full
)
4031 val
|= MDIO_AN_EEE_ADV_100TX
;
4032 /* Advertise 1000-BaseT EEE ability */
4033 if (advertise
& ADVERTISED_1000baseT_Full
)
4034 val
|= MDIO_AN_EEE_ADV_1000T
;
4035 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
4039 switch (GET_ASIC_REV(tp
->pci_chip_rev_id
)) {
4041 case ASIC_REV_57765
:
4042 case ASIC_REV_57766
:
4044 /* If we advertised any eee advertisements above... */
4046 val
= MII_TG3_DSP_TAP26_ALNOKO
|
4047 MII_TG3_DSP_TAP26_RMRXSTO
|
4048 MII_TG3_DSP_TAP26_OPCSINPT
;
4049 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
4052 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
4053 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
4054 MII_TG3_DSP_CH34TP2_HIBW01
);
4057 err2
= tg3_phy_toggle_auxctl_smdsp(tp
, false);
4066 static void tg3_phy_copper_begin(struct tg3
*tp
)
4068 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
||
4069 (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4072 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
4073 adv
= ADVERTISED_10baseT_Half
|
4074 ADVERTISED_10baseT_Full
;
4075 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4076 adv
|= ADVERTISED_100baseT_Half
|
4077 ADVERTISED_100baseT_Full
;
4079 fc
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
4081 adv
= tp
->link_config
.advertising
;
4082 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
4083 adv
&= ~(ADVERTISED_1000baseT_Half
|
4084 ADVERTISED_1000baseT_Full
);
4086 fc
= tp
->link_config
.flowctrl
;
4089 tg3_phy_autoneg_cfg(tp
, adv
, fc
);
4091 tg3_writephy(tp
, MII_BMCR
,
4092 BMCR_ANENABLE
| BMCR_ANRESTART
);
4095 u32 bmcr
, orig_bmcr
;
4097 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
4098 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
4101 switch (tp
->link_config
.speed
) {
4107 bmcr
|= BMCR_SPEED100
;
4111 bmcr
|= BMCR_SPEED1000
;
4115 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4116 bmcr
|= BMCR_FULLDPLX
;
4118 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
4119 (bmcr
!= orig_bmcr
)) {
4120 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
4121 for (i
= 0; i
< 1500; i
++) {
4125 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
4126 tg3_readphy(tp
, MII_BMSR
, &tmp
))
4128 if (!(tmp
& BMSR_LSTATUS
)) {
4133 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4139 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
4143 /* Turn off tap power management. */
4144 /* Set Extended packet length bit */
4145 err
= tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
4147 err
|= tg3_phydsp_write(tp
, 0x0012, 0x1804);
4148 err
|= tg3_phydsp_write(tp
, 0x0013, 0x1204);
4149 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0132);
4150 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0232);
4151 err
|= tg3_phydsp_write(tp
, 0x201f, 0x0a20);
4158 static bool tg3_phy_copper_an_config_ok(struct tg3
*tp
, u32
*lcladv
)
4160 u32 advmsk
, tgtadv
, advertising
;
4162 advertising
= tp
->link_config
.advertising
;
4163 tgtadv
= ethtool_adv_to_mii_adv_t(advertising
) & ADVERTISE_ALL
;
4165 advmsk
= ADVERTISE_ALL
;
4166 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4167 tgtadv
|= mii_advertise_flowctrl(tp
->link_config
.flowctrl
);
4168 advmsk
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
4171 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
4174 if ((*lcladv
& advmsk
) != tgtadv
)
4177 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4180 tgtadv
= ethtool_adv_to_mii_ctrl1000_t(advertising
);
4182 if (tg3_readphy(tp
, MII_CTRL1000
, &tg3_ctrl
))
4186 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
4187 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)) {
4188 tgtadv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4189 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
|
4190 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
4192 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
);
4195 if (tg3_ctrl
!= tgtadv
)
4202 static bool tg3_phy_copper_fetch_rmtadv(struct tg3
*tp
, u32
*rmtadv
)
4206 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4209 if (tg3_readphy(tp
, MII_STAT1000
, &val
))
4212 lpeth
= mii_stat1000_to_ethtool_lpa_t(val
);
4215 if (tg3_readphy(tp
, MII_LPA
, rmtadv
))
4218 lpeth
|= mii_lpa_to_ethtool_lpa_t(*rmtadv
);
4219 tp
->link_config
.rmt_adv
= lpeth
;
4224 static bool tg3_test_and_report_link_chg(struct tg3
*tp
, int curr_link_up
)
4226 if (curr_link_up
!= tp
->link_up
) {
4230 tg3_carrier_off(tp
);
4231 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4232 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4235 tg3_link_report(tp
);
4242 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
4244 int current_link_up
;
4246 u32 lcl_adv
, rmt_adv
;
4254 (MAC_STATUS_SYNC_CHANGED
|
4255 MAC_STATUS_CFG_CHANGED
|
4256 MAC_STATUS_MI_COMPLETION
|
4257 MAC_STATUS_LNKSTATE_CHANGED
));
4260 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4262 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4266 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4268 /* Some third-party PHYs need to be reset on link going
4271 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
4272 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
4273 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
4275 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4276 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4277 !(bmsr
& BMSR_LSTATUS
))
4283 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4284 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4285 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4286 !tg3_flag(tp
, INIT_COMPLETE
))
4289 if (!(bmsr
& BMSR_LSTATUS
)) {
4290 err
= tg3_init_5401phy_dsp(tp
);
4294 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4295 for (i
= 0; i
< 1000; i
++) {
4297 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4298 (bmsr
& BMSR_LSTATUS
)) {
4304 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4305 TG3_PHY_REV_BCM5401_B0
&&
4306 !(bmsr
& BMSR_LSTATUS
) &&
4307 tp
->link_config
.active_speed
== SPEED_1000
) {
4308 err
= tg3_phy_reset(tp
);
4310 err
= tg3_init_5401phy_dsp(tp
);
4315 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
4316 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
4317 /* 5701 {A0,B0} CRC bug workaround */
4318 tg3_writephy(tp
, 0x15, 0x0a75);
4319 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4320 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4321 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4324 /* Clear pending interrupts... */
4325 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4326 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4328 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4329 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4330 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4331 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4333 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
4334 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
4335 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4336 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4337 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4339 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4342 current_link_up
= 0;
4343 current_speed
= SPEED_UNKNOWN
;
4344 current_duplex
= DUPLEX_UNKNOWN
;
4345 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4346 tp
->link_config
.rmt_adv
= 0;
4348 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4349 err
= tg3_phy_auxctl_read(tp
,
4350 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4352 if (!err
&& !(val
& (1 << 10))) {
4353 tg3_phy_auxctl_write(tp
,
4354 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4361 for (i
= 0; i
< 100; i
++) {
4362 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4363 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4364 (bmsr
& BMSR_LSTATUS
))
4369 if (bmsr
& BMSR_LSTATUS
) {
4372 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4373 for (i
= 0; i
< 2000; i
++) {
4375 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4380 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4385 for (i
= 0; i
< 200; i
++) {
4386 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4387 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4389 if (bmcr
&& bmcr
!= 0x7fff)
4397 tp
->link_config
.active_speed
= current_speed
;
4398 tp
->link_config
.active_duplex
= current_duplex
;
4400 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4401 if ((bmcr
& BMCR_ANENABLE
) &&
4402 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4403 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4404 current_link_up
= 1;
4406 if (!(bmcr
& BMCR_ANENABLE
) &&
4407 tp
->link_config
.speed
== current_speed
&&
4408 tp
->link_config
.duplex
== current_duplex
&&
4409 tp
->link_config
.flowctrl
==
4410 tp
->link_config
.active_flowctrl
) {
4411 current_link_up
= 1;
4415 if (current_link_up
== 1 &&
4416 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4419 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4420 reg
= MII_TG3_FET_GEN_STAT
;
4421 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4423 reg
= MII_TG3_EXT_STAT
;
4424 bit
= MII_TG3_EXT_STAT_MDIX
;
4427 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4428 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4430 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4435 if (current_link_up
== 0 || (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4436 tg3_phy_copper_begin(tp
);
4438 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4439 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4440 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4441 current_link_up
= 1;
4444 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4445 if (current_link_up
== 1) {
4446 if (tp
->link_config
.active_speed
== SPEED_100
||
4447 tp
->link_config
.active_speed
== SPEED_10
)
4448 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4450 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4451 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4452 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4454 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4456 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4457 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4458 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4460 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
4461 if (current_link_up
== 1 &&
4462 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4463 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4465 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4468 /* ??? Without this setting Netgear GA302T PHY does not
4469 * ??? send/receive packets...
4471 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4472 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
4473 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4474 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
4478 tw32_f(MAC_MODE
, tp
->mac_mode
);
4481 tg3_phy_eee_adjust(tp
, current_link_up
);
4483 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
4484 /* Polled via timer. */
4485 tw32_f(MAC_EVENT
, 0);
4487 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4491 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
4492 current_link_up
== 1 &&
4493 tp
->link_config
.active_speed
== SPEED_1000
&&
4494 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
4497 (MAC_STATUS_SYNC_CHANGED
|
4498 MAC_STATUS_CFG_CHANGED
));
4501 NIC_SRAM_FIRMWARE_MBOX
,
4502 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
4505 /* Prevent send BD corruption. */
4506 if (tg3_flag(tp
, CLKREQ_BUG
)) {
4507 if (tp
->link_config
.active_speed
== SPEED_100
||
4508 tp
->link_config
.active_speed
== SPEED_10
)
4509 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4510 PCI_EXP_LNKCTL_CLKREQ_EN
);
4512 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4513 PCI_EXP_LNKCTL_CLKREQ_EN
);
4516 tg3_test_and_report_link_chg(tp
, current_link_up
);
4521 struct tg3_fiber_aneginfo
{
4523 #define ANEG_STATE_UNKNOWN 0
4524 #define ANEG_STATE_AN_ENABLE 1
4525 #define ANEG_STATE_RESTART_INIT 2
4526 #define ANEG_STATE_RESTART 3
4527 #define ANEG_STATE_DISABLE_LINK_OK 4
4528 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4529 #define ANEG_STATE_ABILITY_DETECT 6
4530 #define ANEG_STATE_ACK_DETECT_INIT 7
4531 #define ANEG_STATE_ACK_DETECT 8
4532 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4533 #define ANEG_STATE_COMPLETE_ACK 10
4534 #define ANEG_STATE_IDLE_DETECT_INIT 11
4535 #define ANEG_STATE_IDLE_DETECT 12
4536 #define ANEG_STATE_LINK_OK 13
4537 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4538 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4541 #define MR_AN_ENABLE 0x00000001
4542 #define MR_RESTART_AN 0x00000002
4543 #define MR_AN_COMPLETE 0x00000004
4544 #define MR_PAGE_RX 0x00000008
4545 #define MR_NP_LOADED 0x00000010
4546 #define MR_TOGGLE_TX 0x00000020
4547 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4548 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4549 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4550 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4551 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4552 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4553 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4554 #define MR_TOGGLE_RX 0x00002000
4555 #define MR_NP_RX 0x00004000
4557 #define MR_LINK_OK 0x80000000
4559 unsigned long link_time
, cur_time
;
4561 u32 ability_match_cfg
;
4562 int ability_match_count
;
4564 char ability_match
, idle_match
, ack_match
;
4566 u32 txconfig
, rxconfig
;
4567 #define ANEG_CFG_NP 0x00000080
4568 #define ANEG_CFG_ACK 0x00000040
4569 #define ANEG_CFG_RF2 0x00000020
4570 #define ANEG_CFG_RF1 0x00000010
4571 #define ANEG_CFG_PS2 0x00000001
4572 #define ANEG_CFG_PS1 0x00008000
4573 #define ANEG_CFG_HD 0x00004000
4574 #define ANEG_CFG_FD 0x00002000
4575 #define ANEG_CFG_INVAL 0x00001f06
4580 #define ANEG_TIMER_ENAB 2
4581 #define ANEG_FAILED -1
4583 #define ANEG_STATE_SETTLE_TIME 10000
4585 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
4586 struct tg3_fiber_aneginfo
*ap
)
4589 unsigned long delta
;
4593 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
4597 ap
->ability_match_cfg
= 0;
4598 ap
->ability_match_count
= 0;
4599 ap
->ability_match
= 0;
4605 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
4606 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
4608 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
4609 ap
->ability_match_cfg
= rx_cfg_reg
;
4610 ap
->ability_match
= 0;
4611 ap
->ability_match_count
= 0;
4613 if (++ap
->ability_match_count
> 1) {
4614 ap
->ability_match
= 1;
4615 ap
->ability_match_cfg
= rx_cfg_reg
;
4618 if (rx_cfg_reg
& ANEG_CFG_ACK
)
4626 ap
->ability_match_cfg
= 0;
4627 ap
->ability_match_count
= 0;
4628 ap
->ability_match
= 0;
4634 ap
->rxconfig
= rx_cfg_reg
;
4637 switch (ap
->state
) {
4638 case ANEG_STATE_UNKNOWN
:
4639 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
4640 ap
->state
= ANEG_STATE_AN_ENABLE
;
4643 case ANEG_STATE_AN_ENABLE
:
4644 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
4645 if (ap
->flags
& MR_AN_ENABLE
) {
4648 ap
->ability_match_cfg
= 0;
4649 ap
->ability_match_count
= 0;
4650 ap
->ability_match
= 0;
4654 ap
->state
= ANEG_STATE_RESTART_INIT
;
4656 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
4660 case ANEG_STATE_RESTART_INIT
:
4661 ap
->link_time
= ap
->cur_time
;
4662 ap
->flags
&= ~(MR_NP_LOADED
);
4664 tw32(MAC_TX_AUTO_NEG
, 0);
4665 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4666 tw32_f(MAC_MODE
, tp
->mac_mode
);
4669 ret
= ANEG_TIMER_ENAB
;
4670 ap
->state
= ANEG_STATE_RESTART
;
4673 case ANEG_STATE_RESTART
:
4674 delta
= ap
->cur_time
- ap
->link_time
;
4675 if (delta
> ANEG_STATE_SETTLE_TIME
)
4676 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
4678 ret
= ANEG_TIMER_ENAB
;
4681 case ANEG_STATE_DISABLE_LINK_OK
:
4685 case ANEG_STATE_ABILITY_DETECT_INIT
:
4686 ap
->flags
&= ~(MR_TOGGLE_TX
);
4687 ap
->txconfig
= ANEG_CFG_FD
;
4688 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4689 if (flowctrl
& ADVERTISE_1000XPAUSE
)
4690 ap
->txconfig
|= ANEG_CFG_PS1
;
4691 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
4692 ap
->txconfig
|= ANEG_CFG_PS2
;
4693 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
4694 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4695 tw32_f(MAC_MODE
, tp
->mac_mode
);
4698 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
4701 case ANEG_STATE_ABILITY_DETECT
:
4702 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
4703 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
4706 case ANEG_STATE_ACK_DETECT_INIT
:
4707 ap
->txconfig
|= ANEG_CFG_ACK
;
4708 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
4709 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
4710 tw32_f(MAC_MODE
, tp
->mac_mode
);
4713 ap
->state
= ANEG_STATE_ACK_DETECT
;
4716 case ANEG_STATE_ACK_DETECT
:
4717 if (ap
->ack_match
!= 0) {
4718 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
4719 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
4720 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
4722 ap
->state
= ANEG_STATE_AN_ENABLE
;
4724 } else if (ap
->ability_match
!= 0 &&
4725 ap
->rxconfig
== 0) {
4726 ap
->state
= ANEG_STATE_AN_ENABLE
;
4730 case ANEG_STATE_COMPLETE_ACK_INIT
:
4731 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
4735 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
4736 MR_LP_ADV_HALF_DUPLEX
|
4737 MR_LP_ADV_SYM_PAUSE
|
4738 MR_LP_ADV_ASYM_PAUSE
|
4739 MR_LP_ADV_REMOTE_FAULT1
|
4740 MR_LP_ADV_REMOTE_FAULT2
|
4741 MR_LP_ADV_NEXT_PAGE
|
4744 if (ap
->rxconfig
& ANEG_CFG_FD
)
4745 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
4746 if (ap
->rxconfig
& ANEG_CFG_HD
)
4747 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
4748 if (ap
->rxconfig
& ANEG_CFG_PS1
)
4749 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
4750 if (ap
->rxconfig
& ANEG_CFG_PS2
)
4751 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
4752 if (ap
->rxconfig
& ANEG_CFG_RF1
)
4753 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
4754 if (ap
->rxconfig
& ANEG_CFG_RF2
)
4755 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
4756 if (ap
->rxconfig
& ANEG_CFG_NP
)
4757 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
4759 ap
->link_time
= ap
->cur_time
;
4761 ap
->flags
^= (MR_TOGGLE_TX
);
4762 if (ap
->rxconfig
& 0x0008)
4763 ap
->flags
|= MR_TOGGLE_RX
;
4764 if (ap
->rxconfig
& ANEG_CFG_NP
)
4765 ap
->flags
|= MR_NP_RX
;
4766 ap
->flags
|= MR_PAGE_RX
;
4768 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
4769 ret
= ANEG_TIMER_ENAB
;
4772 case ANEG_STATE_COMPLETE_ACK
:
4773 if (ap
->ability_match
!= 0 &&
4774 ap
->rxconfig
== 0) {
4775 ap
->state
= ANEG_STATE_AN_ENABLE
;
4778 delta
= ap
->cur_time
- ap
->link_time
;
4779 if (delta
> ANEG_STATE_SETTLE_TIME
) {
4780 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
4781 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
4783 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
4784 !(ap
->flags
& MR_NP_RX
)) {
4785 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
4793 case ANEG_STATE_IDLE_DETECT_INIT
:
4794 ap
->link_time
= ap
->cur_time
;
4795 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
4796 tw32_f(MAC_MODE
, tp
->mac_mode
);
4799 ap
->state
= ANEG_STATE_IDLE_DETECT
;
4800 ret
= ANEG_TIMER_ENAB
;
4803 case ANEG_STATE_IDLE_DETECT
:
4804 if (ap
->ability_match
!= 0 &&
4805 ap
->rxconfig
== 0) {
4806 ap
->state
= ANEG_STATE_AN_ENABLE
;
4809 delta
= ap
->cur_time
- ap
->link_time
;
4810 if (delta
> ANEG_STATE_SETTLE_TIME
) {
4811 /* XXX another gem from the Broadcom driver :( */
4812 ap
->state
= ANEG_STATE_LINK_OK
;
4816 case ANEG_STATE_LINK_OK
:
4817 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
4821 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
4822 /* ??? unimplemented */
4825 case ANEG_STATE_NEXT_PAGE_WAIT
:
4826 /* ??? unimplemented */
4837 static int fiber_autoneg(struct tg3
*tp
, u32
*txflags
, u32
*rxflags
)
4840 struct tg3_fiber_aneginfo aninfo
;
4841 int status
= ANEG_FAILED
;
4845 tw32_f(MAC_TX_AUTO_NEG
, 0);
4847 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
4848 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
4851 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
4854 memset(&aninfo
, 0, sizeof(aninfo
));
4855 aninfo
.flags
|= MR_AN_ENABLE
;
4856 aninfo
.state
= ANEG_STATE_UNKNOWN
;
4857 aninfo
.cur_time
= 0;
4859 while (++tick
< 195000) {
4860 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
4861 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
4867 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
4868 tw32_f(MAC_MODE
, tp
->mac_mode
);
4871 *txflags
= aninfo
.txconfig
;
4872 *rxflags
= aninfo
.flags
;
4874 if (status
== ANEG_DONE
&&
4875 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
4876 MR_LP_ADV_FULL_DUPLEX
)))
4882 static void tg3_init_bcm8002(struct tg3
*tp
)
4884 u32 mac_status
= tr32(MAC_STATUS
);
4887 /* Reset when initting first time or we have a link. */
4888 if (tg3_flag(tp
, INIT_COMPLETE
) &&
4889 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
4892 /* Set PLL lock range. */
4893 tg3_writephy(tp
, 0x16, 0x8007);
4896 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
4898 /* Wait for reset to complete. */
4899 /* XXX schedule_timeout() ... */
4900 for (i
= 0; i
< 500; i
++)
4903 /* Config mode; select PMA/Ch 1 regs. */
4904 tg3_writephy(tp
, 0x10, 0x8411);
4906 /* Enable auto-lock and comdet, select txclk for tx. */
4907 tg3_writephy(tp
, 0x11, 0x0a10);
4909 tg3_writephy(tp
, 0x18, 0x00a0);
4910 tg3_writephy(tp
, 0x16, 0x41ff);
4912 /* Assert and deassert POR. */
4913 tg3_writephy(tp
, 0x13, 0x0400);
4915 tg3_writephy(tp
, 0x13, 0x0000);
4917 tg3_writephy(tp
, 0x11, 0x0a50);
4919 tg3_writephy(tp
, 0x11, 0x0a10);
4921 /* Wait for signal to stabilize */
4922 /* XXX schedule_timeout() ... */
4923 for (i
= 0; i
< 15000; i
++)
4926 /* Deselect the channel register so we can read the PHYID
4929 tg3_writephy(tp
, 0x10, 0x8011);
4932 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
4935 u32 sg_dig_ctrl
, sg_dig_status
;
4936 u32 serdes_cfg
, expected_sg_dig_ctrl
;
4937 int workaround
, port_a
;
4938 int current_link_up
;
4941 expected_sg_dig_ctrl
= 0;
4944 current_link_up
= 0;
4946 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
4947 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
4949 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
4952 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4953 /* preserve bits 20-23 for voltage regulator */
4954 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
4957 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
4959 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
4960 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
4962 u32 val
= serdes_cfg
;
4968 tw32_f(MAC_SERDES_CFG
, val
);
4971 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
4973 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
4974 tg3_setup_flow_control(tp
, 0, 0);
4975 current_link_up
= 1;
4980 /* Want auto-negotiation. */
4981 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
4983 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
4984 if (flowctrl
& ADVERTISE_1000XPAUSE
)
4985 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
4986 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
4987 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
4989 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
4990 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
4991 tp
->serdes_counter
&&
4992 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
4993 MAC_STATUS_RCVD_CFG
)) ==
4994 MAC_STATUS_PCS_SYNCED
)) {
4995 tp
->serdes_counter
--;
4996 current_link_up
= 1;
5001 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
5002 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
5004 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
5006 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5007 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5008 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
5009 MAC_STATUS_SIGNAL_DET
)) {
5010 sg_dig_status
= tr32(SG_DIG_STATUS
);
5011 mac_status
= tr32(MAC_STATUS
);
5013 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
5014 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
5015 u32 local_adv
= 0, remote_adv
= 0;
5017 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
5018 local_adv
|= ADVERTISE_1000XPAUSE
;
5019 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
5020 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5022 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
5023 remote_adv
|= LPA_1000XPAUSE
;
5024 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
5025 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5027 tp
->link_config
.rmt_adv
=
5028 mii_adv_to_ethtool_adv_x(remote_adv
);
5030 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5031 current_link_up
= 1;
5032 tp
->serdes_counter
= 0;
5033 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5034 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
5035 if (tp
->serdes_counter
)
5036 tp
->serdes_counter
--;
5039 u32 val
= serdes_cfg
;
5046 tw32_f(MAC_SERDES_CFG
, val
);
5049 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5052 /* Link parallel detection - link is up */
5053 /* only if we have PCS_SYNC and not */
5054 /* receiving config code words */
5055 mac_status
= tr32(MAC_STATUS
);
5056 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5057 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
5058 tg3_setup_flow_control(tp
, 0, 0);
5059 current_link_up
= 1;
5061 TG3_PHYFLG_PARALLEL_DETECT
;
5062 tp
->serdes_counter
=
5063 SERDES_PARALLEL_DET_TIMEOUT
;
5065 goto restart_autoneg
;
5069 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5070 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5074 return current_link_up
;
5077 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
5079 int current_link_up
= 0;
5081 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
5084 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5085 u32 txflags
, rxflags
;
5088 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
5089 u32 local_adv
= 0, remote_adv
= 0;
5091 if (txflags
& ANEG_CFG_PS1
)
5092 local_adv
|= ADVERTISE_1000XPAUSE
;
5093 if (txflags
& ANEG_CFG_PS2
)
5094 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5096 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
5097 remote_adv
|= LPA_1000XPAUSE
;
5098 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
5099 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5101 tp
->link_config
.rmt_adv
=
5102 mii_adv_to_ethtool_adv_x(remote_adv
);
5104 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5106 current_link_up
= 1;
5108 for (i
= 0; i
< 30; i
++) {
5111 (MAC_STATUS_SYNC_CHANGED
|
5112 MAC_STATUS_CFG_CHANGED
));
5114 if ((tr32(MAC_STATUS
) &
5115 (MAC_STATUS_SYNC_CHANGED
|
5116 MAC_STATUS_CFG_CHANGED
)) == 0)
5120 mac_status
= tr32(MAC_STATUS
);
5121 if (current_link_up
== 0 &&
5122 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5123 !(mac_status
& MAC_STATUS_RCVD_CFG
))
5124 current_link_up
= 1;
5126 tg3_setup_flow_control(tp
, 0, 0);
5128 /* Forcing 1000FD link up. */
5129 current_link_up
= 1;
5131 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
5134 tw32_f(MAC_MODE
, tp
->mac_mode
);
5139 return current_link_up
;
5142 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
5145 u16 orig_active_speed
;
5146 u8 orig_active_duplex
;
5148 int current_link_up
;
5151 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5152 orig_active_speed
= tp
->link_config
.active_speed
;
5153 orig_active_duplex
= tp
->link_config
.active_duplex
;
5155 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5157 tg3_flag(tp
, INIT_COMPLETE
)) {
5158 mac_status
= tr32(MAC_STATUS
);
5159 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5160 MAC_STATUS_SIGNAL_DET
|
5161 MAC_STATUS_CFG_CHANGED
|
5162 MAC_STATUS_RCVD_CFG
);
5163 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5164 MAC_STATUS_SIGNAL_DET
)) {
5165 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5166 MAC_STATUS_CFG_CHANGED
));
5171 tw32_f(MAC_TX_AUTO_NEG
, 0);
5173 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5174 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5175 tw32_f(MAC_MODE
, tp
->mac_mode
);
5178 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5179 tg3_init_bcm8002(tp
);
5181 /* Enable link change event even when serdes polling. */
5182 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5185 current_link_up
= 0;
5186 tp
->link_config
.rmt_adv
= 0;
5187 mac_status
= tr32(MAC_STATUS
);
5189 if (tg3_flag(tp
, HW_AUTONEG
))
5190 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5192 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5194 tp
->napi
[0].hw_status
->status
=
5195 (SD_STATUS_UPDATED
|
5196 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5198 for (i
= 0; i
< 100; i
++) {
5199 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5200 MAC_STATUS_CFG_CHANGED
));
5202 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5203 MAC_STATUS_CFG_CHANGED
|
5204 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5208 mac_status
= tr32(MAC_STATUS
);
5209 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5210 current_link_up
= 0;
5211 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5212 tp
->serdes_counter
== 0) {
5213 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5214 MAC_MODE_SEND_CONFIGS
));
5216 tw32_f(MAC_MODE
, tp
->mac_mode
);
5220 if (current_link_up
== 1) {
5221 tp
->link_config
.active_speed
= SPEED_1000
;
5222 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5223 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5224 LED_CTRL_LNKLED_OVERRIDE
|
5225 LED_CTRL_1000MBPS_ON
));
5227 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5228 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5229 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5230 LED_CTRL_LNKLED_OVERRIDE
|
5231 LED_CTRL_TRAFFIC_OVERRIDE
));
5234 if (!tg3_test_and_report_link_chg(tp
, current_link_up
)) {
5235 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5236 if (orig_pause_cfg
!= now_pause_cfg
||
5237 orig_active_speed
!= tp
->link_config
.active_speed
||
5238 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5239 tg3_link_report(tp
);
5245 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
5247 int current_link_up
, err
= 0;
5251 u32 local_adv
, remote_adv
;
5253 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5254 tw32_f(MAC_MODE
, tp
->mac_mode
);
5260 (MAC_STATUS_SYNC_CHANGED
|
5261 MAC_STATUS_CFG_CHANGED
|
5262 MAC_STATUS_MI_COMPLETION
|
5263 MAC_STATUS_LNKSTATE_CHANGED
));
5269 current_link_up
= 0;
5270 current_speed
= SPEED_UNKNOWN
;
5271 current_duplex
= DUPLEX_UNKNOWN
;
5272 tp
->link_config
.rmt_adv
= 0;
5274 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5275 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5276 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
5277 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5278 bmsr
|= BMSR_LSTATUS
;
5280 bmsr
&= ~BMSR_LSTATUS
;
5283 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5285 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5286 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5287 /* do nothing, just check for link up at the end */
5288 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5291 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5292 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5293 ADVERTISE_1000XPAUSE
|
5294 ADVERTISE_1000XPSE_ASYM
|
5297 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5298 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5300 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5301 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5302 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5303 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5305 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5306 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5307 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5314 bmcr
&= ~BMCR_SPEED1000
;
5315 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5317 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5318 new_bmcr
|= BMCR_FULLDPLX
;
5320 if (new_bmcr
!= bmcr
) {
5321 /* BMCR_SPEED1000 is a reserved bit that needs
5322 * to be set on write.
5324 new_bmcr
|= BMCR_SPEED1000
;
5326 /* Force a linkdown */
5330 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5331 adv
&= ~(ADVERTISE_1000XFULL
|
5332 ADVERTISE_1000XHALF
|
5334 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5335 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5339 tg3_carrier_off(tp
);
5341 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5343 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5344 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5345 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
5347 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5348 bmsr
|= BMSR_LSTATUS
;
5350 bmsr
&= ~BMSR_LSTATUS
;
5352 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5356 if (bmsr
& BMSR_LSTATUS
) {
5357 current_speed
= SPEED_1000
;
5358 current_link_up
= 1;
5359 if (bmcr
& BMCR_FULLDPLX
)
5360 current_duplex
= DUPLEX_FULL
;
5362 current_duplex
= DUPLEX_HALF
;
5367 if (bmcr
& BMCR_ANENABLE
) {
5370 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5371 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5372 common
= local_adv
& remote_adv
;
5373 if (common
& (ADVERTISE_1000XHALF
|
5374 ADVERTISE_1000XFULL
)) {
5375 if (common
& ADVERTISE_1000XFULL
)
5376 current_duplex
= DUPLEX_FULL
;
5378 current_duplex
= DUPLEX_HALF
;
5380 tp
->link_config
.rmt_adv
=
5381 mii_adv_to_ethtool_adv_x(remote_adv
);
5382 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5383 /* Link is up via parallel detect */
5385 current_link_up
= 0;
5390 if (current_link_up
== 1 && current_duplex
== DUPLEX_FULL
)
5391 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5393 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5394 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5395 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5397 tw32_f(MAC_MODE
, tp
->mac_mode
);
5400 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5402 tp
->link_config
.active_speed
= current_speed
;
5403 tp
->link_config
.active_duplex
= current_duplex
;
5405 tg3_test_and_report_link_chg(tp
, current_link_up
);
5409 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
5411 if (tp
->serdes_counter
) {
5412 /* Give autoneg time to complete. */
5413 tp
->serdes_counter
--;
5418 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
5421 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5422 if (bmcr
& BMCR_ANENABLE
) {
5425 /* Select shadow register 0x1f */
5426 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x7c00);
5427 tg3_readphy(tp
, MII_TG3_MISC_SHDW
, &phy1
);
5429 /* Select expansion interrupt status register */
5430 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5431 MII_TG3_DSP_EXP1_INT_STAT
);
5432 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5433 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5435 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
5436 /* We have signal detect and not receiving
5437 * config code words, link is up by parallel
5441 bmcr
&= ~BMCR_ANENABLE
;
5442 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
5443 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5444 tp
->phy_flags
|= TG3_PHYFLG_PARALLEL_DETECT
;
5447 } else if (tp
->link_up
&&
5448 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
5449 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5452 /* Select expansion interrupt status register */
5453 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5454 MII_TG3_DSP_EXP1_INT_STAT
);
5455 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5459 /* Config code words received, turn on autoneg. */
5460 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5461 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
5463 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5469 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
5474 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
5475 err
= tg3_setup_fiber_phy(tp
, force_reset
);
5476 else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
5477 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
5479 err
= tg3_setup_copper_phy(tp
, force_reset
);
5481 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
5484 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
5485 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
5487 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
5492 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
5493 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
5494 tw32(GRC_MISC_CFG
, val
);
5497 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5498 (6 << TX_LENGTHS_IPG_SHIFT
);
5499 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
5500 val
|= tr32(MAC_TX_LENGTHS
) &
5501 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
5502 TX_LENGTHS_CNT_DWN_VAL_MSK
);
5504 if (tp
->link_config
.active_speed
== SPEED_1000
&&
5505 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5506 tw32(MAC_TX_LENGTHS
, val
|
5507 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
));
5509 tw32(MAC_TX_LENGTHS
, val
|
5510 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
5512 if (!tg3_flag(tp
, 5705_PLUS
)) {
5514 tw32(HOSTCC_STAT_COAL_TICKS
,
5515 tp
->coal
.stats_block_coalesce_usecs
);
5517 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
5521 if (tg3_flag(tp
, ASPM_WORKAROUND
)) {
5522 val
= tr32(PCIE_PWR_MGMT_THRESH
);
5524 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
5527 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
5528 tw32(PCIE_PWR_MGMT_THRESH
, val
);
5534 /* tp->lock must be held */
5535 static u64
tg3_refclk_read(struct tg3
*tp
)
5537 u64 stamp
= tr32(TG3_EAV_REF_CLCK_LSB
);
5538 return stamp
| (u64
)tr32(TG3_EAV_REF_CLCK_MSB
) << 32;
5541 /* tp->lock must be held */
5542 static void tg3_refclk_write(struct tg3
*tp
, u64 newval
)
5544 tw32(TG3_EAV_REF_CLCK_CTL
, TG3_EAV_REF_CLCK_CTL_STOP
);
5545 tw32(TG3_EAV_REF_CLCK_LSB
, newval
& 0xffffffff);
5546 tw32(TG3_EAV_REF_CLCK_MSB
, newval
>> 32);
5547 tw32_f(TG3_EAV_REF_CLCK_CTL
, TG3_EAV_REF_CLCK_CTL_RESUME
);
5550 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
);
5551 static inline void tg3_full_unlock(struct tg3
*tp
);
5552 static int tg3_get_ts_info(struct net_device
*dev
, struct ethtool_ts_info
*info
)
5554 struct tg3
*tp
= netdev_priv(dev
);
5556 info
->so_timestamping
= SOF_TIMESTAMPING_TX_SOFTWARE
|
5557 SOF_TIMESTAMPING_RX_SOFTWARE
|
5558 SOF_TIMESTAMPING_SOFTWARE
|
5559 SOF_TIMESTAMPING_TX_HARDWARE
|
5560 SOF_TIMESTAMPING_RX_HARDWARE
|
5561 SOF_TIMESTAMPING_RAW_HARDWARE
;
5564 info
->phc_index
= ptp_clock_index(tp
->ptp_clock
);
5566 info
->phc_index
= -1;
5568 info
->tx_types
= (1 << HWTSTAMP_TX_OFF
) | (1 << HWTSTAMP_TX_ON
);
5570 info
->rx_filters
= (1 << HWTSTAMP_FILTER_NONE
) |
5571 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT
) |
5572 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT
) |
5573 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT
);
5577 static int tg3_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
5579 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
5580 bool neg_adj
= false;
5588 /* Frequency adjustment is performed using hardware with a 24 bit
5589 * accumulator and a programmable correction value. On each clk, the
5590 * correction value gets added to the accumulator and when it
5591 * overflows, the time counter is incremented/decremented.
5593 * So conversion from ppb to correction value is
5594 * ppb * (1 << 24) / 1000000000
5596 correction
= div_u64((u64
)ppb
* (1 << 24), 1000000000ULL) &
5597 TG3_EAV_REF_CLK_CORRECT_MASK
;
5599 tg3_full_lock(tp
, 0);
5602 tw32(TG3_EAV_REF_CLK_CORRECT_CTL
,
5603 TG3_EAV_REF_CLK_CORRECT_EN
|
5604 (neg_adj
? TG3_EAV_REF_CLK_CORRECT_NEG
: 0) | correction
);
5606 tw32(TG3_EAV_REF_CLK_CORRECT_CTL
, 0);
5608 tg3_full_unlock(tp
);
5613 static int tg3_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
5615 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
5617 tg3_full_lock(tp
, 0);
5618 tp
->ptp_adjust
+= delta
;
5619 tg3_full_unlock(tp
);
5624 static int tg3_ptp_gettime(struct ptp_clock_info
*ptp
, struct timespec
*ts
)
5628 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
5630 tg3_full_lock(tp
, 0);
5631 ns
= tg3_refclk_read(tp
);
5632 ns
+= tp
->ptp_adjust
;
5633 tg3_full_unlock(tp
);
5635 ts
->tv_sec
= div_u64_rem(ns
, 1000000000, &remainder
);
5636 ts
->tv_nsec
= remainder
;
5641 static int tg3_ptp_settime(struct ptp_clock_info
*ptp
,
5642 const struct timespec
*ts
)
5645 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
5647 ns
= timespec_to_ns(ts
);
5649 tg3_full_lock(tp
, 0);
5650 tg3_refclk_write(tp
, ns
);
5652 tg3_full_unlock(tp
);
5657 static int tg3_ptp_enable(struct ptp_clock_info
*ptp
,
5658 struct ptp_clock_request
*rq
, int on
)
5663 static const struct ptp_clock_info tg3_ptp_caps
= {
5664 .owner
= THIS_MODULE
,
5665 .name
= "tg3 clock",
5666 .max_adj
= 250000000,
5671 .adjfreq
= tg3_ptp_adjfreq
,
5672 .adjtime
= tg3_ptp_adjtime
,
5673 .gettime
= tg3_ptp_gettime
,
5674 .settime
= tg3_ptp_settime
,
5675 .enable
= tg3_ptp_enable
,
5678 static void tg3_hwclock_to_timestamp(struct tg3
*tp
, u64 hwclock
,
5679 struct skb_shared_hwtstamps
*timestamp
)
5681 memset(timestamp
, 0, sizeof(struct skb_shared_hwtstamps
));
5682 timestamp
->hwtstamp
= ns_to_ktime((hwclock
& TG3_TSTAMP_MASK
) +
5686 /* tp->lock must be held */
5687 static void tg3_ptp_init(struct tg3
*tp
)
5689 if (!tg3_flag(tp
, PTP_CAPABLE
))
5692 /* Initialize the hardware clock to the system time. */
5693 tg3_refclk_write(tp
, ktime_to_ns(ktime_get_real()));
5695 tp
->ptp_info
= tg3_ptp_caps
;
5698 /* tp->lock must be held */
5699 static void tg3_ptp_resume(struct tg3
*tp
)
5701 if (!tg3_flag(tp
, PTP_CAPABLE
))
5704 tg3_refclk_write(tp
, ktime_to_ns(ktime_get_real()) + tp
->ptp_adjust
);
5708 static void tg3_ptp_fini(struct tg3
*tp
)
5710 if (!tg3_flag(tp
, PTP_CAPABLE
) || !tp
->ptp_clock
)
5713 ptp_clock_unregister(tp
->ptp_clock
);
5714 tp
->ptp_clock
= NULL
;
5718 static inline int tg3_irq_sync(struct tg3
*tp
)
5720 return tp
->irq_sync
;
5723 static inline void tg3_rd32_loop(struct tg3
*tp
, u32
*dst
, u32 off
, u32 len
)
5727 dst
= (u32
*)((u8
*)dst
+ off
);
5728 for (i
= 0; i
< len
; i
+= sizeof(u32
))
5729 *dst
++ = tr32(off
+ i
);
5732 static void tg3_dump_legacy_regs(struct tg3
*tp
, u32
*regs
)
5734 tg3_rd32_loop(tp
, regs
, TG3PCI_VENDOR
, 0xb0);
5735 tg3_rd32_loop(tp
, regs
, MAILBOX_INTERRUPT_0
, 0x200);
5736 tg3_rd32_loop(tp
, regs
, MAC_MODE
, 0x4f0);
5737 tg3_rd32_loop(tp
, regs
, SNDDATAI_MODE
, 0xe0);
5738 tg3_rd32_loop(tp
, regs
, SNDDATAC_MODE
, 0x04);
5739 tg3_rd32_loop(tp
, regs
, SNDBDS_MODE
, 0x80);
5740 tg3_rd32_loop(tp
, regs
, SNDBDI_MODE
, 0x48);
5741 tg3_rd32_loop(tp
, regs
, SNDBDC_MODE
, 0x04);
5742 tg3_rd32_loop(tp
, regs
, RCVLPC_MODE
, 0x20);
5743 tg3_rd32_loop(tp
, regs
, RCVLPC_SELLST_BASE
, 0x15c);
5744 tg3_rd32_loop(tp
, regs
, RCVDBDI_MODE
, 0x0c);
5745 tg3_rd32_loop(tp
, regs
, RCVDBDI_JUMBO_BD
, 0x3c);
5746 tg3_rd32_loop(tp
, regs
, RCVDBDI_BD_PROD_IDX_0
, 0x44);
5747 tg3_rd32_loop(tp
, regs
, RCVDCC_MODE
, 0x04);
5748 tg3_rd32_loop(tp
, regs
, RCVBDI_MODE
, 0x20);
5749 tg3_rd32_loop(tp
, regs
, RCVCC_MODE
, 0x14);
5750 tg3_rd32_loop(tp
, regs
, RCVLSC_MODE
, 0x08);
5751 tg3_rd32_loop(tp
, regs
, MBFREE_MODE
, 0x08);
5752 tg3_rd32_loop(tp
, regs
, HOSTCC_MODE
, 0x100);
5754 if (tg3_flag(tp
, SUPPORT_MSIX
))
5755 tg3_rd32_loop(tp
, regs
, HOSTCC_RXCOL_TICKS_VEC1
, 0x180);
5757 tg3_rd32_loop(tp
, regs
, MEMARB_MODE
, 0x10);
5758 tg3_rd32_loop(tp
, regs
, BUFMGR_MODE
, 0x58);
5759 tg3_rd32_loop(tp
, regs
, RDMAC_MODE
, 0x08);
5760 tg3_rd32_loop(tp
, regs
, WDMAC_MODE
, 0x08);
5761 tg3_rd32_loop(tp
, regs
, RX_CPU_MODE
, 0x04);
5762 tg3_rd32_loop(tp
, regs
, RX_CPU_STATE
, 0x04);
5763 tg3_rd32_loop(tp
, regs
, RX_CPU_PGMCTR
, 0x04);
5764 tg3_rd32_loop(tp
, regs
, RX_CPU_HWBKPT
, 0x04);
5766 if (!tg3_flag(tp
, 5705_PLUS
)) {
5767 tg3_rd32_loop(tp
, regs
, TX_CPU_MODE
, 0x04);
5768 tg3_rd32_loop(tp
, regs
, TX_CPU_STATE
, 0x04);
5769 tg3_rd32_loop(tp
, regs
, TX_CPU_PGMCTR
, 0x04);
5772 tg3_rd32_loop(tp
, regs
, GRCMBOX_INTERRUPT_0
, 0x110);
5773 tg3_rd32_loop(tp
, regs
, FTQ_RESET
, 0x120);
5774 tg3_rd32_loop(tp
, regs
, MSGINT_MODE
, 0x0c);
5775 tg3_rd32_loop(tp
, regs
, DMAC_MODE
, 0x04);
5776 tg3_rd32_loop(tp
, regs
, GRC_MODE
, 0x4c);
5778 if (tg3_flag(tp
, NVRAM
))
5779 tg3_rd32_loop(tp
, regs
, NVRAM_CMD
, 0x24);
5782 static void tg3_dump_state(struct tg3
*tp
)
5787 regs
= kzalloc(TG3_REG_BLK_SIZE
, GFP_ATOMIC
);
5789 netdev_err(tp
->dev
, "Failed allocating register dump buffer\n");
5793 if (tg3_flag(tp
, PCI_EXPRESS
)) {
5794 /* Read up to but not including private PCI registers */
5795 for (i
= 0; i
< TG3_PCIE_TLDLPL_PORT
; i
+= sizeof(u32
))
5796 regs
[i
/ sizeof(u32
)] = tr32(i
);
5798 tg3_dump_legacy_regs(tp
, regs
);
5800 for (i
= 0; i
< TG3_REG_BLK_SIZE
/ sizeof(u32
); i
+= 4) {
5801 if (!regs
[i
+ 0] && !regs
[i
+ 1] &&
5802 !regs
[i
+ 2] && !regs
[i
+ 3])
5805 netdev_err(tp
->dev
, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5807 regs
[i
+ 0], regs
[i
+ 1], regs
[i
+ 2], regs
[i
+ 3]);
5812 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
5813 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
5815 /* SW status block */
5817 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5819 tnapi
->hw_status
->status
,
5820 tnapi
->hw_status
->status_tag
,
5821 tnapi
->hw_status
->rx_jumbo_consumer
,
5822 tnapi
->hw_status
->rx_consumer
,
5823 tnapi
->hw_status
->rx_mini_consumer
,
5824 tnapi
->hw_status
->idx
[0].rx_producer
,
5825 tnapi
->hw_status
->idx
[0].tx_consumer
);
5828 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5830 tnapi
->last_tag
, tnapi
->last_irq_tag
,
5831 tnapi
->tx_prod
, tnapi
->tx_cons
, tnapi
->tx_pending
,
5833 tnapi
->prodring
.rx_std_prod_idx
,
5834 tnapi
->prodring
.rx_std_cons_idx
,
5835 tnapi
->prodring
.rx_jmb_prod_idx
,
5836 tnapi
->prodring
.rx_jmb_cons_idx
);
5840 /* This is called whenever we suspect that the system chipset is re-
5841 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5842 * is bogus tx completions. We try to recover by setting the
5843 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5846 static void tg3_tx_recover(struct tg3
*tp
)
5848 BUG_ON(tg3_flag(tp
, MBOX_WRITE_REORDER
) ||
5849 tp
->write32_tx_mbox
== tg3_write_indirect_mbox
);
5851 netdev_warn(tp
->dev
,
5852 "The system may be re-ordering memory-mapped I/O "
5853 "cycles to the network device, attempting to recover. "
5854 "Please report the problem to the driver maintainer "
5855 "and include system chipset information.\n");
5857 spin_lock(&tp
->lock
);
5858 tg3_flag_set(tp
, TX_RECOVERY_PENDING
);
5859 spin_unlock(&tp
->lock
);
5862 static inline u32
tg3_tx_avail(struct tg3_napi
*tnapi
)
5864 /* Tell compiler to fetch tx indices from memory. */
5866 return tnapi
->tx_pending
-
5867 ((tnapi
->tx_prod
- tnapi
->tx_cons
) & (TG3_TX_RING_SIZE
- 1));
5870 /* Tigon3 never reports partial packet sends. So we do not
5871 * need special logic to handle SKBs that have not had all
5872 * of their frags sent yet, like SunGEM does.
5874 static void tg3_tx(struct tg3_napi
*tnapi
)
5876 struct tg3
*tp
= tnapi
->tp
;
5877 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
5878 u32 sw_idx
= tnapi
->tx_cons
;
5879 struct netdev_queue
*txq
;
5880 int index
= tnapi
- tp
->napi
;
5881 unsigned int pkts_compl
= 0, bytes_compl
= 0;
5883 if (tg3_flag(tp
, ENABLE_TSS
))
5886 txq
= netdev_get_tx_queue(tp
->dev
, index
);
5888 while (sw_idx
!= hw_idx
) {
5889 struct tg3_tx_ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
5890 struct sk_buff
*skb
= ri
->skb
;
5893 if (unlikely(skb
== NULL
)) {
5898 if (tnapi
->tx_ring
[sw_idx
].len_flags
& TXD_FLAG_HWTSTAMP
) {
5899 struct skb_shared_hwtstamps timestamp
;
5900 u64 hwclock
= tr32(TG3_TX_TSTAMP_LSB
);
5901 hwclock
|= (u64
)tr32(TG3_TX_TSTAMP_MSB
) << 32;
5903 tg3_hwclock_to_timestamp(tp
, hwclock
, ×tamp
);
5905 skb_tstamp_tx(skb
, ×tamp
);
5908 pci_unmap_single(tp
->pdev
,
5909 dma_unmap_addr(ri
, mapping
),
5915 while (ri
->fragmented
) {
5916 ri
->fragmented
= false;
5917 sw_idx
= NEXT_TX(sw_idx
);
5918 ri
= &tnapi
->tx_buffers
[sw_idx
];
5921 sw_idx
= NEXT_TX(sw_idx
);
5923 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
5924 ri
= &tnapi
->tx_buffers
[sw_idx
];
5925 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
5928 pci_unmap_page(tp
->pdev
,
5929 dma_unmap_addr(ri
, mapping
),
5930 skb_frag_size(&skb_shinfo(skb
)->frags
[i
]),
5933 while (ri
->fragmented
) {
5934 ri
->fragmented
= false;
5935 sw_idx
= NEXT_TX(sw_idx
);
5936 ri
= &tnapi
->tx_buffers
[sw_idx
];
5939 sw_idx
= NEXT_TX(sw_idx
);
5943 bytes_compl
+= skb
->len
;
5947 if (unlikely(tx_bug
)) {
5953 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
5955 tnapi
->tx_cons
= sw_idx
;
5957 /* Need to make the tx_cons update visible to tg3_start_xmit()
5958 * before checking for netif_queue_stopped(). Without the
5959 * memory barrier, there is a small possibility that tg3_start_xmit()
5960 * will miss it and cause the queue to be stopped forever.
5964 if (unlikely(netif_tx_queue_stopped(txq
) &&
5965 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
5966 __netif_tx_lock(txq
, smp_processor_id());
5967 if (netif_tx_queue_stopped(txq
) &&
5968 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
5969 netif_tx_wake_queue(txq
);
5970 __netif_tx_unlock(txq
);
5974 static void tg3_frag_free(bool is_frag
, void *data
)
5977 put_page(virt_to_head_page(data
));
5982 static void tg3_rx_data_free(struct tg3
*tp
, struct ring_info
*ri
, u32 map_sz
)
5984 unsigned int skb_size
= SKB_DATA_ALIGN(map_sz
+ TG3_RX_OFFSET(tp
)) +
5985 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
5990 pci_unmap_single(tp
->pdev
, dma_unmap_addr(ri
, mapping
),
5991 map_sz
, PCI_DMA_FROMDEVICE
);
5992 tg3_frag_free(skb_size
<= PAGE_SIZE
, ri
->data
);
5997 /* Returns size of skb allocated or < 0 on error.
5999 * We only need to fill in the address because the other members
6000 * of the RX descriptor are invariant, see tg3_init_rings.
6002 * Note the purposeful assymetry of cpu vs. chip accesses. For
6003 * posting buffers we only dirty the first cache line of the RX
6004 * descriptor (containing the address). Whereas for the RX status
6005 * buffers the cpu only reads the last cacheline of the RX descriptor
6006 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6008 static int tg3_alloc_rx_data(struct tg3
*tp
, struct tg3_rx_prodring_set
*tpr
,
6009 u32 opaque_key
, u32 dest_idx_unmasked
,
6010 unsigned int *frag_size
)
6012 struct tg3_rx_buffer_desc
*desc
;
6013 struct ring_info
*map
;
6016 int skb_size
, data_size
, dest_idx
;
6018 switch (opaque_key
) {
6019 case RXD_OPAQUE_RING_STD
:
6020 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6021 desc
= &tpr
->rx_std
[dest_idx
];
6022 map
= &tpr
->rx_std_buffers
[dest_idx
];
6023 data_size
= tp
->rx_pkt_map_sz
;
6026 case RXD_OPAQUE_RING_JUMBO
:
6027 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6028 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
6029 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
6030 data_size
= TG3_RX_JMB_MAP_SZ
;
6037 /* Do not overwrite any of the map or rp information
6038 * until we are sure we can commit to a new buffer.
6040 * Callers depend upon this behavior and assume that
6041 * we leave everything unchanged if we fail.
6043 skb_size
= SKB_DATA_ALIGN(data_size
+ TG3_RX_OFFSET(tp
)) +
6044 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
6045 if (skb_size
<= PAGE_SIZE
) {
6046 data
= netdev_alloc_frag(skb_size
);
6047 *frag_size
= skb_size
;
6049 data
= kmalloc(skb_size
, GFP_ATOMIC
);
6055 mapping
= pci_map_single(tp
->pdev
,
6056 data
+ TG3_RX_OFFSET(tp
),
6058 PCI_DMA_FROMDEVICE
);
6059 if (unlikely(pci_dma_mapping_error(tp
->pdev
, mapping
))) {
6060 tg3_frag_free(skb_size
<= PAGE_SIZE
, data
);
6065 dma_unmap_addr_set(map
, mapping
, mapping
);
6067 desc
->addr_hi
= ((u64
)mapping
>> 32);
6068 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
6073 /* We only need to move over in the address because the other
6074 * members of the RX descriptor are invariant. See notes above
6075 * tg3_alloc_rx_data for full details.
6077 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
6078 struct tg3_rx_prodring_set
*dpr
,
6079 u32 opaque_key
, int src_idx
,
6080 u32 dest_idx_unmasked
)
6082 struct tg3
*tp
= tnapi
->tp
;
6083 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
6084 struct ring_info
*src_map
, *dest_map
;
6085 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
6088 switch (opaque_key
) {
6089 case RXD_OPAQUE_RING_STD
:
6090 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6091 dest_desc
= &dpr
->rx_std
[dest_idx
];
6092 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
6093 src_desc
= &spr
->rx_std
[src_idx
];
6094 src_map
= &spr
->rx_std_buffers
[src_idx
];
6097 case RXD_OPAQUE_RING_JUMBO
:
6098 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6099 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
6100 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
6101 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
6102 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
6109 dest_map
->data
= src_map
->data
;
6110 dma_unmap_addr_set(dest_map
, mapping
,
6111 dma_unmap_addr(src_map
, mapping
));
6112 dest_desc
->addr_hi
= src_desc
->addr_hi
;
6113 dest_desc
->addr_lo
= src_desc
->addr_lo
;
6115 /* Ensure that the update to the skb happens after the physical
6116 * addresses have been transferred to the new BD location.
6120 src_map
->data
= NULL
;
6123 /* The RX ring scheme is composed of multiple rings which post fresh
6124 * buffers to the chip, and one special ring the chip uses to report
6125 * status back to the host.
6127 * The special ring reports the status of received packets to the
6128 * host. The chip does not write into the original descriptor the
6129 * RX buffer was obtained from. The chip simply takes the original
6130 * descriptor as provided by the host, updates the status and length
6131 * field, then writes this into the next status ring entry.
6133 * Each ring the host uses to post buffers to the chip is described
6134 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6135 * it is first placed into the on-chip ram. When the packet's length
6136 * is known, it walks down the TG3_BDINFO entries to select the ring.
6137 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6138 * which is within the range of the new packet's length is chosen.
6140 * The "separate ring for rx status" scheme may sound queer, but it makes
6141 * sense from a cache coherency perspective. If only the host writes
6142 * to the buffer post rings, and only the chip writes to the rx status
6143 * rings, then cache lines never move beyond shared-modified state.
6144 * If both the host and chip were to write into the same ring, cache line
6145 * eviction could occur since both entities want it in an exclusive state.
6147 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
6149 struct tg3
*tp
= tnapi
->tp
;
6150 u32 work_mask
, rx_std_posted
= 0;
6151 u32 std_prod_idx
, jmb_prod_idx
;
6152 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
6155 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
6157 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6159 * We need to order the read of hw_idx and the read of
6160 * the opaque cookie.
6165 std_prod_idx
= tpr
->rx_std_prod_idx
;
6166 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
6167 while (sw_idx
!= hw_idx
&& budget
> 0) {
6168 struct ring_info
*ri
;
6169 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
6171 struct sk_buff
*skb
;
6172 dma_addr_t dma_addr
;
6173 u32 opaque_key
, desc_idx
, *post_ptr
;
6177 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
6178 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
6179 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
6180 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
6181 dma_addr
= dma_unmap_addr(ri
, mapping
);
6183 post_ptr
= &std_prod_idx
;
6185 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
6186 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
6187 dma_addr
= dma_unmap_addr(ri
, mapping
);
6189 post_ptr
= &jmb_prod_idx
;
6191 goto next_pkt_nopost
;
6193 work_mask
|= opaque_key
;
6195 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
6196 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
6198 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6199 desc_idx
, *post_ptr
);
6201 /* Other statistics kept track of by card. */
6206 prefetch(data
+ TG3_RX_OFFSET(tp
));
6207 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
6210 if ((desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6211 RXD_FLAG_PTPSTAT_PTPV1
||
6212 (desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6213 RXD_FLAG_PTPSTAT_PTPV2
) {
6214 tstamp
= tr32(TG3_RX_TSTAMP_LSB
);
6215 tstamp
|= (u64
)tr32(TG3_RX_TSTAMP_MSB
) << 32;
6218 if (len
> TG3_RX_COPY_THRESH(tp
)) {
6220 unsigned int frag_size
;
6222 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
6223 *post_ptr
, &frag_size
);
6227 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
6228 PCI_DMA_FROMDEVICE
);
6230 skb
= build_skb(data
, frag_size
);
6232 tg3_frag_free(frag_size
!= 0, data
);
6233 goto drop_it_no_recycle
;
6235 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
6236 /* Ensure that the update to the data happens
6237 * after the usage of the old DMA mapping.
6244 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6245 desc_idx
, *post_ptr
);
6247 skb
= netdev_alloc_skb(tp
->dev
,
6248 len
+ TG3_RAW_IP_ALIGN
);
6250 goto drop_it_no_recycle
;
6252 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
6253 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6255 data
+ TG3_RX_OFFSET(tp
),
6257 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6262 tg3_hwclock_to_timestamp(tp
, tstamp
,
6263 skb_hwtstamps(skb
));
6265 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6266 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6267 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6268 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6269 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6271 skb_checksum_none_assert(skb
);
6273 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6275 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6276 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6278 goto drop_it_no_recycle
;
6281 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6282 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6283 __vlan_hwaccel_put_tag(skb
,
6284 desc
->err_vlan
& RXD_VLAN_MASK
);
6286 napi_gro_receive(&tnapi
->napi
, skb
);
6294 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6295 tpr
->rx_std_prod_idx
= std_prod_idx
&
6296 tp
->rx_std_ring_mask
;
6297 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6298 tpr
->rx_std_prod_idx
);
6299 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6304 sw_idx
&= tp
->rx_ret_ring_mask
;
6306 /* Refresh hw_idx to see if there is new work */
6307 if (sw_idx
== hw_idx
) {
6308 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6313 /* ACK the status ring. */
6314 tnapi
->rx_rcb_ptr
= sw_idx
;
6315 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6317 /* Refill RX ring(s). */
6318 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6319 /* Sync BD data before updating mailbox */
6322 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6323 tpr
->rx_std_prod_idx
= std_prod_idx
&
6324 tp
->rx_std_ring_mask
;
6325 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6326 tpr
->rx_std_prod_idx
);
6328 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6329 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6330 tp
->rx_jmb_ring_mask
;
6331 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6332 tpr
->rx_jmb_prod_idx
);
6335 } else if (work_mask
) {
6336 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6337 * updated before the producer indices can be updated.
6341 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6342 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6344 if (tnapi
!= &tp
->napi
[1]) {
6345 tp
->rx_refill
= true;
6346 napi_schedule(&tp
->napi
[1].napi
);
6353 static void tg3_poll_link(struct tg3
*tp
)
6355 /* handle link change and other phy events */
6356 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
6357 struct tg3_hw_status
*sblk
= tp
->napi
[0].hw_status
;
6359 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
6360 sblk
->status
= SD_STATUS_UPDATED
|
6361 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
6362 spin_lock(&tp
->lock
);
6363 if (tg3_flag(tp
, USE_PHYLIB
)) {
6365 (MAC_STATUS_SYNC_CHANGED
|
6366 MAC_STATUS_CFG_CHANGED
|
6367 MAC_STATUS_MI_COMPLETION
|
6368 MAC_STATUS_LNKSTATE_CHANGED
));
6371 tg3_setup_phy(tp
, 0);
6372 spin_unlock(&tp
->lock
);
6377 static int tg3_rx_prodring_xfer(struct tg3
*tp
,
6378 struct tg3_rx_prodring_set
*dpr
,
6379 struct tg3_rx_prodring_set
*spr
)
6381 u32 si
, di
, cpycnt
, src_prod_idx
;
6385 src_prod_idx
= spr
->rx_std_prod_idx
;
6387 /* Make sure updates to the rx_std_buffers[] entries and the
6388 * standard producer index are seen in the correct order.
6392 if (spr
->rx_std_cons_idx
== src_prod_idx
)
6395 if (spr
->rx_std_cons_idx
< src_prod_idx
)
6396 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
6398 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
6399 spr
->rx_std_cons_idx
;
6401 cpycnt
= min(cpycnt
,
6402 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
6404 si
= spr
->rx_std_cons_idx
;
6405 di
= dpr
->rx_std_prod_idx
;
6407 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6408 if (dpr
->rx_std_buffers
[i
].data
) {
6418 /* Ensure that updates to the rx_std_buffers ring and the
6419 * shadowed hardware producer ring from tg3_recycle_skb() are
6420 * ordered correctly WRT the skb check above.
6424 memcpy(&dpr
->rx_std_buffers
[di
],
6425 &spr
->rx_std_buffers
[si
],
6426 cpycnt
* sizeof(struct ring_info
));
6428 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6429 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6430 sbd
= &spr
->rx_std
[si
];
6431 dbd
= &dpr
->rx_std
[di
];
6432 dbd
->addr_hi
= sbd
->addr_hi
;
6433 dbd
->addr_lo
= sbd
->addr_lo
;
6436 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
6437 tp
->rx_std_ring_mask
;
6438 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
6439 tp
->rx_std_ring_mask
;
6443 src_prod_idx
= spr
->rx_jmb_prod_idx
;
6445 /* Make sure updates to the rx_jmb_buffers[] entries and
6446 * the jumbo producer index are seen in the correct order.
6450 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
6453 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
6454 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
6456 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
6457 spr
->rx_jmb_cons_idx
;
6459 cpycnt
= min(cpycnt
,
6460 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
6462 si
= spr
->rx_jmb_cons_idx
;
6463 di
= dpr
->rx_jmb_prod_idx
;
6465 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6466 if (dpr
->rx_jmb_buffers
[i
].data
) {
6476 /* Ensure that updates to the rx_jmb_buffers ring and the
6477 * shadowed hardware producer ring from tg3_recycle_skb() are
6478 * ordered correctly WRT the skb check above.
6482 memcpy(&dpr
->rx_jmb_buffers
[di
],
6483 &spr
->rx_jmb_buffers
[si
],
6484 cpycnt
* sizeof(struct ring_info
));
6486 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6487 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6488 sbd
= &spr
->rx_jmb
[si
].std
;
6489 dbd
= &dpr
->rx_jmb
[di
].std
;
6490 dbd
->addr_hi
= sbd
->addr_hi
;
6491 dbd
->addr_lo
= sbd
->addr_lo
;
6494 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
6495 tp
->rx_jmb_ring_mask
;
6496 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
6497 tp
->rx_jmb_ring_mask
;
6503 static int tg3_poll_work(struct tg3_napi
*tnapi
, int work_done
, int budget
)
6505 struct tg3
*tp
= tnapi
->tp
;
6507 /* run TX completion thread */
6508 if (tnapi
->hw_status
->idx
[0].tx_consumer
!= tnapi
->tx_cons
) {
6510 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6514 if (!tnapi
->rx_rcb_prod_idx
)
6517 /* run RX thread, within the bounds set by NAPI.
6518 * All RX "locking" is done by ensuring outside
6519 * code synchronizes with tg3->napi.poll()
6521 if (*(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
6522 work_done
+= tg3_rx(tnapi
, budget
- work_done
);
6524 if (tg3_flag(tp
, ENABLE_RSS
) && tnapi
== &tp
->napi
[1]) {
6525 struct tg3_rx_prodring_set
*dpr
= &tp
->napi
[0].prodring
;
6527 u32 std_prod_idx
= dpr
->rx_std_prod_idx
;
6528 u32 jmb_prod_idx
= dpr
->rx_jmb_prod_idx
;
6530 tp
->rx_refill
= false;
6531 for (i
= 1; i
<= tp
->rxq_cnt
; i
++)
6532 err
|= tg3_rx_prodring_xfer(tp
, dpr
,
6533 &tp
->napi
[i
].prodring
);
6537 if (std_prod_idx
!= dpr
->rx_std_prod_idx
)
6538 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6539 dpr
->rx_std_prod_idx
);
6541 if (jmb_prod_idx
!= dpr
->rx_jmb_prod_idx
)
6542 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6543 dpr
->rx_jmb_prod_idx
);
6548 tw32_f(HOSTCC_MODE
, tp
->coal_now
);
6554 static inline void tg3_reset_task_schedule(struct tg3
*tp
)
6556 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING
, tp
->tg3_flags
))
6557 schedule_work(&tp
->reset_task
);
6560 static inline void tg3_reset_task_cancel(struct tg3
*tp
)
6562 cancel_work_sync(&tp
->reset_task
);
6563 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
6564 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
6567 static int tg3_poll_msix(struct napi_struct
*napi
, int budget
)
6569 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
6570 struct tg3
*tp
= tnapi
->tp
;
6572 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6575 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
6577 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6580 if (unlikely(work_done
>= budget
))
6583 /* tp->last_tag is used in tg3_int_reenable() below
6584 * to tell the hw how much work has been processed,
6585 * so we must read it before checking for more work.
6587 tnapi
->last_tag
= sblk
->status_tag
;
6588 tnapi
->last_irq_tag
= tnapi
->last_tag
;
6591 /* check for RX/TX work to do */
6592 if (likely(sblk
->idx
[0].tx_consumer
== tnapi
->tx_cons
&&
6593 *(tnapi
->rx_rcb_prod_idx
) == tnapi
->rx_rcb_ptr
)) {
6595 /* This test here is not race free, but will reduce
6596 * the number of interrupts by looping again.
6598 if (tnapi
== &tp
->napi
[1] && tp
->rx_refill
)
6601 napi_complete(napi
);
6602 /* Reenable interrupts. */
6603 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
6605 /* This test here is synchronized by napi_schedule()
6606 * and napi_complete() to close the race condition.
6608 if (unlikely(tnapi
== &tp
->napi
[1] && tp
->rx_refill
)) {
6609 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
6610 HOSTCC_MODE_ENABLE
|
6621 /* work_done is guaranteed to be less than budget. */
6622 napi_complete(napi
);
6623 tg3_reset_task_schedule(tp
);
6627 static void tg3_process_error(struct tg3
*tp
)
6630 bool real_error
= false;
6632 if (tg3_flag(tp
, ERROR_PROCESSED
))
6635 /* Check Flow Attention register */
6636 val
= tr32(HOSTCC_FLOW_ATTN
);
6637 if (val
& ~HOSTCC_FLOW_ATTN_MBUF_LWM
) {
6638 netdev_err(tp
->dev
, "FLOW Attention error. Resetting chip.\n");
6642 if (tr32(MSGINT_STATUS
) & ~MSGINT_STATUS_MSI_REQ
) {
6643 netdev_err(tp
->dev
, "MSI Status error. Resetting chip.\n");
6647 if (tr32(RDMAC_STATUS
) || tr32(WDMAC_STATUS
)) {
6648 netdev_err(tp
->dev
, "DMA Status error. Resetting chip.\n");
6657 tg3_flag_set(tp
, ERROR_PROCESSED
);
6658 tg3_reset_task_schedule(tp
);
6661 static int tg3_poll(struct napi_struct
*napi
, int budget
)
6663 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
6664 struct tg3
*tp
= tnapi
->tp
;
6666 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6669 if (sblk
->status
& SD_STATUS_ERROR
)
6670 tg3_process_error(tp
);
6674 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
6676 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
6679 if (unlikely(work_done
>= budget
))
6682 if (tg3_flag(tp
, TAGGED_STATUS
)) {
6683 /* tp->last_tag is used in tg3_int_reenable() below
6684 * to tell the hw how much work has been processed,
6685 * so we must read it before checking for more work.
6687 tnapi
->last_tag
= sblk
->status_tag
;
6688 tnapi
->last_irq_tag
= tnapi
->last_tag
;
6691 sblk
->status
&= ~SD_STATUS_UPDATED
;
6693 if (likely(!tg3_has_work(tnapi
))) {
6694 napi_complete(napi
);
6695 tg3_int_reenable(tnapi
);
6703 /* work_done is guaranteed to be less than budget. */
6704 napi_complete(napi
);
6705 tg3_reset_task_schedule(tp
);
6709 static void tg3_napi_disable(struct tg3
*tp
)
6713 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--)
6714 napi_disable(&tp
->napi
[i
].napi
);
6717 static void tg3_napi_enable(struct tg3
*tp
)
6721 for (i
= 0; i
< tp
->irq_cnt
; i
++)
6722 napi_enable(&tp
->napi
[i
].napi
);
6725 static void tg3_napi_init(struct tg3
*tp
)
6729 netif_napi_add(tp
->dev
, &tp
->napi
[0].napi
, tg3_poll
, 64);
6730 for (i
= 1; i
< tp
->irq_cnt
; i
++)
6731 netif_napi_add(tp
->dev
, &tp
->napi
[i
].napi
, tg3_poll_msix
, 64);
6734 static void tg3_napi_fini(struct tg3
*tp
)
6738 for (i
= 0; i
< tp
->irq_cnt
; i
++)
6739 netif_napi_del(&tp
->napi
[i
].napi
);
6742 static inline void tg3_netif_stop(struct tg3
*tp
)
6744 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
6745 tg3_napi_disable(tp
);
6746 netif_carrier_off(tp
->dev
);
6747 netif_tx_disable(tp
->dev
);
6750 /* tp->lock must be held */
6751 static inline void tg3_netif_start(struct tg3
*tp
)
6755 /* NOTE: unconditional netif_tx_wake_all_queues is only
6756 * appropriate so long as all callers are assured to
6757 * have free tx slots (such as after tg3_init_hw)
6759 netif_tx_wake_all_queues(tp
->dev
);
6762 netif_carrier_on(tp
->dev
);
6764 tg3_napi_enable(tp
);
6765 tp
->napi
[0].hw_status
->status
|= SD_STATUS_UPDATED
;
6766 tg3_enable_ints(tp
);
6769 static void tg3_irq_quiesce(struct tg3
*tp
)
6773 BUG_ON(tp
->irq_sync
);
6778 for (i
= 0; i
< tp
->irq_cnt
; i
++)
6779 synchronize_irq(tp
->napi
[i
].irq_vec
);
6782 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6783 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6784 * with as well. Most of the time, this is not necessary except when
6785 * shutting down the device.
6787 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
6789 spin_lock_bh(&tp
->lock
);
6791 tg3_irq_quiesce(tp
);
6794 static inline void tg3_full_unlock(struct tg3
*tp
)
6796 spin_unlock_bh(&tp
->lock
);
6799 /* One-shot MSI handler - Chip automatically disables interrupt
6800 * after sending MSI so driver doesn't have to do it.
6802 static irqreturn_t
tg3_msi_1shot(int irq
, void *dev_id
)
6804 struct tg3_napi
*tnapi
= dev_id
;
6805 struct tg3
*tp
= tnapi
->tp
;
6807 prefetch(tnapi
->hw_status
);
6809 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
6811 if (likely(!tg3_irq_sync(tp
)))
6812 napi_schedule(&tnapi
->napi
);
6817 /* MSI ISR - No need to check for interrupt sharing and no need to
6818 * flush status block and interrupt mailbox. PCI ordering rules
6819 * guarantee that MSI will arrive after the status block.
6821 static irqreturn_t
tg3_msi(int irq
, void *dev_id
)
6823 struct tg3_napi
*tnapi
= dev_id
;
6824 struct tg3
*tp
= tnapi
->tp
;
6826 prefetch(tnapi
->hw_status
);
6828 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
6830 * Writing any value to intr-mbox-0 clears PCI INTA# and
6831 * chip-internal interrupt pending events.
6832 * Writing non-zero to intr-mbox-0 additional tells the
6833 * NIC to stop sending us irqs, engaging "in-intr-handler"
6836 tw32_mailbox(tnapi
->int_mbox
, 0x00000001);
6837 if (likely(!tg3_irq_sync(tp
)))
6838 napi_schedule(&tnapi
->napi
);
6840 return IRQ_RETVAL(1);
6843 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
)
6845 struct tg3_napi
*tnapi
= dev_id
;
6846 struct tg3
*tp
= tnapi
->tp
;
6847 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6848 unsigned int handled
= 1;
6850 /* In INTx mode, it is possible for the interrupt to arrive at
6851 * the CPU before the status block posted prior to the interrupt.
6852 * Reading the PCI State register will confirm whether the
6853 * interrupt is ours and will flush the status block.
6855 if (unlikely(!(sblk
->status
& SD_STATUS_UPDATED
))) {
6856 if (tg3_flag(tp
, CHIP_RESETTING
) ||
6857 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
6864 * Writing any value to intr-mbox-0 clears PCI INTA# and
6865 * chip-internal interrupt pending events.
6866 * Writing non-zero to intr-mbox-0 additional tells the
6867 * NIC to stop sending us irqs, engaging "in-intr-handler"
6870 * Flush the mailbox to de-assert the IRQ immediately to prevent
6871 * spurious interrupts. The flush impacts performance but
6872 * excessive spurious interrupts can be worse in some cases.
6874 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
6875 if (tg3_irq_sync(tp
))
6877 sblk
->status
&= ~SD_STATUS_UPDATED
;
6878 if (likely(tg3_has_work(tnapi
))) {
6879 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
6880 napi_schedule(&tnapi
->napi
);
6882 /* No work, shared interrupt perhaps? re-enable
6883 * interrupts, and flush that PCI write
6885 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
6889 return IRQ_RETVAL(handled
);
6892 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
)
6894 struct tg3_napi
*tnapi
= dev_id
;
6895 struct tg3
*tp
= tnapi
->tp
;
6896 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6897 unsigned int handled
= 1;
6899 /* In INTx mode, it is possible for the interrupt to arrive at
6900 * the CPU before the status block posted prior to the interrupt.
6901 * Reading the PCI State register will confirm whether the
6902 * interrupt is ours and will flush the status block.
6904 if (unlikely(sblk
->status_tag
== tnapi
->last_irq_tag
)) {
6905 if (tg3_flag(tp
, CHIP_RESETTING
) ||
6906 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
6913 * writing any value to intr-mbox-0 clears PCI INTA# and
6914 * chip-internal interrupt pending events.
6915 * writing non-zero to intr-mbox-0 additional tells the
6916 * NIC to stop sending us irqs, engaging "in-intr-handler"
6919 * Flush the mailbox to de-assert the IRQ immediately to prevent
6920 * spurious interrupts. The flush impacts performance but
6921 * excessive spurious interrupts can be worse in some cases.
6923 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
6926 * In a shared interrupt configuration, sometimes other devices'
6927 * interrupts will scream. We record the current status tag here
6928 * so that the above check can report that the screaming interrupts
6929 * are unhandled. Eventually they will be silenced.
6931 tnapi
->last_irq_tag
= sblk
->status_tag
;
6933 if (tg3_irq_sync(tp
))
6936 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
6938 napi_schedule(&tnapi
->napi
);
6941 return IRQ_RETVAL(handled
);
6944 /* ISR for interrupt test */
6945 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
)
6947 struct tg3_napi
*tnapi
= dev_id
;
6948 struct tg3
*tp
= tnapi
->tp
;
6949 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
6951 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
6952 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
6953 tg3_disable_ints(tp
);
6954 return IRQ_RETVAL(1);
6956 return IRQ_RETVAL(0);
6959 #ifdef CONFIG_NET_POLL_CONTROLLER
6960 static void tg3_poll_controller(struct net_device
*dev
)
6963 struct tg3
*tp
= netdev_priv(dev
);
6965 if (tg3_irq_sync(tp
))
6968 for (i
= 0; i
< tp
->irq_cnt
; i
++)
6969 tg3_interrupt(tp
->napi
[i
].irq_vec
, &tp
->napi
[i
]);
/* ndo_tx_timeout: log (and dump state when TX-error messages are
 * enabled), then schedule a chip reset to recover.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6985 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6986 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
6988 u32 base
= (u32
) mapping
& 0xffffffff;
6990 return (base
> 0xffffdcc0) && (base
+ len
+ 8 < base
);
6993 /* Test for DMA addresses > 40-bit */
6994 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
6997 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6998 if (tg3_flag(tp
, 40BIT_DMA_BUG
))
6999 return ((u64
) mapping
+ len
) > DMA_BIT_MASK(40);
7006 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc
*txbd
,
7007 dma_addr_t mapping
, u32 len
, u32 flags
,
7010 txbd
->addr_hi
= ((u64
) mapping
>> 32);
7011 txbd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
7012 txbd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | (flags
& 0x0000ffff);
7013 txbd
->vlan_tag
= (mss
<< TXD_MSS_SHIFT
) | (vlan
<< TXD_VLAN_TAG_SHIFT
);
7016 static bool tg3_tx_frag_set(struct tg3_napi
*tnapi
, u32
*entry
, u32
*budget
,
7017 dma_addr_t map
, u32 len
, u32 flags
,
7020 struct tg3
*tp
= tnapi
->tp
;
7023 if (tg3_flag(tp
, SHORT_DMA_BUG
) && len
<= 8)
7026 if (tg3_4g_overflow_test(map
, len
))
7029 if (tg3_40bit_overflow_test(tp
, map
, len
))
7032 if (tp
->dma_limit
) {
7033 u32 prvidx
= *entry
;
7034 u32 tmp_flag
= flags
& ~TXD_FLAG_END
;
7035 while (len
> tp
->dma_limit
&& *budget
) {
7036 u32 frag_len
= tp
->dma_limit
;
7037 len
-= tp
->dma_limit
;
7039 /* Avoid the 8byte DMA problem */
7041 len
+= tp
->dma_limit
/ 2;
7042 frag_len
= tp
->dma_limit
/ 2;
7045 tnapi
->tx_buffers
[*entry
].fragmented
= true;
7047 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
7048 frag_len
, tmp_flag
, mss
, vlan
);
7051 *entry
= NEXT_TX(*entry
);
7058 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
7059 len
, flags
, mss
, vlan
);
7061 *entry
= NEXT_TX(*entry
);
7064 tnapi
->tx_buffers
[prvidx
].fragmented
= false;
7068 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
7069 len
, flags
, mss
, vlan
);
7070 *entry
= NEXT_TX(*entry
);
7076 static void tg3_tx_skb_unmap(struct tg3_napi
*tnapi
, u32 entry
, int last
)
7079 struct sk_buff
*skb
;
7080 struct tg3_tx_ring_info
*txb
= &tnapi
->tx_buffers
[entry
];
7085 pci_unmap_single(tnapi
->tp
->pdev
,
7086 dma_unmap_addr(txb
, mapping
),
7090 while (txb
->fragmented
) {
7091 txb
->fragmented
= false;
7092 entry
= NEXT_TX(entry
);
7093 txb
= &tnapi
->tx_buffers
[entry
];
7096 for (i
= 0; i
<= last
; i
++) {
7097 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
7099 entry
= NEXT_TX(entry
);
7100 txb
= &tnapi
->tx_buffers
[entry
];
7102 pci_unmap_page(tnapi
->tp
->pdev
,
7103 dma_unmap_addr(txb
, mapping
),
7104 skb_frag_size(frag
), PCI_DMA_TODEVICE
);
7106 while (txb
->fragmented
) {
7107 txb
->fragmented
= false;
7108 entry
= NEXT_TX(entry
);
7109 txb
= &tnapi
->tx_buffers
[entry
];
7114 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7115 static int tigon3_dma_hwbug_workaround(struct tg3_napi
*tnapi
,
7116 struct sk_buff
**pskb
,
7117 u32
*entry
, u32
*budget
,
7118 u32 base_flags
, u32 mss
, u32 vlan
)
7120 struct tg3
*tp
= tnapi
->tp
;
7121 struct sk_buff
*new_skb
, *skb
= *pskb
;
7122 dma_addr_t new_addr
= 0;
7125 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
7126 new_skb
= skb_copy(skb
, GFP_ATOMIC
);
7128 int more_headroom
= 4 - ((unsigned long)skb
->data
& 3);
7130 new_skb
= skb_copy_expand(skb
,
7131 skb_headroom(skb
) + more_headroom
,
7132 skb_tailroom(skb
), GFP_ATOMIC
);
7138 /* New SKB is guaranteed to be linear. */
7139 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
7141 /* Make sure the mapping succeeded */
7142 if (pci_dma_mapping_error(tp
->pdev
, new_addr
)) {
7143 dev_kfree_skb(new_skb
);
7146 u32 save_entry
= *entry
;
7148 base_flags
|= TXD_FLAG_END
;
7150 tnapi
->tx_buffers
[*entry
].skb
= new_skb
;
7151 dma_unmap_addr_set(&tnapi
->tx_buffers
[*entry
],
7154 if (tg3_tx_frag_set(tnapi
, entry
, budget
, new_addr
,
7155 new_skb
->len
, base_flags
,
7157 tg3_tx_skb_unmap(tnapi
, save_entry
, -1);
7158 dev_kfree_skb(new_skb
);
7169 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*, struct net_device
*);
7171 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7172 * TSO header is greater than 80 bytes.
7174 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
7176 struct sk_buff
*segs
, *nskb
;
7177 u32 frag_cnt_est
= skb_shinfo(skb
)->gso_segs
* 3;
7179 /* Estimate the number of fragments in the worst case */
7180 if (unlikely(tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)) {
7181 netif_stop_queue(tp
->dev
);
7183 /* netif_tx_stop_queue() must be done before checking
7184 * checking tx index in tg3_tx_avail() below, because in
7185 * tg3_tx(), we update tx index before checking for
7186 * netif_tx_queue_stopped().
7189 if (tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)
7190 return NETDEV_TX_BUSY
;
7192 netif_wake_queue(tp
->dev
);
7195 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
7197 goto tg3_tso_bug_end
;
7203 tg3_start_xmit(nskb
, tp
->dev
);
7209 return NETDEV_TX_OK
;
7212 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7213 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7215 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
7217 struct tg3
*tp
= netdev_priv(dev
);
7218 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
7220 int i
= -1, would_hit_hwbug
;
7222 struct tg3_napi
*tnapi
;
7223 struct netdev_queue
*txq
;
7226 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
7227 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
7228 if (tg3_flag(tp
, ENABLE_TSS
))
7231 budget
= tg3_tx_avail(tnapi
);
7233 /* We are running in BH disabled context with netif_tx_lock
7234 * and TX reclaim runs via tp->napi.poll inside of a software
7235 * interrupt. Furthermore, IRQ processing runs lockless so we have
7236 * no IRQ context deadlocks to worry about either. Rejoice!
7238 if (unlikely(budget
<= (skb_shinfo(skb
)->nr_frags
+ 1))) {
7239 if (!netif_tx_queue_stopped(txq
)) {
7240 netif_tx_stop_queue(txq
);
7242 /* This is a hard error, log it. */
7244 "BUG! Tx Ring full when queue awake!\n");
7246 return NETDEV_TX_BUSY
;
7249 entry
= tnapi
->tx_prod
;
7251 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
7252 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
7254 mss
= skb_shinfo(skb
)->gso_size
;
7257 u32 tcp_opt_len
, hdr_len
;
7259 if (skb_header_cloned(skb
) &&
7260 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))
7264 tcp_opt_len
= tcp_optlen(skb
);
7266 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
) - ETH_HLEN
;
7268 if (!skb_is_gso_v6(skb
)) {
7270 iph
->tot_len
= htons(mss
+ hdr_len
);
7273 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
7274 tg3_flag(tp
, TSO_BUG
))
7275 return tg3_tso_bug(tp
, skb
);
7277 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
7278 TXD_FLAG_CPU_POST_DMA
);
7280 if (tg3_flag(tp
, HW_TSO_1
) ||
7281 tg3_flag(tp
, HW_TSO_2
) ||
7282 tg3_flag(tp
, HW_TSO_3
)) {
7283 tcp_hdr(skb
)->check
= 0;
7284 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
7286 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
7291 if (tg3_flag(tp
, HW_TSO_3
)) {
7292 mss
|= (hdr_len
& 0xc) << 12;
7294 base_flags
|= 0x00000010;
7295 base_flags
|= (hdr_len
& 0x3e0) << 5;
7296 } else if (tg3_flag(tp
, HW_TSO_2
))
7297 mss
|= hdr_len
<< 9;
7298 else if (tg3_flag(tp
, HW_TSO_1
) ||
7299 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
7300 if (tcp_opt_len
|| iph
->ihl
> 5) {
7303 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7304 mss
|= (tsflags
<< 11);
7307 if (tcp_opt_len
|| iph
->ihl
> 5) {
7310 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7311 base_flags
|= tsflags
<< 12;
7316 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
7317 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
7318 base_flags
|= TXD_FLAG_JMB_PKT
;
7320 if (vlan_tx_tag_present(skb
)) {
7321 base_flags
|= TXD_FLAG_VLAN
;
7322 vlan
= vlan_tx_tag_get(skb
);
7325 if ((unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) &&
7326 tg3_flag(tp
, TX_TSTAMP_EN
)) {
7327 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
7328 base_flags
|= TXD_FLAG_HWTSTAMP
;
7331 len
= skb_headlen(skb
);
7333 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
7334 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
7338 tnapi
->tx_buffers
[entry
].skb
= skb
;
7339 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
7341 would_hit_hwbug
= 0;
7343 if (tg3_flag(tp
, 5701_DMA_BUG
))
7344 would_hit_hwbug
= 1;
7346 if (tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
, len
, base_flags
|
7347 ((skb_shinfo(skb
)->nr_frags
== 0) ? TXD_FLAG_END
: 0),
7349 would_hit_hwbug
= 1;
7350 } else if (skb_shinfo(skb
)->nr_frags
> 0) {
7353 if (!tg3_flag(tp
, HW_TSO_1
) &&
7354 !tg3_flag(tp
, HW_TSO_2
) &&
7355 !tg3_flag(tp
, HW_TSO_3
))
7358 /* Now loop through additional data
7359 * fragments, and queue them.
7361 last
= skb_shinfo(skb
)->nr_frags
- 1;
7362 for (i
= 0; i
<= last
; i
++) {
7363 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
7365 len
= skb_frag_size(frag
);
7366 mapping
= skb_frag_dma_map(&tp
->pdev
->dev
, frag
, 0,
7367 len
, DMA_TO_DEVICE
);
7369 tnapi
->tx_buffers
[entry
].skb
= NULL
;
7370 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
7372 if (dma_mapping_error(&tp
->pdev
->dev
, mapping
))
7376 tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
,
7378 ((i
== last
) ? TXD_FLAG_END
: 0),
7380 would_hit_hwbug
= 1;
7386 if (would_hit_hwbug
) {
7387 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, i
);
7389 /* If the workaround fails due to memory/mapping
7390 * failure, silently drop this packet.
7392 entry
= tnapi
->tx_prod
;
7393 budget
= tg3_tx_avail(tnapi
);
7394 if (tigon3_dma_hwbug_workaround(tnapi
, &skb
, &entry
, &budget
,
7395 base_flags
, mss
, vlan
))
7399 skb_tx_timestamp(skb
);
7400 netdev_tx_sent_queue(txq
, skb
->len
);
7402 /* Sync BD data before updating mailbox */
7405 /* Packets are ready, update Tx producer idx local and on card. */
7406 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
7408 tnapi
->tx_prod
= entry
;
7409 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
7410 netif_tx_stop_queue(txq
);
7412 /* netif_tx_stop_queue() must be done before checking
7413 * checking tx index in tg3_tx_avail() below, because in
7414 * tg3_tx(), we update tx index before checking for
7415 * netif_tx_queue_stopped().
7418 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
7419 netif_tx_wake_queue(txq
);
7423 return NETDEV_TX_OK
;
7426 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, --i
);
7427 tnapi
->tx_buffers
[tnapi
->tx_prod
].skb
= NULL
;
7432 return NETDEV_TX_OK
;
7435 static void tg3_mac_loopback(struct tg3
*tp
, bool enable
)
7438 tp
->mac_mode
&= ~(MAC_MODE_HALF_DUPLEX
|
7439 MAC_MODE_PORT_MODE_MASK
);
7441 tp
->mac_mode
|= MAC_MODE_PORT_INT_LPBACK
;
7443 if (!tg3_flag(tp
, 5705_PLUS
))
7444 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
7446 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
7447 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
7449 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7451 tp
->mac_mode
&= ~MAC_MODE_PORT_INT_LPBACK
;
7453 if (tg3_flag(tp
, 5705_PLUS
) ||
7454 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) ||
7455 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
7456 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
7459 tw32(MAC_MODE
, tp
->mac_mode
);
7463 static int tg3_phy_lpbk_set(struct tg3
*tp
, u32 speed
, bool extlpbk
)
7465 u32 val
, bmcr
, mac_mode
, ptest
= 0;
7467 tg3_phy_toggle_apd(tp
, false);
7468 tg3_phy_toggle_automdix(tp
, 0);
7470 if (extlpbk
&& tg3_phy_set_extloopbk(tp
))
7473 bmcr
= BMCR_FULLDPLX
;
7478 bmcr
|= BMCR_SPEED100
;
7482 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
7484 bmcr
|= BMCR_SPEED100
;
7487 bmcr
|= BMCR_SPEED1000
;
7492 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
7493 tg3_readphy(tp
, MII_CTRL1000
, &val
);
7494 val
|= CTL1000_AS_MASTER
|
7495 CTL1000_ENABLE_MASTER
;
7496 tg3_writephy(tp
, MII_CTRL1000
, val
);
7498 ptest
= MII_TG3_FET_PTEST_TRIM_SEL
|
7499 MII_TG3_FET_PTEST_TRIM_2
;
7500 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
);
7503 bmcr
|= BMCR_LOOPBACK
;
7505 tg3_writephy(tp
, MII_BMCR
, bmcr
);
7507 /* The write needs to be flushed for the FETs */
7508 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
7509 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
7513 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
7514 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
7515 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
|
7516 MII_TG3_FET_PTEST_FRC_TX_LINK
|
7517 MII_TG3_FET_PTEST_FRC_TX_LOCK
);
7519 /* The write needs to be flushed for the AC131 */
7520 tg3_readphy(tp
, MII_TG3_FET_PTEST
, &val
);
7523 /* Reset to prevent losing 1st rx packet intermittently */
7524 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
7525 tg3_flag(tp
, 5780_CLASS
)) {
7526 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
7528 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
7531 mac_mode
= tp
->mac_mode
&
7532 ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
7533 if (speed
== SPEED_1000
)
7534 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7536 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
7538 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
7539 u32 masked_phy_id
= tp
->phy_id
& TG3_PHY_ID_MASK
;
7541 if (masked_phy_id
== TG3_PHY_ID_BCM5401
)
7542 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
7543 else if (masked_phy_id
== TG3_PHY_ID_BCM5411
)
7544 mac_mode
|= MAC_MODE_LINK_POLARITY
;
7546 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
7547 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
7550 tw32(MAC_MODE
, mac_mode
);
7556 static void tg3_set_loopback(struct net_device
*dev
, netdev_features_t features
)
7558 struct tg3
*tp
= netdev_priv(dev
);
7560 if (features
& NETIF_F_LOOPBACK
) {
7561 if (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
)
7564 spin_lock_bh(&tp
->lock
);
7565 tg3_mac_loopback(tp
, true);
7566 netif_carrier_on(tp
->dev
);
7567 spin_unlock_bh(&tp
->lock
);
7568 netdev_info(dev
, "Internal MAC loopback mode enabled.\n");
7570 if (!(tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
7573 spin_lock_bh(&tp
->lock
);
7574 tg3_mac_loopback(tp
, false);
7575 /* Force link status check */
7576 tg3_setup_phy(tp
, 1);
7577 spin_unlock_bh(&tp
->lock
);
7578 netdev_info(dev
, "Internal MAC loopback mode disabled.\n");
7582 static netdev_features_t
tg3_fix_features(struct net_device
*dev
,
7583 netdev_features_t features
)
7585 struct tg3
*tp
= netdev_priv(dev
);
7587 if (dev
->mtu
> ETH_DATA_LEN
&& tg3_flag(tp
, 5780_CLASS
))
7588 features
&= ~NETIF_F_ALL_TSO
;
7593 static int tg3_set_features(struct net_device
*dev
, netdev_features_t features
)
7595 netdev_features_t changed
= dev
->features
^ features
;
7597 if ((changed
& NETIF_F_LOOPBACK
) && netif_running(dev
))
7598 tg3_set_loopback(dev
, features
);
7603 static void tg3_rx_prodring_free(struct tg3
*tp
,
7604 struct tg3_rx_prodring_set
*tpr
)
7608 if (tpr
!= &tp
->napi
[0].prodring
) {
7609 for (i
= tpr
->rx_std_cons_idx
; i
!= tpr
->rx_std_prod_idx
;
7610 i
= (i
+ 1) & tp
->rx_std_ring_mask
)
7611 tg3_rx_data_free(tp
, &tpr
->rx_std_buffers
[i
],
7614 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
7615 for (i
= tpr
->rx_jmb_cons_idx
;
7616 i
!= tpr
->rx_jmb_prod_idx
;
7617 i
= (i
+ 1) & tp
->rx_jmb_ring_mask
) {
7618 tg3_rx_data_free(tp
, &tpr
->rx_jmb_buffers
[i
],
7626 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++)
7627 tg3_rx_data_free(tp
, &tpr
->rx_std_buffers
[i
],
7630 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
7631 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++)
7632 tg3_rx_data_free(tp
, &tpr
->rx_jmb_buffers
[i
],
7637 /* Initialize rx rings for packet processing.
7639 * The chip has been shut down and the driver detached from
7640 * the networking, so no interrupts or new tx packets will
7641 * end up in the driver. tp->{tx,}lock are held and thus
7644 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
7645 struct tg3_rx_prodring_set
*tpr
)
7647 u32 i
, rx_pkt_dma_sz
;
7649 tpr
->rx_std_cons_idx
= 0;
7650 tpr
->rx_std_prod_idx
= 0;
7651 tpr
->rx_jmb_cons_idx
= 0;
7652 tpr
->rx_jmb_prod_idx
= 0;
7654 if (tpr
!= &tp
->napi
[0].prodring
) {
7655 memset(&tpr
->rx_std_buffers
[0], 0,
7656 TG3_RX_STD_BUFF_RING_SIZE(tp
));
7657 if (tpr
->rx_jmb_buffers
)
7658 memset(&tpr
->rx_jmb_buffers
[0], 0,
7659 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
7663 /* Zero out all descriptors. */
7664 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
7666 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
7667 if (tg3_flag(tp
, 5780_CLASS
) &&
7668 tp
->dev
->mtu
> ETH_DATA_LEN
)
7669 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
7670 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
7672 /* Initialize invariants of the rings, we only set this
7673 * stuff once. This works because the card does not
7674 * write into the rx buffer posting rings.
7676 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
7677 struct tg3_rx_buffer_desc
*rxd
;
7679 rxd
= &tpr
->rx_std
[i
];
7680 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
7681 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
7682 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
7683 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
7686 /* Now allocate fresh SKBs for each rx ring. */
7687 for (i
= 0; i
< tp
->rx_pending
; i
++) {
7688 unsigned int frag_size
;
7690 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
,
7692 netdev_warn(tp
->dev
,
7693 "Using a smaller RX standard ring. Only "
7694 "%d out of %d buffers were allocated "
7695 "successfully\n", i
, tp
->rx_pending
);
7703 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
7706 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
7708 if (!tg3_flag(tp
, JUMBO_RING_ENABLE
))
7711 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
7712 struct tg3_rx_buffer_desc
*rxd
;
7714 rxd
= &tpr
->rx_jmb
[i
].std
;
7715 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
7716 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
7718 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
7719 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
7722 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
7723 unsigned int frag_size
;
7725 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
,
7727 netdev_warn(tp
->dev
,
7728 "Using a smaller RX jumbo ring. Only %d "
7729 "out of %d buffers were allocated "
7730 "successfully\n", i
, tp
->rx_jumbo_pending
);
7733 tp
->rx_jumbo_pending
= i
;
7742 tg3_rx_prodring_free(tp
, tpr
);
/* Tear down one RX producer ring set: free the host-side buffer
 * bookkeeping arrays and the DMA-coherent descriptor rings.
 * NOTE(review): the embedded original line numbers jump here
 * (7755 -> 7759); extraction appears to have dropped the NULL guards
 * around the dma_free_coherent() calls and the statements clearing
 * tpr->rx_std / tpr->rx_jmb afterwards -- confirm against upstream.
 */
7746 static void tg3_rx_prodring_fini(struct tg3
*tp
,
7747 struct tg3_rx_prodring_set
*tpr
)
/* Free the standard-ring buffer bookkeeping array; NULL it so a
 * repeated call is harmless (kfree(NULL) is a no-op).
 */
7749 kfree(tpr
->rx_std_buffers
);
7750 tpr
->rx_std_buffers
= NULL
;
/* Same for the jumbo-ring buffer bookkeeping array. */
7751 kfree(tpr
->rx_jmb_buffers
);
7752 tpr
->rx_jmb_buffers
= NULL
;
/* Release the DMA-coherent standard RX descriptor ring. */
7754 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_STD_RING_BYTES(tp
),
7755 tpr
->rx_std
, tpr
->rx_std_mapping
);
/* Release the DMA-coherent jumbo RX descriptor ring. */
7759 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_JMB_RING_BYTES(tp
),
7760 tpr
->rx_jmb
, tpr
->rx_jmb_mapping
);
7765 static int tg3_rx_prodring_init(struct tg3
*tp
,
7766 struct tg3_rx_prodring_set
*tpr
)
7768 tpr
->rx_std_buffers
= kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp
),
7770 if (!tpr
->rx_std_buffers
)
7773 tpr
->rx_std
= dma_alloc_coherent(&tp
->pdev
->dev
,
7774 TG3_RX_STD_RING_BYTES(tp
),
7775 &tpr
->rx_std_mapping
,
7780 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
7781 tpr
->rx_jmb_buffers
= kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp
),
7783 if (!tpr
->rx_jmb_buffers
)
7786 tpr
->rx_jmb
= dma_alloc_coherent(&tp
->pdev
->dev
,
7787 TG3_RX_JMB_RING_BYTES(tp
),
7788 &tpr
->rx_jmb_mapping
,
7797 tg3_rx_prodring_fini(tp
, tpr
);
7801 /* Free up pending packets in all rx/tx rings.
7803 * The chip has been shut down and the driver detached from
7804 * the networking, so no interrupts or new tx packets will
7805 * end up in the driver. tp->{tx,}lock is not held and we are not
7806 * in an interrupt context and thus may sleep.
7808 static void tg3_free_rings(struct tg3
*tp
)
7812 for (j
= 0; j
< tp
->irq_cnt
; j
++) {
7813 struct tg3_napi
*tnapi
= &tp
->napi
[j
];
7815 tg3_rx_prodring_free(tp
, &tnapi
->prodring
);
7817 if (!tnapi
->tx_buffers
)
7820 for (i
= 0; i
< TG3_TX_RING_SIZE
; i
++) {
7821 struct sk_buff
*skb
= tnapi
->tx_buffers
[i
].skb
;
7826 tg3_tx_skb_unmap(tnapi
, i
,
7827 skb_shinfo(skb
)->nr_frags
- 1);
7829 dev_kfree_skb_any(skb
);
7831 netdev_tx_reset_queue(netdev_get_tx_queue(tp
->dev
, j
));
7835 /* Initialize tx/rx rings for packet processing.
7837 * The chip has been shut down and the driver detached from
7838 * the networking, so no interrupts or new tx packets will
7839 * end up in the driver. tp->{tx,}lock are held and thus
7842 static int tg3_init_rings(struct tg3
*tp
)
7846 /* Free up all the SKBs. */
7849 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
7850 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7852 tnapi
->last_tag
= 0;
7853 tnapi
->last_irq_tag
= 0;
7854 tnapi
->hw_status
->status
= 0;
7855 tnapi
->hw_status
->status_tag
= 0;
7856 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
7861 memset(tnapi
->tx_ring
, 0, TG3_TX_RING_BYTES
);
7863 tnapi
->rx_rcb_ptr
= 0;
7865 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
7867 if (tg3_rx_prodring_alloc(tp
, &tnapi
->prodring
)) {
7876 static void tg3_mem_tx_release(struct tg3
*tp
)
7880 for (i
= 0; i
< tp
->irq_max
; i
++) {
7881 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7883 if (tnapi
->tx_ring
) {
7884 dma_free_coherent(&tp
->pdev
->dev
, TG3_TX_RING_BYTES
,
7885 tnapi
->tx_ring
, tnapi
->tx_desc_mapping
);
7886 tnapi
->tx_ring
= NULL
;
7889 kfree(tnapi
->tx_buffers
);
7890 tnapi
->tx_buffers
= NULL
;
7894 static int tg3_mem_tx_acquire(struct tg3
*tp
)
7897 struct tg3_napi
*tnapi
= &tp
->napi
[0];
7899 /* If multivector TSS is enabled, vector 0 does not handle
7900 * tx interrupts. Don't allocate any resources for it.
7902 if (tg3_flag(tp
, ENABLE_TSS
))
7905 for (i
= 0; i
< tp
->txq_cnt
; i
++, tnapi
++) {
7906 tnapi
->tx_buffers
= kzalloc(sizeof(struct tg3_tx_ring_info
) *
7907 TG3_TX_RING_SIZE
, GFP_KERNEL
);
7908 if (!tnapi
->tx_buffers
)
7911 tnapi
->tx_ring
= dma_alloc_coherent(&tp
->pdev
->dev
,
7913 &tnapi
->tx_desc_mapping
,
7915 if (!tnapi
->tx_ring
)
7922 tg3_mem_tx_release(tp
);
/* Release the RX resources of every interrupt vector: the producer
 * ring set and the DMA-coherent RX return (completion) ring.
 * NOTE(review): the embedded original line numbers jump here
 * (7933 -> 7938 and 7939 -> 7941); extraction appears to have dropped
 * a "skip if rx_rcb is NULL" guard and the tnapi->rx_rcb argument of
 * dma_free_coherent() -- confirm against upstream before relying on
 * this text.
 */
7926 static void tg3_mem_rx_release(struct tg3
*tp
)
7930 for (i
= 0; i
< tp
->irq_max
; i
++) {
7931 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
/* Free this vector's producer ring set (buffers + descriptors). */
7933 tg3_rx_prodring_fini(tp
, &tnapi
->prodring
);
/* Free the RX return ring and clear the pointer so a repeated
 * call is harmless.
 */
7938 dma_free_coherent(&tp
->pdev
->dev
,
7939 TG3_RX_RCB_RING_BYTES(tp
),
7941 tnapi
->rx_rcb_mapping
);
7942 tnapi
->rx_rcb
= NULL
;
7946 static int tg3_mem_rx_acquire(struct tg3
*tp
)
7948 unsigned int i
, limit
;
7950 limit
= tp
->rxq_cnt
;
7952 /* If RSS is enabled, we need a (dummy) producer ring
7953 * set on vector zero. This is the true hw prodring.
7955 if (tg3_flag(tp
, ENABLE_RSS
))
7958 for (i
= 0; i
< limit
; i
++) {
7959 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7961 if (tg3_rx_prodring_init(tp
, &tnapi
->prodring
))
7964 /* If multivector RSS is enabled, vector 0
7965 * does not handle rx or tx interrupts.
7966 * Don't allocate any resources for it.
7968 if (!i
&& tg3_flag(tp
, ENABLE_RSS
))
7971 tnapi
->rx_rcb
= dma_alloc_coherent(&tp
->pdev
->dev
,
7972 TG3_RX_RCB_RING_BYTES(tp
),
7973 &tnapi
->rx_rcb_mapping
,
7978 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
7984 tg3_mem_rx_release(tp
);
7989 * Must not be invoked with interrupt sources disabled and
7990 * the hardware shutdown down.
7992 static void tg3_free_consistent(struct tg3
*tp
)
7996 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
7997 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
7999 if (tnapi
->hw_status
) {
8000 dma_free_coherent(&tp
->pdev
->dev
, TG3_HW_STATUS_SIZE
,
8002 tnapi
->status_mapping
);
8003 tnapi
->hw_status
= NULL
;
8007 tg3_mem_rx_release(tp
);
8008 tg3_mem_tx_release(tp
);
8011 dma_free_coherent(&tp
->pdev
->dev
, sizeof(struct tg3_hw_stats
),
8012 tp
->hw_stats
, tp
->stats_mapping
);
8013 tp
->hw_stats
= NULL
;
8018 * Must not be invoked with interrupt sources disabled and
8019 * the hardware shutdown down. Can sleep.
8021 static int tg3_alloc_consistent(struct tg3
*tp
)
8025 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
8026 sizeof(struct tg3_hw_stats
),
8032 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
8034 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8035 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8036 struct tg3_hw_status
*sblk
;
8038 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
8040 &tnapi
->status_mapping
,
8042 if (!tnapi
->hw_status
)
8045 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8046 sblk
= tnapi
->hw_status
;
8048 if (tg3_flag(tp
, ENABLE_RSS
)) {
8049 u16
*prodptr
= NULL
;
8052 * When RSS is enabled, the status block format changes
8053 * slightly. The "rx_jumbo_consumer", "reserved",
8054 * and "rx_mini_consumer" members get mapped to the
8055 * other three rx return ring producer indexes.
8059 prodptr
= &sblk
->idx
[0].rx_producer
;
8062 prodptr
= &sblk
->rx_jumbo_consumer
;
8065 prodptr
= &sblk
->reserved
;
8068 prodptr
= &sblk
->rx_mini_consumer
;
8071 tnapi
->rx_rcb_prod_idx
= prodptr
;
8073 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
8077 if (tg3_mem_tx_acquire(tp
) || tg3_mem_rx_acquire(tp
))
8083 tg3_free_consistent(tp
);
8087 #define MAX_WAIT_CNT 1000
8089 /* To stop a block, clear the enable bit and poll till it
8090 * clears. tp->lock is held.
8092 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
8097 if (tg3_flag(tp
, 5705_PLUS
)) {
8104 /* We can't enable/disable these bits of the
8105 * 5705/5750, just say success.
8118 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8121 if ((val
& enable_bit
) == 0)
8125 if (i
== MAX_WAIT_CNT
&& !silent
) {
8126 dev_err(&tp
->pdev
->dev
,
8127 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8135 /* tp->lock is held. */
8136 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
8140 tg3_disable_ints(tp
);
8142 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
8143 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8146 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
8147 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
8148 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
8149 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
8150 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
8151 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
8153 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
8154 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
8155 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
8156 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
8157 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
8158 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
8159 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
8161 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
8162 tw32_f(MAC_MODE
, tp
->mac_mode
);
8165 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
8166 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8168 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8170 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
8173 if (i
>= MAX_WAIT_CNT
) {
8174 dev_err(&tp
->pdev
->dev
,
8175 "%s timed out, TX_MODE_ENABLE will not clear "
8176 "MAC_TX_MODE=%08x\n", __func__
, tr32(MAC_TX_MODE
));
8180 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
8181 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
8182 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
8184 tw32(FTQ_RESET
, 0xffffffff);
8185 tw32(FTQ_RESET
, 0x00000000);
8187 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
8188 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
8190 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8191 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8192 if (tnapi
->hw_status
)
8193 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8199 /* Save PCI command register before chip reset */
8200 static void tg3_save_pci_state(struct tg3
*tp
)
8202 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &tp
->pci_cmd
);
8205 /* Restore PCI state after chip reset */
8206 static void tg3_restore_pci_state(struct tg3
*tp
)
8210 /* Re-enable indirect register accesses. */
8211 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
8212 tp
->misc_host_ctrl
);
8214 /* Set MAX PCI retry to zero. */
8215 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
8216 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
8217 tg3_flag(tp
, PCIX_MODE
))
8218 val
|= PCISTATE_RETRY_SAME_DMA
;
8219 /* Allow reads and writes to the APE register and memory space. */
8220 if (tg3_flag(tp
, ENABLE_APE
))
8221 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8222 PCISTATE_ALLOW_APE_SHMEM_WR
|
8223 PCISTATE_ALLOW_APE_PSPACE_WR
;
8224 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
8226 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
8228 if (!tg3_flag(tp
, PCI_EXPRESS
)) {
8229 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
8230 tp
->pci_cacheline_sz
);
8231 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
8235 /* Make sure PCI-X relaxed ordering bit is clear. */
8236 if (tg3_flag(tp
, PCIX_MODE
)) {
8239 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8241 pcix_cmd
&= ~PCI_X_CMD_ERO
;
8242 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8246 if (tg3_flag(tp
, 5780_CLASS
)) {
8248 /* Chip reset on 5780 will reset MSI enable bit,
8249 * so need to restore it.
8251 if (tg3_flag(tp
, USING_MSI
)) {
8254 pci_read_config_word(tp
->pdev
,
8255 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8257 pci_write_config_word(tp
->pdev
,
8258 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8259 ctrl
| PCI_MSI_FLAGS_ENABLE
);
8260 val
= tr32(MSGINT_MODE
);
8261 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
8266 /* tp->lock is held. */
8267 static int tg3_chip_reset(struct tg3
*tp
)
8270 void (*write_op
)(struct tg3
*, u32
, u32
);
8275 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
8277 /* No matching tg3_nvram_unlock() after this because
8278 * chip reset below will undo the nvram lock.
8280 tp
->nvram_lock_cnt
= 0;
8282 /* GRC_MISC_CFG core clock reset will clear the memory
8283 * enable bit in PCI register 4 and the MSI enable bit
8284 * on some chips, so we save relevant registers here.
8286 tg3_save_pci_state(tp
);
8288 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
8289 tg3_flag(tp
, 5755_PLUS
))
8290 tw32(GRC_FASTBOOT_PC
, 0);
8293 * We must avoid the readl() that normally takes place.
8294 * It locks machines, causes machine checks, and other
8295 * fun things. So, temporarily disable the 5701
8296 * hardware workaround, while we do the reset.
8298 write_op
= tp
->write32
;
8299 if (write_op
== tg3_write_flush_reg32
)
8300 tp
->write32
= tg3_write32
;
8302 /* Prevent the irq handler from reading or writing PCI registers
8303 * during chip reset when the memory enable bit in the PCI command
8304 * register may be cleared. The chip does not generate interrupt
8305 * at this time, but the irq handler may still be called due to irq
8306 * sharing or irqpoll.
8308 tg3_flag_set(tp
, CHIP_RESETTING
);
8309 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8310 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8311 if (tnapi
->hw_status
) {
8312 tnapi
->hw_status
->status
= 0;
8313 tnapi
->hw_status
->status_tag
= 0;
8315 tnapi
->last_tag
= 0;
8316 tnapi
->last_irq_tag
= 0;
8320 for (i
= 0; i
< tp
->irq_cnt
; i
++)
8321 synchronize_irq(tp
->napi
[i
].irq_vec
);
8323 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
8324 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
8325 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
8329 val
= GRC_MISC_CFG_CORECLK_RESET
;
8331 if (tg3_flag(tp
, PCI_EXPRESS
)) {
8332 /* Force PCIe 1.0a mode */
8333 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
8334 !tg3_flag(tp
, 57765_PLUS
) &&
8335 tr32(TG3_PCIE_PHY_TSTCTL
) ==
8336 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
8337 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
8339 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
8340 tw32(GRC_MISC_CFG
, (1 << 29));
8345 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
8346 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
8347 tw32(GRC_VCPU_EXT_CTRL
,
8348 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
8351 /* Manage gphy power for all CPMU absent PCIe devices. */
8352 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
8353 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
8355 tw32(GRC_MISC_CFG
, val
);
8357 /* restore 5701 hardware bug workaround write method */
8358 tp
->write32
= write_op
;
8360 /* Unfortunately, we have to delay before the PCI read back.
8361 * Some 575X chips even will not respond to a PCI cfg access
8362 * when the reset command is given to the chip.
8364 * How do these hardware designers expect things to work
8365 * properly if the PCI write is posted for a long period
8366 * of time? It is always necessary to have some method by
8367 * which a register read back can occur to push the write
8368 * out which does the reset.
8370 * For most tg3 variants the trick below was working.
8375 /* Flush PCI posted writes. The normal MMIO registers
8376 * are inaccessible at this time so this is the only
8377 * way to make this reliably (actually, this is no longer
8378 * the case, see above). I tried to use indirect
8379 * register read/write but this upset some 5701 variants.
8381 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
8385 if (tg3_flag(tp
, PCI_EXPRESS
) && pci_is_pcie(tp
->pdev
)) {
8388 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
8392 /* Wait for link training to complete. */
8393 for (j
= 0; j
< 5000; j
++)
8396 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
8397 pci_write_config_dword(tp
->pdev
, 0xc4,
8398 cfg_val
| (1 << 15));
8401 /* Clear the "no snoop" and "relaxed ordering" bits. */
8402 val16
= PCI_EXP_DEVCTL_RELAX_EN
| PCI_EXP_DEVCTL_NOSNOOP_EN
;
8404 * Older PCIe devices only support the 128 byte
8405 * MPS setting. Enforce the restriction.
8407 if (!tg3_flag(tp
, CPMU_PRESENT
))
8408 val16
|= PCI_EXP_DEVCTL_PAYLOAD
;
8409 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_DEVCTL
, val16
);
8411 /* Clear error status */
8412 pcie_capability_write_word(tp
->pdev
, PCI_EXP_DEVSTA
,
8413 PCI_EXP_DEVSTA_CED
|
8414 PCI_EXP_DEVSTA_NFED
|
8415 PCI_EXP_DEVSTA_FED
|
8416 PCI_EXP_DEVSTA_URD
);
8419 tg3_restore_pci_state(tp
);
8421 tg3_flag_clear(tp
, CHIP_RESETTING
);
8422 tg3_flag_clear(tp
, ERROR_PROCESSED
);
8425 if (tg3_flag(tp
, 5780_CLASS
))
8426 val
= tr32(MEMARB_MODE
);
8427 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
8429 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
8431 tw32(0x5000, 0x400);
8434 tw32(GRC_MODE
, tp
->grc_mode
);
8436 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
8439 tw32(0xc4, val
| (1 << 15));
8442 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
8443 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
8444 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
8445 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
8446 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
8447 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
8450 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8451 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
8453 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
8454 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
8459 tw32_f(MAC_MODE
, val
);
8462 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
8464 err
= tg3_poll_fw(tp
);
8470 if (tg3_flag(tp
, PCI_EXPRESS
) &&
8471 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
8472 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
8473 !tg3_flag(tp
, 57765_PLUS
)) {
8476 tw32(0x7c00, val
| (1 << 25));
8479 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
8480 val
= tr32(TG3_CPMU_CLCK_ORIDE
);
8481 tw32(TG3_CPMU_CLCK_ORIDE
, val
& ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
8484 /* Reprobe ASF enable state. */
8485 tg3_flag_clear(tp
, ENABLE_ASF
);
8486 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
8487 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
8488 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
8491 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
8492 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
8493 tg3_flag_set(tp
, ENABLE_ASF
);
8494 tp
->last_event_jiffies
= jiffies
;
8495 if (tg3_flag(tp
, 5750_PLUS
))
8496 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
8503 static void tg3_get_nstats(struct tg3
*, struct rtnl_link_stats64
*);
8504 static void tg3_get_estats(struct tg3
*, struct tg3_ethtool_stats
*);
8506 /* tp->lock is held. */
8507 static int tg3_halt(struct tg3
*tp
, int kind
, int silent
)
8513 tg3_write_sig_pre_reset(tp
, kind
);
8515 tg3_abort_hw(tp
, silent
);
8516 err
= tg3_chip_reset(tp
);
8518 __tg3_set_mac_addr(tp
, 0);
8520 tg3_write_sig_legacy(tp
, kind
);
8521 tg3_write_sig_post_reset(tp
, kind
);
8524 /* Save the stats across chip resets... */
8525 tg3_get_nstats(tp
, &tp
->net_stats_prev
);
8526 tg3_get_estats(tp
, &tp
->estats_prev
);
8528 /* And make sure the next sample is new data */
8529 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
8538 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
8540 struct tg3
*tp
= netdev_priv(dev
);
8541 struct sockaddr
*addr
= p
;
8542 int err
= 0, skip_mac_1
= 0;
8544 if (!is_valid_ether_addr(addr
->sa_data
))
8545 return -EADDRNOTAVAIL
;
8547 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
8549 if (!netif_running(dev
))
8552 if (tg3_flag(tp
, ENABLE_ASF
)) {
8553 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
8555 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
8556 addr0_low
= tr32(MAC_ADDR_0_LOW
);
8557 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
8558 addr1_low
= tr32(MAC_ADDR_1_LOW
);
8560 /* Skip MAC addr 1 if ASF is using it. */
8561 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
8562 !(addr1_high
== 0 && addr1_low
== 0))
8565 spin_lock_bh(&tp
->lock
);
8566 __tg3_set_mac_addr(tp
, skip_mac_1
);
8567 spin_unlock_bh(&tp
->lock
);
8572 /* tp->lock is held. */
8573 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
8574 dma_addr_t mapping
, u32 maxlen_flags
,
8578 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
8579 ((u64
) mapping
>> 32));
8581 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
8582 ((u64
) mapping
& 0xffffffff));
8584 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
8587 if (!tg3_flag(tp
, 5705_PLUS
))
8589 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
8594 static void tg3_coal_tx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8598 if (!tg3_flag(tp
, ENABLE_TSS
)) {
8599 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
8600 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
8601 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
8603 tw32(HOSTCC_TXCOL_TICKS
, 0);
8604 tw32(HOSTCC_TXMAX_FRAMES
, 0);
8605 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
8607 for (; i
< tp
->txq_cnt
; i
++) {
8610 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
8611 tw32(reg
, ec
->tx_coalesce_usecs
);
8612 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
8613 tw32(reg
, ec
->tx_max_coalesced_frames
);
8614 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
8615 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
8619 for (; i
< tp
->irq_max
- 1; i
++) {
8620 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
8621 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
8622 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
8626 static void tg3_coal_rx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8629 u32 limit
= tp
->rxq_cnt
;
8631 if (!tg3_flag(tp
, ENABLE_RSS
)) {
8632 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
8633 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
8634 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
8637 tw32(HOSTCC_RXCOL_TICKS
, 0);
8638 tw32(HOSTCC_RXMAX_FRAMES
, 0);
8639 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
8642 for (; i
< limit
; i
++) {
8645 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
8646 tw32(reg
, ec
->rx_coalesce_usecs
);
8647 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
8648 tw32(reg
, ec
->rx_max_coalesced_frames
);
8649 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
8650 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
8653 for (; i
< tp
->irq_max
- 1; i
++) {
8654 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
8655 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
8656 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
8660 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
8662 tg3_coal_tx_init(tp
, ec
);
8663 tg3_coal_rx_init(tp
, ec
);
8665 if (!tg3_flag(tp
, 5705_PLUS
)) {
8666 u32 val
= ec
->stats_block_coalesce_usecs
;
8668 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
8669 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
8674 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
8678 /* tp->lock is held. */
8679 static void tg3_rings_reset(struct tg3
*tp
)
8682 u32 stblk
, txrcb
, rxrcb
, limit
;
8683 struct tg3_napi
*tnapi
= &tp
->napi
[0];
8685 /* Disable all transmit rings but the first. */
8686 if (!tg3_flag(tp
, 5705_PLUS
))
8687 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 16;
8688 else if (tg3_flag(tp
, 5717_PLUS
))
8689 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 4;
8690 else if (tg3_flag(tp
, 57765_CLASS
))
8691 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 2;
8693 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
8695 for (txrcb
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
8696 txrcb
< limit
; txrcb
+= TG3_BDINFO_SIZE
)
8697 tg3_write_mem(tp
, txrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
8698 BDINFO_FLAGS_DISABLED
);
8701 /* Disable all receive return rings but the first. */
8702 if (tg3_flag(tp
, 5717_PLUS
))
8703 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 17;
8704 else if (!tg3_flag(tp
, 5705_PLUS
))
8705 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 16;
8706 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8707 tg3_flag(tp
, 57765_CLASS
))
8708 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 4;
8710 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
8712 for (rxrcb
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
8713 rxrcb
< limit
; rxrcb
+= TG3_BDINFO_SIZE
)
8714 tg3_write_mem(tp
, rxrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
8715 BDINFO_FLAGS_DISABLED
);
8717 /* Disable interrupts */
8718 tw32_mailbox_f(tp
->napi
[0].int_mbox
, 1);
8719 tp
->napi
[0].chk_msi_cnt
= 0;
8720 tp
->napi
[0].last_rx_cons
= 0;
8721 tp
->napi
[0].last_tx_cons
= 0;
8723 /* Zero mailbox registers. */
8724 if (tg3_flag(tp
, SUPPORT_MSIX
)) {
8725 for (i
= 1; i
< tp
->irq_max
; i
++) {
8726 tp
->napi
[i
].tx_prod
= 0;
8727 tp
->napi
[i
].tx_cons
= 0;
8728 if (tg3_flag(tp
, ENABLE_TSS
))
8729 tw32_mailbox(tp
->napi
[i
].prodmbox
, 0);
8730 tw32_rx_mbox(tp
->napi
[i
].consmbox
, 0);
8731 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 1);
8732 tp
->napi
[i
].chk_msi_cnt
= 0;
8733 tp
->napi
[i
].last_rx_cons
= 0;
8734 tp
->napi
[i
].last_tx_cons
= 0;
8736 if (!tg3_flag(tp
, ENABLE_TSS
))
8737 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
8739 tp
->napi
[0].tx_prod
= 0;
8740 tp
->napi
[0].tx_cons
= 0;
8741 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
8742 tw32_rx_mbox(tp
->napi
[0].consmbox
, 0);
8745 /* Make sure the NIC-based send BD rings are disabled. */
8746 if (!tg3_flag(tp
, 5705_PLUS
)) {
8747 u32 mbox
= MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
8748 for (i
= 0; i
< 16; i
++)
8749 tw32_tx_mbox(mbox
+ i
* 8, 0);
8752 txrcb
= NIC_SRAM_SEND_RCB
;
8753 rxrcb
= NIC_SRAM_RCV_RET_RCB
;
8755 /* Clear status block in ram. */
8756 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8758 /* Set status block DMA address */
8759 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
8760 ((u64
) tnapi
->status_mapping
>> 32));
8761 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
8762 ((u64
) tnapi
->status_mapping
& 0xffffffff));
8764 if (tnapi
->tx_ring
) {
8765 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
8766 (TG3_TX_RING_SIZE
<<
8767 BDINFO_FLAGS_MAXLEN_SHIFT
),
8768 NIC_SRAM_TX_BUFFER_DESC
);
8769 txrcb
+= TG3_BDINFO_SIZE
;
8772 if (tnapi
->rx_rcb
) {
8773 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
8774 (tp
->rx_ret_ring_mask
+ 1) <<
8775 BDINFO_FLAGS_MAXLEN_SHIFT
, 0);
8776 rxrcb
+= TG3_BDINFO_SIZE
;
8779 stblk
= HOSTCC_STATBLCK_RING1
;
8781 for (i
= 1, tnapi
++; i
< tp
->irq_cnt
; i
++, tnapi
++) {
8782 u64 mapping
= (u64
)tnapi
->status_mapping
;
8783 tw32(stblk
+ TG3_64BIT_REG_HIGH
, mapping
>> 32);
8784 tw32(stblk
+ TG3_64BIT_REG_LOW
, mapping
& 0xffffffff);
8786 /* Clear status block in ram. */
8787 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8789 if (tnapi
->tx_ring
) {
8790 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
8791 (TG3_TX_RING_SIZE
<<
8792 BDINFO_FLAGS_MAXLEN_SHIFT
),
8793 NIC_SRAM_TX_BUFFER_DESC
);
8794 txrcb
+= TG3_BDINFO_SIZE
;
8797 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
8798 ((tp
->rx_ret_ring_mask
+ 1) <<
8799 BDINFO_FLAGS_MAXLEN_SHIFT
), 0);
8802 rxrcb
+= TG3_BDINFO_SIZE
;
8806 static void tg3_setup_rxbd_thresholds(struct tg3
*tp
)
8808 u32 val
, bdcache_maxcnt
, host_rep_thresh
, nic_rep_thresh
;
8810 if (!tg3_flag(tp
, 5750_PLUS
) ||
8811 tg3_flag(tp
, 5780_CLASS
) ||
8812 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
8813 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
8814 tg3_flag(tp
, 57765_PLUS
))
8815 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5700
;
8816 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8817 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
8818 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5755
;
8820 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5906
;
8822 nic_rep_thresh
= min(bdcache_maxcnt
/ 2, tp
->rx_std_max_post
);
8823 host_rep_thresh
= max_t(u32
, tp
->rx_pending
/ 8, 1);
8825 val
= min(nic_rep_thresh
, host_rep_thresh
);
8826 tw32(RCVBDI_STD_THRESH
, val
);
8828 if (tg3_flag(tp
, 57765_PLUS
))
8829 tw32(STD_REPLENISH_LWM
, bdcache_maxcnt
);
8831 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
8834 bdcache_maxcnt
= TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700
;
8836 host_rep_thresh
= max_t(u32
, tp
->rx_jumbo_pending
/ 8, 1);
8838 val
= min(bdcache_maxcnt
/ 2, host_rep_thresh
);
8839 tw32(RCVBDI_JUMBO_THRESH
, val
);
8841 if (tg3_flag(tp
, 57765_PLUS
))
8842 tw32(JMB_REPLENISH_LWM
, bdcache_maxcnt
);
8845 static inline u32
calc_crc(unsigned char *buf
, int len
)
8853 for (j
= 0; j
< len
; j
++) {
8856 for (k
= 0; k
< 8; k
++) {
8869 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
8871 /* accept or reject all multicast frames */
8872 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
8873 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
8874 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
8875 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
8878 static void __tg3_set_rx_mode(struct net_device
*dev
)
8880 struct tg3
*tp
= netdev_priv(dev
);
8883 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
8884 RX_MODE_KEEP_VLAN_TAG
);
8886 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8887 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8890 if (!tg3_flag(tp
, ENABLE_ASF
))
8891 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
8894 if (dev
->flags
& IFF_PROMISC
) {
8895 /* Promiscuous mode. */
8896 rx_mode
|= RX_MODE_PROMISC
;
8897 } else if (dev
->flags
& IFF_ALLMULTI
) {
8898 /* Accept all multicast. */
8899 tg3_set_multi(tp
, 1);
8900 } else if (netdev_mc_empty(dev
)) {
8901 /* Reject all multicast. */
8902 tg3_set_multi(tp
, 0);
8904 /* Accept one or more multicast(s). */
8905 struct netdev_hw_addr
*ha
;
8906 u32 mc_filter
[4] = { 0, };
8911 netdev_for_each_mc_addr(ha
, dev
) {
8912 crc
= calc_crc(ha
->addr
, ETH_ALEN
);
8914 regidx
= (bit
& 0x60) >> 5;
8916 mc_filter
[regidx
] |= (1 << bit
);
8919 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
8920 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
8921 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
8922 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
8925 if (rx_mode
!= tp
->rx_mode
) {
8926 tp
->rx_mode
= rx_mode
;
8927 tw32_f(MAC_RX_MODE
, rx_mode
);
8932 static void tg3_rss_init_dflt_indir_tbl(struct tg3
*tp
, u32 qcnt
)
8936 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
8937 tp
->rss_ind_tbl
[i
] = ethtool_rxfh_indir_default(i
, qcnt
);
8940 static void tg3_rss_check_indir_tbl(struct tg3
*tp
)
8944 if (!tg3_flag(tp
, SUPPORT_MSIX
))
8947 if (tp
->rxq_cnt
== 1) {
8948 memset(&tp
->rss_ind_tbl
[0], 0, sizeof(tp
->rss_ind_tbl
));
8952 /* Validate table against current IRQ count */
8953 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++) {
8954 if (tp
->rss_ind_tbl
[i
] >= tp
->rxq_cnt
)
8958 if (i
!= TG3_RSS_INDIR_TBL_SIZE
)
8959 tg3_rss_init_dflt_indir_tbl(tp
, tp
->rxq_cnt
);
8962 static void tg3_rss_write_indir_tbl(struct tg3
*tp
)
8965 u32 reg
= MAC_RSS_INDIR_TBL_0
;
8967 while (i
< TG3_RSS_INDIR_TBL_SIZE
) {
8968 u32 val
= tp
->rss_ind_tbl
[i
];
8970 for (; i
% 8; i
++) {
8972 val
|= tp
->rss_ind_tbl
[i
];
8979 /* tp->lock is held. */
8980 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
8982 u32 val
, rdmac_mode
;
8984 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
8986 tg3_disable_ints(tp
);
8990 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
8992 if (tg3_flag(tp
, INIT_COMPLETE
))
8993 tg3_abort_hw(tp
, 1);
8995 /* Enable MAC control of LPI */
8996 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
8997 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
,
8998 TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
8999 TG3_CPMU_EEE_LNKIDL_UART_IDL
);
9001 tw32_f(TG3_CPMU_EEE_CTRL
,
9002 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
9004 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
9005 TG3_CPMU_EEEMD_LPI_IN_TX
|
9006 TG3_CPMU_EEEMD_LPI_IN_RX
|
9007 TG3_CPMU_EEEMD_EEE_ENABLE
;
9009 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
9010 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
9012 if (tg3_flag(tp
, ENABLE_APE
))
9013 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
9015 tw32_f(TG3_CPMU_EEE_MODE
, val
);
9017 tw32_f(TG3_CPMU_EEE_DBTMR1
,
9018 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
9019 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
9021 tw32_f(TG3_CPMU_EEE_DBTMR2
,
9022 TG3_CPMU_DBTMR2_APE_TX_2047US
|
9023 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
9029 err
= tg3_chip_reset(tp
);
9033 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
9035 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
) {
9036 val
= tr32(TG3_CPMU_CTRL
);
9037 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
9038 tw32(TG3_CPMU_CTRL
, val
);
9040 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9041 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9042 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9043 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9045 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
9046 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
9047 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
9048 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
9050 val
= tr32(TG3_CPMU_HST_ACC
);
9051 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
9052 val
|= CPMU_HST_ACC_MACCLK_6_25
;
9053 tw32(TG3_CPMU_HST_ACC
, val
);
9056 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
9057 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
9058 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
9059 PCIE_PWR_MGMT_L1_THRESH_4MS
;
9060 tw32(PCIE_PWR_MGMT_THRESH
, val
);
9062 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
9063 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
9065 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
9067 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9068 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9071 if (tg3_flag(tp
, L1PLLPD_EN
)) {
9072 u32 grc_mode
= tr32(GRC_MODE
);
9074 /* Access the lower 1K of PL PCIE block registers. */
9075 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9076 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9078 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
9079 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
9080 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
9082 tw32(GRC_MODE
, grc_mode
);
9085 if (tg3_flag(tp
, 57765_CLASS
)) {
9086 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
) {
9087 u32 grc_mode
= tr32(GRC_MODE
);
9089 /* Access the lower 1K of PL PCIE block registers. */
9090 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9091 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9093 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
9094 TG3_PCIE_PL_LO_PHYCTL5
);
9095 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
9096 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
9098 tw32(GRC_MODE
, grc_mode
);
9101 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_57765_AX
) {
9102 u32 grc_mode
= tr32(GRC_MODE
);
9104 /* Access the lower 1K of DL PCIE block registers. */
9105 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9106 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_DL_SEL
);
9108 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
9109 TG3_PCIE_DL_LO_FTSMAX
);
9110 val
&= ~TG3_PCIE_DL_LO_FTSMAX_MSK
;
9111 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_DL_LO_FTSMAX
,
9112 val
| TG3_PCIE_DL_LO_FTSMAX_VAL
);
9114 tw32(GRC_MODE
, grc_mode
);
9117 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9118 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9119 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9120 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9123 /* This works around an issue with Athlon chipsets on
9124 * B3 tigon3 silicon. This bit has no effect on any
9125 * other revision. But do not set this on PCI Express
9126 * chips and don't even touch the clocks if the CPMU is present.
9128 if (!tg3_flag(tp
, CPMU_PRESENT
)) {
9129 if (!tg3_flag(tp
, PCI_EXPRESS
))
9130 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
9131 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
9134 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
9135 tg3_flag(tp
, PCIX_MODE
)) {
9136 val
= tr32(TG3PCI_PCISTATE
);
9137 val
|= PCISTATE_RETRY_SAME_DMA
;
9138 tw32(TG3PCI_PCISTATE
, val
);
9141 if (tg3_flag(tp
, ENABLE_APE
)) {
9142 /* Allow reads and writes to the
9143 * APE register and memory space.
9145 val
= tr32(TG3PCI_PCISTATE
);
9146 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
9147 PCISTATE_ALLOW_APE_SHMEM_WR
|
9148 PCISTATE_ALLOW_APE_PSPACE_WR
;
9149 tw32(TG3PCI_PCISTATE
, val
);
9152 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
9153 /* Enable some hw fixes. */
9154 val
= tr32(TG3PCI_MSI_DATA
);
9155 val
|= (1 << 26) | (1 << 28) | (1 << 29);
9156 tw32(TG3PCI_MSI_DATA
, val
);
9159 /* Descriptor ring init may make accesses to the
9160 * NIC SRAM area to setup the TX descriptors, so we
9161 * can only do this after the hardware has been
9162 * successfully reset.
9164 err
= tg3_init_rings(tp
);
9168 if (tg3_flag(tp
, 57765_PLUS
)) {
9169 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
9170 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
9171 if (tp
->pci_chip_rev_id
== CHIPREV_ID_57765_A0
)
9172 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
9173 if (!tg3_flag(tp
, 57765_CLASS
) &&
9174 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
)
9175 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
9176 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
9177 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5784
&&
9178 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5761
) {
9179 /* This value is determined during the probe time DMA
9180 * engine test, tg3_test_dma.
9182 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
9185 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
9186 GRC_MODE_4X_NIC_SEND_RINGS
|
9187 GRC_MODE_NO_TX_PHDR_CSUM
|
9188 GRC_MODE_NO_RX_PHDR_CSUM
);
9189 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
9191 /* Pseudo-header checksum is done by hardware logic and not
9192 * the offload processers, so make the chip do the pseudo-
9193 * header checksums on receive. For transmit it is more
9194 * convenient to do the pseudo-header checksum in software
9195 * as Linux does that on transmit for us in all cases.
9197 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
9199 val
= GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
;
9201 tw32(TG3_RX_PTP_CTL
,
9202 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
9204 if (tg3_flag(tp
, PTP_CAPABLE
))
9205 val
|= GRC_MODE_TIME_SYNC_ENABLE
;
9207 tw32(GRC_MODE
, tp
->grc_mode
| val
);
9209 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9210 val
= tr32(GRC_MISC_CFG
);
9212 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
9213 tw32(GRC_MISC_CFG
, val
);
9215 /* Initialize MBUF/DESC pool. */
9216 if (tg3_flag(tp
, 5750_PLUS
)) {
9218 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
9219 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
9220 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
9221 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
9223 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
9224 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
9225 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
9226 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
9229 fw_len
= tp
->fw_len
;
9230 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
9231 tw32(BUFMGR_MB_POOL_ADDR
,
9232 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
9233 tw32(BUFMGR_MB_POOL_SIZE
,
9234 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
9237 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9238 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9239 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
9240 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9241 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
9242 tw32(BUFMGR_MB_HIGH_WATER
,
9243 tp
->bufmgr_config
.mbuf_high_water
);
9245 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9246 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
9247 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9248 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
9249 tw32(BUFMGR_MB_HIGH_WATER
,
9250 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
9252 tw32(BUFMGR_DMA_LOW_WATER
,
9253 tp
->bufmgr_config
.dma_low_water
);
9254 tw32(BUFMGR_DMA_HIGH_WATER
,
9255 tp
->bufmgr_config
.dma_high_water
);
9257 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
9258 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
9259 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
9260 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
9261 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
9262 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
)
9263 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
9264 tw32(BUFMGR_MODE
, val
);
9265 for (i
= 0; i
< 2000; i
++) {
9266 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
9271 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
9275 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
9276 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
9278 tg3_setup_rxbd_thresholds(tp
);
9280 /* Initialize TG3_BDINFO's at:
9281 * RCVDBDI_STD_BD: standard eth size rx ring
9282 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9283 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9286 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9287 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9288 * ring attribute flags
9289 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9291 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9292 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9294 * The size of each ring is fixed in the firmware, but the location is
9297 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9298 ((u64
) tpr
->rx_std_mapping
>> 32));
9299 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9300 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
9301 if (!tg3_flag(tp
, 5717_PLUS
))
9302 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
9303 NIC_SRAM_RX_BUFFER_DESC
);
9305 /* Disable the mini ring */
9306 if (!tg3_flag(tp
, 5705_PLUS
))
9307 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9308 BDINFO_FLAGS_DISABLED
);
9310 /* Program the jumbo buffer descriptor ring control
9311 * blocks on those devices that have them.
9313 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
9314 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
9316 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
9317 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9318 ((u64
) tpr
->rx_jmb_mapping
>> 32));
9319 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9320 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
9321 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
9322 BDINFO_FLAGS_MAXLEN_SHIFT
;
9323 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9324 val
| BDINFO_FLAGS_USE_EXT_RECV
);
9325 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
9326 tg3_flag(tp
, 57765_CLASS
))
9327 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
9328 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
9330 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9331 BDINFO_FLAGS_DISABLED
);
9334 if (tg3_flag(tp
, 57765_PLUS
)) {
9335 val
= TG3_RX_STD_RING_SIZE(tp
);
9336 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
9337 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
9339 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9341 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9343 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
9345 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
9346 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
9348 tpr
->rx_jmb_prod_idx
=
9349 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
9350 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
9352 tg3_rings_reset(tp
);
9354 /* Initialize MAC address and backoff seed. */
9355 __tg3_set_mac_addr(tp
, 0);
9357 /* MTU + ethernet header + FCS + optional VLAN tag */
9358 tw32(MAC_RX_MTU_SIZE
,
9359 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
9361 /* The slot time is changed by tg3_setup_phy if we
9362 * run at gigabit with half duplex.
9364 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
9365 (6 << TX_LENGTHS_IPG_SHIFT
) |
9366 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
9368 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
9369 val
|= tr32(MAC_TX_LENGTHS
) &
9370 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
9371 TX_LENGTHS_CNT_DWN_VAL_MSK
);
9373 tw32(MAC_TX_LENGTHS
, val
);
9375 /* Receive rules. */
9376 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
9377 tw32(RCVLPC_CONFIG
, 0x0181);
9379 /* Calculate RDMAC_MODE setting early, we need it to determine
9380 * the RCVLPC_STATE_ENABLE mask.
9382 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
9383 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
9384 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
9385 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
9386 RDMAC_MODE_LNGREAD_ENAB
);
9388 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
)
9389 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
9391 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9392 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
9393 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
9394 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
9395 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
9396 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
9398 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9399 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
9400 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9401 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
9402 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
9403 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9404 !tg3_flag(tp
, IS_5788
)) {
9405 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9409 if (tg3_flag(tp
, PCI_EXPRESS
))
9410 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9412 if (tg3_flag(tp
, HW_TSO_1
) ||
9413 tg3_flag(tp
, HW_TSO_2
) ||
9414 tg3_flag(tp
, HW_TSO_3
))
9415 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
9417 if (tg3_flag(tp
, 57765_PLUS
) ||
9418 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
9419 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
9420 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
9422 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
9423 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
9425 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
9426 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
9427 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
9428 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
9429 tg3_flag(tp
, 57765_PLUS
)) {
9430 val
= tr32(TG3_RDMA_RSRVCTRL_REG
);
9431 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
) {
9432 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
9433 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
9434 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
9435 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
9436 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
9437 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
9439 tw32(TG3_RDMA_RSRVCTRL_REG
,
9440 val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
9443 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
9444 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
9445 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9446 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
|
9447 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
9448 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
9451 /* Receive/send statistics. */
9452 if (tg3_flag(tp
, 5750_PLUS
)) {
9453 val
= tr32(RCVLPC_STATS_ENABLE
);
9454 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
9455 tw32(RCVLPC_STATS_ENABLE
, val
);
9456 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
9457 tg3_flag(tp
, TSO_CAPABLE
)) {
9458 val
= tr32(RCVLPC_STATS_ENABLE
);
9459 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
9460 tw32(RCVLPC_STATS_ENABLE
, val
);
9462 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
9464 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
9465 tw32(SNDDATAI_STATSENAB
, 0xffffff);
9466 tw32(SNDDATAI_STATSCTRL
,
9467 (SNDDATAI_SCTRL_ENABLE
|
9468 SNDDATAI_SCTRL_FASTUPD
));
9470 /* Setup host coalescing engine. */
9471 tw32(HOSTCC_MODE
, 0);
9472 for (i
= 0; i
< 2000; i
++) {
9473 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
9478 __tg3_set_coalesce(tp
, &tp
->coal
);
9480 if (!tg3_flag(tp
, 5705_PLUS
)) {
9481 /* Status/statistics block address. See tg3_timer,
9482 * the tg3_periodic_fetch_stats call there, and
9483 * tg3_get_stats to see how this works for 5705/5750 chips.
9485 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9486 ((u64
) tp
->stats_mapping
>> 32));
9487 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9488 ((u64
) tp
->stats_mapping
& 0xffffffff));
9489 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
9491 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
9493 /* Clear statistics and status block memory areas */
9494 for (i
= NIC_SRAM_STATS_BLK
;
9495 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
9497 tg3_write_mem(tp
, i
, 0);
9502 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
9504 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
9505 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
9506 if (!tg3_flag(tp
, 5705_PLUS
))
9507 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
9509 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
9510 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
9511 /* reset to prevent losing 1st rx packet intermittently */
9512 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9516 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
9517 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
|
9518 MAC_MODE_FHDE_ENABLE
;
9519 if (tg3_flag(tp
, ENABLE_APE
))
9520 tp
->mac_mode
|= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
9521 if (!tg3_flag(tp
, 5705_PLUS
) &&
9522 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9523 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
9524 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
9525 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
9528 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9529 * If TG3_FLAG_IS_NIC is zero, we should read the
9530 * register to preserve the GPIO settings for LOMs. The GPIOs,
9531 * whether used as inputs or outputs, are set by boot code after
9534 if (!tg3_flag(tp
, IS_NIC
)) {
9537 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
9538 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
9539 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
9541 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
9542 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
9543 GRC_LCLCTRL_GPIO_OUTPUT3
;
9545 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
9546 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
9548 tp
->grc_local_ctrl
&= ~gpio_mask
;
9549 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
9551 /* GPIO1 must be driven high for eeprom write protect */
9552 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
9553 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
9554 GRC_LCLCTRL_GPIO_OUTPUT1
);
9556 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9559 if (tg3_flag(tp
, USING_MSIX
)) {
9560 val
= tr32(MSGINT_MODE
);
9561 val
|= MSGINT_MODE_ENABLE
;
9562 if (tp
->irq_cnt
> 1)
9563 val
|= MSGINT_MODE_MULTIVEC_EN
;
9564 if (!tg3_flag(tp
, 1SHOT_MSI
))
9565 val
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
9566 tw32(MSGINT_MODE
, val
);
9569 if (!tg3_flag(tp
, 5705_PLUS
)) {
9570 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
9574 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
9575 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
9576 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
9577 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
9578 WDMAC_MODE_LNGREAD_ENAB
);
9580 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9581 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
9582 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9583 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
9584 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
9586 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9587 !tg3_flag(tp
, IS_5788
)) {
9588 val
|= WDMAC_MODE_RX_ACCEL
;
9592 /* Enable host coalescing bug fix */
9593 if (tg3_flag(tp
, 5755_PLUS
))
9594 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
9596 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
9597 val
|= WDMAC_MODE_BURST_ALL_DATA
;
9599 tw32_f(WDMAC_MODE
, val
);
9602 if (tg3_flag(tp
, PCIX_MODE
)) {
9605 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9607 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
9608 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
9609 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9610 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
9611 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
9612 pcix_cmd
|= PCI_X_CMD_READ_2K
;
9614 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
9618 tw32_f(RDMAC_MODE
, rdmac_mode
);
9621 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
) {
9622 for (i
= 0; i
< TG3_NUM_RDMA_CHANNELS
; i
++) {
9623 if (tr32(TG3_RDMA_LENGTH
+ (i
<< 2)) > TG3_MAX_MTU(tp
))
9626 if (i
< TG3_NUM_RDMA_CHANNELS
) {
9627 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9628 val
|= TG3_LSO_RD_DMA_TX_LENGTH_WA
;
9629 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
9630 tg3_flag_set(tp
, 5719_RDMA_BUG
);
9634 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
9635 if (!tg3_flag(tp
, 5705_PLUS
))
9636 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
9638 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
9640 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
9642 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
9644 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
9645 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
9646 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
9647 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
9648 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
9649 tw32(RCVDBDI_MODE
, val
);
9650 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
9651 if (tg3_flag(tp
, HW_TSO_1
) ||
9652 tg3_flag(tp
, HW_TSO_2
) ||
9653 tg3_flag(tp
, HW_TSO_3
))
9654 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
9655 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
9656 if (tg3_flag(tp
, ENABLE_TSS
))
9657 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
9658 tw32(SNDBDI_MODE
, val
);
9659 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
9661 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
9662 err
= tg3_load_5701_a0_firmware_fix(tp
);
9667 if (tg3_flag(tp
, TSO_CAPABLE
)) {
9668 err
= tg3_load_tso_firmware(tp
);
9673 tp
->tx_mode
= TX_MODE_ENABLE
;
9675 if (tg3_flag(tp
, 5755_PLUS
) ||
9676 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
9677 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
9679 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
9680 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
9681 tp
->tx_mode
&= ~val
;
9682 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
9685 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
9688 if (tg3_flag(tp
, ENABLE_RSS
)) {
9689 tg3_rss_write_indir_tbl(tp
);
9691 /* Setup the "secret" hash key. */
9692 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
9693 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
9694 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
9695 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
9696 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
9697 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
9698 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
9699 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
9700 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
9701 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
9704 tp
->rx_mode
= RX_MODE_ENABLE
;
9705 if (tg3_flag(tp
, 5755_PLUS
))
9706 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
9708 if (tg3_flag(tp
, ENABLE_RSS
))
9709 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
9710 RX_MODE_RSS_ITBL_HASH_BITS_7
|
9711 RX_MODE_RSS_IPV6_HASH_EN
|
9712 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
9713 RX_MODE_RSS_IPV4_HASH_EN
|
9714 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
9716 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9719 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
9721 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
9722 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9723 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
9726 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
9729 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
9730 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
9731 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
9732 /* Set drive transmission level to 1.2V */
9733 /* only if the signal pre-emphasis bit is not set */
9734 val
= tr32(MAC_SERDES_CFG
);
9737 tw32(MAC_SERDES_CFG
, val
);
9739 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
9740 tw32(MAC_SERDES_CFG
, 0x616000);
9743 /* Prevent chip from dropping frames when flow control
9746 if (tg3_flag(tp
, 57765_CLASS
))
9750 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
9752 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
9753 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
9754 /* Use hardware link auto-negotiation */
9755 tg3_flag_set(tp
, HW_AUTONEG
);
9758 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
9759 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
9762 tmp
= tr32(SERDES_RX_CTRL
);
9763 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
9764 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
9765 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
9766 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9769 if (!tg3_flag(tp
, USE_PHYLIB
)) {
9770 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
9771 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
9773 err
= tg3_setup_phy(tp
, 0);
9777 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
9778 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
9781 /* Clear CRC stats. */
9782 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
9783 tg3_writephy(tp
, MII_TG3_TEST1
,
9784 tmp
| MII_TG3_TEST1_CRC_EN
);
9785 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
9790 __tg3_set_rx_mode(tp
->dev
);
9792 /* Initialize receive rules. */
9793 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
9794 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9795 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
9796 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
9798 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
9802 if (tg3_flag(tp
, ENABLE_ASF
))
9806 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
9808 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
9810 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
9812 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
9814 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
9816 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
9818 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
9820 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
9822 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
9824 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
9826 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
9828 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
9830 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9832 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9840 if (tg3_flag(tp
, ENABLE_APE
))
9841 /* Write our heartbeat update interval to APE. */
9842 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
9843 APE_HOST_HEARTBEAT_INT_DISABLE
);
9845 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
9850 /* Called at device open time to get the chip ready for
9851 * packet processing. Invoked with tp->lock held.
9853 static int tg3_init_hw(struct tg3
*tp
, int reset_phy
)
9855 tg3_switch_clocks(tp
);
9857 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
9859 return tg3_reset_hw(tp
, reset_phy
);
9862 static void tg3_sd_scan_scratchpad(struct tg3
*tp
, struct tg3_ocir
*ocir
)
9866 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++, ocir
++) {
9867 u32 off
= i
* TG3_OCIR_LEN
, len
= TG3_OCIR_LEN
;
9869 tg3_ape_scratchpad_read(tp
, (u32
*) ocir
, off
, len
);
9872 if (ocir
->signature
!= TG3_OCIR_SIG_MAGIC
||
9873 !(ocir
->version_flags
& TG3_OCIR_FLAG_ACTIVE
))
9874 memset(ocir
, 0, TG3_OCIR_LEN
);
9878 /* sysfs attributes for hwmon */
9879 static ssize_t
tg3_show_temp(struct device
*dev
,
9880 struct device_attribute
*devattr
, char *buf
)
9882 struct pci_dev
*pdev
= to_pci_dev(dev
);
9883 struct net_device
*netdev
= pci_get_drvdata(pdev
);
9884 struct tg3
*tp
= netdev_priv(netdev
);
9885 struct sensor_device_attribute
*attr
= to_sensor_dev_attr(devattr
);
9888 spin_lock_bh(&tp
->lock
);
9889 tg3_ape_scratchpad_read(tp
, &temperature
, attr
->index
,
9890 sizeof(temperature
));
9891 spin_unlock_bh(&tp
->lock
);
9892 return sprintf(buf
, "%u\n", temperature
);
9896 static SENSOR_DEVICE_ATTR(temp1_input
, S_IRUGO
, tg3_show_temp
, NULL
,
9897 TG3_TEMP_SENSOR_OFFSET
);
9898 static SENSOR_DEVICE_ATTR(temp1_crit
, S_IRUGO
, tg3_show_temp
, NULL
,
9899 TG3_TEMP_CAUTION_OFFSET
);
9900 static SENSOR_DEVICE_ATTR(temp1_max
, S_IRUGO
, tg3_show_temp
, NULL
,
9901 TG3_TEMP_MAX_OFFSET
);
9903 static struct attribute
*tg3_attributes
[] = {
9904 &sensor_dev_attr_temp1_input
.dev_attr
.attr
,
9905 &sensor_dev_attr_temp1_crit
.dev_attr
.attr
,
9906 &sensor_dev_attr_temp1_max
.dev_attr
.attr
,
9910 static const struct attribute_group tg3_group
= {
9911 .attrs
= tg3_attributes
,
9914 static void tg3_hwmon_close(struct tg3
*tp
)
9916 if (tp
->hwmon_dev
) {
9917 hwmon_device_unregister(tp
->hwmon_dev
);
9918 tp
->hwmon_dev
= NULL
;
9919 sysfs_remove_group(&tp
->pdev
->dev
.kobj
, &tg3_group
);
9923 static void tg3_hwmon_open(struct tg3
*tp
)
9927 struct pci_dev
*pdev
= tp
->pdev
;
9928 struct tg3_ocir ocirs
[TG3_SD_NUM_RECS
];
9930 tg3_sd_scan_scratchpad(tp
, ocirs
);
9932 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++) {
9933 if (!ocirs
[i
].src_data_length
)
9936 size
+= ocirs
[i
].src_hdr_length
;
9937 size
+= ocirs
[i
].src_data_length
;
9943 /* Register hwmon sysfs hooks */
9944 err
= sysfs_create_group(&pdev
->dev
.kobj
, &tg3_group
);
9946 dev_err(&pdev
->dev
, "Cannot create sysfs group, aborting\n");
9950 tp
->hwmon_dev
= hwmon_device_register(&pdev
->dev
);
9951 if (IS_ERR(tp
->hwmon_dev
)) {
9952 tp
->hwmon_dev
= NULL
;
9953 dev_err(&pdev
->dev
, "Cannot register hwmon device, aborting\n");
9954 sysfs_remove_group(&pdev
->dev
.kobj
, &tg3_group
);
9959 #define TG3_STAT_ADD32(PSTAT, REG) \
9960 do { u32 __val = tr32(REG); \
9961 (PSTAT)->low += __val; \
9962 if ((PSTAT)->low < __val) \
9963 (PSTAT)->high += 1; \
9966 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
9968 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
9973 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
9974 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
9975 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
9976 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
9977 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
9978 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
9979 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
9980 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
9981 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
9982 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
9983 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
9984 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
9985 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
9986 if (unlikely(tg3_flag(tp
, 5719_RDMA_BUG
) &&
9987 (sp
->tx_ucast_packets
.low
+ sp
->tx_mcast_packets
.low
+
9988 sp
->tx_bcast_packets
.low
) > TG3_NUM_RDMA_CHANNELS
)) {
9991 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
9992 val
&= ~TG3_LSO_RD_DMA_TX_LENGTH_WA
;
9993 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
9994 tg3_flag_clear(tp
, 5719_RDMA_BUG
);
9997 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
9998 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
9999 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
10000 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
10001 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
10002 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
10003 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
10004 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
10005 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
10006 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
10007 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
10008 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
10009 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
10010 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
10012 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
10013 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5717
&&
10014 tp
->pci_chip_rev_id
!= CHIPREV_ID_5719_A0
&&
10015 tp
->pci_chip_rev_id
!= CHIPREV_ID_5720_A0
) {
10016 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
10018 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
10019 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
10021 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
10022 sp
->rx_discards
.low
+= val
;
10023 if (sp
->rx_discards
.low
< val
)
10024 sp
->rx_discards
.high
+= 1;
10026 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
10028 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
/* Detect and recover from a missed MSI edge.
 *
 * Some chips (5717, 57765 class) can drop an MSI; if a NAPI instance
 * has pending work but its consumer indices have not advanced since the
 * last timer tick, we assume the interrupt was lost.  We give it one
 * grace tick (chk_msi_cnt) before re-invoking the handler by hand.
 * Called from tg3_timer() with tp->lock held.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: arm the counter
					 * and wait one more period.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				/* Second stalled tick: the MSI was lost;
				 * kick the handler manually.
				 * NOTE(review): reconstructed call — in
				 * upstream tg3.c this is tg3_msi(0, tnapi);
				 * confirm against the full source.
				 */
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or no work): reset the watchdog state. */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
/* Periodic driver timer (runs every tp->timer_offset jiffies).
 *
 * Responsibilities, in order:
 *  - work around lost interrupts for non-TAGGED_STATUS and MSI chips;
 *  - detect a hung DMA engine (WDMAC disabled) and schedule a full reset;
 *  - once per second: fetch hardware statistics, manage EEE, and poll
 *    link state on chips without link-change interrupts;
 *  - every TG3_FW_UPDATE_FREQ_SEC: send the ASF heartbeat so management
 *    firmware knows the host driver is alive.
 * Re-arms itself unconditionally via restart_timer.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip all work (but keep the timer alive) while an IRQ sync or a
	 * reset task is in flight.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status already updated: force an interrupt so the
			 * handler runs.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise ask the coalescing engine to post a
			 * fresh status block now.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine died: full chip reset needed. */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link dropped while we believed it was up. */
			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED))
				need_setup = 1;

			/* Link came up while we believed it was down. */
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET)))
				need_setup = 1;

			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to force the
					 * SERDES to renegotiate.
					 * NOTE(review): the udelay(40)s here
					 * are reconstructed from upstream
					 * tg3.c — confirm.
					 */
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
/* One-time setup of the periodic driver timer.
 *
 * Tagged-status chips (except 5717 / 57765 class, which need the missed-MSI
 * check) only need a 1 Hz tick; everything else polls at 10 Hz.  The
 * multipliers convert the tick rate into "once per second" and "once per
 * TG3_FW_UPDATE_FREQ_SEC seconds" down-counters used by tg3_timer().
 */
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	/* A tick period longer than a second would break the multiplier
	 * arithmetic below.
	 */
	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}
/* Arm the periodic timer and reset its per-second / ASF down-counters.
 * Called with tp->lock held during device bring-up.
 */
static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
/* Cancel the periodic timer, waiting for a concurrent handler to finish.
 * Must not be called while holding tp->lock (del_timer_sync may spin on
 * the running handler, which takes that lock).
 */
static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On init failure the device is halted and
 * closed; tp->lock is dropped and re-acquired across dev_close() (hence
 * the sparse __releases/__acquires annotations).
 *
 * Returns 0 on success or the tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		/* NOTE(review): upstream clears tp->irq_sync here before
		 * re-enabling NAPI — confirm against the full source.
		 */
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
/* Deferred full-chip reset, scheduled via tg3_reset_task_schedule()
 * (e.g. from the timer when the DMA engine hangs or from TX timeout).
 * Runs in workqueue context; clears RESET_TASK_PENDING when done.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device was closed between scheduling and execution. */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A TX hang was attributed to posted-write reordering on the
		 * mailbox path: switch to flushing mailbox writes from now on.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
/* Register the interrupt handler for vector @irq_num.
 *
 * With a single vector the handler is named after the netdev; with
 * multiple vectors each gets a "<dev>-<n>" label stored in the napi
 * context.  MSI/MSI-X vectors are exclusive (no IRQF_SHARED) and use the
 * one-shot variant when 1SHOT_MSI is set; legacy INTx is shared and uses
 * the tagged-status handler where supported.
 *
 * Returns the request_irq() result.
 */
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
/* Verify that the device can actually deliver an interrupt (used by the
 * MSI sanity test).  Temporarily swaps in a minimal test ISR, forces the
 * coalescing engine to raise an interrupt, and polls for evidence that it
 * arrived.  Restores the normal handler before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, or a negative
 * errno from request_irq()/tg3_request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the chip to generate a status-block update interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either a non-zero interrupt mailbox or a masked PCI INT
		 * proves the interrupt fired.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Some chipsets silently drop MSI messages; this probes delivery once at
 * open time and, on failure, falls back to legacy INTx (including a full
 * chip reset, since an aborted MSI cycle may have wedged the device).
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
/* Load the TSO/5701 firmware blob named in tp->fw_needed via the kernel
 * firmware loader and sanity-check its advertised length.
 *
 * On success tp->fw holds the blob, tp->fw_len its full (BSS-inclusive)
 * length, and tp->fw_needed is cleared so we never load twice.
 * Returns 0, -ENOENT (load failed), or -EINVAL (bogus header).
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
/* Compute how many interrupt vectors to request for the configured
 * RX/TX queue counts.  Returns at least 1.
 */
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
/* Try to switch the device to MSI-X with multiple RX (and optionally TX)
 * queues.  Honors user-requested queue counts (txq_req/rxq_req), defaults
 * RX queues to the RSS default and caps at hardware maxima.  If the PCI
 * core grants fewer vectors than requested, retries with the granted
 * count and shrinks the queue configuration to match.
 *
 * Returns true if MSI-X is enabled (vectors stored in tp->napi[]),
 * false if the caller should fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested: retry with what
		 * the PCI core says it can give us.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		/* Vector 0 is link/misc only, so RX rings = vectors - 1. */
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
/* Choose and configure the interrupt mode for the device in order of
 * preference: MSI-X, then MSI, then legacy INTx.  Also programs the
 * chip's MSGINT_MODE register (multi-vector / one-shot bits) and, for
 * the single-vector case, collapses to one RX and one TX queue.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
/* Tear down whichever interrupt mode tg3_ints_init() selected and clear
 * all interrupt/queue-related flags.  Safe to call in any mode.
 */
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
/* Bring the device fully up: interrupts, DMA memory, NAPI, hardware init,
 * optional MSI delivery test, hwmon, timer, and TX queues.
 *
 * @reset_phy: pass-through to tg3_init_hw() (reset the PHY too).
 * @test_irq:  run the MSI sanity test (tg3_test_msi) after init.
 * @init:      first-time open (register the PTP clock) vs. resume.
 *
 * Unwinds in reverse order on failure via the err_out labels.
 * Returns 0 or a negative errno.
 */
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors acquired so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_init(tp);
	else
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);

	return err;
}
/* Bring the device fully down: cancel pending reset work, stop the
 * network stack, timer, hwmon and PHY, halt the chip, then release IRQs,
 * interrupt mode, NAPI contexts and DMA memory — the mirror of
 * tg3_start().
 */
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
/* ndo_open handler: load firmware if the chip needs it (5701 A0 treats a
 * load failure as fatal; others just lose TSO), power the chip up, and
 * bring the interface up via tg3_start().  Registers the PTP clock on
 * PTP-capable chips.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot run without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp, true, true, true);
	if (err) {
		/* Start failed: drop the chip back to low power. */
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
/* ndo_stop handler: unwind tg3_open().  Statistics snapshots are zeroed
 * so counters restart from the hardware values on the next open.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
10825 static inline u64
get_stat64(tg3_stat64_t
*val
)
10827 return ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
/* Return the cumulative RX CRC error count.
 *
 * On 5700/5701 copper parts the MAC counter is unreliable, so the PHY's
 * receive-error counter (MII_TG3_RXR_COUNTERS, after enabling CRC
 * counting via MII_TG3_TEST1) is accumulated in tp->phy_crc_errors
 * instead.  All other chips read the MAC rx_fcs_errors statistic.
 */
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* The counter register clears on read. */
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
/* estats->member = snapshot-at-last-close + current hardware counter.
 * Relies on tg3_ethtool_stats and tg3_hw_stats sharing member names.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with the full ethtool statistics set: each entry is the
 * pre-close snapshot (tp->estats_prev) plus the live hardware counter.
 * Caller must ensure tp->hw_stats is valid (device has been opened).
 */
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
/* Fill the standard rtnl_link_stats64 from hardware counters plus the
 * pre-close snapshot (tp->net_stats_prev), mapping the MAC's per-cast
 * and per-error counters onto the generic netdev fields.
 * Caller must ensure tp->hw_stats is valid.
 */
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	/* Empty RX BDs mean the host could not keep up -> overruns. */
	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* These two are maintained by the driver, not the hardware. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
/* ethtool get_regs_len: the register dump is always the fixed-size
 * legacy block.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
/* ethtool get_regs: dump the legacy register block into @_p.
 * The buffer is zeroed first; nothing is read while the PHY is in low
 * power (register access would be unreliable there).
 */
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
/* ethtool get_eeprom_len: size of the NVRAM as probed at attach time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
/* ethtool get_eeprom: read @eeprom->len bytes of NVRAM at @eeprom->offset
 * into @data.  NVRAM can only be read in aligned 32-bit words, so the
 * transfer is split into an unaligned head, aligned middle, and unaligned
 * tail.  eeprom->len is updated to the number of bytes actually copied,
 * even on a partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was transferred before failing. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
/* ethtool set_eeprom: write @eeprom->len bytes of @data to NVRAM at
 * @eeprom->offset.  NVRAM writes must be whole aligned 32-bit words, so
 * if either end is unaligned the bordering words are read back first and
 * merged with the user data in a temporary buffer (read-modify-write).
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Preserve the bytes bordering the user's range. */
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
/* ethtool get_settings: report supported/advertised modes, port type,
 * flow-control advertisement, and (when the link is up) the negotiated
 * speed/duplex/MDI-X state.  Delegates entirely to phylib when the PHY
 * is managed there.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				   SUPPORTED_100baseT_Full |
				   SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map RX/TX flow-control config onto the standard
		 * Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && tp->link_up) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
/* ethtool set_settings: validate and apply a new link configuration.
 * Autoneg requests are checked against the chip's supported mask;
 * forced-mode requests are restricted to 1000/full on SERDES and
 * 10/100 on copper.  Delegates to phylib when the PHY is managed there.
 * Kicks off renegotiation via tg3_setup_phy() if the device is up.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject any advertisement bit we can't actually do. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for link_config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SERDES links can only be forced to 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					       ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_UNKNOWN;
		tp->link_config.duplex = DUPLEX_UNKNOWN;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
/* ethtool get_drvinfo: driver name/version, firmware version string
 * assembled at probe time, and the PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
/* ethtool get_wol: magic-packet wake is supported/enabled only when the
 * chip is WOL-capable and the platform allows the device to wake it.
 */
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/* ethtool set_wol: accept only WAKE_MAGIC (or nothing), propagate the
 * choice to the device-model wakeup state, and mirror it into the
 * driver's WOL_ENABLE flag under tp->lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
11351 static u32
tg3_get_msglevel(struct net_device
*dev
)
11353 struct tg3
*tp
= netdev_priv(dev
);
11354 return tp
->msg_enable
;
11357 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
11359 struct tg3
*tp
= netdev_priv(dev
);
11360 tp
->msg_enable
= value
;
11363 static int tg3_nway_reset(struct net_device
*dev
)
11365 struct tg3
*tp
= netdev_priv(dev
);
11368 if (!netif_running(dev
))
11371 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
11374 if (tg3_flag(tp
, USE_PHYLIB
)) {
11375 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11377 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
11381 spin_lock_bh(&tp
->lock
);
11383 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
11384 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
11385 ((bmcr
& BMCR_ANENABLE
) ||
11386 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
11387 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
11391 spin_unlock_bh(&tp
->lock
);
11397 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
11399 struct tg3
*tp
= netdev_priv(dev
);
11401 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
11402 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
11403 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
11405 ering
->rx_jumbo_max_pending
= 0;
11407 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
11409 ering
->rx_pending
= tp
->rx_pending
;
11410 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
11411 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
11413 ering
->rx_jumbo_pending
= 0;
11415 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
11418 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
11420 struct tg3
*tp
= netdev_priv(dev
);
11421 int i
, irq_sync
= 0, err
= 0;
11423 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
11424 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
11425 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
11426 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
11427 (tg3_flag(tp
, TSO_BUG
) &&
11428 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
11431 if (netif_running(dev
)) {
11433 tg3_netif_stop(tp
);
11437 tg3_full_lock(tp
, irq_sync
);
11439 tp
->rx_pending
= ering
->rx_pending
;
11441 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
11442 tp
->rx_pending
> 63)
11443 tp
->rx_pending
= 63;
11444 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
11446 for (i
= 0; i
< tp
->irq_max
; i
++)
11447 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
11449 if (netif_running(dev
)) {
11450 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11451 err
= tg3_restart_hw(tp
, 1);
11453 tg3_netif_start(tp
);
11456 tg3_full_unlock(tp
);
11458 if (irq_sync
&& !err
)
11464 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11466 struct tg3
*tp
= netdev_priv(dev
);
11468 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
11470 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
)
11471 epause
->rx_pause
= 1;
11473 epause
->rx_pause
= 0;
11475 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
)
11476 epause
->tx_pause
= 1;
11478 epause
->tx_pause
= 0;
11481 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
11483 struct tg3
*tp
= netdev_priv(dev
);
11486 if (tg3_flag(tp
, USE_PHYLIB
)) {
11488 struct phy_device
*phydev
;
11490 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11492 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
11493 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
11494 (epause
->rx_pause
!= epause
->tx_pause
)))
11497 tp
->link_config
.flowctrl
= 0;
11498 if (epause
->rx_pause
) {
11499 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11501 if (epause
->tx_pause
) {
11502 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11503 newadv
= ADVERTISED_Pause
;
11505 newadv
= ADVERTISED_Pause
|
11506 ADVERTISED_Asym_Pause
;
11507 } else if (epause
->tx_pause
) {
11508 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11509 newadv
= ADVERTISED_Asym_Pause
;
11513 if (epause
->autoneg
)
11514 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11516 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11518 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
11519 u32 oldadv
= phydev
->advertising
&
11520 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
11521 if (oldadv
!= newadv
) {
11522 phydev
->advertising
&=
11523 ~(ADVERTISED_Pause
|
11524 ADVERTISED_Asym_Pause
);
11525 phydev
->advertising
|= newadv
;
11526 if (phydev
->autoneg
) {
11528 * Always renegotiate the link to
11529 * inform our link partner of our
11530 * flow control settings, even if the
11531 * flow control is forced. Let
11532 * tg3_adjust_link() do the final
11533 * flow control setup.
11535 return phy_start_aneg(phydev
);
11539 if (!epause
->autoneg
)
11540 tg3_setup_flow_control(tp
, 0, 0);
11542 tp
->link_config
.advertising
&=
11543 ~(ADVERTISED_Pause
|
11544 ADVERTISED_Asym_Pause
);
11545 tp
->link_config
.advertising
|= newadv
;
11550 if (netif_running(dev
)) {
11551 tg3_netif_stop(tp
);
11555 tg3_full_lock(tp
, irq_sync
);
11557 if (epause
->autoneg
)
11558 tg3_flag_set(tp
, PAUSE_AUTONEG
);
11560 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
11561 if (epause
->rx_pause
)
11562 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
11564 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
11565 if (epause
->tx_pause
)
11566 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
11568 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
11570 if (netif_running(dev
)) {
11571 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11572 err
= tg3_restart_hw(tp
, 1);
11574 tg3_netif_start(tp
);
11577 tg3_full_unlock(tp
);
11583 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
11587 return TG3_NUM_TEST
;
11589 return TG3_NUM_STATS
;
11591 return -EOPNOTSUPP
;
11595 static int tg3_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
11596 u32
*rules __always_unused
)
11598 struct tg3
*tp
= netdev_priv(dev
);
11600 if (!tg3_flag(tp
, SUPPORT_MSIX
))
11601 return -EOPNOTSUPP
;
11603 switch (info
->cmd
) {
11604 case ETHTOOL_GRXRINGS
:
11605 if (netif_running(tp
->dev
))
11606 info
->data
= tp
->rxq_cnt
;
11608 info
->data
= num_online_cpus();
11609 if (info
->data
> TG3_RSS_MAX_NUM_QS
)
11610 info
->data
= TG3_RSS_MAX_NUM_QS
;
11613 /* The first interrupt vector only
11614 * handles link interrupts.
11620 return -EOPNOTSUPP
;
11624 static u32
tg3_get_rxfh_indir_size(struct net_device
*dev
)
11627 struct tg3
*tp
= netdev_priv(dev
);
11629 if (tg3_flag(tp
, SUPPORT_MSIX
))
11630 size
= TG3_RSS_INDIR_TBL_SIZE
;
11635 static int tg3_get_rxfh_indir(struct net_device
*dev
, u32
*indir
)
11637 struct tg3
*tp
= netdev_priv(dev
);
11640 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
11641 indir
[i
] = tp
->rss_ind_tbl
[i
];
11646 static int tg3_set_rxfh_indir(struct net_device
*dev
, const u32
*indir
)
11648 struct tg3
*tp
= netdev_priv(dev
);
11651 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
11652 tp
->rss_ind_tbl
[i
] = indir
[i
];
11654 if (!netif_running(dev
) || !tg3_flag(tp
, ENABLE_RSS
))
11657 /* It is legal to write the indirection
11658 * table while the device is running.
11660 tg3_full_lock(tp
, 0);
11661 tg3_rss_write_indir_tbl(tp
);
11662 tg3_full_unlock(tp
);
11667 static void tg3_get_channels(struct net_device
*dev
,
11668 struct ethtool_channels
*channel
)
11670 struct tg3
*tp
= netdev_priv(dev
);
11671 u32 deflt_qs
= netif_get_num_default_rss_queues();
11673 channel
->max_rx
= tp
->rxq_max
;
11674 channel
->max_tx
= tp
->txq_max
;
11676 if (netif_running(dev
)) {
11677 channel
->rx_count
= tp
->rxq_cnt
;
11678 channel
->tx_count
= tp
->txq_cnt
;
11681 channel
->rx_count
= tp
->rxq_req
;
11683 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
11686 channel
->tx_count
= tp
->txq_req
;
11688 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
11692 static int tg3_set_channels(struct net_device
*dev
,
11693 struct ethtool_channels
*channel
)
11695 struct tg3
*tp
= netdev_priv(dev
);
11697 if (!tg3_flag(tp
, SUPPORT_MSIX
))
11698 return -EOPNOTSUPP
;
11700 if (channel
->rx_count
> tp
->rxq_max
||
11701 channel
->tx_count
> tp
->txq_max
)
11704 tp
->rxq_req
= channel
->rx_count
;
11705 tp
->txq_req
= channel
->tx_count
;
11707 if (!netif_running(dev
))
11712 tg3_carrier_off(tp
);
11714 tg3_start(tp
, true, false, false);
11719 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
11721 switch (stringset
) {
11723 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
11726 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
11729 WARN_ON(1); /* we need a WARN() */
11734 static int tg3_set_phys_id(struct net_device
*dev
,
11735 enum ethtool_phys_id_state state
)
11737 struct tg3
*tp
= netdev_priv(dev
);
11739 if (!netif_running(tp
->dev
))
11743 case ETHTOOL_ID_ACTIVE
:
11744 return 1; /* cycle on/off once per second */
11746 case ETHTOOL_ID_ON
:
11747 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
11748 LED_CTRL_1000MBPS_ON
|
11749 LED_CTRL_100MBPS_ON
|
11750 LED_CTRL_10MBPS_ON
|
11751 LED_CTRL_TRAFFIC_OVERRIDE
|
11752 LED_CTRL_TRAFFIC_BLINK
|
11753 LED_CTRL_TRAFFIC_LED
);
11756 case ETHTOOL_ID_OFF
:
11757 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
11758 LED_CTRL_TRAFFIC_OVERRIDE
);
11761 case ETHTOOL_ID_INACTIVE
:
11762 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
11769 static void tg3_get_ethtool_stats(struct net_device
*dev
,
11770 struct ethtool_stats
*estats
, u64
*tmp_stats
)
11772 struct tg3
*tp
= netdev_priv(dev
);
11775 tg3_get_estats(tp
, (struct tg3_ethtool_stats
*)tmp_stats
);
11777 memset(tmp_stats
, 0, sizeof(struct tg3_ethtool_stats
));
11780 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
11784 u32 offset
= 0, len
= 0;
11787 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
11790 if (magic
== TG3_EEPROM_MAGIC
) {
11791 for (offset
= TG3_NVM_DIR_START
;
11792 offset
< TG3_NVM_DIR_END
;
11793 offset
+= TG3_NVM_DIRENT_SIZE
) {
11794 if (tg3_nvram_read(tp
, offset
, &val
))
11797 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
11798 TG3_NVM_DIRTYPE_EXTVPD
)
11802 if (offset
!= TG3_NVM_DIR_END
) {
11803 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
11804 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
11807 offset
= tg3_nvram_logical_addr(tp
, offset
);
11811 if (!offset
|| !len
) {
11812 offset
= TG3_NVM_VPD_OFF
;
11813 len
= TG3_NVM_VPD_LEN
;
11816 buf
= kmalloc(len
, GFP_KERNEL
);
11820 if (magic
== TG3_EEPROM_MAGIC
) {
11821 for (i
= 0; i
< len
; i
+= 4) {
11822 /* The data is in little-endian format in NVRAM.
11823 * Use the big-endian read routines to preserve
11824 * the byte order as it exists in NVRAM.
11826 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
11832 unsigned int pos
= 0;
11834 ptr
= (u8
*)&buf
[0];
11835 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
11836 cnt
= pci_read_vpd(tp
->pdev
, pos
,
11838 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
11856 #define NVRAM_TEST_SIZE 0x100
11857 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11858 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11859 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11860 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11861 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11862 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11863 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11864 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11866 static int tg3_test_nvram(struct tg3
*tp
)
11868 u32 csum
, magic
, len
;
11870 int i
, j
, k
, err
= 0, size
;
11872 if (tg3_flag(tp
, NO_NVRAM
))
11875 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
11878 if (magic
== TG3_EEPROM_MAGIC
)
11879 size
= NVRAM_TEST_SIZE
;
11880 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
11881 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
11882 TG3_EEPROM_SB_FORMAT_1
) {
11883 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
11884 case TG3_EEPROM_SB_REVISION_0
:
11885 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
11887 case TG3_EEPROM_SB_REVISION_2
:
11888 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
11890 case TG3_EEPROM_SB_REVISION_3
:
11891 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
11893 case TG3_EEPROM_SB_REVISION_4
:
11894 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
11896 case TG3_EEPROM_SB_REVISION_5
:
11897 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
11899 case TG3_EEPROM_SB_REVISION_6
:
11900 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
11907 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
11908 size
= NVRAM_SELFBOOT_HW_SIZE
;
11912 buf
= kmalloc(size
, GFP_KERNEL
);
11917 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
11918 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
11925 /* Selfboot format */
11926 magic
= be32_to_cpu(buf
[0]);
11927 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
11928 TG3_EEPROM_MAGIC_FW
) {
11929 u8
*buf8
= (u8
*) buf
, csum8
= 0;
11931 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
11932 TG3_EEPROM_SB_REVISION_2
) {
11933 /* For rev 2, the csum doesn't include the MBA. */
11934 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
11936 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
11939 for (i
= 0; i
< size
; i
++)
11952 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
11953 TG3_EEPROM_MAGIC_HW
) {
11954 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
11955 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
11956 u8
*buf8
= (u8
*) buf
;
11958 /* Separate the parity bits and the data bytes. */
11959 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
11960 if ((i
== 0) || (i
== 8)) {
11964 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
11965 parity
[k
++] = buf8
[i
] & msk
;
11967 } else if (i
== 16) {
11971 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
11972 parity
[k
++] = buf8
[i
] & msk
;
11975 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
11976 parity
[k
++] = buf8
[i
] & msk
;
11979 data
[j
++] = buf8
[i
];
11983 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
11984 u8 hw8
= hweight8(data
[i
]);
11986 if ((hw8
& 0x1) && parity
[i
])
11988 else if (!(hw8
& 0x1) && !parity
[i
])
11997 /* Bootstrap checksum at offset 0x10 */
11998 csum
= calc_crc((unsigned char *) buf
, 0x10);
11999 if (csum
!= le32_to_cpu(buf
[0x10/4]))
12002 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12003 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
12004 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
12009 buf
= tg3_vpd_readblock(tp
, &len
);
12013 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
12015 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
12019 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
12022 i
+= PCI_VPD_LRDT_TAG_SIZE
;
12023 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
12024 PCI_VPD_RO_KEYWORD_CHKSUM
);
12028 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
12030 for (i
= 0; i
<= j
; i
++)
12031 csum8
+= ((u8
*)buf
)[i
];
12045 #define TG3_SERDES_TIMEOUT_SEC 2
12046 #define TG3_COPPER_TIMEOUT_SEC 6
12048 static int tg3_test_link(struct tg3
*tp
)
12052 if (!netif_running(tp
->dev
))
12055 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
12056 max
= TG3_SERDES_TIMEOUT_SEC
;
12058 max
= TG3_COPPER_TIMEOUT_SEC
;
12060 for (i
= 0; i
< max
; i
++) {
12064 if (msleep_interruptible(1000))
12071 /* Only test the commonly used registers */
12072 static int tg3_test_registers(struct tg3
*tp
)
12074 int i
, is_5705
, is_5750
;
12075 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
12079 #define TG3_FL_5705 0x1
12080 #define TG3_FL_NOT_5705 0x2
12081 #define TG3_FL_NOT_5788 0x4
12082 #define TG3_FL_NOT_5750 0x8
12086 /* MAC Control Registers */
12087 { MAC_MODE
, TG3_FL_NOT_5705
,
12088 0x00000000, 0x00ef6f8c },
12089 { MAC_MODE
, TG3_FL_5705
,
12090 0x00000000, 0x01ef6b8c },
12091 { MAC_STATUS
, TG3_FL_NOT_5705
,
12092 0x03800107, 0x00000000 },
12093 { MAC_STATUS
, TG3_FL_5705
,
12094 0x03800100, 0x00000000 },
12095 { MAC_ADDR_0_HIGH
, 0x0000,
12096 0x00000000, 0x0000ffff },
12097 { MAC_ADDR_0_LOW
, 0x0000,
12098 0x00000000, 0xffffffff },
12099 { MAC_RX_MTU_SIZE
, 0x0000,
12100 0x00000000, 0x0000ffff },
12101 { MAC_TX_MODE
, 0x0000,
12102 0x00000000, 0x00000070 },
12103 { MAC_TX_LENGTHS
, 0x0000,
12104 0x00000000, 0x00003fff },
12105 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
12106 0x00000000, 0x000007fc },
12107 { MAC_RX_MODE
, TG3_FL_5705
,
12108 0x00000000, 0x000007dc },
12109 { MAC_HASH_REG_0
, 0x0000,
12110 0x00000000, 0xffffffff },
12111 { MAC_HASH_REG_1
, 0x0000,
12112 0x00000000, 0xffffffff },
12113 { MAC_HASH_REG_2
, 0x0000,
12114 0x00000000, 0xffffffff },
12115 { MAC_HASH_REG_3
, 0x0000,
12116 0x00000000, 0xffffffff },
12118 /* Receive Data and Receive BD Initiator Control Registers. */
12119 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
12120 0x00000000, 0xffffffff },
12121 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
12122 0x00000000, 0xffffffff },
12123 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
12124 0x00000000, 0x00000003 },
12125 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
12126 0x00000000, 0xffffffff },
12127 { RCVDBDI_STD_BD
+0, 0x0000,
12128 0x00000000, 0xffffffff },
12129 { RCVDBDI_STD_BD
+4, 0x0000,
12130 0x00000000, 0xffffffff },
12131 { RCVDBDI_STD_BD
+8, 0x0000,
12132 0x00000000, 0xffff0002 },
12133 { RCVDBDI_STD_BD
+0xc, 0x0000,
12134 0x00000000, 0xffffffff },
12136 /* Receive BD Initiator Control Registers. */
12137 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
12138 0x00000000, 0xffffffff },
12139 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
12140 0x00000000, 0x000003ff },
12141 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
12142 0x00000000, 0xffffffff },
12144 /* Host Coalescing Control Registers. */
12145 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
12146 0x00000000, 0x00000004 },
12147 { HOSTCC_MODE
, TG3_FL_5705
,
12148 0x00000000, 0x000000f6 },
12149 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
12150 0x00000000, 0xffffffff },
12151 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
12152 0x00000000, 0x000003ff },
12153 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
12154 0x00000000, 0xffffffff },
12155 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
12156 0x00000000, 0x000003ff },
12157 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
12158 0x00000000, 0xffffffff },
12159 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12160 0x00000000, 0x000000ff },
12161 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
12162 0x00000000, 0xffffffff },
12163 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12164 0x00000000, 0x000000ff },
12165 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12166 0x00000000, 0xffffffff },
12167 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12168 0x00000000, 0xffffffff },
12169 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12170 0x00000000, 0xffffffff },
12171 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12172 0x00000000, 0x000000ff },
12173 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12174 0x00000000, 0xffffffff },
12175 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12176 0x00000000, 0x000000ff },
12177 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
12178 0x00000000, 0xffffffff },
12179 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
12180 0x00000000, 0xffffffff },
12181 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
12182 0x00000000, 0xffffffff },
12183 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
12184 0x00000000, 0xffffffff },
12185 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
12186 0x00000000, 0xffffffff },
12187 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
12188 0xffffffff, 0x00000000 },
12189 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
12190 0xffffffff, 0x00000000 },
12192 /* Buffer Manager Control Registers. */
12193 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
12194 0x00000000, 0x007fff80 },
12195 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
12196 0x00000000, 0x007fffff },
12197 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
12198 0x00000000, 0x0000003f },
12199 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
12200 0x00000000, 0x000001ff },
12201 { BUFMGR_MB_HIGH_WATER
, 0x0000,
12202 0x00000000, 0x000001ff },
12203 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
12204 0xffffffff, 0x00000000 },
12205 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
12206 0xffffffff, 0x00000000 },
12208 /* Mailbox Registers */
12209 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
12210 0x00000000, 0x000001ff },
12211 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
12212 0x00000000, 0x000001ff },
12213 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
12214 0x00000000, 0x000007ff },
12215 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
12216 0x00000000, 0x000001ff },
12218 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12221 is_5705
= is_5750
= 0;
12222 if (tg3_flag(tp
, 5705_PLUS
)) {
12224 if (tg3_flag(tp
, 5750_PLUS
))
12228 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
12229 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
12232 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
12235 if (tg3_flag(tp
, IS_5788
) &&
12236 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
12239 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
12242 offset
= (u32
) reg_tbl
[i
].offset
;
12243 read_mask
= reg_tbl
[i
].read_mask
;
12244 write_mask
= reg_tbl
[i
].write_mask
;
12246 /* Save the original register content */
12247 save_val
= tr32(offset
);
12249 /* Determine the read-only value. */
12250 read_val
= save_val
& read_mask
;
12252 /* Write zero to the register, then make sure the read-only bits
12253 * are not changed and the read/write bits are all zeros.
12257 val
= tr32(offset
);
12259 /* Test the read-only and read/write bits. */
12260 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
12263 /* Write ones to all the bits defined by RdMask and WrMask, then
12264 * make sure the read-only bits are not changed and the
12265 * read/write bits are all ones.
12267 tw32(offset
, read_mask
| write_mask
);
12269 val
= tr32(offset
);
12271 /* Test the read-only bits. */
12272 if ((val
& read_mask
) != read_val
)
12275 /* Test the read/write bits. */
12276 if ((val
& write_mask
) != write_mask
)
12279 tw32(offset
, save_val
);
12285 if (netif_msg_hw(tp
))
12286 netdev_err(tp
->dev
,
12287 "Register test failed at offset %x\n", offset
);
12288 tw32(offset
, save_val
);
12292 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
12294 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12298 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
12299 for (j
= 0; j
< len
; j
+= 4) {
12302 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
12303 tg3_read_mem(tp
, offset
+ j
, &val
);
12304 if (val
!= test_pattern
[i
])
12311 static int tg3_test_memory(struct tg3
*tp
)
12313 static struct mem_entry
{
12316 } mem_tbl_570x
[] = {
12317 { 0x00000000, 0x00b50},
12318 { 0x00002000, 0x1c000},
12319 { 0xffffffff, 0x00000}
12320 }, mem_tbl_5705
[] = {
12321 { 0x00000100, 0x0000c},
12322 { 0x00000200, 0x00008},
12323 { 0x00004000, 0x00800},
12324 { 0x00006000, 0x01000},
12325 { 0x00008000, 0x02000},
12326 { 0x00010000, 0x0e000},
12327 { 0xffffffff, 0x00000}
12328 }, mem_tbl_5755
[] = {
12329 { 0x00000200, 0x00008},
12330 { 0x00004000, 0x00800},
12331 { 0x00006000, 0x00800},
12332 { 0x00008000, 0x02000},
12333 { 0x00010000, 0x0c000},
12334 { 0xffffffff, 0x00000}
12335 }, mem_tbl_5906
[] = {
12336 { 0x00000200, 0x00008},
12337 { 0x00004000, 0x00400},
12338 { 0x00006000, 0x00400},
12339 { 0x00008000, 0x01000},
12340 { 0x00010000, 0x01000},
12341 { 0xffffffff, 0x00000}
12342 }, mem_tbl_5717
[] = {
12343 { 0x00000200, 0x00008},
12344 { 0x00010000, 0x0a000},
12345 { 0x00020000, 0x13c00},
12346 { 0xffffffff, 0x00000}
12347 }, mem_tbl_57765
[] = {
12348 { 0x00000200, 0x00008},
12349 { 0x00004000, 0x00800},
12350 { 0x00006000, 0x09800},
12351 { 0x00010000, 0x0a000},
12352 { 0xffffffff, 0x00000}
12354 struct mem_entry
*mem_tbl
;
12358 if (tg3_flag(tp
, 5717_PLUS
))
12359 mem_tbl
= mem_tbl_5717
;
12360 else if (tg3_flag(tp
, 57765_CLASS
))
12361 mem_tbl
= mem_tbl_57765
;
12362 else if (tg3_flag(tp
, 5755_PLUS
))
12363 mem_tbl
= mem_tbl_5755
;
12364 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
12365 mem_tbl
= mem_tbl_5906
;
12366 else if (tg3_flag(tp
, 5705_PLUS
))
12367 mem_tbl
= mem_tbl_5705
;
12369 mem_tbl
= mem_tbl_570x
;
12371 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
12372 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
12380 #define TG3_TSO_MSS 500
12382 #define TG3_TSO_IP_HDR_LEN 20
12383 #define TG3_TSO_TCP_HDR_LEN 20
12384 #define TG3_TSO_TCP_OPT_LEN 12
12386 static const u8 tg3_tso_header
[] = {
12388 0x45, 0x00, 0x00, 0x00,
12389 0x00, 0x00, 0x40, 0x00,
12390 0x40, 0x06, 0x00, 0x00,
12391 0x0a, 0x00, 0x00, 0x01,
12392 0x0a, 0x00, 0x00, 0x02,
12393 0x0d, 0x00, 0xe0, 0x00,
12394 0x00, 0x00, 0x01, 0x00,
12395 0x00, 0x00, 0x02, 0x00,
12396 0x80, 0x10, 0x10, 0x00,
12397 0x14, 0x09, 0x00, 0x00,
12398 0x01, 0x01, 0x08, 0x0a,
12399 0x11, 0x11, 0x11, 0x11,
12400 0x11, 0x11, 0x11, 0x11,
12403 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
12405 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
12406 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
12408 struct sk_buff
*skb
;
12409 u8
*tx_data
, *rx_data
;
12411 int num_pkts
, tx_len
, rx_len
, i
, err
;
12412 struct tg3_rx_buffer_desc
*desc
;
12413 struct tg3_napi
*tnapi
, *rnapi
;
12414 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
12416 tnapi
= &tp
->napi
[0];
12417 rnapi
= &tp
->napi
[0];
12418 if (tp
->irq_cnt
> 1) {
12419 if (tg3_flag(tp
, ENABLE_RSS
))
12420 rnapi
= &tp
->napi
[1];
12421 if (tg3_flag(tp
, ENABLE_TSS
))
12422 tnapi
= &tp
->napi
[1];
12424 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
12429 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
12433 tx_data
= skb_put(skb
, tx_len
);
12434 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
12435 memset(tx_data
+ 6, 0x0, 8);
12437 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
12439 if (tso_loopback
) {
12440 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
12442 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
12443 TG3_TSO_TCP_OPT_LEN
;
12445 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
12446 sizeof(tg3_tso_header
));
12449 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
12450 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
12452 /* Set the total length field in the IP header */
12453 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
12455 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
12456 TXD_FLAG_CPU_POST_DMA
);
12458 if (tg3_flag(tp
, HW_TSO_1
) ||
12459 tg3_flag(tp
, HW_TSO_2
) ||
12460 tg3_flag(tp
, HW_TSO_3
)) {
12462 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
12463 th
= (struct tcphdr
*)&tx_data
[val
];
12466 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
12468 if (tg3_flag(tp
, HW_TSO_3
)) {
12469 mss
|= (hdr_len
& 0xc) << 12;
12470 if (hdr_len
& 0x10)
12471 base_flags
|= 0x00000010;
12472 base_flags
|= (hdr_len
& 0x3e0) << 5;
12473 } else if (tg3_flag(tp
, HW_TSO_2
))
12474 mss
|= hdr_len
<< 9;
12475 else if (tg3_flag(tp
, HW_TSO_1
) ||
12476 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
12477 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
12479 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
12482 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
12485 data_off
= ETH_HLEN
;
12487 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
12488 tx_len
> VLAN_ETH_FRAME_LEN
)
12489 base_flags
|= TXD_FLAG_JMB_PKT
;
12492 for (i
= data_off
; i
< tx_len
; i
++)
12493 tx_data
[i
] = (u8
) (i
& 0xff);
12495 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
12496 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
12497 dev_kfree_skb(skb
);
12501 val
= tnapi
->tx_prod
;
12502 tnapi
->tx_buffers
[val
].skb
= skb
;
12503 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
12505 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12510 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12512 budget
= tg3_tx_avail(tnapi
);
12513 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
12514 base_flags
| TXD_FLAG_END
, mss
, 0)) {
12515 tnapi
->tx_buffers
[val
].skb
= NULL
;
12516 dev_kfree_skb(skb
);
12522 /* Sync BD data before updating mailbox */
12525 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
12526 tr32_mailbox(tnapi
->prodmbox
);
12530 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12531 for (i
= 0; i
< 35; i
++) {
12532 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
12537 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
12538 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
12539 if ((tx_idx
== tnapi
->tx_prod
) &&
12540 (rx_idx
== (rx_start_idx
+ num_pkts
)))
12544 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
12545 dev_kfree_skb(skb
);
12547 if (tx_idx
!= tnapi
->tx_prod
)
12550 if (rx_idx
!= rx_start_idx
+ num_pkts
)
12554 while (rx_idx
!= rx_start_idx
) {
12555 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
12556 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
12557 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
12559 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
12560 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
12563 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
12566 if (!tso_loopback
) {
12567 if (rx_len
!= tx_len
)
12570 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
12571 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
12574 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
12577 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
12578 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
12579 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
12583 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
12584 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
12585 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
12587 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
12588 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
12589 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
12594 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
12595 PCI_DMA_FROMDEVICE
);
12597 rx_data
+= TG3_RX_OFFSET(tp
);
12598 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
12599 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
12606 /* tg3_free_rings will unmap and free the rx_data */
12611 #define TG3_STD_LOOPBACK_FAILED 1
12612 #define TG3_JMB_LOOPBACK_FAILED 2
12613 #define TG3_TSO_LOOPBACK_FAILED 4
12614 #define TG3_LOOPBACK_FAILED \
12615 (TG3_STD_LOOPBACK_FAILED | \
12616 TG3_JMB_LOOPBACK_FAILED | \
12617 TG3_TSO_LOOPBACK_FAILED)
12619 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
12623 u32 jmb_pkt_sz
= 9000;
12626 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
12628 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
12629 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
12631 if (!netif_running(tp
->dev
)) {
12632 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12633 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12635 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12639 err
= tg3_reset_hw(tp
, 1);
12641 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12642 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12644 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
12648 if (tg3_flag(tp
, ENABLE_RSS
)) {
12651 /* Reroute all rx packets to the 1st queue */
12652 for (i
= MAC_RSS_INDIR_TBL_0
;
12653 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
12657 /* HW errata - mac loopback fails in some cases on 5780.
12658 * Normal traffic and PHY loopback are not affected by
12659 * errata. Also, the MAC loopback test is deprecated for
12660 * all newer ASIC revisions.
12662 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5780
&&
12663 !tg3_flag(tp
, CPMU_PRESENT
)) {
12664 tg3_mac_loopback(tp
, true);
12666 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12667 data
[TG3_MAC_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
12669 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12670 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12671 data
[TG3_MAC_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
12673 tg3_mac_loopback(tp
, false);
12676 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
12677 !tg3_flag(tp
, USE_PHYLIB
)) {
12680 tg3_phy_lpbk_set(tp
, 0, false);
12682 /* Wait for link */
12683 for (i
= 0; i
< 100; i
++) {
12684 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
12689 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12690 data
[TG3_PHY_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
12691 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12692 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12693 data
[TG3_PHY_LOOPB_TEST
] |= TG3_TSO_LOOPBACK_FAILED
;
12694 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12695 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12696 data
[TG3_PHY_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
12699 tg3_phy_lpbk_set(tp
, 0, true);
12701 /* All link indications report up, but the hardware
12702 * isn't really ready for about 20 msec. Double it
12707 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
12708 data
[TG3_EXT_LOOPB_TEST
] |=
12709 TG3_STD_LOOPBACK_FAILED
;
12710 if (tg3_flag(tp
, TSO_CAPABLE
) &&
12711 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
12712 data
[TG3_EXT_LOOPB_TEST
] |=
12713 TG3_TSO_LOOPBACK_FAILED
;
12714 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
12715 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
12716 data
[TG3_EXT_LOOPB_TEST
] |=
12717 TG3_JMB_LOOPBACK_FAILED
;
12720 /* Re-enable gphy autopowerdown. */
12721 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
12722 tg3_phy_toggle_apd(tp
, true);
12725 err
= (data
[TG3_MAC_LOOPB_TEST
] | data
[TG3_PHY_LOOPB_TEST
] |
12726 data
[TG3_EXT_LOOPB_TEST
]) ? -EIO
: 0;
12729 tp
->phy_flags
|= eee_cap
;
12734 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
12737 struct tg3
*tp
= netdev_priv(dev
);
12738 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
12740 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
12741 tg3_power_up(tp
)) {
12742 etest
->flags
|= ETH_TEST_FL_FAILED
;
12743 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
12747 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
12749 if (tg3_test_nvram(tp
) != 0) {
12750 etest
->flags
|= ETH_TEST_FL_FAILED
;
12751 data
[TG3_NVRAM_TEST
] = 1;
12753 if (!doextlpbk
&& tg3_test_link(tp
)) {
12754 etest
->flags
|= ETH_TEST_FL_FAILED
;
12755 data
[TG3_LINK_TEST
] = 1;
12757 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
12758 int err
, err2
= 0, irq_sync
= 0;
12760 if (netif_running(dev
)) {
12762 tg3_netif_stop(tp
);
12766 tg3_full_lock(tp
, irq_sync
);
12767 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
12768 err
= tg3_nvram_lock(tp
);
12769 tg3_halt_cpu(tp
, RX_CPU_BASE
);
12770 if (!tg3_flag(tp
, 5705_PLUS
))
12771 tg3_halt_cpu(tp
, TX_CPU_BASE
);
12773 tg3_nvram_unlock(tp
);
12775 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
12778 if (tg3_test_registers(tp
) != 0) {
12779 etest
->flags
|= ETH_TEST_FL_FAILED
;
12780 data
[TG3_REGISTER_TEST
] = 1;
12783 if (tg3_test_memory(tp
) != 0) {
12784 etest
->flags
|= ETH_TEST_FL_FAILED
;
12785 data
[TG3_MEMORY_TEST
] = 1;
12789 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
12791 if (tg3_test_loopback(tp
, data
, doextlpbk
))
12792 etest
->flags
|= ETH_TEST_FL_FAILED
;
12794 tg3_full_unlock(tp
);
12796 if (tg3_test_interrupt(tp
) != 0) {
12797 etest
->flags
|= ETH_TEST_FL_FAILED
;
12798 data
[TG3_INTERRUPT_TEST
] = 1;
12801 tg3_full_lock(tp
, 0);
12803 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12804 if (netif_running(dev
)) {
12805 tg3_flag_set(tp
, INIT_COMPLETE
);
12806 err2
= tg3_restart_hw(tp
, 1);
12808 tg3_netif_start(tp
);
12811 tg3_full_unlock(tp
);
12813 if (irq_sync
&& !err2
)
12816 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
12817 tg3_power_down(tp
);
12821 static int tg3_hwtstamp_ioctl(struct net_device
*dev
,
12822 struct ifreq
*ifr
, int cmd
)
12824 struct tg3
*tp
= netdev_priv(dev
);
12825 struct hwtstamp_config stmpconf
;
12827 if (!tg3_flag(tp
, PTP_CAPABLE
))
12830 if (copy_from_user(&stmpconf
, ifr
->ifr_data
, sizeof(stmpconf
)))
12833 if (stmpconf
.flags
)
12836 switch (stmpconf
.tx_type
) {
12837 case HWTSTAMP_TX_ON
:
12838 tg3_flag_set(tp
, TX_TSTAMP_EN
);
12840 case HWTSTAMP_TX_OFF
:
12841 tg3_flag_clear(tp
, TX_TSTAMP_EN
);
12847 switch (stmpconf
.rx_filter
) {
12848 case HWTSTAMP_FILTER_NONE
:
12851 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
12852 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
12853 TG3_RX_PTP_CTL_ALL_V1_EVENTS
;
12855 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
12856 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
12857 TG3_RX_PTP_CTL_SYNC_EVNT
;
12859 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
12860 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
12861 TG3_RX_PTP_CTL_DELAY_REQ
;
12863 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
12864 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
12865 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
12867 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
12868 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
12869 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
12871 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
12872 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
12873 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
12875 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
12876 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
12877 TG3_RX_PTP_CTL_SYNC_EVNT
;
12879 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
12880 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
12881 TG3_RX_PTP_CTL_SYNC_EVNT
;
12883 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
12884 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
12885 TG3_RX_PTP_CTL_SYNC_EVNT
;
12887 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
12888 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
12889 TG3_RX_PTP_CTL_DELAY_REQ
;
12891 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
12892 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
12893 TG3_RX_PTP_CTL_DELAY_REQ
;
12895 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
12896 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
12897 TG3_RX_PTP_CTL_DELAY_REQ
;
12903 if (netif_running(dev
) && tp
->rxptpctl
)
12904 tw32(TG3_RX_PTP_CTL
,
12905 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
12907 return copy_to_user(ifr
->ifr_data
, &stmpconf
, sizeof(stmpconf
)) ?
12911 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
12913 struct mii_ioctl_data
*data
= if_mii(ifr
);
12914 struct tg3
*tp
= netdev_priv(dev
);
12917 if (tg3_flag(tp
, USE_PHYLIB
)) {
12918 struct phy_device
*phydev
;
12919 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12921 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
12922 return phy_mii_ioctl(phydev
, ifr
, cmd
);
12927 data
->phy_id
= tp
->phy_addr
;
12930 case SIOCGMIIREG
: {
12933 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12934 break; /* We have no PHY */
12936 if (!netif_running(dev
))
12939 spin_lock_bh(&tp
->lock
);
12940 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
12941 spin_unlock_bh(&tp
->lock
);
12943 data
->val_out
= mii_regval
;
12949 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12950 break; /* We have no PHY */
12952 if (!netif_running(dev
))
12955 spin_lock_bh(&tp
->lock
);
12956 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
12957 spin_unlock_bh(&tp
->lock
);
12961 case SIOCSHWTSTAMP
:
12962 return tg3_hwtstamp_ioctl(dev
, ifr
, cmd
);
12968 return -EOPNOTSUPP
;
12971 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
12973 struct tg3
*tp
= netdev_priv(dev
);
12975 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
12979 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
12981 struct tg3
*tp
= netdev_priv(dev
);
12982 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
12983 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
12985 if (!tg3_flag(tp
, 5705_PLUS
)) {
12986 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
12987 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
12988 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
12989 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
12992 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
12993 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
12994 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
12995 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
12996 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
12997 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
12998 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
12999 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
13000 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
13001 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
13004 /* No rx interrupts will be generated if both are zero */
13005 if ((ec
->rx_coalesce_usecs
== 0) &&
13006 (ec
->rx_max_coalesced_frames
== 0))
13009 /* No tx interrupts will be generated if both are zero */
13010 if ((ec
->tx_coalesce_usecs
== 0) &&
13011 (ec
->tx_max_coalesced_frames
== 0))
13014 /* Only copy relevant parameters, ignore all others. */
13015 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
13016 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
13017 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
13018 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
13019 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
13020 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
13021 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
13022 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
13023 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
13025 if (netif_running(dev
)) {
13026 tg3_full_lock(tp
, 0);
13027 __tg3_set_coalesce(tp
, &tp
->coal
);
13028 tg3_full_unlock(tp
);
13033 static const struct ethtool_ops tg3_ethtool_ops
= {
13034 .get_settings
= tg3_get_settings
,
13035 .set_settings
= tg3_set_settings
,
13036 .get_drvinfo
= tg3_get_drvinfo
,
13037 .get_regs_len
= tg3_get_regs_len
,
13038 .get_regs
= tg3_get_regs
,
13039 .get_wol
= tg3_get_wol
,
13040 .set_wol
= tg3_set_wol
,
13041 .get_msglevel
= tg3_get_msglevel
,
13042 .set_msglevel
= tg3_set_msglevel
,
13043 .nway_reset
= tg3_nway_reset
,
13044 .get_link
= ethtool_op_get_link
,
13045 .get_eeprom_len
= tg3_get_eeprom_len
,
13046 .get_eeprom
= tg3_get_eeprom
,
13047 .set_eeprom
= tg3_set_eeprom
,
13048 .get_ringparam
= tg3_get_ringparam
,
13049 .set_ringparam
= tg3_set_ringparam
,
13050 .get_pauseparam
= tg3_get_pauseparam
,
13051 .set_pauseparam
= tg3_set_pauseparam
,
13052 .self_test
= tg3_self_test
,
13053 .get_strings
= tg3_get_strings
,
13054 .set_phys_id
= tg3_set_phys_id
,
13055 .get_ethtool_stats
= tg3_get_ethtool_stats
,
13056 .get_coalesce
= tg3_get_coalesce
,
13057 .set_coalesce
= tg3_set_coalesce
,
13058 .get_sset_count
= tg3_get_sset_count
,
13059 .get_rxnfc
= tg3_get_rxnfc
,
13060 .get_rxfh_indir_size
= tg3_get_rxfh_indir_size
,
13061 .get_rxfh_indir
= tg3_get_rxfh_indir
,
13062 .set_rxfh_indir
= tg3_set_rxfh_indir
,
13063 .get_channels
= tg3_get_channels
,
13064 .set_channels
= tg3_set_channels
,
13065 .get_ts_info
= tg3_get_ts_info
,
13068 static struct rtnl_link_stats64
*tg3_get_stats64(struct net_device
*dev
,
13069 struct rtnl_link_stats64
*stats
)
13071 struct tg3
*tp
= netdev_priv(dev
);
13073 spin_lock_bh(&tp
->lock
);
13074 if (!tp
->hw_stats
) {
13075 spin_unlock_bh(&tp
->lock
);
13076 return &tp
->net_stats_prev
;
13079 tg3_get_nstats(tp
, stats
);
13080 spin_unlock_bh(&tp
->lock
);
13085 static void tg3_set_rx_mode(struct net_device
*dev
)
13087 struct tg3
*tp
= netdev_priv(dev
);
13089 if (!netif_running(dev
))
13092 tg3_full_lock(tp
, 0);
13093 __tg3_set_rx_mode(dev
);
13094 tg3_full_unlock(tp
);
13097 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
13100 dev
->mtu
= new_mtu
;
13102 if (new_mtu
> ETH_DATA_LEN
) {
13103 if (tg3_flag(tp
, 5780_CLASS
)) {
13104 netdev_update_features(dev
);
13105 tg3_flag_clear(tp
, TSO_CAPABLE
);
13107 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
13110 if (tg3_flag(tp
, 5780_CLASS
)) {
13111 tg3_flag_set(tp
, TSO_CAPABLE
);
13112 netdev_update_features(dev
);
13114 tg3_flag_clear(tp
, JUMBO_RING_ENABLE
);
13118 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
13120 struct tg3
*tp
= netdev_priv(dev
);
13121 int err
, reset_phy
= 0;
13123 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
13126 if (!netif_running(dev
)) {
13127 /* We'll just catch it later when the
13130 tg3_set_mtu(dev
, tp
, new_mtu
);
13136 tg3_netif_stop(tp
);
13138 tg3_full_lock(tp
, 1);
13140 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13142 tg3_set_mtu(dev
, tp
, new_mtu
);
13144 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13145 * breaks all requests to 256 bytes.
13147 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57766
)
13150 err
= tg3_restart_hw(tp
, reset_phy
);
13153 tg3_netif_start(tp
);
13155 tg3_full_unlock(tp
);
13163 static const struct net_device_ops tg3_netdev_ops
= {
13164 .ndo_open
= tg3_open
,
13165 .ndo_stop
= tg3_close
,
13166 .ndo_start_xmit
= tg3_start_xmit
,
13167 .ndo_get_stats64
= tg3_get_stats64
,
13168 .ndo_validate_addr
= eth_validate_addr
,
13169 .ndo_set_rx_mode
= tg3_set_rx_mode
,
13170 .ndo_set_mac_address
= tg3_set_mac_addr
,
13171 .ndo_do_ioctl
= tg3_ioctl
,
13172 .ndo_tx_timeout
= tg3_tx_timeout
,
13173 .ndo_change_mtu
= tg3_change_mtu
,
13174 .ndo_fix_features
= tg3_fix_features
,
13175 .ndo_set_features
= tg3_set_features
,
13176 #ifdef CONFIG_NET_POLL_CONTROLLER
13177 .ndo_poll_controller
= tg3_poll_controller
,
13181 static void tg3_get_eeprom_size(struct tg3
*tp
)
13183 u32 cursize
, val
, magic
;
13185 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
13187 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
13190 if ((magic
!= TG3_EEPROM_MAGIC
) &&
13191 ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
) &&
13192 ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) != TG3_EEPROM_MAGIC_HW
))
13196 * Size the chip by reading offsets at increasing powers of two.
13197 * When we encounter our validation signature, we know the addressing
13198 * has wrapped around, and thus have our chip size.
13202 while (cursize
< tp
->nvram_size
) {
13203 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
13212 tp
->nvram_size
= cursize
;
13215 static void tg3_get_nvram_size(struct tg3
*tp
)
13219 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &val
) != 0)
13222 /* Selfboot format */
13223 if (val
!= TG3_EEPROM_MAGIC
) {
13224 tg3_get_eeprom_size(tp
);
13228 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
13230 /* This is confusing. We want to operate on the
13231 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13232 * call will read from NVRAM and byteswap the data
13233 * according to the byteswapping settings for all
13234 * other register accesses. This ensures the data we
13235 * want will always reside in the lower 16-bits.
13236 * However, the data in NVRAM is in LE format, which
13237 * means the data from the NVRAM read will always be
13238 * opposite the endianness of the CPU. The 16-bit
13239 * byteswap then brings the data to CPU endianness.
13241 tp
->nvram_size
= swab16((u16
)(val
& 0x0000ffff)) * 1024;
13245 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13248 static void tg3_get_nvram_info(struct tg3
*tp
)
13252 nvcfg1
= tr32(NVRAM_CFG1
);
13253 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
13254 tg3_flag_set(tp
, FLASH
);
13256 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13257 tw32(NVRAM_CFG1
, nvcfg1
);
13260 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
13261 tg3_flag(tp
, 5780_CLASS
)) {
13262 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
13263 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
13264 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13265 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
13266 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13268 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
13269 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13270 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
13272 case FLASH_VENDOR_ATMEL_EEPROM
:
13273 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13274 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13275 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13277 case FLASH_VENDOR_ST
:
13278 tp
->nvram_jedecnum
= JEDEC_ST
;
13279 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
13280 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13282 case FLASH_VENDOR_SAIFUN
:
13283 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
13284 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
13286 case FLASH_VENDOR_SST_SMALL
:
13287 case FLASH_VENDOR_SST_LARGE
:
13288 tp
->nvram_jedecnum
= JEDEC_SST
;
13289 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
13293 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13294 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
13295 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13299 static void tg3_nvram_get_pagesize(struct tg3
*tp
, u32 nvmcfg1
)
13301 switch (nvmcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
13302 case FLASH_5752PAGE_SIZE_256
:
13303 tp
->nvram_pagesize
= 256;
13305 case FLASH_5752PAGE_SIZE_512
:
13306 tp
->nvram_pagesize
= 512;
13308 case FLASH_5752PAGE_SIZE_1K
:
13309 tp
->nvram_pagesize
= 1024;
13311 case FLASH_5752PAGE_SIZE_2K
:
13312 tp
->nvram_pagesize
= 2048;
13314 case FLASH_5752PAGE_SIZE_4K
:
13315 tp
->nvram_pagesize
= 4096;
13317 case FLASH_5752PAGE_SIZE_264
:
13318 tp
->nvram_pagesize
= 264;
13320 case FLASH_5752PAGE_SIZE_528
:
13321 tp
->nvram_pagesize
= 528;
13326 static void tg3_get_5752_nvram_info(struct tg3
*tp
)
13330 nvcfg1
= tr32(NVRAM_CFG1
);
13332 /* NVRAM protection for TPM */
13333 if (nvcfg1
& (1 << 27))
13334 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13336 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13337 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
13338 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
13339 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13340 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13342 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13343 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13344 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13345 tg3_flag_set(tp
, FLASH
);
13347 case FLASH_5752VENDOR_ST_M45PE10
:
13348 case FLASH_5752VENDOR_ST_M45PE20
:
13349 case FLASH_5752VENDOR_ST_M45PE40
:
13350 tp
->nvram_jedecnum
= JEDEC_ST
;
13351 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13352 tg3_flag_set(tp
, FLASH
);
13356 if (tg3_flag(tp
, FLASH
)) {
13357 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13359 /* For eeprom, set pagesize to maximum eeprom size */
13360 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13362 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13363 tw32(NVRAM_CFG1
, nvcfg1
);
13367 static void tg3_get_5755_nvram_info(struct tg3
*tp
)
13369 u32 nvcfg1
, protect
= 0;
13371 nvcfg1
= tr32(NVRAM_CFG1
);
13373 /* NVRAM protection for TPM */
13374 if (nvcfg1
& (1 << 27)) {
13375 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13379 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
13381 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
13382 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
13383 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
13384 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
13385 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13386 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13387 tg3_flag_set(tp
, FLASH
);
13388 tp
->nvram_pagesize
= 264;
13389 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
13390 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
13391 tp
->nvram_size
= (protect
? 0x3e200 :
13392 TG3_NVRAM_SIZE_512KB
);
13393 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
13394 tp
->nvram_size
= (protect
? 0x1f200 :
13395 TG3_NVRAM_SIZE_256KB
);
13397 tp
->nvram_size
= (protect
? 0x1f200 :
13398 TG3_NVRAM_SIZE_128KB
);
13400 case FLASH_5752VENDOR_ST_M45PE10
:
13401 case FLASH_5752VENDOR_ST_M45PE20
:
13402 case FLASH_5752VENDOR_ST_M45PE40
:
13403 tp
->nvram_jedecnum
= JEDEC_ST
;
13404 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13405 tg3_flag_set(tp
, FLASH
);
13406 tp
->nvram_pagesize
= 256;
13407 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
13408 tp
->nvram_size
= (protect
?
13409 TG3_NVRAM_SIZE_64KB
:
13410 TG3_NVRAM_SIZE_128KB
);
13411 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
13412 tp
->nvram_size
= (protect
?
13413 TG3_NVRAM_SIZE_64KB
:
13414 TG3_NVRAM_SIZE_256KB
);
13416 tp
->nvram_size
= (protect
?
13417 TG3_NVRAM_SIZE_128KB
:
13418 TG3_NVRAM_SIZE_512KB
);
13423 static void tg3_get_5787_nvram_info(struct tg3
*tp
)
13427 nvcfg1
= tr32(NVRAM_CFG1
);
13429 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13430 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
13431 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
13432 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
13433 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
13434 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13435 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13436 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13438 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13439 tw32(NVRAM_CFG1
, nvcfg1
);
13441 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13442 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
13443 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
13444 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
13445 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13446 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13447 tg3_flag_set(tp
, FLASH
);
13448 tp
->nvram_pagesize
= 264;
13450 case FLASH_5752VENDOR_ST_M45PE10
:
13451 case FLASH_5752VENDOR_ST_M45PE20
:
13452 case FLASH_5752VENDOR_ST_M45PE40
:
13453 tp
->nvram_jedecnum
= JEDEC_ST
;
13454 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13455 tg3_flag_set(tp
, FLASH
);
13456 tp
->nvram_pagesize
= 256;
13461 static void tg3_get_5761_nvram_info(struct tg3
*tp
)
13463 u32 nvcfg1
, protect
= 0;
13465 nvcfg1
= tr32(NVRAM_CFG1
);
13467 /* NVRAM protection for TPM */
13468 if (nvcfg1
& (1 << 27)) {
13469 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13473 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
13475 case FLASH_5761VENDOR_ATMEL_ADB021D
:
13476 case FLASH_5761VENDOR_ATMEL_ADB041D
:
13477 case FLASH_5761VENDOR_ATMEL_ADB081D
:
13478 case FLASH_5761VENDOR_ATMEL_ADB161D
:
13479 case FLASH_5761VENDOR_ATMEL_MDB021D
:
13480 case FLASH_5761VENDOR_ATMEL_MDB041D
:
13481 case FLASH_5761VENDOR_ATMEL_MDB081D
:
13482 case FLASH_5761VENDOR_ATMEL_MDB161D
:
13483 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13484 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13485 tg3_flag_set(tp
, FLASH
);
13486 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13487 tp
->nvram_pagesize
= 256;
13489 case FLASH_5761VENDOR_ST_A_M45PE20
:
13490 case FLASH_5761VENDOR_ST_A_M45PE40
:
13491 case FLASH_5761VENDOR_ST_A_M45PE80
:
13492 case FLASH_5761VENDOR_ST_A_M45PE16
:
13493 case FLASH_5761VENDOR_ST_M_M45PE20
:
13494 case FLASH_5761VENDOR_ST_M_M45PE40
:
13495 case FLASH_5761VENDOR_ST_M_M45PE80
:
13496 case FLASH_5761VENDOR_ST_M_M45PE16
:
13497 tp
->nvram_jedecnum
= JEDEC_ST
;
13498 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13499 tg3_flag_set(tp
, FLASH
);
13500 tp
->nvram_pagesize
= 256;
13505 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
13508 case FLASH_5761VENDOR_ATMEL_ADB161D
:
13509 case FLASH_5761VENDOR_ATMEL_MDB161D
:
13510 case FLASH_5761VENDOR_ST_A_M45PE16
:
13511 case FLASH_5761VENDOR_ST_M_M45PE16
:
13512 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
13514 case FLASH_5761VENDOR_ATMEL_ADB081D
:
13515 case FLASH_5761VENDOR_ATMEL_MDB081D
:
13516 case FLASH_5761VENDOR_ST_A_M45PE80
:
13517 case FLASH_5761VENDOR_ST_M_M45PE80
:
13518 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13520 case FLASH_5761VENDOR_ATMEL_ADB041D
:
13521 case FLASH_5761VENDOR_ATMEL_MDB041D
:
13522 case FLASH_5761VENDOR_ST_A_M45PE40
:
13523 case FLASH_5761VENDOR_ST_M_M45PE40
:
13524 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13526 case FLASH_5761VENDOR_ATMEL_ADB021D
:
13527 case FLASH_5761VENDOR_ATMEL_MDB021D
:
13528 case FLASH_5761VENDOR_ST_A_M45PE20
:
13529 case FLASH_5761VENDOR_ST_M_M45PE20
:
13530 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13536 static void tg3_get_5906_nvram_info(struct tg3
*tp
)
13538 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13539 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13540 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13543 static void tg3_get_57780_nvram_info(struct tg3
*tp
)
13547 nvcfg1
= tr32(NVRAM_CFG1
);
13549 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13550 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
13551 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
13552 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13553 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13554 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13556 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13557 tw32(NVRAM_CFG1
, nvcfg1
);
13559 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13560 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
13561 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
13562 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
13563 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
13564 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
13565 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
13566 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13567 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13568 tg3_flag_set(tp
, FLASH
);
13570 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13571 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13572 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
13573 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
13574 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13576 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
13577 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
13578 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13580 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
13581 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
13582 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13586 case FLASH_5752VENDOR_ST_M45PE10
:
13587 case FLASH_5752VENDOR_ST_M45PE20
:
13588 case FLASH_5752VENDOR_ST_M45PE40
:
13589 tp
->nvram_jedecnum
= JEDEC_ST
;
13590 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13591 tg3_flag_set(tp
, FLASH
);
13593 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13594 case FLASH_5752VENDOR_ST_M45PE10
:
13595 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13597 case FLASH_5752VENDOR_ST_M45PE20
:
13598 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13600 case FLASH_5752VENDOR_ST_M45PE40
:
13601 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13606 tg3_flag_set(tp
, NO_NVRAM
);
13610 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13611 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13612 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13616 static void tg3_get_5717_nvram_info(struct tg3
*tp
)
13620 nvcfg1
= tr32(NVRAM_CFG1
);
13622 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13623 case FLASH_5717VENDOR_ATMEL_EEPROM
:
13624 case FLASH_5717VENDOR_MICRO_EEPROM
:
13625 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13626 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13627 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13629 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13630 tw32(NVRAM_CFG1
, nvcfg1
);
13632 case FLASH_5717VENDOR_ATMEL_MDB011D
:
13633 case FLASH_5717VENDOR_ATMEL_ADB011B
:
13634 case FLASH_5717VENDOR_ATMEL_ADB011D
:
13635 case FLASH_5717VENDOR_ATMEL_MDB021D
:
13636 case FLASH_5717VENDOR_ATMEL_ADB021B
:
13637 case FLASH_5717VENDOR_ATMEL_ADB021D
:
13638 case FLASH_5717VENDOR_ATMEL_45USPT
:
13639 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13640 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13641 tg3_flag_set(tp
, FLASH
);
13643 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13644 case FLASH_5717VENDOR_ATMEL_MDB021D
:
13645 /* Detect size with tg3_nvram_get_size() */
13647 case FLASH_5717VENDOR_ATMEL_ADB021B
:
13648 case FLASH_5717VENDOR_ATMEL_ADB021D
:
13649 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13652 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13656 case FLASH_5717VENDOR_ST_M_M25PE10
:
13657 case FLASH_5717VENDOR_ST_A_M25PE10
:
13658 case FLASH_5717VENDOR_ST_M_M45PE10
:
13659 case FLASH_5717VENDOR_ST_A_M45PE10
:
13660 case FLASH_5717VENDOR_ST_M_M25PE20
:
13661 case FLASH_5717VENDOR_ST_A_M25PE20
:
13662 case FLASH_5717VENDOR_ST_M_M45PE20
:
13663 case FLASH_5717VENDOR_ST_A_M45PE20
:
13664 case FLASH_5717VENDOR_ST_25USPT
:
13665 case FLASH_5717VENDOR_ST_45USPT
:
13666 tp
->nvram_jedecnum
= JEDEC_ST
;
13667 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13668 tg3_flag_set(tp
, FLASH
);
13670 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13671 case FLASH_5717VENDOR_ST_M_M25PE20
:
13672 case FLASH_5717VENDOR_ST_M_M45PE20
:
13673 /* Detect size with tg3_nvram_get_size() */
13675 case FLASH_5717VENDOR_ST_A_M25PE20
:
13676 case FLASH_5717VENDOR_ST_A_M45PE20
:
13677 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13680 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13685 tg3_flag_set(tp
, NO_NVRAM
);
13689 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13690 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13691 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13694 static void tg3_get_5720_nvram_info(struct tg3
*tp
)
13696 u32 nvcfg1
, nvmpinstrp
;
13698 nvcfg1
= tr32(NVRAM_CFG1
);
13699 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
13701 switch (nvmpinstrp
) {
13702 case FLASH_5720_EEPROM_HD
:
13703 case FLASH_5720_EEPROM_LD
:
13704 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13705 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13707 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13708 tw32(NVRAM_CFG1
, nvcfg1
);
13709 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
13710 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13712 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
13714 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
13715 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
13716 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
13717 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
13718 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
13719 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
13720 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
13721 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
13722 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
13723 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
13724 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
13725 case FLASH_5720VENDOR_ATMEL_45USPT
:
13726 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13727 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13728 tg3_flag_set(tp
, FLASH
);
13730 switch (nvmpinstrp
) {
13731 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
13732 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
13733 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
13734 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13736 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
13737 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
13738 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
13739 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13741 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
13742 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
13743 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13746 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13750 case FLASH_5720VENDOR_M_ST_M25PE10
:
13751 case FLASH_5720VENDOR_M_ST_M45PE10
:
13752 case FLASH_5720VENDOR_A_ST_M25PE10
:
13753 case FLASH_5720VENDOR_A_ST_M45PE10
:
13754 case FLASH_5720VENDOR_M_ST_M25PE20
:
13755 case FLASH_5720VENDOR_M_ST_M45PE20
:
13756 case FLASH_5720VENDOR_A_ST_M25PE20
:
13757 case FLASH_5720VENDOR_A_ST_M45PE20
:
13758 case FLASH_5720VENDOR_M_ST_M25PE40
:
13759 case FLASH_5720VENDOR_M_ST_M45PE40
:
13760 case FLASH_5720VENDOR_A_ST_M25PE40
:
13761 case FLASH_5720VENDOR_A_ST_M45PE40
:
13762 case FLASH_5720VENDOR_M_ST_M25PE80
:
13763 case FLASH_5720VENDOR_M_ST_M45PE80
:
13764 case FLASH_5720VENDOR_A_ST_M25PE80
:
13765 case FLASH_5720VENDOR_A_ST_M45PE80
:
13766 case FLASH_5720VENDOR_ST_25USPT
:
13767 case FLASH_5720VENDOR_ST_45USPT
:
13768 tp
->nvram_jedecnum
= JEDEC_ST
;
13769 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13770 tg3_flag_set(tp
, FLASH
);
13772 switch (nvmpinstrp
) {
13773 case FLASH_5720VENDOR_M_ST_M25PE20
:
13774 case FLASH_5720VENDOR_M_ST_M45PE20
:
13775 case FLASH_5720VENDOR_A_ST_M25PE20
:
13776 case FLASH_5720VENDOR_A_ST_M45PE20
:
13777 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
13779 case FLASH_5720VENDOR_M_ST_M25PE40
:
13780 case FLASH_5720VENDOR_M_ST_M45PE40
:
13781 case FLASH_5720VENDOR_A_ST_M25PE40
:
13782 case FLASH_5720VENDOR_A_ST_M45PE40
:
13783 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13785 case FLASH_5720VENDOR_M_ST_M25PE80
:
13786 case FLASH_5720VENDOR_M_ST_M45PE80
:
13787 case FLASH_5720VENDOR_A_ST_M25PE80
:
13788 case FLASH_5720VENDOR_A_ST_M45PE80
:
13789 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
13792 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
13797 tg3_flag_set(tp
, NO_NVRAM
);
13801 tg3_nvram_get_pagesize(tp
, nvcfg1
);
13802 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
13803 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
13806 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13807 static void tg3_nvram_init(struct tg3
*tp
)
13809 tw32_f(GRC_EEPROM_ADDR
,
13810 (EEPROM_ADDR_FSM_RESET
|
13811 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
13812 EEPROM_ADDR_CLKPERD_SHIFT
)));
13816 /* Enable seeprom accesses. */
13817 tw32_f(GRC_LOCAL_CTRL
,
13818 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
13821 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13822 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
13823 tg3_flag_set(tp
, NVRAM
);
13825 if (tg3_nvram_lock(tp
)) {
13826 netdev_warn(tp
->dev
,
13827 "Cannot get nvram lock, %s failed\n",
13831 tg3_enable_nvram_access(tp
);
13833 tp
->nvram_size
= 0;
13835 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
13836 tg3_get_5752_nvram_info(tp
);
13837 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
13838 tg3_get_5755_nvram_info(tp
);
13839 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
13840 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
13841 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
13842 tg3_get_5787_nvram_info(tp
);
13843 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
)
13844 tg3_get_5761_nvram_info(tp
);
13845 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
13846 tg3_get_5906_nvram_info(tp
);
13847 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
13848 tg3_flag(tp
, 57765_CLASS
))
13849 tg3_get_57780_nvram_info(tp
);
13850 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
13851 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
13852 tg3_get_5717_nvram_info(tp
);
13853 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
13854 tg3_get_5720_nvram_info(tp
);
13856 tg3_get_nvram_info(tp
);
13858 if (tp
->nvram_size
== 0)
13859 tg3_get_nvram_size(tp
);
13861 tg3_disable_nvram_access(tp
);
13862 tg3_nvram_unlock(tp
);
13865 tg3_flag_clear(tp
, NVRAM
);
13866 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
13868 tg3_get_eeprom_size(tp
);
13872 struct subsys_tbl_ent
{
13873 u16 subsys_vendor
, subsys_devid
;
13877 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
13878 /* Broadcom boards. */
13879 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13880 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6
, TG3_PHY_ID_BCM5401
},
13881 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13882 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5
, TG3_PHY_ID_BCM5701
},
13883 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13884 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6
, TG3_PHY_ID_BCM8002
},
13885 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13886 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9
, 0 },
13887 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13888 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1
, TG3_PHY_ID_BCM5701
},
13889 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13890 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8
, TG3_PHY_ID_BCM5701
},
13891 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13892 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7
, 0 },
13893 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13894 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10
, TG3_PHY_ID_BCM5701
},
13895 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13896 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12
, TG3_PHY_ID_BCM5701
},
13897 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13898 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1
, TG3_PHY_ID_BCM5703
},
13899 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
13900 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2
, TG3_PHY_ID_BCM5703
},
13903 { TG3PCI_SUBVENDOR_ID_3COM
,
13904 TG3PCI_SUBDEVICE_ID_3COM_3C996T
, TG3_PHY_ID_BCM5401
},
13905 { TG3PCI_SUBVENDOR_ID_3COM
,
13906 TG3PCI_SUBDEVICE_ID_3COM_3C996BT
, TG3_PHY_ID_BCM5701
},
13907 { TG3PCI_SUBVENDOR_ID_3COM
,
13908 TG3PCI_SUBDEVICE_ID_3COM_3C996SX
, 0 },
13909 { TG3PCI_SUBVENDOR_ID_3COM
,
13910 TG3PCI_SUBDEVICE_ID_3COM_3C1000T
, TG3_PHY_ID_BCM5701
},
13911 { TG3PCI_SUBVENDOR_ID_3COM
,
13912 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01
, TG3_PHY_ID_BCM5701
},
13915 { TG3PCI_SUBVENDOR_ID_DELL
,
13916 TG3PCI_SUBDEVICE_ID_DELL_VIPER
, TG3_PHY_ID_BCM5401
},
13917 { TG3PCI_SUBVENDOR_ID_DELL
,
13918 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR
, TG3_PHY_ID_BCM5401
},
13919 { TG3PCI_SUBVENDOR_ID_DELL
,
13920 TG3PCI_SUBDEVICE_ID_DELL_MERLOT
, TG3_PHY_ID_BCM5411
},
13921 { TG3PCI_SUBVENDOR_ID_DELL
,
13922 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT
, TG3_PHY_ID_BCM5411
},
13924 /* Compaq boards. */
13925 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
13926 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE
, TG3_PHY_ID_BCM5701
},
13927 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
13928 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2
, TG3_PHY_ID_BCM5701
},
13929 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
13930 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING
, 0 },
13931 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
13932 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780
, TG3_PHY_ID_BCM5701
},
13933 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
13934 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2
, TG3_PHY_ID_BCM5701
},
13937 { TG3PCI_SUBVENDOR_ID_IBM
,
13938 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2
, 0 }
13941 static struct subsys_tbl_ent
*tg3_lookup_by_subsys(struct tg3
*tp
)
13945 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
13946 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
13947 tp
->pdev
->subsystem_vendor
) &&
13948 (subsys_id_to_phy_id
[i
].subsys_devid
==
13949 tp
->pdev
->subsystem_device
))
13950 return &subsys_id_to_phy_id
[i
];
13955 static void tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
13959 tp
->phy_id
= TG3_PHY_ID_INVALID
;
13960 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
13962 /* Assume an onboard device and WOL capable by default. */
13963 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
13964 tg3_flag_set(tp
, WOL_CAP
);
13966 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
13967 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
13968 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
13969 tg3_flag_set(tp
, IS_NIC
);
13971 val
= tr32(VCPU_CFGSHDW
);
13972 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
13973 tg3_flag_set(tp
, ASPM_WORKAROUND
);
13974 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
13975 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
13976 tg3_flag_set(tp
, WOL_ENABLE
);
13977 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
13982 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
13983 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
13984 u32 nic_cfg
, led_cfg
;
13985 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
13986 int eeprom_phy_serdes
= 0;
13988 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
13989 tp
->nic_sram_data_cfg
= nic_cfg
;
13991 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
13992 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
13993 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
13994 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
13995 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
&&
13996 (ver
> 0) && (ver
< 0x100))
13997 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
13999 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
)
14000 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
14002 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
14003 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
14004 eeprom_phy_serdes
= 1;
14006 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
14007 if (nic_phy_id
!= 0) {
14008 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
14009 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
14011 eeprom_phy_id
= (id1
>> 16) << 10;
14012 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
14013 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
14017 tp
->phy_id
= eeprom_phy_id
;
14018 if (eeprom_phy_serdes
) {
14019 if (!tg3_flag(tp
, 5705_PLUS
))
14020 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14022 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
14025 if (tg3_flag(tp
, 5750_PLUS
))
14026 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
14027 SHASTA_EXT_LED_MODE_MASK
);
14029 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
14033 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
14034 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14037 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
14038 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14041 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
14042 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
14044 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14045 * read on some older 5700/5701 bootcode.
14047 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
14049 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
14051 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14055 case SHASTA_EXT_LED_SHARED
:
14056 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
14057 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
14058 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
14059 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14060 LED_CTRL_MODE_PHY_2
);
14063 case SHASTA_EXT_LED_MAC
:
14064 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
14067 case SHASTA_EXT_LED_COMBO
:
14068 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
14069 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
14070 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14071 LED_CTRL_MODE_PHY_2
);
14076 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
14077 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
14078 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
14079 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14081 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5784_AX
)
14082 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14084 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
14085 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14086 if ((tp
->pdev
->subsystem_vendor
==
14087 PCI_VENDOR_ID_ARIMA
) &&
14088 (tp
->pdev
->subsystem_device
== 0x205a ||
14089 tp
->pdev
->subsystem_device
== 0x2063))
14090 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14092 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14093 tg3_flag_set(tp
, IS_NIC
);
14096 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
14097 tg3_flag_set(tp
, ENABLE_ASF
);
14098 if (tg3_flag(tp
, 5750_PLUS
))
14099 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
14102 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
14103 tg3_flag(tp
, 5750_PLUS
))
14104 tg3_flag_set(tp
, ENABLE_APE
);
14106 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
14107 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
14108 tg3_flag_clear(tp
, WOL_CAP
);
14110 if (tg3_flag(tp
, WOL_CAP
) &&
14111 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
14112 tg3_flag_set(tp
, WOL_ENABLE
);
14113 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14116 if (cfg2
& (1 << 17))
14117 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
14119 /* serdes signal pre-emphasis in register 0x590 set by */
14120 /* bootcode if bit 18 is set */
14121 if (cfg2
& (1 << 18))
14122 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
14124 if ((tg3_flag(tp
, 57765_PLUS
) ||
14125 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
14126 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
)) &&
14127 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
14128 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
14130 if (tg3_flag(tp
, PCI_EXPRESS
) &&
14131 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
14132 !tg3_flag(tp
, 57765_PLUS
)) {
14135 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
14136 if (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
)
14137 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14140 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
14141 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
14142 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
14143 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
14144 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
14145 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
14148 if (tg3_flag(tp
, WOL_CAP
))
14149 device_set_wakeup_enable(&tp
->pdev
->dev
,
14150 tg3_flag(tp
, WOL_ENABLE
));
14152 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
14155 static int tg3_issue_otp_command(struct tg3
*tp
, u32 cmd
)
14160 tw32(OTP_CTRL
, cmd
| OTP_CTRL_OTP_CMD_START
);
14161 tw32(OTP_CTRL
, cmd
);
14163 /* Wait for up to 1 ms for command to execute. */
14164 for (i
= 0; i
< 100; i
++) {
14165 val
= tr32(OTP_STATUS
);
14166 if (val
& OTP_STATUS_CMD_DONE
)
14171 return (val
& OTP_STATUS_CMD_DONE
) ? 0 : -EBUSY
;
14174 /* Read the gphy configuration from the OTP region of the chip. The gphy
14175 * configuration is a 32-bit value that straddles the alignment boundary.
14176 * We do two 32-bit reads and then shift and merge the results.
14178 static u32
tg3_read_otp_phycfg(struct tg3
*tp
)
14180 u32 bhalf_otp
, thalf_otp
;
14182 tw32(OTP_MODE
, OTP_MODE_OTP_THRU_GRC
);
14184 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_INIT
))
14187 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC1
);
14189 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
14192 thalf_otp
= tr32(OTP_READ_DATA
);
14194 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC2
);
14196 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
14199 bhalf_otp
= tr32(OTP_READ_DATA
);
14201 return ((thalf_otp
& 0x0000ffff) << 16) | (bhalf_otp
>> 16);
14204 static void tg3_phy_init_link_config(struct tg3
*tp
)
14206 u32 adv
= ADVERTISED_Autoneg
;
14208 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
14209 adv
|= ADVERTISED_1000baseT_Half
|
14210 ADVERTISED_1000baseT_Full
;
14212 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
14213 adv
|= ADVERTISED_100baseT_Half
|
14214 ADVERTISED_100baseT_Full
|
14215 ADVERTISED_10baseT_Half
|
14216 ADVERTISED_10baseT_Full
|
14219 adv
|= ADVERTISED_FIBRE
;
14221 tp
->link_config
.advertising
= adv
;
14222 tp
->link_config
.speed
= SPEED_UNKNOWN
;
14223 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
14224 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
14225 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
14226 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
14231 static int tg3_phy_probe(struct tg3
*tp
)
14233 u32 hw_phy_id_1
, hw_phy_id_2
;
14234 u32 hw_phy_id
, hw_phy_id_masked
;
14237 /* flow control autonegotiation is default behavior */
14238 tg3_flag_set(tp
, PAUSE_AUTONEG
);
14239 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
14241 if (tg3_flag(tp
, ENABLE_APE
)) {
14242 switch (tp
->pci_fn
) {
14244 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY0
;
14247 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY1
;
14250 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY2
;
14253 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY3
;
14258 if (tg3_flag(tp
, USE_PHYLIB
))
14259 return tg3_phy_init(tp
);
14261 /* Reading the PHY ID register can conflict with ASF
14262 * firmware access to the PHY hardware.
14265 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
14266 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
14268 /* Now read the physical PHY_ID from the chip and verify
14269 * that it is sane. If it doesn't look good, we fall back
14270 * to either the hard-coded table based PHY_ID and failing
14271 * that the value found in the eeprom area.
14273 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
14274 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
14276 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
14277 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
14278 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
14280 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
14283 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
14284 tp
->phy_id
= hw_phy_id
;
14285 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
14286 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14288 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
14290 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
14291 /* Do nothing, phy ID already set up in
14292 * tg3_get_eeprom_hw_cfg().
14295 struct subsys_tbl_ent
*p
;
14297 /* No eeprom signature? Try the hardcoded
14298 * subsys device table.
14300 p
= tg3_lookup_by_subsys(tp
);
14304 tp
->phy_id
= p
->phy_id
;
14306 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
14307 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14311 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
14312 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
14313 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
||
14314 (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
&&
14315 tp
->pci_chip_rev_id
!= CHIPREV_ID_5717_A0
) ||
14316 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
&&
14317 tp
->pci_chip_rev_id
!= CHIPREV_ID_57765_A0
)))
14318 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
14320 tg3_phy_init_link_config(tp
);
14322 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
14323 !tg3_flag(tp
, ENABLE_APE
) &&
14324 !tg3_flag(tp
, ENABLE_ASF
)) {
14327 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
14328 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
14329 (bmsr
& BMSR_LSTATUS
))
14330 goto skip_phy_reset
;
14332 err
= tg3_phy_reset(tp
);
14336 tg3_phy_set_wirespeed(tp
);
14338 if (!tg3_phy_copper_an_config_ok(tp
, &dummy
)) {
14339 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
14340 tp
->link_config
.flowctrl
);
14342 tg3_writephy(tp
, MII_BMCR
,
14343 BMCR_ANENABLE
| BMCR_ANRESTART
);
14348 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
14349 err
= tg3_init_5401phy_dsp(tp
);
14353 err
= tg3_init_5401phy_dsp(tp
);
14359 static void tg3_read_vpd(struct tg3
*tp
)
14362 unsigned int block_end
, rosize
, len
;
14366 vpd_data
= (u8
*)tg3_vpd_readblock(tp
, &vpdlen
);
14370 i
= pci_vpd_find_tag(vpd_data
, 0, vpdlen
, PCI_VPD_LRDT_RO_DATA
);
14372 goto out_not_found
;
14374 rosize
= pci_vpd_lrdt_size(&vpd_data
[i
]);
14375 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+ rosize
;
14376 i
+= PCI_VPD_LRDT_TAG_SIZE
;
14378 if (block_end
> vpdlen
)
14379 goto out_not_found
;
14381 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
14382 PCI_VPD_RO_KEYWORD_MFR_ID
);
14384 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
14386 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
14387 if (j
+ len
> block_end
|| len
!= 4 ||
14388 memcmp(&vpd_data
[j
], "1028", 4))
14391 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
14392 PCI_VPD_RO_KEYWORD_VENDOR0
);
14396 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
14398 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
14399 if (j
+ len
> block_end
)
14402 memcpy(tp
->fw_ver
, &vpd_data
[j
], len
);
14403 strncat(tp
->fw_ver
, " bc ", vpdlen
- len
- 1);
14407 i
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
14408 PCI_VPD_RO_KEYWORD_PARTNO
);
14410 goto out_not_found
;
14412 len
= pci_vpd_info_field_size(&vpd_data
[i
]);
14414 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
14415 if (len
> TG3_BPN_SIZE
||
14416 (len
+ i
) > vpdlen
)
14417 goto out_not_found
;
14419 memcpy(tp
->board_part_number
, &vpd_data
[i
], len
);
14423 if (tp
->board_part_number
[0])
14427 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
) {
14428 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
14429 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
)
14430 strcpy(tp
->board_part_number
, "BCM5717");
14431 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
)
14432 strcpy(tp
->board_part_number
, "BCM5718");
14435 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
) {
14436 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57780
)
14437 strcpy(tp
->board_part_number
, "BCM57780");
14438 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57760
)
14439 strcpy(tp
->board_part_number
, "BCM57760");
14440 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
)
14441 strcpy(tp
->board_part_number
, "BCM57790");
14442 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57788
)
14443 strcpy(tp
->board_part_number
, "BCM57788");
14446 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
) {
14447 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
)
14448 strcpy(tp
->board_part_number
, "BCM57761");
14449 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
)
14450 strcpy(tp
->board_part_number
, "BCM57765");
14451 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
)
14452 strcpy(tp
->board_part_number
, "BCM57781");
14453 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
)
14454 strcpy(tp
->board_part_number
, "BCM57785");
14455 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
)
14456 strcpy(tp
->board_part_number
, "BCM57791");
14457 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
14458 strcpy(tp
->board_part_number
, "BCM57795");
14461 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57766
) {
14462 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
)
14463 strcpy(tp
->board_part_number
, "BCM57762");
14464 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
)
14465 strcpy(tp
->board_part_number
, "BCM57766");
14466 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
)
14467 strcpy(tp
->board_part_number
, "BCM57782");
14468 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
14469 strcpy(tp
->board_part_number
, "BCM57786");
14472 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
14473 strcpy(tp
->board_part_number
, "BCM95906");
14476 strcpy(tp
->board_part_number
, "none");
14480 static int tg3_fw_img_is_valid(struct tg3
*tp
, u32 offset
)
14484 if (tg3_nvram_read(tp
, offset
, &val
) ||
14485 (val
& 0xfc000000) != 0x0c000000 ||
14486 tg3_nvram_read(tp
, offset
+ 4, &val
) ||
14493 static void tg3_read_bc_ver(struct tg3
*tp
)
14495 u32 val
, offset
, start
, ver_offset
;
14497 bool newver
= false;
14499 if (tg3_nvram_read(tp
, 0xc, &offset
) ||
14500 tg3_nvram_read(tp
, 0x4, &start
))
14503 offset
= tg3_nvram_logical_addr(tp
, offset
);
14505 if (tg3_nvram_read(tp
, offset
, &val
))
14508 if ((val
& 0xfc000000) == 0x0c000000) {
14509 if (tg3_nvram_read(tp
, offset
+ 4, &val
))
14516 dst_off
= strlen(tp
->fw_ver
);
14519 if (TG3_VER_SIZE
- dst_off
< 16 ||
14520 tg3_nvram_read(tp
, offset
+ 8, &ver_offset
))
14523 offset
= offset
+ ver_offset
- start
;
14524 for (i
= 0; i
< 16; i
+= 4) {
14526 if (tg3_nvram_read_be32(tp
, offset
+ i
, &v
))
14529 memcpy(tp
->fw_ver
+ dst_off
+ i
, &v
, sizeof(v
));
14534 if (tg3_nvram_read(tp
, TG3_NVM_PTREV_BCVER
, &ver_offset
))
14537 major
= (ver_offset
& TG3_NVM_BCVER_MAJMSK
) >>
14538 TG3_NVM_BCVER_MAJSFT
;
14539 minor
= ver_offset
& TG3_NVM_BCVER_MINMSK
;
14540 snprintf(&tp
->fw_ver
[dst_off
], TG3_VER_SIZE
- dst_off
,
14541 "v%d.%02d", major
, minor
);
14545 static void tg3_read_hwsb_ver(struct tg3
*tp
)
14547 u32 val
, major
, minor
;
14549 /* Use native endian representation */
14550 if (tg3_nvram_read(tp
, TG3_NVM_HWSB_CFG1
, &val
))
14553 major
= (val
& TG3_NVM_HWSB_CFG1_MAJMSK
) >>
14554 TG3_NVM_HWSB_CFG1_MAJSFT
;
14555 minor
= (val
& TG3_NVM_HWSB_CFG1_MINMSK
) >>
14556 TG3_NVM_HWSB_CFG1_MINSFT
;
14558 snprintf(&tp
->fw_ver
[0], 32, "sb v%d.%02d", major
, minor
);
14561 static void tg3_read_sb_ver(struct tg3
*tp
, u32 val
)
14563 u32 offset
, major
, minor
, build
;
14565 strncat(tp
->fw_ver
, "sb", TG3_VER_SIZE
- strlen(tp
->fw_ver
) - 1);
14567 if ((val
& TG3_EEPROM_SB_FORMAT_MASK
) != TG3_EEPROM_SB_FORMAT_1
)
14570 switch (val
& TG3_EEPROM_SB_REVISION_MASK
) {
14571 case TG3_EEPROM_SB_REVISION_0
:
14572 offset
= TG3_EEPROM_SB_F1R0_EDH_OFF
;
14574 case TG3_EEPROM_SB_REVISION_2
:
14575 offset
= TG3_EEPROM_SB_F1R2_EDH_OFF
;
14577 case TG3_EEPROM_SB_REVISION_3
:
14578 offset
= TG3_EEPROM_SB_F1R3_EDH_OFF
;
14580 case TG3_EEPROM_SB_REVISION_4
:
14581 offset
= TG3_EEPROM_SB_F1R4_EDH_OFF
;
14583 case TG3_EEPROM_SB_REVISION_5
:
14584 offset
= TG3_EEPROM_SB_F1R5_EDH_OFF
;
14586 case TG3_EEPROM_SB_REVISION_6
:
14587 offset
= TG3_EEPROM_SB_F1R6_EDH_OFF
;
14593 if (tg3_nvram_read(tp
, offset
, &val
))
14596 build
= (val
& TG3_EEPROM_SB_EDH_BLD_MASK
) >>
14597 TG3_EEPROM_SB_EDH_BLD_SHFT
;
14598 major
= (val
& TG3_EEPROM_SB_EDH_MAJ_MASK
) >>
14599 TG3_EEPROM_SB_EDH_MAJ_SHFT
;
14600 minor
= val
& TG3_EEPROM_SB_EDH_MIN_MASK
;
14602 if (minor
> 99 || build
> 26)
14605 offset
= strlen(tp
->fw_ver
);
14606 snprintf(&tp
->fw_ver
[offset
], TG3_VER_SIZE
- offset
,
14607 " v%d.%02d", major
, minor
);
14610 offset
= strlen(tp
->fw_ver
);
14611 if (offset
< TG3_VER_SIZE
- 1)
14612 tp
->fw_ver
[offset
] = 'a' + build
- 1;
14616 static void tg3_read_mgmtfw_ver(struct tg3
*tp
)
14618 u32 val
, offset
, start
;
14621 for (offset
= TG3_NVM_DIR_START
;
14622 offset
< TG3_NVM_DIR_END
;
14623 offset
+= TG3_NVM_DIRENT_SIZE
) {
14624 if (tg3_nvram_read(tp
, offset
, &val
))
14627 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) == TG3_NVM_DIRTYPE_ASFINI
)
14631 if (offset
== TG3_NVM_DIR_END
)
14634 if (!tg3_flag(tp
, 5705_PLUS
))
14635 start
= 0x08000000;
14636 else if (tg3_nvram_read(tp
, offset
- 4, &start
))
14639 if (tg3_nvram_read(tp
, offset
+ 4, &offset
) ||
14640 !tg3_fw_img_is_valid(tp
, offset
) ||
14641 tg3_nvram_read(tp
, offset
+ 8, &val
))
14644 offset
+= val
- start
;
14646 vlen
= strlen(tp
->fw_ver
);
14648 tp
->fw_ver
[vlen
++] = ',';
14649 tp
->fw_ver
[vlen
++] = ' ';
14651 for (i
= 0; i
< 4; i
++) {
14653 if (tg3_nvram_read_be32(tp
, offset
, &v
))
14656 offset
+= sizeof(v
);
14658 if (vlen
> TG3_VER_SIZE
- sizeof(v
)) {
14659 memcpy(&tp
->fw_ver
[vlen
], &v
, TG3_VER_SIZE
- vlen
);
14663 memcpy(&tp
->fw_ver
[vlen
], &v
, sizeof(v
));
14668 static void tg3_probe_ncsi(struct tg3
*tp
)
14672 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
14673 if (apedata
!= APE_SEG_SIG_MAGIC
)
14676 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
14677 if (!(apedata
& APE_FW_STATUS_READY
))
14680 if (tg3_ape_read32(tp
, TG3_APE_FW_FEATURES
) & TG3_APE_FW_FEATURE_NCSI
)
14681 tg3_flag_set(tp
, APE_HAS_NCSI
);
14684 static void tg3_read_dash_ver(struct tg3
*tp
)
14690 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_VERSION
);
14692 if (tg3_flag(tp
, APE_HAS_NCSI
))
14697 vlen
= strlen(tp
->fw_ver
);
14699 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " %s v%d.%d.%d.%d",
14701 (apedata
& APE_FW_VERSION_MAJMSK
) >> APE_FW_VERSION_MAJSFT
,
14702 (apedata
& APE_FW_VERSION_MINMSK
) >> APE_FW_VERSION_MINSFT
,
14703 (apedata
& APE_FW_VERSION_REVMSK
) >> APE_FW_VERSION_REVSFT
,
14704 (apedata
& APE_FW_VERSION_BLDMSK
));
14707 static void tg3_read_fw_ver(struct tg3
*tp
)
14710 bool vpd_vers
= false;
14712 if (tp
->fw_ver
[0] != 0)
14715 if (tg3_flag(tp
, NO_NVRAM
)) {
14716 strcat(tp
->fw_ver
, "sb");
14720 if (tg3_nvram_read(tp
, 0, &val
))
14723 if (val
== TG3_EEPROM_MAGIC
)
14724 tg3_read_bc_ver(tp
);
14725 else if ((val
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
)
14726 tg3_read_sb_ver(tp
, val
);
14727 else if ((val
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
14728 tg3_read_hwsb_ver(tp
);
14730 if (tg3_flag(tp
, ENABLE_ASF
)) {
14731 if (tg3_flag(tp
, ENABLE_APE
)) {
14732 tg3_probe_ncsi(tp
);
14734 tg3_read_dash_ver(tp
);
14735 } else if (!vpd_vers
) {
14736 tg3_read_mgmtfw_ver(tp
);
14740 tp
->fw_ver
[TG3_VER_SIZE
- 1] = 0;
14743 static inline u32
tg3_rx_ret_ring_size(struct tg3
*tp
)
14745 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
14746 return TG3_RX_RET_MAX_SIZE_5717
;
14747 else if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))
14748 return TG3_RX_RET_MAX_SIZE_5700
;
14750 return TG3_RX_RET_MAX_SIZE_5705
;
14753 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets
) = {
14754 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
14755 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_8131_BRIDGE
) },
14756 { PCI_DEVICE(PCI_VENDOR_ID_VIA
, PCI_DEVICE_ID_VIA_8385_0
) },
14760 static struct pci_dev
*tg3_find_peer(struct tg3
*tp
)
14762 struct pci_dev
*peer
;
14763 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
14765 for (func
= 0; func
< 8; func
++) {
14766 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
14767 if (peer
&& peer
!= tp
->pdev
)
14771 /* 5704 can be configured in single-port mode, set peer to
14772 * tp->pdev in that case.
14780 * We don't need to keep the refcount elevated; there's no way
14781 * to remove one half of this device without removing the other
14788 static void tg3_detect_asic_rev(struct tg3
*tp
, u32 misc_ctrl_reg
)
14790 tp
->pci_chip_rev_id
= misc_ctrl_reg
>> MISC_HOST_CTRL_CHIPREV_SHIFT
;
14791 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_USE_PROD_ID_REG
) {
14794 /* All devices that use the alternate
14795 * ASIC REV location have a CPMU.
14797 tg3_flag_set(tp
, CPMU_PRESENT
);
14799 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
14800 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
14801 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
14802 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
14803 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
)
14804 reg
= TG3PCI_GEN2_PRODID_ASICREV
;
14805 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
14806 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
14807 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
14808 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
14809 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
14810 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
14811 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
||
14812 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
||
14813 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
||
14814 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
14815 reg
= TG3PCI_GEN15_PRODID_ASICREV
;
14817 reg
= TG3PCI_PRODID_ASICREV
;
14819 pci_read_config_dword(tp
->pdev
, reg
, &tp
->pci_chip_rev_id
);
14822 /* Wrong chip ID in 5752 A0. This code can be removed later
14823 * as A0 is not in production.
14825 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
14826 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
14828 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5717_C0
)
14829 tp
->pci_chip_rev_id
= CHIPREV_ID_5720_A0
;
14831 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
14832 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
14833 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
14834 tg3_flag_set(tp
, 5717_PLUS
);
14836 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57765
||
14837 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57766
)
14838 tg3_flag_set(tp
, 57765_CLASS
);
14840 if (tg3_flag(tp
, 57765_CLASS
) || tg3_flag(tp
, 5717_PLUS
))
14841 tg3_flag_set(tp
, 57765_PLUS
);
14843 /* Intentionally exclude ASIC_REV_5906 */
14844 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
14845 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
14846 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
14847 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
14848 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
14849 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
14850 tg3_flag(tp
, 57765_PLUS
))
14851 tg3_flag_set(tp
, 5755_PLUS
);
14853 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
14854 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)
14855 tg3_flag_set(tp
, 5780_CLASS
);
14857 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
14858 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
14859 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
||
14860 tg3_flag(tp
, 5755_PLUS
) ||
14861 tg3_flag(tp
, 5780_CLASS
))
14862 tg3_flag_set(tp
, 5750_PLUS
);
14864 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
14865 tg3_flag(tp
, 5750_PLUS
))
14866 tg3_flag_set(tp
, 5705_PLUS
);
14869 static bool tg3_10_100_only_device(struct tg3
*tp
,
14870 const struct pci_device_id
*ent
)
14872 u32 grc_misc_cfg
= tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
;
14874 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
14875 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
14876 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
14879 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_10_100_ONLY
) {
14880 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
14881 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_5705_10_100
)
14891 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
14894 u32 pci_state_reg
, grc_misc_cfg
;
14899 /* Force memory write invalidate off. If we leave it on,
14900 * then on 5700_BX chips we have to enable a workaround.
14901 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14902 * to match the cacheline size. The Broadcom driver have this
14903 * workaround but turns MWI off all the times so never uses
14904 * it. This seems to suggest that the workaround is insufficient.
14906 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
14907 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
14908 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
14910 /* Important! -- Make sure register accesses are byteswapped
14911 * correctly. Also, for those chips that require it, make
14912 * sure that indirect register accesses are enabled before
14913 * the first operation.
14915 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
14917 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
14918 MISC_HOST_CTRL_CHIPREV
);
14919 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
14920 tp
->misc_host_ctrl
);
14922 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
14924 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14925 * we need to disable memory and use config. cycles
14926 * only to access all registers. The 5702/03 chips
14927 * can mistakenly decode the special cycles from the
14928 * ICH chipsets as memory write cycles, causing corruption
14929 * of register and memory space. Only certain ICH bridges
14930 * will drive special cycles with non-zero data during the
14931 * address phase which can fall within the 5703's address
14932 * range. This is not an ICH bug as the PCI spec allows
14933 * non-zero address during special cycles. However, only
14934 * these ICH bridges are known to drive non-zero addresses
14935 * during special cycles.
14937 * Since special cycles do not cross PCI bridges, we only
14938 * enable this workaround if the 5703 is on the secondary
14939 * bus of these ICH bridges.
14941 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
14942 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
14943 static struct tg3_dev_id
{
14947 } ich_chipsets
[] = {
14948 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
14950 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
14952 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
14954 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
14958 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
14959 struct pci_dev
*bridge
= NULL
;
14961 while (pci_id
->vendor
!= 0) {
14962 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
14968 if (pci_id
->rev
!= PCI_ANY_ID
) {
14969 if (bridge
->revision
> pci_id
->rev
)
14972 if (bridge
->subordinate
&&
14973 (bridge
->subordinate
->number
==
14974 tp
->pdev
->bus
->number
)) {
14975 tg3_flag_set(tp
, ICH_WORKAROUND
);
14976 pci_dev_put(bridge
);
14982 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
14983 static struct tg3_dev_id
{
14986 } bridge_chipsets
[] = {
14987 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
14988 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
14991 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
14992 struct pci_dev
*bridge
= NULL
;
14994 while (pci_id
->vendor
!= 0) {
14995 bridge
= pci_get_device(pci_id
->vendor
,
15002 if (bridge
->subordinate
&&
15003 (bridge
->subordinate
->number
<=
15004 tp
->pdev
->bus
->number
) &&
15005 (bridge
->subordinate
->busn_res
.end
>=
15006 tp
->pdev
->bus
->number
)) {
15007 tg3_flag_set(tp
, 5701_DMA_BUG
);
15008 pci_dev_put(bridge
);
15014 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15015 * DMA addresses > 40-bit. This bridge may have other additional
15016 * 57xx devices behind it in some 4-port NIC designs for example.
15017 * Any tg3 device found behind the bridge will also need the 40-bit
15020 if (tg3_flag(tp
, 5780_CLASS
)) {
15021 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15022 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15024 struct pci_dev
*bridge
= NULL
;
15027 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15028 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15030 if (bridge
&& bridge
->subordinate
&&
15031 (bridge
->subordinate
->number
<=
15032 tp
->pdev
->bus
->number
) &&
15033 (bridge
->subordinate
->busn_res
.end
>=
15034 tp
->pdev
->bus
->number
)) {
15035 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15036 pci_dev_put(bridge
);
15042 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
15043 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)
15044 tp
->pdev_peer
= tg3_find_peer(tp
);
15046 /* Determine TSO capabilities */
15047 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
)
15048 ; /* Do nothing. HW bug. */
15049 else if (tg3_flag(tp
, 57765_PLUS
))
15050 tg3_flag_set(tp
, HW_TSO_3
);
15051 else if (tg3_flag(tp
, 5755_PLUS
) ||
15052 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
15053 tg3_flag_set(tp
, HW_TSO_2
);
15054 else if (tg3_flag(tp
, 5750_PLUS
)) {
15055 tg3_flag_set(tp
, HW_TSO_1
);
15056 tg3_flag_set(tp
, TSO_BUG
);
15057 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
&&
15058 tp
->pci_chip_rev_id
>= CHIPREV_ID_5750_C2
)
15059 tg3_flag_clear(tp
, TSO_BUG
);
15060 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
15061 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
15062 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) {
15063 tg3_flag_set(tp
, TSO_BUG
);
15064 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)
15065 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15067 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15070 /* Selectively allow TSO based on operating conditions */
15071 if (tg3_flag(tp
, HW_TSO_1
) ||
15072 tg3_flag(tp
, HW_TSO_2
) ||
15073 tg3_flag(tp
, HW_TSO_3
) ||
15075 /* For firmware TSO, assume ASF is disabled.
15076 * We'll disable TSO later if we discover ASF
15077 * is enabled in tg3_get_eeprom_hw_cfg().
15079 tg3_flag_set(tp
, TSO_CAPABLE
);
15081 tg3_flag_clear(tp
, TSO_CAPABLE
);
15082 tg3_flag_clear(tp
, TSO_BUG
);
15083 tp
->fw_needed
= NULL
;
15086 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
)
15087 tp
->fw_needed
= FIRMWARE_TG3
;
15091 if (tg3_flag(tp
, 5750_PLUS
)) {
15092 tg3_flag_set(tp
, SUPPORT_MSI
);
15093 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
||
15094 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
||
15095 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
&&
15096 tp
->pci_chip_rev_id
<= CHIPREV_ID_5714_A2
&&
15097 tp
->pdev_peer
== tp
->pdev
))
15098 tg3_flag_clear(tp
, SUPPORT_MSI
);
15100 if (tg3_flag(tp
, 5755_PLUS
) ||
15101 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
15102 tg3_flag_set(tp
, 1SHOT_MSI
);
15105 if (tg3_flag(tp
, 57765_PLUS
)) {
15106 tg3_flag_set(tp
, SUPPORT_MSIX
);
15107 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15113 if (tp
->irq_max
> 1) {
15114 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15115 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15117 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
15118 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
15119 tp
->txq_max
= tp
->irq_max
- 1;
15122 if (tg3_flag(tp
, 5755_PLUS
) ||
15123 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
15124 tg3_flag_set(tp
, SHORT_DMA_BUG
);
15126 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
)
15127 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
15129 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
15130 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
15131 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
15132 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
15134 if (tg3_flag(tp
, 57765_PLUS
) &&
15135 tp
->pci_chip_rev_id
!= CHIPREV_ID_5719_A0
)
15136 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
15138 if (!tg3_flag(tp
, 5705_PLUS
) ||
15139 tg3_flag(tp
, 5780_CLASS
) ||
15140 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
15141 tg3_flag_set(tp
, JUMBO_CAPABLE
);
15143 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15146 if (pci_is_pcie(tp
->pdev
)) {
15149 tg3_flag_set(tp
, PCI_EXPRESS
);
15151 pcie_capability_read_word(tp
->pdev
, PCI_EXP_LNKCTL
, &lnkctl
);
15152 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
15153 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
15155 tg3_flag_clear(tp
, HW_TSO_2
);
15156 tg3_flag_clear(tp
, TSO_CAPABLE
);
15158 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
15159 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
15160 tp
->pci_chip_rev_id
== CHIPREV_ID_57780_A0
||
15161 tp
->pci_chip_rev_id
== CHIPREV_ID_57780_A1
)
15162 tg3_flag_set(tp
, CLKREQ_BUG
);
15163 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5717_A0
) {
15164 tg3_flag_set(tp
, L1PLLPD_EN
);
15166 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
) {
15167 /* BCM5785 devices are effectively PCIe devices, and should
15168 * follow PCIe codepaths, but do not have a PCIe capabilities
15171 tg3_flag_set(tp
, PCI_EXPRESS
);
15172 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
15173 tg3_flag(tp
, 5780_CLASS
)) {
15174 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
15175 if (!tp
->pcix_cap
) {
15176 dev_err(&tp
->pdev
->dev
,
15177 "Cannot find PCI-X capability, aborting\n");
15181 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
15182 tg3_flag_set(tp
, PCIX_MODE
);
15185 /* If we have an AMD 762 or VIA K8T800 chipset, write
15186 * reordering to the mailbox registers done by the host
15187 * controller can cause major troubles. We read back from
15188 * every mailbox register write to force the writes to be
15189 * posted to the chip in order.
15191 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
15192 !tg3_flag(tp
, PCI_EXPRESS
))
15193 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
15195 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
15196 &tp
->pci_cacheline_sz
);
15197 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15198 &tp
->pci_lat_timer
);
15199 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
15200 tp
->pci_lat_timer
< 64) {
15201 tp
->pci_lat_timer
= 64;
15202 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15203 tp
->pci_lat_timer
);
15206 /* Important! -- It is critical that the PCI-X hw workaround
15207 * situation is decided before the first MMIO register access.
15209 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
15210 /* 5700 BX chips need to have their TX producer index
15211 * mailboxes written twice to workaround a bug.
15213 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
15215 /* If we are in PCI-X mode, enable register write workaround.
15217 * The workaround is to use indirect register accesses
15218 * for all chip writes not to mailbox registers.
15220 if (tg3_flag(tp
, PCIX_MODE
)) {
15223 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
15225 /* The chip can have it's power management PCI config
15226 * space registers clobbered due to this bug.
15227 * So explicitly force the chip into D0 here.
15229 pci_read_config_dword(tp
->pdev
,
15230 tp
->pm_cap
+ PCI_PM_CTRL
,
15232 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
15233 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
15234 pci_write_config_dword(tp
->pdev
,
15235 tp
->pm_cap
+ PCI_PM_CTRL
,
15238 /* Also, force SERR#/PERR# in PCI command. */
15239 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15240 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
15241 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15245 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
15246 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
15247 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
15248 tg3_flag_set(tp
, PCI_32BIT
);
15250 /* Chip-specific fixup from Broadcom driver */
15251 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
15252 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
15253 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
15254 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
15257 /* Default fast path register access methods */
15258 tp
->read32
= tg3_read32
;
15259 tp
->write32
= tg3_write32
;
15260 tp
->read32_mbox
= tg3_read32
;
15261 tp
->write32_mbox
= tg3_write32
;
15262 tp
->write32_tx_mbox
= tg3_write32
;
15263 tp
->write32_rx_mbox
= tg3_write32
;
15265 /* Various workaround register access methods */
15266 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
))
15267 tp
->write32
= tg3_write_indirect_reg32
;
15268 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
15269 (tg3_flag(tp
, PCI_EXPRESS
) &&
15270 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
)) {
15272 * Back to back register writes can cause problems on these
15273 * chips, the workaround is to read back all reg writes
15274 * except those to mailbox regs.
15276 * See tg3_write_indirect_reg32().
15278 tp
->write32
= tg3_write_flush_reg32
;
15281 if (tg3_flag(tp
, TXD_MBOX_HWBUG
) || tg3_flag(tp
, MBOX_WRITE_REORDER
)) {
15282 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
15283 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
15284 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
15287 if (tg3_flag(tp
, ICH_WORKAROUND
)) {
15288 tp
->read32
= tg3_read_indirect_reg32
;
15289 tp
->write32
= tg3_write_indirect_reg32
;
15290 tp
->read32_mbox
= tg3_read_indirect_mbox
;
15291 tp
->write32_mbox
= tg3_write_indirect_mbox
;
15292 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
15293 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
15298 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15299 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
15300 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15302 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
15303 tp
->read32_mbox
= tg3_read32_mbox_5906
;
15304 tp
->write32_mbox
= tg3_write32_mbox_5906
;
15305 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
15306 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
15309 if (tp
->write32
== tg3_write_indirect_reg32
||
15310 (tg3_flag(tp
, PCIX_MODE
) &&
15311 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
15312 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)))
15313 tg3_flag_set(tp
, SRAM_USE_CONFIG
);
15315 /* The memory arbiter has to be enabled in order for SRAM accesses
15316 * to succeed. Normally on powerup the tg3 chip firmware will make
15317 * sure it is enabled, but other entities such as system netboot
15318 * code might disable it.
15320 val
= tr32(MEMARB_MODE
);
15321 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
15323 tp
->pci_fn
= PCI_FUNC(tp
->pdev
->devfn
) & 3;
15324 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
15325 tg3_flag(tp
, 5780_CLASS
)) {
15326 if (tg3_flag(tp
, PCIX_MODE
)) {
15327 pci_read_config_dword(tp
->pdev
,
15328 tp
->pcix_cap
+ PCI_X_STATUS
,
15330 tp
->pci_fn
= val
& 0x7;
15332 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
) {
15333 tg3_read_mem(tp
, NIC_SRAM_CPMU_STATUS
, &val
);
15334 if ((val
& NIC_SRAM_CPMUSTAT_SIG_MSK
) ==
15335 NIC_SRAM_CPMUSTAT_SIG
) {
15336 tp
->pci_fn
= val
& TG3_CPMU_STATUS_FMSK_5717
;
15337 tp
->pci_fn
= tp
->pci_fn
? 1 : 0;
15339 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
15340 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
) {
15341 tg3_read_mem(tp
, NIC_SRAM_CPMU_STATUS
, &val
);
15342 if ((val
& NIC_SRAM_CPMUSTAT_SIG_MSK
) ==
15343 NIC_SRAM_CPMUSTAT_SIG
) {
15344 tp
->pci_fn
= (val
& TG3_CPMU_STATUS_FMSK_5719
) >>
15345 TG3_CPMU_STATUS_FSHFT_5719
;
15349 /* Get eeprom hw config before calling tg3_set_power_state().
15350 * In particular, the TG3_FLAG_IS_NIC flag must be
15351 * determined before calling tg3_set_power_state() so that
15352 * we know whether or not to switch out of Vaux power.
15353 * When the flag is set, it means that GPIO1 is used for eeprom
15354 * write protect and also implies that it is a LOM where GPIOs
15355 * are not used to switch power.
15357 tg3_get_eeprom_hw_cfg(tp
);
15359 if (tp
->fw_needed
&& tg3_flag(tp
, ENABLE_ASF
)) {
15360 tg3_flag_clear(tp
, TSO_CAPABLE
);
15361 tg3_flag_clear(tp
, TSO_BUG
);
15362 tp
->fw_needed
= NULL
;
15365 if (tg3_flag(tp
, ENABLE_APE
)) {
15366 /* Allow reads and writes to the
15367 * APE register and memory space.
15369 pci_state_reg
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
15370 PCISTATE_ALLOW_APE_SHMEM_WR
|
15371 PCISTATE_ALLOW_APE_PSPACE_WR
;
15372 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15375 tg3_ape_lock_init(tp
);
15378 /* Set up tp->grc_local_ctrl before calling
15379 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
15380 * will bring 5700's external PHY out of reset.
15381 * It is also used as eeprom write protect on LOMs.
15383 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
15384 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
15385 tg3_flag(tp
, EEPROM_WRITE_PROT
))
15386 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
15387 GRC_LCLCTRL_GPIO_OUTPUT1
);
15388 /* Unused GPIO3 must be driven as output on 5752 because there
15389 * are no pull-up resistors on unused GPIO pins.
15391 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
15392 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
15394 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
15395 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
||
15396 tg3_flag(tp
, 57765_CLASS
))
15397 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
15399 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
15400 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
15401 /* Turn off the debug UART. */
15402 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
15403 if (tg3_flag(tp
, IS_NIC
))
15404 /* Keep VMain power. */
15405 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
15406 GRC_LCLCTRL_GPIO_OUTPUT0
;
15409 /* Switch out of Vaux if it is a NIC */
15410 tg3_pwrsrc_switch_to_vmain(tp
);
15412 /* Derive initial jumbo mode from MTU assigned in
15413 * ether_setup() via the alloc_etherdev() call
15415 if (tp
->dev
->mtu
> ETH_DATA_LEN
&& !tg3_flag(tp
, 5780_CLASS
))
15416 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
15418 /* Determine WakeOnLan speed to use. */
15419 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
15420 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
15421 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
15422 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
15423 tg3_flag_clear(tp
, WOL_SPEED_100MB
);
15425 tg3_flag_set(tp
, WOL_SPEED_100MB
);
15428 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
15429 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
15431 /* A few boards don't want Ethernet@WireSpeed phy feature */
15432 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
15433 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
15434 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
15435 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
15436 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) ||
15437 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
15438 tp
->phy_flags
|= TG3_PHYFLG_NO_ETH_WIRE_SPEED
;
15440 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
15441 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
15442 tp
->phy_flags
|= TG3_PHYFLG_ADC_BUG
;
15443 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
15444 tp
->phy_flags
|= TG3_PHYFLG_5704_A0_BUG
;
15446 if (tg3_flag(tp
, 5705_PLUS
) &&
15447 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
15448 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5785
&&
15449 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_57780
&&
15450 !tg3_flag(tp
, 57765_PLUS
)) {
15451 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
15452 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
15453 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
||
15454 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
) {
15455 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
15456 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
15457 tp
->phy_flags
|= TG3_PHYFLG_JITTER_BUG
;
15458 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
15459 tp
->phy_flags
|= TG3_PHYFLG_ADJUST_TRIM
;
15461 tp
->phy_flags
|= TG3_PHYFLG_BER_BUG
;
15464 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
15465 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) {
15466 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
15467 if (tp
->phy_otp
== 0)
15468 tp
->phy_otp
= TG3_OTP_DEFAULT
;
15471 if (tg3_flag(tp
, CPMU_PRESENT
))
15472 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
15474 tp
->mi_mode
= MAC_MI_MODE_BASE
;
15476 tp
->coalesce_mode
= 0;
15477 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
15478 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
15479 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
15481 /* Set these bits to enable statistics workaround. */
15482 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5717
||
15483 tp
->pci_chip_rev_id
== CHIPREV_ID_5719_A0
||
15484 tp
->pci_chip_rev_id
== CHIPREV_ID_5720_A0
) {
15485 tp
->coalesce_mode
|= HOSTCC_MODE_ATTN
;
15486 tp
->grc_mode
|= GRC_MODE_IRQ_ON_FLOW_ATTN
;
15489 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
15490 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
15491 tg3_flag_set(tp
, USE_PHYLIB
);
15493 err
= tg3_mdio_init(tp
);
15497 /* Initialize data/descriptor byte/word swapping. */
15498 val
= tr32(GRC_MODE
);
15499 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
15500 val
&= (GRC_MODE_BYTE_SWAP_B2HRX_DATA
|
15501 GRC_MODE_WORD_SWAP_B2HRX_DATA
|
15502 GRC_MODE_B2HRX_ENABLE
|
15503 GRC_MODE_HTX2B_ENABLE
|
15504 GRC_MODE_HOST_STACKUP
);
15506 val
&= GRC_MODE_HOST_STACKUP
;
15508 tw32(GRC_MODE
, val
| tp
->grc_mode
);
15510 tg3_switch_clocks(tp
);
15512 /* Clear this out for sanity. */
15513 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
15515 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15517 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
15518 !tg3_flag(tp
, PCIX_TARGET_HWBUG
)) {
15519 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
15521 if (chiprevid
== CHIPREV_ID_5701_A0
||
15522 chiprevid
== CHIPREV_ID_5701_B0
||
15523 chiprevid
== CHIPREV_ID_5701_B2
||
15524 chiprevid
== CHIPREV_ID_5701_B5
) {
15525 void __iomem
*sram_base
;
15527 /* Write some dummy words into the SRAM status block
15528 * area, see if it reads back correctly. If the return
15529 * value is bad, force enable the PCIX workaround.
15531 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
15533 writel(0x00000000, sram_base
);
15534 writel(0x00000000, sram_base
+ 4);
15535 writel(0xffffffff, sram_base
+ 4);
15536 if (readl(sram_base
) != 0x00000000)
15537 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
15542 tg3_nvram_init(tp
);
15544 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
15545 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
15547 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
15548 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
15549 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
15550 tg3_flag_set(tp
, IS_5788
);
15552 if (!tg3_flag(tp
, IS_5788
) &&
15553 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
15554 tg3_flag_set(tp
, TAGGED_STATUS
);
15555 if (tg3_flag(tp
, TAGGED_STATUS
)) {
15556 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
15557 HOSTCC_MODE_CLRTICK_TXBD
);
15559 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
15560 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15561 tp
->misc_host_ctrl
);
15564 /* Preserve the APE MAC_MODE bits */
15565 if (tg3_flag(tp
, ENABLE_APE
))
15566 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
15570 if (tg3_10_100_only_device(tp
, ent
))
15571 tp
->phy_flags
|= TG3_PHYFLG_10_100_ONLY
;
15573 err
= tg3_phy_probe(tp
);
15575 dev_err(&tp
->pdev
->dev
, "phy probe failed, err %d\n", err
);
15576 /* ... but do not return immediately ... */
15581 tg3_read_fw_ver(tp
);
15583 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
15584 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
15586 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
15587 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
15589 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
15592 /* 5700 {AX,BX} chips have a broken status block link
15593 * change bit implementation, so we must use the
15594 * status register in those cases.
15596 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
15597 tg3_flag_set(tp
, USE_LINKCHG_REG
);
15599 tg3_flag_clear(tp
, USE_LINKCHG_REG
);
15601 /* The led_ctrl is set during tg3_phy_probe, here we might
15602 * have to force the link status polling mechanism based
15603 * upon subsystem IDs.
15605 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
15606 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
15607 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
15608 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
15609 tg3_flag_set(tp
, USE_LINKCHG_REG
);
15612 /* For all SERDES we poll the MAC status register. */
15613 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
15614 tg3_flag_set(tp
, POLL_SERDES
);
15616 tg3_flag_clear(tp
, POLL_SERDES
);
15618 tp
->rx_offset
= NET_SKB_PAD
+ NET_IP_ALIGN
;
15619 tp
->rx_copy_thresh
= TG3_RX_COPY_THRESHOLD
;
15620 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
15621 tg3_flag(tp
, PCIX_MODE
)) {
15622 tp
->rx_offset
= NET_SKB_PAD
;
15623 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15624 tp
->rx_copy_thresh
= ~(u16
)0;
15628 tp
->rx_std_ring_mask
= TG3_RX_STD_RING_SIZE(tp
) - 1;
15629 tp
->rx_jmb_ring_mask
= TG3_RX_JMB_RING_SIZE(tp
) - 1;
15630 tp
->rx_ret_ring_mask
= tg3_rx_ret_ring_size(tp
) - 1;
15632 tp
->rx_std_max_post
= tp
->rx_std_ring_mask
+ 1;
15634 /* Increment the rx prod index on the rx std ring by at most
15635 * 8 for these chips to workaround hw errata.
15637 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
15638 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
15639 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
15640 tp
->rx_std_max_post
= 8;
15642 if (tg3_flag(tp
, ASPM_WORKAROUND
))
15643 tp
->pwrmgmt_thresh
= tr32(PCIE_PWR_MGMT_THRESH
) &
15644 PCIE_PWR_MGMT_L1_THRESH_MSK
;
#ifdef CONFIG_SPARC
/* Fetch the MAC address from the OpenFirmware "local-mac-address"
 * property of this PCI device's device-tree node.
 *
 * Returns 0 on success (dev_addr and perm_addr populated), -ENODEV if
 * the property is absent or not exactly 6 bytes long.
 */
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		memcpy(dev->perm_addr, dev->dev_addr, 6);
		return 0;
	}
	return -ENODEV;
}

/* Last-resort SPARC fallback: use the system-wide IDPROM Ethernet
 * address.  Always succeeds.
 */
static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
15677 static int tg3_get_device_address(struct tg3
*tp
)
15679 struct net_device
*dev
= tp
->dev
;
15680 u32 hi
, lo
, mac_offset
;
15683 #ifdef CONFIG_SPARC
15684 if (!tg3_get_macaddr_sparc(tp
))
15689 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
15690 tg3_flag(tp
, 5780_CLASS
)) {
15691 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
15693 if (tg3_nvram_lock(tp
))
15694 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
15696 tg3_nvram_unlock(tp
);
15697 } else if (tg3_flag(tp
, 5717_PLUS
)) {
15698 if (tp
->pci_fn
& 1)
15700 if (tp
->pci_fn
> 1)
15701 mac_offset
+= 0x18c;
15702 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
15705 /* First try to get it from MAC address mailbox. */
15706 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
15707 if ((hi
>> 16) == 0x484b) {
15708 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
15709 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
15711 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
15712 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
15713 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
15714 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
15715 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
15717 /* Some old bootcode may report a 0 MAC address in SRAM */
15718 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
15721 /* Next, try NVRAM. */
15722 if (!tg3_flag(tp
, NO_NVRAM
) &&
15723 !tg3_nvram_read_be32(tp
, mac_offset
+ 0, &hi
) &&
15724 !tg3_nvram_read_be32(tp
, mac_offset
+ 4, &lo
)) {
15725 memcpy(&dev
->dev_addr
[0], ((char *)&hi
) + 2, 2);
15726 memcpy(&dev
->dev_addr
[2], (char *)&lo
, sizeof(lo
));
15728 /* Finally just fetch it out of the MAC control regs. */
15730 hi
= tr32(MAC_ADDR_0_HIGH
);
15731 lo
= tr32(MAC_ADDR_0_LOW
);
15733 dev
->dev_addr
[5] = lo
& 0xff;
15734 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
15735 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
15736 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
15737 dev
->dev_addr
[1] = hi
& 0xff;
15738 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
15742 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
15743 #ifdef CONFIG_SPARC
15744 if (!tg3_get_default_macaddr_sparc(tp
))
15749 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
15753 #define BOUNDARY_SINGLE_CACHELINE 1
15754 #define BOUNDARY_MULTI_CACHELINE 2
15756 static u32
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
15758 int cacheline_size
;
15762 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
15764 cacheline_size
= 1024;
15766 cacheline_size
= (int) byte
* 4;
15768 /* On 5703 and later chips, the boundary bits have no
15771 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
15772 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
15773 !tg3_flag(tp
, PCI_EXPRESS
))
15776 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15777 goal
= BOUNDARY_MULTI_CACHELINE
;
15779 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15780 goal
= BOUNDARY_SINGLE_CACHELINE
;
15786 if (tg3_flag(tp
, 57765_PLUS
)) {
15787 val
= goal
? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
15794 /* PCI controllers on most RISC systems tend to disconnect
15795 * when a device tries to burst across a cache-line boundary.
15796 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15798 * Unfortunately, for PCI-E there are only limited
15799 * write-side controls for this, and thus for reads
15800 * we will still get the disconnects. We'll also waste
15801 * these PCI cycles for both read and write for chips
15802 * other than 5700 and 5701 which do not implement the
15805 if (tg3_flag(tp
, PCIX_MODE
) && !tg3_flag(tp
, PCI_EXPRESS
)) {
15806 switch (cacheline_size
) {
15811 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
15812 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
15813 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
15815 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
15816 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
15821 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
15822 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
15826 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
15827 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
15830 } else if (tg3_flag(tp
, PCI_EXPRESS
)) {
15831 switch (cacheline_size
) {
15835 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
15836 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
15837 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
15843 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
15844 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
15848 switch (cacheline_size
) {
15850 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
15851 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
15852 DMA_RWCTRL_WRITE_BNDRY_16
);
15857 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
15858 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
15859 DMA_RWCTRL_WRITE_BNDRY_32
);
15864 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
15865 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
15866 DMA_RWCTRL_WRITE_BNDRY_64
);
15871 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
15872 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
15873 DMA_RWCTRL_WRITE_BNDRY_128
);
15878 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
15879 DMA_RWCTRL_WRITE_BNDRY_256
);
15882 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
15883 DMA_RWCTRL_WRITE_BNDRY_512
);
15887 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
15888 DMA_RWCTRL_WRITE_BNDRY_1024
);
15897 static int tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
,
15898 int size
, int to_device
)
15900 struct tg3_internal_buffer_desc test_desc
;
15901 u32 sram_dma_descs
;
15904 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
15906 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
15907 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
15908 tw32(RDMAC_STATUS
, 0);
15909 tw32(WDMAC_STATUS
, 0);
15911 tw32(BUFMGR_MODE
, 0);
15912 tw32(FTQ_RESET
, 0);
15914 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
15915 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
15916 test_desc
.nic_mbuf
= 0x00002100;
15917 test_desc
.len
= size
;
15920 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15921 * the *second* time the tg3 driver was getting loaded after an
15924 * Broadcom tells me:
15925 * ...the DMA engine is connected to the GRC block and a DMA
15926 * reset may affect the GRC block in some unpredictable way...
15927 * The behavior of resets to individual blocks has not been tested.
15929 * Broadcom noted the GRC reset will also reset all sub-components.
15932 test_desc
.cqid_sqid
= (13 << 8) | 2;
15934 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
15937 test_desc
.cqid_sqid
= (16 << 8) | 7;
15939 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
15942 test_desc
.flags
= 0x00000005;
15944 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
15947 val
= *(((u32
*)&test_desc
) + i
);
15948 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
15949 sram_dma_descs
+ (i
* sizeof(u32
)));
15950 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
15952 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
15955 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
15957 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
15960 for (i
= 0; i
< 40; i
++) {
15964 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
15966 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
15967 if ((val
& 0xffff) == sram_dma_descs
) {
15978 #define TEST_BUFFER_SIZE 0x2000
15980 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets
) = {
15981 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
15985 static int tg3_test_dma(struct tg3
*tp
)
15987 dma_addr_t buf_dma
;
15988 u32
*buf
, saved_dma_rwctrl
;
15991 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
15992 &buf_dma
, GFP_KERNEL
);
15998 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
15999 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
16001 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
16003 if (tg3_flag(tp
, 57765_PLUS
))
16006 if (tg3_flag(tp
, PCI_EXPRESS
)) {
16007 /* DMA read watermark not used on PCIE */
16008 tp
->dma_rwctrl
|= 0x00180000;
16009 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
16010 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
16011 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
16012 tp
->dma_rwctrl
|= 0x003f0000;
16014 tp
->dma_rwctrl
|= 0x003f000f;
16016 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
16017 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
16018 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
16019 u32 read_water
= 0x7;
16021 /* If the 5704 is behind the EPB bridge, we can
16022 * do the less restrictive ONE_DMA workaround for
16023 * better performance.
16025 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
16026 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
16027 tp
->dma_rwctrl
|= 0x8000;
16028 else if (ccval
== 0x6 || ccval
== 0x7)
16029 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
16031 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
)
16033 /* Set bit 23 to enable PCIX hw bug fix */
16035 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
16036 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
16038 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
16039 /* 5780 always in PCIX mode */
16040 tp
->dma_rwctrl
|= 0x00144000;
16041 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
16042 /* 5714 always in PCIX mode */
16043 tp
->dma_rwctrl
|= 0x00148000;
16045 tp
->dma_rwctrl
|= 0x001b000f;
16049 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
16050 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
16051 tp
->dma_rwctrl
&= 0xfffffff0;
16053 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
16054 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
16055 /* Remove this if it causes problems for some boards. */
16056 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
16058 /* On 5700/5701 chips, we need to set this bit.
16059 * Otherwise the chip will issue cacheline transactions
16060 * to streamable DMA memory with not all the byte
16061 * enables turned on. This is an error on several
16062 * RISC PCI controllers, in particular sparc64.
16064 * On 5703/5704 chips, this bit has been reassigned
16065 * a different meaning. In particular, it is used
16066 * on those chips to enable a PCI-X workaround.
16068 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
16071 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16074 /* Unneeded, already done by tg3_get_invariants. */
16075 tg3_switch_clocks(tp
);
16078 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
16079 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
16082 /* It is best to perform DMA test with maximum write burst size
16083 * to expose the 5700/5701 write DMA bug.
16085 saved_dma_rwctrl
= tp
->dma_rwctrl
;
16086 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
16087 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16092 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
16095 /* Send the buffer to the chip. */
16096 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
16098 dev_err(&tp
->pdev
->dev
,
16099 "%s: Buffer write failed. err = %d\n",
16105 /* validate data reached card RAM correctly. */
16106 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
16108 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
16109 if (le32_to_cpu(val
) != p
[i
]) {
16110 dev_err(&tp
->pdev
->dev
,
16111 "%s: Buffer corrupted on device! "
16112 "(%d != %d)\n", __func__
, val
, i
);
16113 /* ret = -ENODEV here? */
16118 /* Now read it back. */
16119 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
16121 dev_err(&tp
->pdev
->dev
, "%s: Buffer read failed. "
16122 "err = %d\n", __func__
, ret
);
16127 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
16131 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
16132 DMA_RWCTRL_WRITE_BNDRY_16
) {
16133 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
16134 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
16135 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16138 dev_err(&tp
->pdev
->dev
,
16139 "%s: Buffer corrupted on read back! "
16140 "(%d != %d)\n", __func__
, p
[i
], i
);
16146 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
16152 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
16153 DMA_RWCTRL_WRITE_BNDRY_16
) {
16154 /* DMA test passed without adjusting DMA boundary,
16155 * now look for chipsets that are known to expose the
16156 * DMA bug without failing the test.
16158 if (pci_dev_present(tg3_dma_wait_state_chipsets
)) {
16159 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
16160 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
16162 /* Safe to use the calculated DMA boundary. */
16163 tp
->dma_rwctrl
= saved_dma_rwctrl
;
16166 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16170 dma_free_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
16175 static void tg3_init_bufmgr_config(struct tg3
*tp
)
16177 if (tg3_flag(tp
, 57765_PLUS
)) {
16178 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
16179 DEFAULT_MB_RDMA_LOW_WATER_5705
;
16180 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16181 DEFAULT_MB_MACRX_LOW_WATER_57765
;
16182 tp
->bufmgr_config
.mbuf_high_water
=
16183 DEFAULT_MB_HIGH_WATER_57765
;
16185 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
16186 DEFAULT_MB_RDMA_LOW_WATER_5705
;
16187 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
16188 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765
;
16189 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
16190 DEFAULT_MB_HIGH_WATER_JUMBO_57765
;
16191 } else if (tg3_flag(tp
, 5705_PLUS
)) {
16192 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
16193 DEFAULT_MB_RDMA_LOW_WATER_5705
;
16194 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16195 DEFAULT_MB_MACRX_LOW_WATER_5705
;
16196 tp
->bufmgr_config
.mbuf_high_water
=
16197 DEFAULT_MB_HIGH_WATER_5705
;
16198 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
16199 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16200 DEFAULT_MB_MACRX_LOW_WATER_5906
;
16201 tp
->bufmgr_config
.mbuf_high_water
=
16202 DEFAULT_MB_HIGH_WATER_5906
;
16205 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
16206 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
16207 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
16208 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
16209 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
16210 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
16212 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
16213 DEFAULT_MB_RDMA_LOW_WATER
;
16214 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16215 DEFAULT_MB_MACRX_LOW_WATER
;
16216 tp
->bufmgr_config
.mbuf_high_water
=
16217 DEFAULT_MB_HIGH_WATER
;
16219 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
16220 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
16221 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
16222 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
16223 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
16224 DEFAULT_MB_HIGH_WATER_JUMBO
;
16227 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
16228 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
16231 static char *tg3_phy_string(struct tg3
*tp
)
16233 switch (tp
->phy_id
& TG3_PHY_ID_MASK
) {
16234 case TG3_PHY_ID_BCM5400
: return "5400";
16235 case TG3_PHY_ID_BCM5401
: return "5401";
16236 case TG3_PHY_ID_BCM5411
: return "5411";
16237 case TG3_PHY_ID_BCM5701
: return "5701";
16238 case TG3_PHY_ID_BCM5703
: return "5703";
16239 case TG3_PHY_ID_BCM5704
: return "5704";
16240 case TG3_PHY_ID_BCM5705
: return "5705";
16241 case TG3_PHY_ID_BCM5750
: return "5750";
16242 case TG3_PHY_ID_BCM5752
: return "5752";
16243 case TG3_PHY_ID_BCM5714
: return "5714";
16244 case TG3_PHY_ID_BCM5780
: return "5780";
16245 case TG3_PHY_ID_BCM5755
: return "5755";
16246 case TG3_PHY_ID_BCM5787
: return "5787";
16247 case TG3_PHY_ID_BCM5784
: return "5784";
16248 case TG3_PHY_ID_BCM5756
: return "5722/5756";
16249 case TG3_PHY_ID_BCM5906
: return "5906";
16250 case TG3_PHY_ID_BCM5761
: return "5761";
16251 case TG3_PHY_ID_BCM5718C
: return "5718C";
16252 case TG3_PHY_ID_BCM5718S
: return "5718S";
16253 case TG3_PHY_ID_BCM57765
: return "57765";
16254 case TG3_PHY_ID_BCM5719C
: return "5719C";
16255 case TG3_PHY_ID_BCM5720C
: return "5720C";
16256 case TG3_PHY_ID_BCM8002
: return "8002/serdes";
16257 case 0: return "serdes";
16258 default: return "unknown";
16262 static char *tg3_bus_string(struct tg3
*tp
, char *str
)
16264 if (tg3_flag(tp
, PCI_EXPRESS
)) {
16265 strcpy(str
, "PCI Express");
16267 } else if (tg3_flag(tp
, PCIX_MODE
)) {
16268 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
16270 strcpy(str
, "PCIX:");
16272 if ((clock_ctrl
== 7) ||
16273 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
16274 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
16275 strcat(str
, "133MHz");
16276 else if (clock_ctrl
== 0)
16277 strcat(str
, "33MHz");
16278 else if (clock_ctrl
== 2)
16279 strcat(str
, "50MHz");
16280 else if (clock_ctrl
== 4)
16281 strcat(str
, "66MHz");
16282 else if (clock_ctrl
== 6)
16283 strcat(str
, "100MHz");
16285 strcpy(str
, "PCI:");
16286 if (tg3_flag(tp
, PCI_HIGH_SPEED
))
16287 strcat(str
, "66MHz");
16289 strcat(str
, "33MHz");
16291 if (tg3_flag(tp
, PCI_32BIT
))
16292 strcat(str
, ":32-bit");
16294 strcat(str
, ":64-bit");
16298 static void tg3_init_coal(struct tg3
*tp
)
16300 struct ethtool_coalesce
*ec
= &tp
->coal
;
16302 memset(ec
, 0, sizeof(*ec
));
16303 ec
->cmd
= ETHTOOL_GCOALESCE
;
16304 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
16305 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
16306 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
16307 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
16308 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
16309 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
16310 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
16311 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
16312 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
16314 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
16315 HOSTCC_MODE_CLRTICK_TXBD
)) {
16316 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
16317 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
16318 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
16319 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
16322 if (tg3_flag(tp
, 5705_PLUS
)) {
16323 ec
->rx_coalesce_usecs_irq
= 0;
16324 ec
->tx_coalesce_usecs_irq
= 0;
16325 ec
->stats_block_coalesce_usecs
= 0;
16329 static int tg3_init_one(struct pci_dev
*pdev
,
16330 const struct pci_device_id
*ent
)
16332 struct net_device
*dev
;
16334 int i
, err
, pm_cap
;
16335 u32 sndmbx
, rcvmbx
, intmbx
;
16337 u64 dma_mask
, persist_dma_mask
;
16338 netdev_features_t features
= 0;
16340 printk_once(KERN_INFO
"%s\n", version
);
16342 err
= pci_enable_device(pdev
);
16344 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
16348 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
16350 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
16351 goto err_out_disable_pdev
;
16354 pci_set_master(pdev
);
16356 /* Find power-management capability. */
16357 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
16359 dev_err(&pdev
->dev
,
16360 "Cannot find Power Management capability, aborting\n");
16362 goto err_out_free_res
;
16365 err
= pci_set_power_state(pdev
, PCI_D0
);
16367 dev_err(&pdev
->dev
, "Transition to D0 failed, aborting\n");
16368 goto err_out_free_res
;
16371 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
16374 goto err_out_power_down
;
16377 SET_NETDEV_DEV(dev
, &pdev
->dev
);
16379 tp
= netdev_priv(dev
);
16382 tp
->pm_cap
= pm_cap
;
16383 tp
->rx_mode
= TG3_DEF_RX_MODE
;
16384 tp
->tx_mode
= TG3_DEF_TX_MODE
;
16388 tp
->msg_enable
= tg3_debug
;
16390 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
16392 /* The word/byte swap controls here control register access byte
16393 * swapping. DMA data byte swapping is controlled in the GRC_MODE
16396 tp
->misc_host_ctrl
=
16397 MISC_HOST_CTRL_MASK_PCI_INT
|
16398 MISC_HOST_CTRL_WORD_SWAP
|
16399 MISC_HOST_CTRL_INDIR_ACCESS
|
16400 MISC_HOST_CTRL_PCISTATE_RW
;
16402 /* The NONFRM (non-frame) byte/word swap controls take effect
16403 * on descriptor entries, anything which isn't packet data.
16405 * The StrongARM chips on the board (one for tx, one for rx)
16406 * are running in big-endian mode.
16408 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
16409 GRC_MODE_WSWAP_NONFRM_DATA
);
16410 #ifdef __BIG_ENDIAN
16411 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
16413 spin_lock_init(&tp
->lock
);
16414 spin_lock_init(&tp
->indirect_lock
);
16415 INIT_WORK(&tp
->reset_task
, tg3_reset_task
);
16417 tp
->regs
= pci_ioremap_bar(pdev
, BAR_0
);
16419 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
16421 goto err_out_free_dev
;
16424 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
16425 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761E
||
16426 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
||
16427 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761SE
||
16428 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
16429 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
16430 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
16431 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
16432 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
) {
16433 tg3_flag_set(tp
, ENABLE_APE
);
16434 tp
->aperegs
= pci_ioremap_bar(pdev
, BAR_2
);
16435 if (!tp
->aperegs
) {
16436 dev_err(&pdev
->dev
,
16437 "Cannot map APE registers, aborting\n");
16439 goto err_out_iounmap
;
16443 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
16444 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
16446 dev
->ethtool_ops
= &tg3_ethtool_ops
;
16447 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
16448 dev
->netdev_ops
= &tg3_netdev_ops
;
16449 dev
->irq
= pdev
->irq
;
16451 err
= tg3_get_invariants(tp
, ent
);
16453 dev_err(&pdev
->dev
,
16454 "Problem fetching invariants of chip, aborting\n");
16455 goto err_out_apeunmap
;
16458 /* The EPB bridge inside 5714, 5715, and 5780 and any
16459 * device behind the EPB cannot support DMA addresses > 40-bit.
16460 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16461 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16462 * do DMA address check in tg3_start_xmit().
16464 if (tg3_flag(tp
, IS_5788
))
16465 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
16466 else if (tg3_flag(tp
, 40BIT_DMA_BUG
)) {
16467 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
16468 #ifdef CONFIG_HIGHMEM
16469 dma_mask
= DMA_BIT_MASK(64);
16472 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
16474 /* Configure DMA attributes. */
16475 if (dma_mask
> DMA_BIT_MASK(32)) {
16476 err
= pci_set_dma_mask(pdev
, dma_mask
);
16478 features
|= NETIF_F_HIGHDMA
;
16479 err
= pci_set_consistent_dma_mask(pdev
,
16482 dev_err(&pdev
->dev
, "Unable to obtain 64 bit "
16483 "DMA for consistent allocations\n");
16484 goto err_out_apeunmap
;
16488 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
16489 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
16491 dev_err(&pdev
->dev
,
16492 "No usable DMA configuration, aborting\n");
16493 goto err_out_apeunmap
;
16497 tg3_init_bufmgr_config(tp
);
16499 features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
16501 /* 5700 B0 chips do not support checksumming correctly due
16502 * to hardware bugs.
16504 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5700_B0
) {
16505 features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_RXCSUM
;
16507 if (tg3_flag(tp
, 5755_PLUS
))
16508 features
|= NETIF_F_IPV6_CSUM
;
16511 /* TSO is on by default on chips that support hardware TSO.
16512 * Firmware TSO on older chips gives lower performance, so it
16513 * is off by default, but can be enabled using ethtool.
16515 if ((tg3_flag(tp
, HW_TSO_1
) ||
16516 tg3_flag(tp
, HW_TSO_2
) ||
16517 tg3_flag(tp
, HW_TSO_3
)) &&
16518 (features
& NETIF_F_IP_CSUM
))
16519 features
|= NETIF_F_TSO
;
16520 if (tg3_flag(tp
, HW_TSO_2
) || tg3_flag(tp
, HW_TSO_3
)) {
16521 if (features
& NETIF_F_IPV6_CSUM
)
16522 features
|= NETIF_F_TSO6
;
16523 if (tg3_flag(tp
, HW_TSO_3
) ||
16524 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5761
||
16525 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5784
&&
16526 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5784_AX
) ||
16527 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5785
||
16528 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_57780
)
16529 features
|= NETIF_F_TSO_ECN
;
16532 dev
->features
|= features
;
16533 dev
->vlan_features
|= features
;
16536 * Add loopback capability only for a subset of devices that support
16537 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
16538 * loopback for the remaining devices.
16540 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5780
&&
16541 !tg3_flag(tp
, CPMU_PRESENT
))
16542 /* Add the loopback capability */
16543 features
|= NETIF_F_LOOPBACK
;
16545 dev
->hw_features
|= features
;
16547 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
&&
16548 !tg3_flag(tp
, TSO_CAPABLE
) &&
16549 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
16550 tg3_flag_set(tp
, MAX_RXPEND_64
);
16551 tp
->rx_pending
= 63;
16554 err
= tg3_get_device_address(tp
);
16556 dev_err(&pdev
->dev
,
16557 "Could not obtain valid ethernet address, aborting\n");
16558 goto err_out_apeunmap
;
16562 * Reset chip in case UNDI or EFI driver did not shutdown
16563 * DMA self test will enable WDMAC and we'll see (spurious)
16564 * pending DMA on the PCI bus at that point.
16566 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
16567 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
16568 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
16569 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
16572 err
= tg3_test_dma(tp
);
16574 dev_err(&pdev
->dev
, "DMA engine test failed, aborting\n");
16575 goto err_out_apeunmap
;
16578 intmbx
= MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
;
16579 rcvmbx
= MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
;
16580 sndmbx
= MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
16581 for (i
= 0; i
< tp
->irq_max
; i
++) {
16582 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
16585 tnapi
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
16587 tnapi
->int_mbox
= intmbx
;
16593 tnapi
->consmbox
= rcvmbx
;
16594 tnapi
->prodmbox
= sndmbx
;
16597 tnapi
->coal_now
= HOSTCC_MODE_COAL_VEC1_NOW
<< (i
- 1);
16599 tnapi
->coal_now
= HOSTCC_MODE_NOW
;
16601 if (!tg3_flag(tp
, SUPPORT_MSIX
))
16605 * If we support MSIX, we'll be using RSS. If we're using
16606 * RSS, the first vector only handles link interrupts and the
16607 * remaining vectors handle rx and tx interrupts. Reuse the
16608 * mailbox values for the next iteration. The values we setup
16609 * above are still useful for the single vectored mode.
16624 pci_set_drvdata(pdev
, dev
);
16626 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5719
||
16627 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5720
)
16628 tg3_flag_set(tp
, PTP_CAPABLE
);
16630 if (tg3_flag(tp
, 5717_PLUS
)) {
16631 /* Resume a low-power mode */
16632 tg3_frob_aux_power(tp
, false);
16635 tg3_timer_init(tp
);
16637 err
= register_netdev(dev
);
16639 dev_err(&pdev
->dev
, "Cannot register net device, aborting\n");
16640 goto err_out_apeunmap
;
16643 netdev_info(dev
, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16644 tp
->board_part_number
,
16645 tp
->pci_chip_rev_id
,
16646 tg3_bus_string(tp
, str
),
16649 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
16650 struct phy_device
*phydev
;
16651 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
16653 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16654 phydev
->drv
->name
, dev_name(&phydev
->dev
));
16658 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
16659 ethtype
= "10/100Base-TX";
16660 else if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
16661 ethtype
= "1000Base-SX";
16663 ethtype
= "10/100/1000Base-T";
16665 netdev_info(dev
, "attached PHY is %s (%s Ethernet) "
16666 "(WireSpeed[%d], EEE[%d])\n",
16667 tg3_phy_string(tp
), ethtype
,
16668 (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
) == 0,
16669 (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) != 0);
16672 netdev_info(dev
, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16673 (dev
->features
& NETIF_F_RXCSUM
) != 0,
16674 tg3_flag(tp
, USE_LINKCHG_REG
) != 0,
16675 (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) != 0,
16676 tg3_flag(tp
, ENABLE_ASF
) != 0,
16677 tg3_flag(tp
, TSO_CAPABLE
) != 0);
16678 netdev_info(dev
, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16680 pdev
->dma_mask
== DMA_BIT_MASK(32) ? 32 :
16681 ((u64
)pdev
->dma_mask
) == DMA_BIT_MASK(40) ? 40 : 64);
16683 pci_save_state(pdev
);
16689 iounmap(tp
->aperegs
);
16690 tp
->aperegs
= NULL
;
16702 err_out_power_down
:
16703 pci_set_power_state(pdev
, PCI_D3hot
);
16706 pci_release_regions(pdev
);
16708 err_out_disable_pdev
:
16709 pci_disable_device(pdev
);
16710 pci_set_drvdata(pdev
, NULL
);
16714 static void tg3_remove_one(struct pci_dev
*pdev
)
16716 struct net_device
*dev
= pci_get_drvdata(pdev
);
16719 struct tg3
*tp
= netdev_priv(dev
);
16721 release_firmware(tp
->fw
);
16723 tg3_reset_task_cancel(tp
);
16725 if (tg3_flag(tp
, USE_PHYLIB
)) {
16730 unregister_netdev(dev
);
16732 iounmap(tp
->aperegs
);
16733 tp
->aperegs
= NULL
;
16740 pci_release_regions(pdev
);
16741 pci_disable_device(pdev
);
16742 pci_set_drvdata(pdev
, NULL
);
16746 #ifdef CONFIG_PM_SLEEP
16747 static int tg3_suspend(struct device
*device
)
16749 struct pci_dev
*pdev
= to_pci_dev(device
);
16750 struct net_device
*dev
= pci_get_drvdata(pdev
);
16751 struct tg3
*tp
= netdev_priv(dev
);
16754 if (!netif_running(dev
))
16757 tg3_reset_task_cancel(tp
);
16759 tg3_netif_stop(tp
);
16761 tg3_timer_stop(tp
);
16763 tg3_full_lock(tp
, 1);
16764 tg3_disable_ints(tp
);
16765 tg3_full_unlock(tp
);
16767 netif_device_detach(dev
);
16769 tg3_full_lock(tp
, 0);
16770 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
16771 tg3_flag_clear(tp
, INIT_COMPLETE
);
16772 tg3_full_unlock(tp
);
16774 err
= tg3_power_down_prepare(tp
);
16778 tg3_full_lock(tp
, 0);
16780 tg3_flag_set(tp
, INIT_COMPLETE
);
16781 err2
= tg3_restart_hw(tp
, 1);
16785 tg3_timer_start(tp
);
16787 netif_device_attach(dev
);
16788 tg3_netif_start(tp
);
16791 tg3_full_unlock(tp
);
16800 static int tg3_resume(struct device
*device
)
16802 struct pci_dev
*pdev
= to_pci_dev(device
);
16803 struct net_device
*dev
= pci_get_drvdata(pdev
);
16804 struct tg3
*tp
= netdev_priv(dev
);
16807 if (!netif_running(dev
))
16810 netif_device_attach(dev
);
16812 tg3_full_lock(tp
, 0);
16814 tg3_flag_set(tp
, INIT_COMPLETE
);
16815 err
= tg3_restart_hw(tp
, 1);
16819 tg3_timer_start(tp
);
16821 tg3_netif_start(tp
);
16824 tg3_full_unlock(tp
);
16832 static SIMPLE_DEV_PM_OPS(tg3_pm_ops
, tg3_suspend
, tg3_resume
);
16833 #define TG3_PM_OPS (&tg3_pm_ops)
16837 #define TG3_PM_OPS NULL
16839 #endif /* CONFIG_PM_SLEEP */
16842 * tg3_io_error_detected - called when PCI error is detected
16843 * @pdev: Pointer to PCI device
16844 * @state: The current pci connection state
16846 * This function is called after a PCI bus error affecting
16847 * this device has been detected.
16849 static pci_ers_result_t
tg3_io_error_detected(struct pci_dev
*pdev
,
16850 pci_channel_state_t state
)
16852 struct net_device
*netdev
= pci_get_drvdata(pdev
);
16853 struct tg3
*tp
= netdev_priv(netdev
);
16854 pci_ers_result_t err
= PCI_ERS_RESULT_NEED_RESET
;
16856 netdev_info(netdev
, "PCI I/O error detected\n");
16860 if (!netif_running(netdev
))
16865 tg3_netif_stop(tp
);
16867 tg3_timer_stop(tp
);
16869 /* Want to make sure that the reset task doesn't run */
16870 tg3_reset_task_cancel(tp
);
16872 netif_device_detach(netdev
);
16874 /* Clean up software state, even if MMIO is blocked */
16875 tg3_full_lock(tp
, 0);
16876 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
16877 tg3_full_unlock(tp
);
16880 if (state
== pci_channel_io_perm_failure
)
16881 err
= PCI_ERS_RESULT_DISCONNECT
;
16883 pci_disable_device(pdev
);
16891 * tg3_io_slot_reset - called after the pci bus has been reset.
16892 * @pdev: Pointer to PCI device
16894 * Restart the card from scratch, as if from a cold-boot.
16895 * At this point, the card has exprienced a hard reset,
16896 * followed by fixups by BIOS, and has its config space
16897 * set up identically to what it was at cold boot.
16899 static pci_ers_result_t
tg3_io_slot_reset(struct pci_dev
*pdev
)
16901 struct net_device
*netdev
= pci_get_drvdata(pdev
);
16902 struct tg3
*tp
= netdev_priv(netdev
);
16903 pci_ers_result_t rc
= PCI_ERS_RESULT_DISCONNECT
;
16908 if (pci_enable_device(pdev
)) {
16909 netdev_err(netdev
, "Cannot re-enable PCI device after reset.\n");
16913 pci_set_master(pdev
);
16914 pci_restore_state(pdev
);
16915 pci_save_state(pdev
);
16917 if (!netif_running(netdev
)) {
16918 rc
= PCI_ERS_RESULT_RECOVERED
;
16922 err
= tg3_power_up(tp
);
16926 rc
= PCI_ERS_RESULT_RECOVERED
;
16935 * tg3_io_resume - called when traffic can start flowing again.
16936 * @pdev: Pointer to PCI device
16938 * This callback is called when the error recovery driver tells
16939 * us that its OK to resume normal operation.
16941 static void tg3_io_resume(struct pci_dev
*pdev
)
16943 struct net_device
*netdev
= pci_get_drvdata(pdev
);
16944 struct tg3
*tp
= netdev_priv(netdev
);
16949 if (!netif_running(netdev
))
16952 tg3_full_lock(tp
, 0);
16953 tg3_flag_set(tp
, INIT_COMPLETE
);
16954 err
= tg3_restart_hw(tp
, 1);
16956 tg3_full_unlock(tp
);
16957 netdev_err(netdev
, "Cannot restart hardware after reset.\n");
16961 netif_device_attach(netdev
);
16963 tg3_timer_start(tp
);
16965 tg3_netif_start(tp
);
16967 tg3_full_unlock(tp
);
16975 static const struct pci_error_handlers tg3_err_handler
= {
16976 .error_detected
= tg3_io_error_detected
,
16977 .slot_reset
= tg3_io_slot_reset
,
16978 .resume
= tg3_io_resume
16981 static struct pci_driver tg3_driver
= {
16982 .name
= DRV_MODULE_NAME
,
16983 .id_table
= tg3_pci_tbl
,
16984 .probe
= tg3_init_one
,
16985 .remove
= tg3_remove_one
,
16986 .err_handler
= &tg3_err_handler
,
16987 .driver
.pm
= TG3_PM_OPS
,
16990 static int __init
tg3_init(void)
16992 return pci_register_driver(&tg3_driver
);
16995 static void __exit
tg3_cleanup(void)
16997 pci_unregister_driver(&tg3_driver
);
17000 module_init(tg3_init
);
17001 module_exit(tg3_cleanup
);