/* tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#include <asm/idprom.h>
/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
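
/* Illustrative sketch only (not a driver call site): the wrappers above let
 * device flags be tested and updated on tp->tg3_flags by their short names,
 * e.g.
 *
 *	if (!tg3_flag(tp, TAGGED_STATUS))
 *		tg3_flag_set(tp, TAGGED_STATUS);
 */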
#define DRV_MODULE_NAME		"tg3"
#define TG3_MIN_NUM		132
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 21, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
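
/* Minimal illustrative helper (an assumption, not used by the driver):
 * because TG3_TX_RING_SIZE is a compile-time power of two, NEXT_TX() wraps
 * an index with a mask, i.e. the '& (foo - 1)' trick noted above, instead
 * of a hardware modulo.
 */
static inline u32 tg3_next_tx_example(u32 entry)
{
	return NEXT_TX(entry);	/* equivalent to (entry + 1) % TG3_TX_RING_SIZE */
}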
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
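
/* Sketch of how the threshold shapes the receive path (illustrative only;
 * the real logic lives in the rx routines later in this file): frames no
 * longer than TG3_RX_COPY_THRESH(tp) are copied into a small, freshly
 * allocated skb, while larger frames are handed up in the original DMA
 * buffer.
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp))
 *		copy the packet data into a new skb;
 *	else
 *		pass the mapped buffer up the stack;
 */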
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test     (online) " },
	[TG3_LINK_TEST]		= { "link test      (online) " },
	[TG3_REGISTER_TEST]	= { "register test  (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test    (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	u32 val;
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;

	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
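
/* Illustrative use of the wrappers above (a sketch, not a new call site):
 * a write that toggles GPIO power through GRC_LOCAL_CTRL uses the delayed,
 * flushed variant so the settling time described earlier is honoured.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */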
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int ret = 0;
	u32 status, req, gnt, bit;
	u32 i, off;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
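
/* Typical pairing of the two helpers above (illustrative sketch only; real
 * call sites appear throughout the driver):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM) == 0) {
 *		... access memory shared with the APE firmware ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */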
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}
*tp
, int reg
, u32
*val
)
1160 return __tg3_readphy(tp
, tp
->phy_addr
, reg
, val
);
1163 static int __tg3_writephy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1170 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1171 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1174 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1176 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1180 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1182 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1183 MI_COM_PHY_ADDR_MASK
);
1184 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1185 MI_COM_REG_ADDR_MASK
);
1186 frame_val
|= (val
& MI_COM_DATA_MASK
);
1187 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1189 tw32_f(MAC_MI_COM
, frame_val
);
1191 loops
= PHY_BUSY_LOOPS
;
1192 while (loops
!= 0) {
1194 frame_val
= tr32(MAC_MI_COM
);
1195 if ((frame_val
& MI_COM_BUSY
) == 0) {
1197 frame_val
= tr32(MAC_MI_COM
);
1207 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1208 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1212 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1217 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1219 return __tg3_writephy(tp
, tp
->phy_addr
, reg
, val
);
1222 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1226 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1230 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1234 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1235 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1239 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1245 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1249 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1253 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1257 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1258 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1262 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1268 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1272 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1274 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1279 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1283 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1285 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1290 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1294 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1295 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1296 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1298 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1303 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1305 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1306 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1308 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1311 static int tg3_phy_toggle_auxctl_smdsp(struct tg3
*tp
, bool enable
)
1316 err
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
1322 val
|= MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1324 val
&= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1326 err
= tg3_phy_auxctl_write((tp
), MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
1327 val
| MII_TG3_AUXCTL_ACTL_TX_6DB
);
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
*bp
, int mii_id
, int reg
)
1365 struct tg3
*tp
= bp
->priv
;
1368 spin_lock_bh(&tp
->lock
);
1370 if (tg3_readphy(tp
, reg
, &val
))
1373 spin_unlock_bh(&tp
->lock
);
1378 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1380 struct tg3
*tp
= bp
->priv
;
1383 spin_lock_bh(&tp
->lock
);
1385 if (tg3_writephy(tp
, reg
, val
))
1388 spin_unlock_bh(&tp
->lock
);
1393 static int tg3_mdio_reset(struct mii_bus
*bp
)
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1649 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1654 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1656 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1657 val
|= (reg
& 0xffff);
1661 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1663 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1664 val
|= (reg
& 0xffff);
1668 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1669 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1671 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1672 val
|= (reg
& 0xffff);
1676 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1683 /* tp->lock is held. */
1684 static void tg3_ump_link_report(struct tg3
*tp
)
1688 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1691 tg3_phy_gather_ump_data(tp
, data
);
1693 tg3_wait_for_event_ack(tp
);
1695 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1696 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1697 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1698 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1699 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1700 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1702 tg3_generate_fw_event(tp
);
1705 /* tp->lock is held. */
1706 static void tg3_stop_fw(struct tg3
*tp
)
1708 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1709 /* Wait for RX cpu to ACK the previous event. */
1710 tg3_wait_for_event_ack(tp
);
1712 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1714 tg3_generate_fw_event(tp
);
1716 /* Wait for RX cpu to ACK this event. */
1717 tg3_wait_for_event_ack(tp
);
1721 /* tp->lock is held. */
1722 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1724 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1725 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1727 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1729 case RESET_KIND_INIT
:
1730 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1734 case RESET_KIND_SHUTDOWN
:
1735 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1739 case RESET_KIND_SUSPEND
:
1740 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1749 if (kind
== RESET_KIND_INIT
||
1750 kind
== RESET_KIND_SUSPEND
)
1751 tg3_ape_driver_state_change(tp
, kind
);
1754 /* tp->lock is held. */
1755 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1757 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1759 case RESET_KIND_INIT
:
1760 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1761 DRV_STATE_START_DONE
);
1764 case RESET_KIND_SHUTDOWN
:
1765 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1766 DRV_STATE_UNLOAD_DONE
);
1774 if (kind
== RESET_KIND_SHUTDOWN
)
1775 tg3_ape_driver_state_change(tp
, kind
);
1778 /* tp->lock is held. */
1779 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1781 if (tg3_flag(tp
, ENABLE_ASF
)) {
1783 case RESET_KIND_INIT
:
1784 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1788 case RESET_KIND_SHUTDOWN
:
1789 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1793 case RESET_KIND_SUSPEND
:
1794 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}
static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}
static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
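
/* Worked example of the resolution above (illustrative only): when both
 * ends advertise symmetric pause, both directions are enabled; when only
 * the asymmetric bit matches, the direction depends on which side also
 * advertised symmetric pause.
 *
 *	u8 cap = tg3_resolve_flowctrl_1000X(ADVERTISE_1000XPAUSE,
 *					    ADVERTISE_1000XPAUSE);
 *	// cap == (FLOW_CTRL_TX | FLOW_CTRL_RX)
 */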
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
*tp
)
2083 struct phy_device
*phydev
;
2085 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
2088 /* Bring the PHY back to a known state. */
2091 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2093 /* Attach the MAC to the PHY. */
2094 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
),
2095 tg3_adjust_link
, phydev
->interface
);
2096 if (IS_ERR(phydev
)) {
2097 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
2098 return PTR_ERR(phydev
);
2101 /* Mask with MAC supported features. */
2102 switch (phydev
->interface
) {
2103 case PHY_INTERFACE_MODE_GMII
:
2104 case PHY_INTERFACE_MODE_RGMII
:
2105 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
2106 phydev
->supported
&= (PHY_GBIT_FEATURES
|
2108 SUPPORTED_Asym_Pause
);
2112 case PHY_INTERFACE_MODE_MII
:
2113 phydev
->supported
&= (PHY_BASIC_FEATURES
|
2115 SUPPORTED_Asym_Pause
);
2118 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2122 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
2124 phydev
->advertising
= phydev
->supported
;
2129 static void tg3_phy_start(struct tg3
*tp
)
2131 struct phy_device
*phydev
;
2133 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2136 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2138 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2139 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2140 phydev
->speed
= tp
->link_config
.speed
;
2141 phydev
->duplex
= tp
->link_config
.duplex
;
2142 phydev
->autoneg
= tp
->link_config
.autoneg
;
2143 phydev
->advertising
= tp
->link_config
.advertising
;
2148 phy_start_aneg(phydev
);
2151 static void tg3_phy_stop(struct tg3
*tp
)
2153 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2156 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}
static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}
*tp
, bool enable
)
2201 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2204 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2205 phytest
| MII_TG3_FET_SHADOW_EN
);
2206 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2208 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2210 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2211 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2213 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	int ret;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
	if (!ret)
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	tg3_phy_toggle_auxctl_smdsp(tp, false);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up &&
		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
		if (err)
			return err;

		/* Block the PHY control access. */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	tg3_phy_toggle_auxctl_smdsp(tp, false);

	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

static void tg3_carrier_off(struct tg3 *tp)
{
	netif_carrier_off(tp->dev);
	tp->link_up = false;
}

static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))
		netdev_warn(tp->dev,
			    "Management side-band traffic will be interrupted during phy settings change\n");
}
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && tp->link_up) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_phy_toggle_auxctl_smdsp(tp, false);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			tg3_phy_toggle_auxctl_smdsp(tp, false);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
		tg3_phydsp_write(tp, 0xffb, 0x4000);

	tg3_phy_toggle_automdix(tp, true);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
					  TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
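
/* Summary of the masks above: each PCI function owns a 4-bit slice of the
 * shared GPIO message word, so function n uses bits [4n + 3 : 4n].  For
 * example, a driver-present announcement from function 2 is
 * TG3_GPIO_MSG_DRVR_PRES << 8; the per-function shift itself is computed in
 * tg3_set_function_status() below.
 */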
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}

static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701)
		return;

	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3
*tp
)
2824 if (!tg3_flag(tp
, IS_NIC
))
2827 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
2828 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
2829 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2830 (GRC_LCLCTRL_GPIO_OE0
|
2831 GRC_LCLCTRL_GPIO_OE1
|
2832 GRC_LCLCTRL_GPIO_OE2
|
2833 GRC_LCLCTRL_GPIO_OUTPUT0
|
2834 GRC_LCLCTRL_GPIO_OUTPUT1
),
2835 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2836 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2837 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2838 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2840 GRC_LCLCTRL_GPIO_OE1
|
2841 GRC_LCLCTRL_GPIO_OE2
|
2842 GRC_LCLCTRL_GPIO_OUTPUT0
|
2843 GRC_LCLCTRL_GPIO_OUTPUT1
|
2845 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2848 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2849 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2852 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2853 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2857 u32 grc_local_ctrl
= 0;
2859 /* Workaround to prevent overdrawing Amps. */
2860 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
2861 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2862 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2864 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2867 /* On 5753 and variants, GPIO2 cannot be used. */
2868 no_gpio2
= tp
->nic_sram_data_cfg
&
2869 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2871 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2872 GRC_LCLCTRL_GPIO_OE1
|
2873 GRC_LCLCTRL_GPIO_OE2
|
2874 GRC_LCLCTRL_GPIO_OUTPUT1
|
2875 GRC_LCLCTRL_GPIO_OUTPUT2
;
2877 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2878 GRC_LCLCTRL_GPIO_OUTPUT2
);
2880 tw32_wait_f(GRC_LOCAL_CTRL
,
2881 tp
->grc_local_ctrl
| grc_local_ctrl
,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2884 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2886 tw32_wait_f(GRC_LOCAL_CTRL
,
2887 tp
->grc_local_ctrl
| grc_local_ctrl
,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2891 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2892 tw32_wait_f(GRC_LOCAL_CTRL
,
2893 tp
->grc_local_ctrl
| grc_local_ctrl
,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2899 static void tg3_frob_aux_power_5717(struct tg3
*tp
, bool wol_enable
)
2903 /* Serialize power state transitions */
2904 if (tg3_ape_lock(tp
, TG3_APE_LOCK_GPIO
))
2907 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
) || wol_enable
)
2908 msg
= TG3_GPIO_MSG_NEED_VAUX
;
2910 msg
= tg3_set_function_status(tp
, msg
);
2912 if (msg
& TG3_GPIO_MSG_ALL_DRVR_PRES_MASK
)
2915 if (msg
& TG3_GPIO_MSG_ALL_NEED_VAUX_MASK
)
2916 tg3_pwrsrc_switch_to_vaux(tp
);
2918 tg3_pwrsrc_die_with_vmain(tp
);
2921 tg3_ape_unlock(tp
, TG3_APE_LOCK_GPIO
);
2924 static void tg3_frob_aux_power(struct tg3
*tp
, bool include_wol
)
2926 bool need_vaux
= false;
2928 /* The GPIOs do something completely different on 57765. */
2929 if (!tg3_flag(tp
, IS_NIC
) || tg3_flag(tp
, 57765_CLASS
))
2932 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2933 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2934 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
2935 tg3_frob_aux_power_5717(tp
, include_wol
?
2936 tg3_flag(tp
, WOL_ENABLE
) != 0 : 0);
2940 if (tp
->pdev_peer
&& tp
->pdev_peer
!= tp
->pdev
) {
2941 struct net_device
*dev_peer
;
2943 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2945 /* remove_one() may have been run on the peer. */
2947 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2949 if (tg3_flag(tp_peer
, INIT_COMPLETE
))
2952 if ((include_wol
&& tg3_flag(tp_peer
, WOL_ENABLE
)) ||
2953 tg3_flag(tp_peer
, ENABLE_ASF
))
2958 if ((include_wol
&& tg3_flag(tp
, WOL_ENABLE
)) ||
2959 tg3_flag(tp
, ENABLE_ASF
))
2963 tg3_pwrsrc_switch_to_vaux(tp
);
2965 tg3_pwrsrc_die_with_vmain(tp
);
static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static bool tg3_phy_power_bug(struct tg3 *tp)
{
	switch (tg3_asic_rev(tp)) {
	case ASIC_REV_5700:
	case ASIC_REV_5704:
		return true;
	case ASIC_REV_5780:
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			return true;
		return false;
	case ASIC_REV_5717:
		if (!tp->pci_fn)
			return true;
		return false;
	case ASIC_REV_5719:
	case ASIC_REV_5720:
		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !tp->pci_fn)
			return true;
		return false;
	}

	return false;
}
3006 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
3010 if (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)
3013 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
3014 if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
3015 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
3016 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
3019 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
3020 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
3021 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
3026 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3028 val
= tr32(GRC_MISC_CFG
);
3029 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
3032 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
3034 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
3037 tg3_writephy(tp
, MII_ADVERTISE
, 0);
3038 tg3_writephy(tp
, MII_BMCR
,
3039 BMCR_ANENABLE
| BMCR_ANRESTART
);
3041 tg3_writephy(tp
, MII_TG3_FET_TEST
,
3042 phytest
| MII_TG3_FET_SHADOW_EN
);
3043 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
3044 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
3046 MII_TG3_FET_SHDW_AUXMODE4
,
3049 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
3052 } else if (do_low_power
) {
3053 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
3054 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
3056 val
= MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
3057 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
3058 MII_TG3_AUXCTL_PCTL_VREG_11V
;
3059 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, val
);
3062 /* The PHY should not be powered down on some chips because
3065 if (tg3_phy_power_bug(tp
))
3068 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
3069 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
3070 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
3071 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
3072 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
3073 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
3076 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}

/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		if (tp->nvram_lock_cnt > 0)
			tp->nvram_lock_cnt--;
		if (tp->nvram_lock_cnt == 0)
			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
	}
}

/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}
}

/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}
}
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
				       u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}

#define NVRAM_CMD_TIMEOUT 10000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr / tp->nvram_pagesize) <<
			ATMEL_AT45DB0X1B_PAGE_POS) +
		       (addr % tp->nvram_pagesize);

	return addr;
}

static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
	if (tg3_flag(tp, NVRAM) &&
	    tg3_flag(tp, NVRAM_BUFFERED) &&
	    tg3_flag(tp, FLASH) &&
	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
	    (tp->nvram_jedecnum == JEDEC_ATMEL))

		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
			tp->nvram_pagesize) +
		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));

	return addr;
}
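
/* Worked example with illustrative numbers: assuming a 264-byte
 * nvram_pagesize and a page-position shift of 9 for these Atmel parts,
 * logical offset 1000 lives in page 3 (1000 / 264) at byte 208 (1000 % 264),
 * so tg3_nvram_phys_addr() returns (3 << 9) + 208 = 1744;
 * tg3_nvram_logical_addr() performs the inverse mapping.
 */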
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}

/* Ensures NVRAM data is in bytestream format. */
static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
{
	u32 v;
	int res = tg3_nvram_read(tp, offset, &v);

	if (!res)
		*val = cpu_to_be32(v);

	return res;
}
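
/* Illustration of the note above: if the four bytes at some NVRAM offset are
 * 0x12 0x34 0x56 0x78, tg3_nvram_read_be32() stores the bytestream value
 * 0x12345678 as a __be32 on any host, while tg3_nvram_read() hands back a
 * CPU-native u32 (i.e. byteswapped on little-endian machines).
 */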
3272 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
3273 u32 offset
, u32 len
, u8
*buf
)
3278 for (i
= 0; i
< len
; i
+= 4) {
3284 memcpy(&data
, buf
+ i
, 4);
3287 * The SEEPROM interface expects the data to always be opposite
3288 * the native endian format. We accomplish this by reversing
3289 * all the operations that would have been performed on the
3290 * data from a call to tg3_nvram_read_be32().
3292 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
3294 val
= tr32(GRC_EEPROM_ADDR
);
3295 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
3297 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
3299 tw32(GRC_EEPROM_ADDR
, val
|
3300 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3301 (addr
& EEPROM_ADDR_ADDR_MASK
) |
3305 for (j
= 0; j
< 1000; j
++) {
3306 val
= tr32(GRC_EEPROM_ADDR
);
3308 if (val
& EEPROM_ADDR_COMPLETE
)
3312 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
3321 /* offset and length are dword aligned */
3322 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
3326 u32 pagesize
= tp
->nvram_pagesize
;
3327 u32 pagemask
= pagesize
- 1;
3331 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
3337 u32 phy_addr
, page_off
, size
;
3339 phy_addr
= offset
& ~pagemask
;
3341 for (j
= 0; j
< pagesize
; j
+= 4) {
3342 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
3343 (__be32
*) (tmp
+ j
));
3350 page_off
= offset
& pagemask
;
3357 memcpy(tmp
+ page_off
, buf
, size
);
3359 offset
= offset
+ (pagesize
- page_off
);
3361 tg3_enable_nvram_access(tp
);
3364 * Before we can erase the flash page, we need
3365 * to issue a special "write enable" command.
3367 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3369 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3372 /* Erase the target page */
3373 tw32(NVRAM_ADDR
, phy_addr
);
3375 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
3376 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
3378 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3381 /* Issue another write enable to start the write. */
3382 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3384 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3387 for (j
= 0; j
< pagesize
; j
+= 4) {
3390 data
= *((__be32
*) (tmp
+ j
));
3392 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3394 tw32(NVRAM_ADDR
, phy_addr
+ j
);
3396 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
3400 nvram_cmd
|= NVRAM_CMD_FIRST
;
3401 else if (j
== (pagesize
- 4))
3402 nvram_cmd
|= NVRAM_CMD_LAST
;
3404 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3412 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3413 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3420 /* offset and length are dword aligned */
3421 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
3426 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
3427 u32 page_off
, phy_addr
, nvram_cmd
;
3430 memcpy(&data
, buf
+ i
, 4);
3431 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3433 page_off
= offset
% tp
->nvram_pagesize
;
3435 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
3437 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
3439 if (page_off
== 0 || i
== 0)
3440 nvram_cmd
|= NVRAM_CMD_FIRST
;
3441 if (page_off
== (tp
->nvram_pagesize
- 4))
3442 nvram_cmd
|= NVRAM_CMD_LAST
;
3445 nvram_cmd
|= NVRAM_CMD_LAST
;
3447 if ((nvram_cmd
& NVRAM_CMD_FIRST
) ||
3448 !tg3_flag(tp
, FLASH
) ||
3449 !tg3_flag(tp
, 57765_PLUS
))
3450 tw32(NVRAM_ADDR
, phy_addr
);
3452 if (tg3_asic_rev(tp
) != ASIC_REV_5752
&&
3453 !tg3_flag(tp
, 5755_PLUS
) &&
3454 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
3455 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
3458 cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3459 ret
= tg3_nvram_exec_cmd(tp
, cmd
);
3463 if (!tg3_flag(tp
, FLASH
)) {
3464 /* We always do complete word writes to eeprom. */
3465 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
3468 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
{
	int i;
	const int iters = 10000;

	for (i = 0; i < iters; i++) {
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
			break;
		if (pci_channel_offline(tp->pdev))
			return -EBUSY;
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_rxcpu_pause(struct tg3 *tp)
{
	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);

	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
	udelay(10);

	return rc;
}

/* tp->lock is held. */
static int tg3_txcpu_pause(struct tg3 *tp)
{
	return tg3_pause_cpu(tp, TX_CPU_BASE);
}

/* tp->lock is held. */
static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
{
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
}

/* tp->lock is held. */
static void tg3_rxcpu_resume(struct tg3 *tp)
{
	tg3_resume_cpu(tp, RX_CPU_BASE);
}

/* tp->lock is held. */
static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
{
	int rc;

	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (cpu_base == RX_CPU_BASE) {
		rc = tg3_rxcpu_pause(tp);
	} else {
		/*
		 * There is only an Rx CPU for the 5750 derivative in the
		 * BCM4785.
		 */
		if (tg3_flag(tp, IS_SSB_CORE))
			return 0;

		rc = tg3_txcpu_pause(tp);
	}

	if (rc) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
static int tg3_fw_data_len(struct tg3 *tp,
			   const struct tg3_firmware_hdr *fw_hdr)
{
	int fw_len;

	/* Non fragmented firmware have one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware have a main header followed by multiple
	 * fragments. Each fragment is identical to non fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment i.e. fragment data + header length. Data length is
	 * therefore length field in the header minus TG3_FW_HDR_LEN.
	 */
	if (tp->fw_len == 0xffffffff)
		fw_len = be32_to_cpu(fw_hdr->len);
	else
		fw_len = tp->fw->size;

	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
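
/* Illustrative layout (not taken from any particular firmware file): a
 * fragmented image is
 *
 *   [main hdr: version, base_addr, len = 0xffffffff]
 *   [frag hdr: base_addr, len = TG3_FW_HDR_LEN + data bytes][frag data ...]
 *   [frag hdr ...][frag data ...]
 *
 * so for a fragment whose header reports len, tg3_fw_data_len() yields
 * (len - TG3_FW_HDR_LEN) / sizeof(u32) data words to copy.
 */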
3644 /* tp->lock is held. */
3645 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
,
3646 u32 cpu_scratch_base
, int cpu_scratch_size
,
3647 const struct tg3_firmware_hdr
*fw_hdr
)
3650 void (*write_op
)(struct tg3
*, u32
, u32
);
3651 int total_len
= tp
->fw
->size
;
3653 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
3655 "%s: Trying to load TX cpu firmware which is 5705\n",
3660 if (tg3_flag(tp
, 5705_PLUS
) && tg3_asic_rev(tp
) != ASIC_REV_57766
)
3661 write_op
= tg3_write_mem
;
3663 write_op
= tg3_write_indirect_reg32
;
3665 if (tg3_asic_rev(tp
) != ASIC_REV_57766
) {
3666 /* It is possible that bootcode is still loading at this point.
3667 * Get the nvram lock first before halting the cpu.
3669 int lock_err
= tg3_nvram_lock(tp
);
3670 err
= tg3_halt_cpu(tp
, cpu_base
);
3672 tg3_nvram_unlock(tp
);
3676 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
3677 write_op(tp
, cpu_scratch_base
+ i
, 0);
3678 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3679 tw32(cpu_base
+ CPU_MODE
,
3680 tr32(cpu_base
+ CPU_MODE
) | CPU_MODE_HALT
);
3682 /* Subtract additional main header for fragmented firmware and
3683 * advance to the first fragment
3685 total_len
-= TG3_FW_HDR_LEN
;
3690 u32
*fw_data
= (u32
*)(fw_hdr
+ 1);
3691 for (i
= 0; i
< tg3_fw_data_len(tp
, fw_hdr
); i
++)
3692 write_op(tp
, cpu_scratch_base
+
3693 (be32_to_cpu(fw_hdr
->base_addr
) & 0xffff) +
3695 be32_to_cpu(fw_data
[i
]));
3697 total_len
-= be32_to_cpu(fw_hdr
->len
);
3699 /* Advance to next fragment */
3700 fw_hdr
= (struct tg3_firmware_hdr
*)
3701 ((void *)fw_hdr
+ be32_to_cpu(fw_hdr
->len
));
3702 } while (total_len
> 0);
/* tp->lock is held. */
static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
{
	int i;
	const int iters = 5;

	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, pc);

	for (i = 0; i < iters; i++) {
		if (tr32(cpu_base + CPU_PC) == pc)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, pc);
		udelay(1000);
	}

	return (i == iters) ? -EBUSY : 0;
}

/* tp->lock is held. */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;
	int err;

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    fw_hdr);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
				       be32_to_cpu(fw_hdr->base_addr));
	if (err) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
				be32_to_cpu(fw_hdr->base_addr));
		return -ENODEV;
	}

	tg3_rxcpu_resume(tp);

	return 0;
}
static int tg3_validate_rxcpu_state(struct tg3 *tp)
{
	const int iters = 1000;
	int i;
	u32 val;

	/* Wait for boot code to complete initialization and enter service
	 * loop. It is then safe to download service patches
	 */
	for (i = 0; i < iters; i++) {
		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
			break;

		udelay(10);
	}

	if (i == iters) {
		netdev_err(tp->dev, "Boot code not ready for service patches\n");
		return -EBUSY;
	}

	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
	if (val & 0xff) {
		netdev_warn(tp->dev,
			    "Other patches exist. Not downloading EEE patch\n");
		return -EEXIST;
	}

	return 0;
}

/* tp->lock is held. */
static void tg3_load_57766_firmware(struct tg3 *tp)
{
	struct tg3_firmware_hdr *fw_hdr;

	if (!tg3_flag(tp, NO_NVRAM))
		return;

	if (tg3_validate_rxcpu_state(tp))
		return;

	if (!tp->fw)
		return;

	/* This firmware blob has a different format than older firmware
	 * releases as given below. The main difference is we have fragmented
	 * data to be written to non-contiguous locations.
	 *
	 * In the beginning we have a firmware header identical to other
	 * firmware which consists of version, base addr and length. The length
	 * here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments which are
	 * individually identical to previous firmware. i.e. they have the
	 * firmware header and followed by data for that fragment. The version
	 * field of the individual fragment header is unused.
	 */

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
		return;

	if (tg3_rxcpu_pause(tp))
		return;

	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);

	tg3_rxcpu_resume(tp);
}
3845 /* tp->lock is held. */
3846 static int tg3_load_tso_firmware(struct tg3
*tp
)
3848 const struct tg3_firmware_hdr
*fw_hdr
;
3849 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
3852 if (!tg3_flag(tp
, FW_TSO
))
3855 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
3857 /* Firmware blob starts with version numbers, followed by
3858 start address and length. We are setting complete length.
3859 length = end_address_of_bss - start_address_of_text.
3860 Remainder is the blob to be loaded contiguously
3861 from start address. */
3863 cpu_scratch_size
= tp
->fw_len
;
3865 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
3866 cpu_base
= RX_CPU_BASE
;
3867 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
3869 cpu_base
= TX_CPU_BASE
;
3870 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
3871 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
3874 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
3875 cpu_scratch_base
, cpu_scratch_size
,
3880 /* Now startup the cpu. */
3881 err
= tg3_pause_cpu_and_set_pc(tp
, cpu_base
,
3882 be32_to_cpu(fw_hdr
->base_addr
));
3885 "%s fails to set CPU PC, is %08x should be %08x\n",
3886 __func__
, tr32(cpu_base
+ CPU_PC
),
3887 be32_to_cpu(fw_hdr
->base_addr
));
3891 tg3_resume_cpu(tp
, cpu_base
);
/* tp->lock is held. */
static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
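
/* Example with an illustrative address 00:10:18:aa:bb:cc: the code above
 * programs MAC_ADDR_x_HIGH = 0x00000010 (bytes 0-1) and
 * MAC_ADDR_x_LOW = 0x18aabbcc (bytes 2-5) into each MAC address slot, then
 * seeds the TX backoff register with the byte sum masked by
 * TX_BACKOFF_SEED_MASK.
 */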
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}

static int tg3_power_up(struct tg3 *tp)
{
	int err;

	tg3_enable_register_access(tp);

	err = pci_set_power_state(tp->pdev, PCI_D0);
	if (!err) {
		/* Switch out of Vaux if it is a NIC */
		tg3_pwrsrc_switch_to_vmain(tp);
	} else {
		netdev_err(tp->dev, "Transition to D0 failed\n");
	}

	return err;
}

static int tg3_setup_phy(struct tg3 *, bool);
3962 static int tg3_power_down_prepare(struct tg3
*tp
)
3965 bool device_should_wake
, do_low_power
;
3967 tg3_enable_register_access(tp
);
3969 /* Restore the CLKREQ setting. */
3970 if (tg3_flag(tp
, CLKREQ_BUG
))
3971 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
3972 PCI_EXP_LNKCTL_CLKREQ_EN
);
3974 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
3975 tw32(TG3PCI_MISC_HOST_CTRL
,
3976 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
3978 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
3979 tg3_flag(tp
, WOL_ENABLE
);
3981 if (tg3_flag(tp
, USE_PHYLIB
)) {
3982 do_low_power
= false;
3983 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
3984 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3985 struct phy_device
*phydev
;
3986 u32 phyid
, advertising
;
3988 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
3990 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
3992 tp
->link_config
.speed
= phydev
->speed
;
3993 tp
->link_config
.duplex
= phydev
->duplex
;
3994 tp
->link_config
.autoneg
= phydev
->autoneg
;
3995 tp
->link_config
.advertising
= phydev
->advertising
;
3997 advertising
= ADVERTISED_TP
|
3999 ADVERTISED_Autoneg
|
4000 ADVERTISED_10baseT_Half
;
4002 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
4003 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4005 ADVERTISED_100baseT_Half
|
4006 ADVERTISED_100baseT_Full
|
4007 ADVERTISED_10baseT_Full
;
4009 advertising
|= ADVERTISED_10baseT_Full
;
4012 phydev
->advertising
= advertising
;
4014 phy_start_aneg(phydev
);
4016 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
4017 if (phyid
!= PHY_ID_BCMAC131
) {
4018 phyid
&= PHY_BCM_OUI_MASK
;
4019 if (phyid
== PHY_BCM_OUI_1
||
4020 phyid
== PHY_BCM_OUI_2
||
4021 phyid
== PHY_BCM_OUI_3
)
4022 do_low_power
= true;
4026 do_low_power
= true;
4028 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
))
4029 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
4031 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
4032 tg3_setup_phy(tp
, false);
4035 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
4038 val
= tr32(GRC_VCPU_EXT_CTRL
);
4039 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
4040 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
4044 for (i
= 0; i
< 200; i
++) {
4045 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
4046 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4051 if (tg3_flag(tp
, WOL_CAP
))
4052 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
4053 WOL_DRV_STATE_SHUTDOWN
|
4057 if (device_should_wake
) {
4060 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
4062 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
4063 tg3_phy_auxctl_write(tp
,
4064 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
4065 MII_TG3_AUXCTL_PCTL_WOL_EN
|
4066 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
4067 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
4071 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4072 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4073 else if (tp
->phy_flags
&
4074 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) {
4075 if (tp
->link_config
.active_speed
== SPEED_1000
)
4076 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4078 mac_mode
= MAC_MODE_PORT_MODE_MII
;
4080 mac_mode
= MAC_MODE_PORT_MODE_MII
;
4082 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
4083 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4084 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
4085 SPEED_100
: SPEED_10
;
4086 if (tg3_5700_link_polarity(tp
, speed
))
4087 mac_mode
|= MAC_MODE_LINK_POLARITY
;
4089 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4092 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4095 if (!tg3_flag(tp
, 5750_PLUS
))
4096 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
4098 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
4099 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
4100 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
4101 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
4103 if (tg3_flag(tp
, ENABLE_APE
))
4104 mac_mode
|= MAC_MODE_APE_TX_EN
|
4105 MAC_MODE_APE_RX_EN
|
4106 MAC_MODE_TDE_ENABLE
;
4108 tw32_f(MAC_MODE
, mac_mode
);
4111 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
4115 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
4116 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4117 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
4120 base_val
= tp
->pci_clock_ctrl
;
4121 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
4122 CLOCK_CTRL_TXCLK_DISABLE
);
4124 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
4125 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
4126 } else if (tg3_flag(tp
, 5780_CLASS
) ||
4127 tg3_flag(tp
, CPMU_PRESENT
) ||
4128 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
4130 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
4131 u32 newbits1
, newbits2
;
4133 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4134 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4135 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
4136 CLOCK_CTRL_TXCLK_DISABLE
|
4138 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
4139 } else if (tg3_flag(tp
, 5705_PLUS
)) {
4140 newbits1
= CLOCK_CTRL_625_CORE
;
4141 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
4143 newbits1
= CLOCK_CTRL_ALTCLK
;
4144 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
4147 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
4150 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
4153 if (!tg3_flag(tp
, 5705_PLUS
)) {
4156 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4157 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4158 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
4159 CLOCK_CTRL_TXCLK_DISABLE
|
4160 CLOCK_CTRL_44MHZ_CORE
);
4162 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
4165 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
4166 tp
->pci_clock_ctrl
| newbits3
, 40);
4170 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
4171 tg3_power_down_phy(tp
, do_low_power
);
4173 tg3_frob_aux_power(tp
, true);
4175 /* Workaround for unstable PLL clock */
4176 if ((!tg3_flag(tp
, IS_SSB_CORE
)) &&
4177 ((tg3_chip_rev(tp
) == CHIPREV_5750_AX
) ||
4178 (tg3_chip_rev(tp
) == CHIPREV_5750_BX
))) {
4179 u32 val
= tr32(0x7d00);
4181 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4183 if (!tg3_flag(tp
, ENABLE_ASF
)) {
4186 err
= tg3_nvram_lock(tp
);
4187 tg3_halt_cpu(tp
, RX_CPU_BASE
);
4189 tg3_nvram_unlock(tp
);
4193 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
{
	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
	case MII_TG3_AUX_STAT_10HALF:
		*speed = SPEED_10;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_10FULL:
		*speed = SPEED_10;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_100HALF:
		*speed = SPEED_100;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_100FULL:
		*speed = SPEED_100;
		*duplex = DUPLEX_FULL;
		break;

	case MII_TG3_AUX_STAT_1000HALF:
		*speed = SPEED_1000;
		*duplex = DUPLEX_HALF;
		break;

	case MII_TG3_AUX_STAT_1000FULL:
		*speed = SPEED_1000;
		*duplex = DUPLEX_FULL;
		break;

	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
				 SPEED_10;
			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
				  DUPLEX_HALF;
			break;
		}
		*speed = SPEED_UNKNOWN;
		*duplex = DUPLEX_UNKNOWN;
		break;
	}
}
4253 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
4258 new_adv
= ADVERTISE_CSMA
;
4259 new_adv
|= ethtool_adv_to_mii_adv_t(advertise
) & ADVERTISE_ALL
;
4260 new_adv
|= mii_advertise_flowctrl(flowctrl
);
4262 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4266 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4267 new_adv
= ethtool_adv_to_mii_ctrl1000_t(advertise
);
4269 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4270 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)
4271 new_adv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4273 err
= tg3_writephy(tp
, MII_CTRL1000
, new_adv
);
4278 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4281 tw32(TG3_CPMU_EEE_MODE
,
4282 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
4284 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
4289 /* Advertise 100-BaseTX EEE ability */
4290 if (advertise
& ADVERTISED_100baseT_Full
)
4291 val
|= MDIO_AN_EEE_ADV_100TX
;
4292 /* Advertise 1000-BaseT EEE ability */
4293 if (advertise
& ADVERTISED_1000baseT_Full
)
4294 val
|= MDIO_AN_EEE_ADV_1000T
;
4295 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
4299 switch (tg3_asic_rev(tp
)) {
4301 case ASIC_REV_57765
:
4302 case ASIC_REV_57766
:
4304 /* If we advertised any eee advertisements above... */
4306 val
= MII_TG3_DSP_TAP26_ALNOKO
|
4307 MII_TG3_DSP_TAP26_RMRXSTO
|
4308 MII_TG3_DSP_TAP26_OPCSINPT
;
4309 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
4313 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
4314 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
4315 MII_TG3_DSP_CH34TP2_HIBW01
);
4318 err2
= tg3_phy_toggle_auxctl_smdsp(tp
, false);
4327 static void tg3_phy_copper_begin(struct tg3
*tp
)
4329 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
||
4330 (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4333 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
4334 !(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)) {
4335 adv
= ADVERTISED_10baseT_Half
|
4336 ADVERTISED_10baseT_Full
;
4337 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4338 adv
|= ADVERTISED_100baseT_Half
|
4339 ADVERTISED_100baseT_Full
;
4340 if (tp
->phy_flags
& TG3_PHYFLG_1G_ON_VAUX_OK
)
4341 adv
|= ADVERTISED_1000baseT_Half
|
4342 ADVERTISED_1000baseT_Full
;
4344 fc
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
4346 adv
= tp
->link_config
.advertising
;
4347 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
4348 adv
&= ~(ADVERTISED_1000baseT_Half
|
4349 ADVERTISED_1000baseT_Full
);
4351 fc
= tp
->link_config
.flowctrl
;
4354 tg3_phy_autoneg_cfg(tp
, adv
, fc
);
4356 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
4357 (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)) {
4358 /* Normally during power down we want to autonegotiate
4359 * the lowest possible speed for WOL. However, to avoid
4360 * link flap, we leave it untouched.
4365 tg3_writephy(tp
, MII_BMCR
,
4366 BMCR_ANENABLE
| BMCR_ANRESTART
);
4369 u32 bmcr
, orig_bmcr
;
4371 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
4372 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
4374 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
4375 /* With autoneg disabled, 5715 only links up when the
4376 * advertisement register has the configured speed
4379 tg3_writephy(tp
, MII_ADVERTISE
, ADVERTISE_ALL
);
4383 switch (tp
->link_config
.speed
) {
4389 bmcr
|= BMCR_SPEED100
;
4393 bmcr
|= BMCR_SPEED1000
;
4397 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4398 bmcr
|= BMCR_FULLDPLX
;
4400 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
4401 (bmcr
!= orig_bmcr
)) {
4402 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
4403 for (i
= 0; i
< 1500; i
++) {
4407 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
4408 tg3_readphy(tp
, MII_BMSR
, &tmp
))
4410 if (!(tmp
& BMSR_LSTATUS
)) {
4415 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4421 static int tg3_phy_pull_config(struct tg3
*tp
)
4426 err
= tg3_readphy(tp
, MII_BMCR
, &val
);
4430 if (!(val
& BMCR_ANENABLE
)) {
4431 tp
->link_config
.autoneg
= AUTONEG_DISABLE
;
4432 tp
->link_config
.advertising
= 0;
4433 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
4437 switch (val
& (BMCR_SPEED1000
| BMCR_SPEED100
)) {
4439 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
4442 tp
->link_config
.speed
= SPEED_10
;
4445 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
4448 tp
->link_config
.speed
= SPEED_100
;
4450 case BMCR_SPEED1000
:
4451 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4452 tp
->link_config
.speed
= SPEED_1000
;
4460 if (val
& BMCR_FULLDPLX
)
4461 tp
->link_config
.duplex
= DUPLEX_FULL
;
4463 tp
->link_config
.duplex
= DUPLEX_HALF
;
4465 tp
->link_config
.flowctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
4471 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
4472 tp
->link_config
.advertising
= ADVERTISED_Autoneg
;
4473 tg3_flag_set(tp
, PAUSE_AUTONEG
);
4475 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
4478 err
= tg3_readphy(tp
, MII_ADVERTISE
, &val
);
4482 adv
= mii_adv_to_ethtool_adv_t(val
& ADVERTISE_ALL
);
4483 tp
->link_config
.advertising
|= adv
| ADVERTISED_TP
;
4485 tp
->link_config
.flowctrl
= tg3_decode_flowctrl_1000T(val
);
4487 tp
->link_config
.advertising
|= ADVERTISED_FIBRE
;
4490 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4493 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
4494 err
= tg3_readphy(tp
, MII_CTRL1000
, &val
);
4498 adv
= mii_ctrl1000_to_ethtool_adv_t(val
);
4500 err
= tg3_readphy(tp
, MII_ADVERTISE
, &val
);
4504 adv
= tg3_decode_flowctrl_1000X(val
);
4505 tp
->link_config
.flowctrl
= adv
;
4507 val
&= (ADVERTISE_1000XHALF
| ADVERTISE_1000XFULL
);
4508 adv
= mii_adv_to_ethtool_adv_x(val
);
4511 tp
->link_config
.advertising
|= adv
;
4518 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
4522 /* Turn off tap power management. */
4523 /* Set Extended packet length bit */
4524 err
= tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
4526 err
|= tg3_phydsp_write(tp
, 0x0012, 0x1804);
4527 err
|= tg3_phydsp_write(tp
, 0x0013, 0x1204);
4528 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0132);
4529 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0232);
4530 err
|= tg3_phydsp_write(tp
, 0x201f, 0x0a20);
4537 static bool tg3_phy_eee_config_ok(struct tg3
*tp
)
4541 u32 advertising
= tp
->link_config
.advertising
;
4543 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4546 if (tg3_phy_cl45_read(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, &val
))
4549 val
&= (MDIO_AN_EEE_ADV_100TX
| MDIO_AN_EEE_ADV_1000T
);
4552 if (advertising
& ADVERTISED_100baseT_Full
)
4553 tgtadv
|= MDIO_AN_EEE_ADV_100TX
;
4554 if (advertising
& ADVERTISED_1000baseT_Full
)
4555 tgtadv
|= MDIO_AN_EEE_ADV_1000T
;
4563 static bool tg3_phy_copper_an_config_ok(struct tg3
*tp
, u32
*lcladv
)
4565 u32 advmsk
, tgtadv
, advertising
;
4567 advertising
= tp
->link_config
.advertising
;
4568 tgtadv
= ethtool_adv_to_mii_adv_t(advertising
) & ADVERTISE_ALL
;
4570 advmsk
= ADVERTISE_ALL
;
4571 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4572 tgtadv
|= mii_advertise_flowctrl(tp
->link_config
.flowctrl
);
4573 advmsk
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
4576 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
4579 if ((*lcladv
& advmsk
) != tgtadv
)
4582 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4585 tgtadv
= ethtool_adv_to_mii_ctrl1000_t(advertising
);
4587 if (tg3_readphy(tp
, MII_CTRL1000
, &tg3_ctrl
))
4591 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4592 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)) {
4593 tgtadv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4594 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
|
4595 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
4597 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
);
4600 if (tg3_ctrl
!= tgtadv
)
4607 static bool tg3_phy_copper_fetch_rmtadv(struct tg3
*tp
, u32
*rmtadv
)
4611 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4614 if (tg3_readphy(tp
, MII_STAT1000
, &val
))
4617 lpeth
= mii_stat1000_to_ethtool_lpa_t(val
);
4620 if (tg3_readphy(tp
, MII_LPA
, rmtadv
))
4623 lpeth
|= mii_lpa_to_ethtool_lpa_t(*rmtadv
);
4624 tp
->link_config
.rmt_adv
= lpeth
;
static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
{
	if (curr_link_up != tp->link_up) {
		if (curr_link_up) {
			netif_carrier_on(tp->dev);
		} else {
			netif_carrier_off(tp->dev);
			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}

		tg3_link_report(tp);
		return true;
	}

	return false;
}

static void tg3_clear_mac_status(struct tg3 *tp)
{
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	       MAC_STATUS_SYNC_CHANGED |
	       MAC_STATUS_CFG_CHANGED |
	       MAC_STATUS_MI_COMPLETION |
	       MAC_STATUS_LNKSTATE_CHANGED);
	udelay(40);
}
4659 static int tg3_setup_copper_phy(struct tg3
*tp
, bool force_reset
)
4661 bool current_link_up
;
4663 u32 lcl_adv
, rmt_adv
;
4668 tg3_clear_mac_status(tp
);
4670 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4672 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4676 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4678 /* Some third-party PHYs need to be reset on link going
4681 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
||
4682 tg3_asic_rev(tp
) == ASIC_REV_5704
||
4683 tg3_asic_rev(tp
) == ASIC_REV_5705
) &&
4685 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4686 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4687 !(bmsr
& BMSR_LSTATUS
))
4693 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4694 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4695 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4696 !tg3_flag(tp
, INIT_COMPLETE
))
4699 if (!(bmsr
& BMSR_LSTATUS
)) {
4700 err
= tg3_init_5401phy_dsp(tp
);
4704 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4705 for (i
= 0; i
< 1000; i
++) {
4707 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4708 (bmsr
& BMSR_LSTATUS
)) {
4714 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4715 TG3_PHY_REV_BCM5401_B0
&&
4716 !(bmsr
& BMSR_LSTATUS
) &&
4717 tp
->link_config
.active_speed
== SPEED_1000
) {
4718 err
= tg3_phy_reset(tp
);
4720 err
= tg3_init_5401phy_dsp(tp
);
4725 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4726 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
) {
4727 /* 5701 {A0,B0} CRC bug workaround */
4728 tg3_writephy(tp
, 0x15, 0x0a75);
4729 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4730 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4731 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4734 /* Clear pending interrupts... */
4735 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4736 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4738 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4739 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4740 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4741 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4743 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4744 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4745 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4746 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4747 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4749 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4752 current_link_up
= false;
4753 current_speed
= SPEED_UNKNOWN
;
4754 current_duplex
= DUPLEX_UNKNOWN
;
4755 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4756 tp
->link_config
.rmt_adv
= 0;
4758 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4759 err
= tg3_phy_auxctl_read(tp
,
4760 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4762 if (!err
&& !(val
& (1 << 10))) {
4763 tg3_phy_auxctl_write(tp
,
4764 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4771 for (i
= 0; i
< 100; i
++) {
4772 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4773 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4774 (bmsr
& BMSR_LSTATUS
))
4779 if (bmsr
& BMSR_LSTATUS
) {
4782 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4783 for (i
= 0; i
< 2000; i
++) {
4785 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4790 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4795 for (i
= 0; i
< 200; i
++) {
4796 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4797 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4799 if (bmcr
&& bmcr
!= 0x7fff)
4807 tp
->link_config
.active_speed
= current_speed
;
4808 tp
->link_config
.active_duplex
= current_duplex
;
4810 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4811 bool eee_config_ok
= tg3_phy_eee_config_ok(tp
);
4813 if ((bmcr
& BMCR_ANENABLE
) &&
4815 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4816 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4817 current_link_up
= true;
4819 /* EEE settings changes take effect only after a phy
4820 * reset. If we have skipped a reset due to Link Flap
4821 * Avoidance being enabled, do it now.
4823 if (!eee_config_ok
&&
4824 (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
4828 if (!(bmcr
& BMCR_ANENABLE
) &&
4829 tp
->link_config
.speed
== current_speed
&&
4830 tp
->link_config
.duplex
== current_duplex
) {
4831 current_link_up
= true;
4835 if (current_link_up
&&
4836 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4839 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4840 reg
= MII_TG3_FET_GEN_STAT
;
4841 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4843 reg
= MII_TG3_EXT_STAT
;
4844 bit
= MII_TG3_EXT_STAT_MDIX
;
4847 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4848 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4850 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4855 if (!current_link_up
|| (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4856 tg3_phy_copper_begin(tp
);
4858 if (tg3_flag(tp
, ROBOSWITCH
)) {
4859 current_link_up
= true;
4860 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4861 current_speed
= SPEED_1000
;
4862 current_duplex
= DUPLEX_FULL
;
4863 tp
->link_config
.active_speed
= current_speed
;
4864 tp
->link_config
.active_duplex
= current_duplex
;
4867 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4868 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4869 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4870 current_link_up
= true;
4873 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4874 if (current_link_up
) {
4875 if (tp
->link_config
.active_speed
== SPEED_100
||
4876 tp
->link_config
.active_speed
== SPEED_10
)
4877 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4879 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4880 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4881 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4883 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4885 /* In order for the 5750 core in BCM4785 chip to work properly
4886 * in RGMII mode, the Led Control Register must be set up.
4888 if (tg3_flag(tp
, RGMII_MODE
)) {
4889 u32 led_ctrl
= tr32(MAC_LED_CTRL
);
4890 led_ctrl
&= ~(LED_CTRL_1000MBPS_ON
| LED_CTRL_100MBPS_ON
);
4892 if (tp
->link_config
.active_speed
== SPEED_10
)
4893 led_ctrl
|= LED_CTRL_LNKLED_OVERRIDE
;
4894 else if (tp
->link_config
.active_speed
== SPEED_100
)
4895 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4896 LED_CTRL_100MBPS_ON
);
4897 else if (tp
->link_config
.active_speed
== SPEED_1000
)
4898 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4899 LED_CTRL_1000MBPS_ON
);
4901 tw32(MAC_LED_CTRL
, led_ctrl
);
4905 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4906 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4907 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4909 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4910 if (current_link_up
&&
4911 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4912 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4914 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4917 /* ??? Without this setting Netgear GA302T PHY does not
4918 * ??? send/receive packets...
4920 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4921 tg3_chip_rev_id(tp
) == CHIPREV_ID_5700_ALTIMA
) {
4922 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4923 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
4927 tw32_f(MAC_MODE
, tp
->mac_mode
);
4930 tg3_phy_eee_adjust(tp
, current_link_up
);
4932 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
4933 /* Polled via timer. */
4934 tw32_f(MAC_EVENT
, 0);
4936 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4940 if (tg3_asic_rev(tp
) == ASIC_REV_5700
&&
4942 tp
->link_config
.active_speed
== SPEED_1000
&&
4943 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
4946 (MAC_STATUS_SYNC_CHANGED
|
4947 MAC_STATUS_CFG_CHANGED
));
4950 NIC_SRAM_FIRMWARE_MBOX
,
4951 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
4954 /* Prevent send BD corruption. */
4955 if (tg3_flag(tp
, CLKREQ_BUG
)) {
4956 if (tp
->link_config
.active_speed
== SPEED_100
||
4957 tp
->link_config
.active_speed
== SPEED_10
)
4958 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4959 PCI_EXP_LNKCTL_CLKREQ_EN
);
4961 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4962 PCI_EXP_LNKCTL_CLKREQ_EN
);
4965 tg3_test_and_report_link_chg(tp
, current_link_up
);
struct tg3_fiber_aneginfo {
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000
#define MR_LINK_OK		0x80000000

	unsigned long link_time, cur_time;

	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

#define ANEG_TIMER_ENAB		2
#define ANEG_FAILED		-1

#define ANEG_STATE_SETTLE_TIME	10000
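/* Illustrative note (not from the original source): on a successful
 * negotiation the state machine below walks the "happy path"
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * returning ANEG_TIMER_ENAB whenever the caller should keep ticking the
 * machine, which is exactly how fiber_autoneg() drives it further down.
 */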
5034 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
5035 struct tg3_fiber_aneginfo
*ap
)
5038 unsigned long delta
;
5042 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
5046 ap
->ability_match_cfg
= 0;
5047 ap
->ability_match_count
= 0;
5048 ap
->ability_match
= 0;
5054 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
5055 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
5057 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
5058 ap
->ability_match_cfg
= rx_cfg_reg
;
5059 ap
->ability_match
= 0;
5060 ap
->ability_match_count
= 0;
5062 if (++ap
->ability_match_count
> 1) {
5063 ap
->ability_match
= 1;
5064 ap
->ability_match_cfg
= rx_cfg_reg
;
5067 if (rx_cfg_reg
& ANEG_CFG_ACK
)
5075 ap
->ability_match_cfg
= 0;
5076 ap
->ability_match_count
= 0;
5077 ap
->ability_match
= 0;
5083 ap
->rxconfig
= rx_cfg_reg
;
5086 switch (ap
->state
) {
5087 case ANEG_STATE_UNKNOWN
:
5088 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
5089 ap
->state
= ANEG_STATE_AN_ENABLE
;
5092 case ANEG_STATE_AN_ENABLE
:
5093 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
5094 if (ap
->flags
& MR_AN_ENABLE
) {
5097 ap
->ability_match_cfg
= 0;
5098 ap
->ability_match_count
= 0;
5099 ap
->ability_match
= 0;
5103 ap
->state
= ANEG_STATE_RESTART_INIT
;
5105 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
5109 case ANEG_STATE_RESTART_INIT
:
5110 ap
->link_time
= ap
->cur_time
;
5111 ap
->flags
&= ~(MR_NP_LOADED
);
5113 tw32(MAC_TX_AUTO_NEG
, 0);
5114 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5115 tw32_f(MAC_MODE
, tp
->mac_mode
);
5118 ret
= ANEG_TIMER_ENAB
;
5119 ap
->state
= ANEG_STATE_RESTART
;
5122 case ANEG_STATE_RESTART
:
5123 delta
= ap
->cur_time
- ap
->link_time
;
5124 if (delta
> ANEG_STATE_SETTLE_TIME
)
5125 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
5127 ret
= ANEG_TIMER_ENAB
;
5130 case ANEG_STATE_DISABLE_LINK_OK
:
5134 case ANEG_STATE_ABILITY_DETECT_INIT
:
5135 ap
->flags
&= ~(MR_TOGGLE_TX
);
5136 ap
->txconfig
= ANEG_CFG_FD
;
5137 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5138 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5139 ap
->txconfig
|= ANEG_CFG_PS1
;
5140 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5141 ap
->txconfig
|= ANEG_CFG_PS2
;
5142 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
5143 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5144 tw32_f(MAC_MODE
, tp
->mac_mode
);
5147 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
5150 case ANEG_STATE_ABILITY_DETECT
:
5151 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
5152 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
5155 case ANEG_STATE_ACK_DETECT_INIT
:
5156 ap
->txconfig
|= ANEG_CFG_ACK
;
5157 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
5158 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5159 tw32_f(MAC_MODE
, tp
->mac_mode
);
5162 ap
->state
= ANEG_STATE_ACK_DETECT
;
5165 case ANEG_STATE_ACK_DETECT
:
5166 if (ap
->ack_match
!= 0) {
5167 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
5168 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
5169 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
5171 ap
->state
= ANEG_STATE_AN_ENABLE
;
5173 } else if (ap
->ability_match
!= 0 &&
5174 ap
->rxconfig
== 0) {
5175 ap
->state
= ANEG_STATE_AN_ENABLE
;
5179 case ANEG_STATE_COMPLETE_ACK_INIT
:
5180 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
5184 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
5185 MR_LP_ADV_HALF_DUPLEX
|
5186 MR_LP_ADV_SYM_PAUSE
|
5187 MR_LP_ADV_ASYM_PAUSE
|
5188 MR_LP_ADV_REMOTE_FAULT1
|
5189 MR_LP_ADV_REMOTE_FAULT2
|
5190 MR_LP_ADV_NEXT_PAGE
|
5193 if (ap
->rxconfig
& ANEG_CFG_FD
)
5194 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
5195 if (ap
->rxconfig
& ANEG_CFG_HD
)
5196 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
5197 if (ap
->rxconfig
& ANEG_CFG_PS1
)
5198 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
5199 if (ap
->rxconfig
& ANEG_CFG_PS2
)
5200 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
5201 if (ap
->rxconfig
& ANEG_CFG_RF1
)
5202 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
5203 if (ap
->rxconfig
& ANEG_CFG_RF2
)
5204 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
5205 if (ap
->rxconfig
& ANEG_CFG_NP
)
5206 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
5208 ap
->link_time
= ap
->cur_time
;
5210 ap
->flags
^= (MR_TOGGLE_TX
);
5211 if (ap
->rxconfig
& 0x0008)
5212 ap
->flags
|= MR_TOGGLE_RX
;
5213 if (ap
->rxconfig
& ANEG_CFG_NP
)
5214 ap
->flags
|= MR_NP_RX
;
5215 ap
->flags
|= MR_PAGE_RX
;
5217 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
5218 ret
= ANEG_TIMER_ENAB
;
5221 case ANEG_STATE_COMPLETE_ACK
:
5222 if (ap
->ability_match
!= 0 &&
5223 ap
->rxconfig
== 0) {
5224 ap
->state
= ANEG_STATE_AN_ENABLE
;
5227 delta
= ap
->cur_time
- ap
->link_time
;
5228 if (delta
> ANEG_STATE_SETTLE_TIME
) {
5229 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
5230 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
5232 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
5233 !(ap
->flags
& MR_NP_RX
)) {
5234 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
5242 case ANEG_STATE_IDLE_DETECT_INIT
:
5243 ap
->link_time
= ap
->cur_time
;
5244 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
5245 tw32_f(MAC_MODE
, tp
->mac_mode
);
5248 ap
->state
= ANEG_STATE_IDLE_DETECT
;
5249 ret
= ANEG_TIMER_ENAB
;
5252 case ANEG_STATE_IDLE_DETECT
:
5253 if (ap
->ability_match
!= 0 &&
5254 ap
->rxconfig
== 0) {
5255 ap
->state
= ANEG_STATE_AN_ENABLE
;
5258 delta
= ap
->cur_time
- ap
->link_time
;
5259 if (delta
> ANEG_STATE_SETTLE_TIME
) {
5260 /* XXX another gem from the Broadcom driver :( */
5261 ap
->state
= ANEG_STATE_LINK_OK
;
5265 case ANEG_STATE_LINK_OK
:
5266 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
5270 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
5271 /* ??? unimplemented */
5274 case ANEG_STATE_NEXT_PAGE_WAIT
:
5275 /* ??? unimplemented */
static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
	struct tg3_fiber_aneginfo aninfo;
	int status = ANEG_FAILED;

	tw32_f(MAC_TX_AUTO_NEG, 0);

	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);

	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);

	memset(&aninfo, 0, sizeof(aninfo));
	aninfo.flags |= MR_AN_ENABLE;
	aninfo.state = ANEG_STATE_UNKNOWN;
	aninfo.cur_time = 0;
	while (++tick < 195000) {
		status = tg3_fiber_aneg_smachine(tp, &aninfo);
		if (status == ANEG_DONE || status == ANEG_FAILED)

	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
	tw32_f(MAC_MODE, tp->mac_mode);

	*txflags = aninfo.txconfig;
	*rxflags = aninfo.flags;

	if (status == ANEG_DONE &&
	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
			     MR_LP_ADV_FULL_DUPLEX)))
static void tg3_init_bcm8002(struct tg3 *tp)
	u32 mac_status = tr32(MAC_STATUS);

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)

	/* Deselect the channel register so we can read the PHYID */
	tg3_writephy(tp, 0x10, 0x8011);
5381 static bool tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
5384 bool current_link_up
;
5385 u32 sg_dig_ctrl
, sg_dig_status
;
5386 u32 serdes_cfg
, expected_sg_dig_ctrl
;
5387 int workaround
, port_a
;
5390 expected_sg_dig_ctrl
= 0;
5393 current_link_up
= false;
5395 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A0
&&
5396 tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A1
) {
5398 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
5401 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5402 /* preserve bits 20-23 for voltage regulator */
5403 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
5406 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
5408 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
5409 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
5411 u32 val
= serdes_cfg
;
5417 tw32_f(MAC_SERDES_CFG
, val
);
5420 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5422 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
5423 tg3_setup_flow_control(tp
, 0, 0);
5424 current_link_up
= true;
5429 /* Want auto-negotiation. */
5430 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
5432 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5433 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5434 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
5435 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5436 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
5438 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
5439 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
5440 tp
->serdes_counter
&&
5441 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
5442 MAC_STATUS_RCVD_CFG
)) ==
5443 MAC_STATUS_PCS_SYNCED
)) {
5444 tp
->serdes_counter
--;
5445 current_link_up
= true;
5450 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
5451 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
5453 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
5455 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5456 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5457 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
5458 MAC_STATUS_SIGNAL_DET
)) {
5459 sg_dig_status
= tr32(SG_DIG_STATUS
);
5460 mac_status
= tr32(MAC_STATUS
);
5462 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
5463 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
5464 u32 local_adv
= 0, remote_adv
= 0;
5466 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
5467 local_adv
|= ADVERTISE_1000XPAUSE
;
5468 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
5469 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5471 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
5472 remote_adv
|= LPA_1000XPAUSE
;
5473 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
5474 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5476 tp
->link_config
.rmt_adv
=
5477 mii_adv_to_ethtool_adv_x(remote_adv
);
5479 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5480 current_link_up
= true;
5481 tp
->serdes_counter
= 0;
5482 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5483 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
5484 if (tp
->serdes_counter
)
5485 tp
->serdes_counter
--;
5488 u32 val
= serdes_cfg
;
5495 tw32_f(MAC_SERDES_CFG
, val
);
5498 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5501 /* Link parallel detection - link is up */
5502 /* only if we have PCS_SYNC and not */
5503 /* receiving config code words */
5504 mac_status
= tr32(MAC_STATUS
);
5505 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5506 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
5507 tg3_setup_flow_control(tp
, 0, 0);
5508 current_link_up
= true;
5510 TG3_PHYFLG_PARALLEL_DETECT
;
5511 tp
->serdes_counter
=
5512 SERDES_PARALLEL_DET_TIMEOUT
;
5514 goto restart_autoneg
;
5518 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5519 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5523 return current_link_up
;
5526 static bool tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
5528 bool current_link_up
= false;
5530 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
5533 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5534 u32 txflags
, rxflags
;
5537 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
5538 u32 local_adv
= 0, remote_adv
= 0;
5540 if (txflags
& ANEG_CFG_PS1
)
5541 local_adv
|= ADVERTISE_1000XPAUSE
;
5542 if (txflags
& ANEG_CFG_PS2
)
5543 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5545 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
5546 remote_adv
|= LPA_1000XPAUSE
;
5547 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
5548 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5550 tp
->link_config
.rmt_adv
=
5551 mii_adv_to_ethtool_adv_x(remote_adv
);
5553 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5555 current_link_up
= true;
5557 for (i
= 0; i
< 30; i
++) {
5560 (MAC_STATUS_SYNC_CHANGED
|
5561 MAC_STATUS_CFG_CHANGED
));
5563 if ((tr32(MAC_STATUS
) &
5564 (MAC_STATUS_SYNC_CHANGED
|
5565 MAC_STATUS_CFG_CHANGED
)) == 0)
5569 mac_status
= tr32(MAC_STATUS
);
5570 if (!current_link_up
&&
5571 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5572 !(mac_status
& MAC_STATUS_RCVD_CFG
))
5573 current_link_up
= true;
5575 tg3_setup_flow_control(tp
, 0, 0);
5577 /* Forcing 1000FD link up. */
5578 current_link_up
= true;
5580 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
5583 tw32_f(MAC_MODE
, tp
->mac_mode
);
5588 return current_link_up
;
5591 static int tg3_setup_fiber_phy(struct tg3
*tp
, bool force_reset
)
5594 u16 orig_active_speed
;
5595 u8 orig_active_duplex
;
5597 bool current_link_up
;
5600 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5601 orig_active_speed
= tp
->link_config
.active_speed
;
5602 orig_active_duplex
= tp
->link_config
.active_duplex
;
5604 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5606 tg3_flag(tp
, INIT_COMPLETE
)) {
5607 mac_status
= tr32(MAC_STATUS
);
5608 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5609 MAC_STATUS_SIGNAL_DET
|
5610 MAC_STATUS_CFG_CHANGED
|
5611 MAC_STATUS_RCVD_CFG
);
5612 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5613 MAC_STATUS_SIGNAL_DET
)) {
5614 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5615 MAC_STATUS_CFG_CHANGED
));
5620 tw32_f(MAC_TX_AUTO_NEG
, 0);
5622 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5623 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5624 tw32_f(MAC_MODE
, tp
->mac_mode
);
5627 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5628 tg3_init_bcm8002(tp
);
5630 /* Enable link change event even when serdes polling. */
5631 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5634 current_link_up
= false;
5635 tp
->link_config
.rmt_adv
= 0;
5636 mac_status
= tr32(MAC_STATUS
);
5638 if (tg3_flag(tp
, HW_AUTONEG
))
5639 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5641 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5643 tp
->napi
[0].hw_status
->status
=
5644 (SD_STATUS_UPDATED
|
5645 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5647 for (i
= 0; i
< 100; i
++) {
5648 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5649 MAC_STATUS_CFG_CHANGED
));
5651 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5652 MAC_STATUS_CFG_CHANGED
|
5653 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5657 mac_status
= tr32(MAC_STATUS
);
5658 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5659 current_link_up
= false;
5660 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5661 tp
->serdes_counter
== 0) {
5662 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5663 MAC_MODE_SEND_CONFIGS
));
5665 tw32_f(MAC_MODE
, tp
->mac_mode
);
5669 if (current_link_up
) {
5670 tp
->link_config
.active_speed
= SPEED_1000
;
5671 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5672 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5673 LED_CTRL_LNKLED_OVERRIDE
|
5674 LED_CTRL_1000MBPS_ON
));
5676 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5677 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5678 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5679 LED_CTRL_LNKLED_OVERRIDE
|
5680 LED_CTRL_TRAFFIC_OVERRIDE
));
5683 if (!tg3_test_and_report_link_chg(tp
, current_link_up
)) {
5684 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5685 if (orig_pause_cfg
!= now_pause_cfg
||
5686 orig_active_speed
!= tp
->link_config
.active_speed
||
5687 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5688 tg3_link_report(tp
);
5694 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, bool force_reset
)
5698 u16 current_speed
= SPEED_UNKNOWN
;
5699 u8 current_duplex
= DUPLEX_UNKNOWN
;
5700 bool current_link_up
= false;
5701 u32 local_adv
, remote_adv
, sgsr
;
5703 if ((tg3_asic_rev(tp
) == ASIC_REV_5719
||
5704 tg3_asic_rev(tp
) == ASIC_REV_5720
) &&
5705 !tg3_readphy(tp
, SERDES_TG3_1000X_STATUS
, &sgsr
) &&
5706 (sgsr
& SERDES_TG3_SGMII_MODE
)) {
5711 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
5713 if (!(sgsr
& SERDES_TG3_LINK_UP
)) {
5714 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5716 current_link_up
= true;
5717 if (sgsr
& SERDES_TG3_SPEED_1000
) {
5718 current_speed
= SPEED_1000
;
5719 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5720 } else if (sgsr
& SERDES_TG3_SPEED_100
) {
5721 current_speed
= SPEED_100
;
5722 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
5724 current_speed
= SPEED_10
;
5725 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
5728 if (sgsr
& SERDES_TG3_FULL_DUPLEX
)
5729 current_duplex
= DUPLEX_FULL
;
5731 current_duplex
= DUPLEX_HALF
;
5734 tw32_f(MAC_MODE
, tp
->mac_mode
);
5737 tg3_clear_mac_status(tp
);
5739 goto fiber_setup_done
;
5742 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5743 tw32_f(MAC_MODE
, tp
->mac_mode
);
5746 tg3_clear_mac_status(tp
);
5751 tp
->link_config
.rmt_adv
= 0;
5753 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5754 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5755 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5756 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5757 bmsr
|= BMSR_LSTATUS
;
5759 bmsr
&= ~BMSR_LSTATUS
;
5762 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5764 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5765 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5766 /* do nothing, just check for link up at the end */
5767 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5770 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5771 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5772 ADVERTISE_1000XPAUSE
|
5773 ADVERTISE_1000XPSE_ASYM
|
5776 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5777 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5779 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5780 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5781 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5782 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5784 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5785 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5786 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5793 bmcr
&= ~BMCR_SPEED1000
;
5794 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5796 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5797 new_bmcr
|= BMCR_FULLDPLX
;
5799 if (new_bmcr
!= bmcr
) {
5800 /* BMCR_SPEED1000 is a reserved bit that needs
5801 * to be set on write.
5803 new_bmcr
|= BMCR_SPEED1000
;
5805 /* Force a linkdown */
5809 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5810 adv
&= ~(ADVERTISE_1000XFULL
|
5811 ADVERTISE_1000XHALF
|
5813 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5814 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5818 tg3_carrier_off(tp
);
5820 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5822 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5823 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5824 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5825 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5826 bmsr
|= BMSR_LSTATUS
;
5828 bmsr
&= ~BMSR_LSTATUS
;
5830 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5834 if (bmsr
& BMSR_LSTATUS
) {
5835 current_speed
= SPEED_1000
;
5836 current_link_up
= true;
5837 if (bmcr
& BMCR_FULLDPLX
)
5838 current_duplex
= DUPLEX_FULL
;
5840 current_duplex
= DUPLEX_HALF
;
5845 if (bmcr
& BMCR_ANENABLE
) {
5848 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5849 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5850 common
= local_adv
& remote_adv
;
5851 if (common
& (ADVERTISE_1000XHALF
|
5852 ADVERTISE_1000XFULL
)) {
5853 if (common
& ADVERTISE_1000XFULL
)
5854 current_duplex
= DUPLEX_FULL
;
5856 current_duplex
= DUPLEX_HALF
;
5858 tp
->link_config
.rmt_adv
=
5859 mii_adv_to_ethtool_adv_x(remote_adv
);
5860 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5861 /* Link is up via parallel detect */
5863 current_link_up
= false;
5869 if (current_link_up
&& current_duplex
== DUPLEX_FULL
)
5870 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5872 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5873 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5874 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5876 tw32_f(MAC_MODE
, tp
->mac_mode
);
5879 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5881 tp
->link_config
.active_speed
= current_speed
;
5882 tp
->link_config
.active_duplex
= current_duplex
;
5884 tg3_test_and_report_link_chg(tp
, current_link_up
);
5888 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
5890 if (tp
->serdes_counter
) {
5891 /* Give autoneg time to complete. */
5892 tp
->serdes_counter
--;
5897 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
5900 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5901 if (bmcr
& BMCR_ANENABLE
) {
5904 /* Select shadow register 0x1f */
5905 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x7c00);
5906 tg3_readphy(tp
, MII_TG3_MISC_SHDW
, &phy1
);
5908 /* Select expansion interrupt status register */
5909 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5910 MII_TG3_DSP_EXP1_INT_STAT
);
5911 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5912 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5914 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
5915 /* We have signal detect and not receiving
5916 * config code words, link is up by parallel
5920 bmcr
&= ~BMCR_ANENABLE
;
5921 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
5922 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5923 tp
->phy_flags
|= TG3_PHYFLG_PARALLEL_DETECT
;
5926 } else if (tp
->link_up
&&
5927 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
5928 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5931 /* Select expansion interrupt status register */
5932 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5933 MII_TG3_DSP_EXP1_INT_STAT
);
5934 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5938 /* Config code words received, turn on autoneg. */
5939 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5940 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
5942 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5948 static int tg3_setup_phy(struct tg3
*tp
, bool force_reset
)
5953 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
5954 err
= tg3_setup_fiber_phy(tp
, force_reset
);
5955 else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
5956 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
5958 err
= tg3_setup_copper_phy(tp
, force_reset
);
5960 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
5963 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
5964 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
5966 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
5971 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
5972 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
5973 tw32(GRC_MISC_CFG
, val
);
5976 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5977 (6 << TX_LENGTHS_IPG_SHIFT
);
5978 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
5979 tg3_asic_rev(tp
) == ASIC_REV_5762
)
5980 val
|= tr32(MAC_TX_LENGTHS
) &
5981 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
5982 TX_LENGTHS_CNT_DWN_VAL_MSK
);
5984 if (tp
->link_config
.active_speed
== SPEED_1000
&&
5985 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5986 tw32(MAC_TX_LENGTHS
, val
|
5987 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
));
5989 tw32(MAC_TX_LENGTHS
, val
|
5990 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
5992 if (!tg3_flag(tp
, 5705_PLUS
)) {
5994 tw32(HOSTCC_STAT_COAL_TICKS
,
5995 tp
->coal
.stats_block_coalesce_usecs
);
5997 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
6001 if (tg3_flag(tp
, ASPM_WORKAROUND
)) {
6002 val
= tr32(PCIE_PWR_MGMT_THRESH
);
6004 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
6007 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
6008 tw32(PCIE_PWR_MGMT_THRESH
, val
);
/* tp->lock must be held */
static u64 tg3_refclk_read(struct tg3 *tp)
	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;

/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);

static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
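/* Worked example (illustrative, not from the original source): if
 * TG3_EAV_REF_CLCK_MSB reads 0x00000001 and TG3_EAV_REF_CLCK_LSB reads
 * 0x00000002, tg3_refclk_read() returns 0x0000000100000002, i.e.
 * (1ULL << 32) + 2 clock ticks; tg3_refclk_write() splits a 64-bit value
 * back into the same two 32-bit halves while the counter is stopped.
 */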
static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
	struct tg3 *tp = netdev_priv(dev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;

	if (tg3_flag(tp, PTP_CAPABLE)) {
		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
					 SOF_TIMESTAMPING_RX_HARDWARE |
					 SOF_TIMESTAMPING_RAW_HARDWARE;

		info->phc_index = ptp_clock_index(tp->ptp_clock);
		info->phc_index = -1;

	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
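/* Note (not from the original source): the capability bits filled in above
 * are what userspace sees from "ethtool -T <dev>", and the
 * HWTSTAMP_FILTER_* / HWTSTAMP_TX_* bits advertise which
 * struct hwtstamp_config values a SIOCSHWTSTAMP ioctl may request.
 */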
static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
	bool neg_adj = false;

	/* Frequency adjustment is performed using hardware with a 24 bit
	 * accumulator and a programmable correction value. On each clk, the
	 * correction value gets added to the accumulator and when it
	 * overflows, the time counter is incremented/decremented.
	 *
	 * So conversion from ppb to correction value is
	 *		ppb * (1 << 24) / 1000000000
	 */
	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
		     TG3_EAV_REF_CLK_CORRECT_MASK;

	tg3_full_lock(tp, 0);

	tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
	     TG3_EAV_REF_CLK_CORRECT_EN |
	     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
	tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);

	tg3_full_unlock(tp);
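/* Worked example (illustrative, not from the original source): a requested
 * offset of ppb = 1000 (1 ppm) maps to
 *
 *	correction = 1000 * (1 << 24) / 1000000000
 *		   = 16777216000 / 1000000000
 *		   = 16	(integer division)
 *
 * so roughly 16 is added to the 24-bit accumulator on every reference
 * clock tick, making it overflow about 16 times per 2^24 ticks and
 * nudging the counter by ~1 ppm as intended.
 */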
static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	tp->ptp_adjust += delta;
	tg3_full_unlock(tp);
static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	tg3_full_lock(tp, 0);
	ns = tg3_refclk_read(tp);
	ns += tp->ptp_adjust;
	tg3_full_unlock(tp);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;
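/* Worked example (illustrative, not from the original source): if the
 * adjusted hardware count is ns = 3700000123, then
 * div_u64_rem(ns, 1000000000, &remainder) yields ts->tv_sec = 3 and
 * ts->tv_nsec = 700000123, the normalized timespec handed back to the
 * PTP core.
 */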
static int tg3_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);

	ns = timespec_to_ns(ts);

	tg3_full_lock(tp, 0);
	tg3_refclk_write(tp, ns);
	tg3_full_unlock(tp);
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)

static const struct ptp_clock_info tg3_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "tg3 clock",
	.max_adj	= 250000000,
	.adjfreq	= tg3_ptp_adjfreq,
	.adjtime	= tg3_ptp_adjtime,
	.gettime	= tg3_ptp_gettime,
	.settime	= tg3_ptp_settime,
	.enable		= tg3_ptp_enable,
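/* Usage sketch (illustrative; the actual registration call site lives
 * elsewhere in this driver, not in this excerpt): a ptp_clock_info such
 * as tg3_ptp_caps is typically registered against the PCI device with
 *
 *	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 *	if (IS_ERR(tp->ptp_clock))
 *		tp->ptp_clock = NULL;
 *
 * and the matching ptp_clock_unregister() appears in tg3_ptp_fini() below.
 */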
static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
				     struct skb_shared_hwtstamps *timestamp)
	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
	if (!tg3_flag(tp, PTP_CAPABLE))

	/* Initialize the hardware clock to the system time. */
	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
	tp->ptp_info = tg3_ptp_caps;

/* tp->lock must be held */
static void tg3_ptp_resume(struct tg3 *tp)
	if (!tg3_flag(tp, PTP_CAPABLE))

	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);

static void tg3_ptp_fini(struct tg3 *tp)
	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)

	ptp_clock_unregister(tp->ptp_clock);
	tp->ptp_clock = NULL;
static inline int tg3_irq_sync(struct tg3 *tp)
	return tp->irq_sync;

static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
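/* Worked example (illustrative, not from the original source):
 * tg3_rd32_loop(tp, regs, 0x400, 0x08) reads the two 32-bit registers at
 * offsets 0x400 and 0x404 and stores them at regs[0x100] and regs[0x101],
 * i.e. the dump buffer is indexed by (register offset / sizeof(u32)),
 * not packed from zero. tg3_dump_legacy_regs() below relies on this.
 */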
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6265 static void tg3_dump_state(struct tg3
*tp
)
6270 regs
= kzalloc(TG3_REG_BLK_SIZE
, GFP_ATOMIC
);
6274 if (tg3_flag(tp
, PCI_EXPRESS
)) {
6275 /* Read up to but not including private PCI registers */
6276 for (i
= 0; i
< TG3_PCIE_TLDLPL_PORT
; i
+= sizeof(u32
))
6277 regs
[i
/ sizeof(u32
)] = tr32(i
);
6279 tg3_dump_legacy_regs(tp
, regs
);
6281 for (i
= 0; i
< TG3_REG_BLK_SIZE
/ sizeof(u32
); i
+= 4) {
6282 if (!regs
[i
+ 0] && !regs
[i
+ 1] &&
6283 !regs
[i
+ 2] && !regs
[i
+ 3])
6286 netdev_err(tp
->dev
, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6288 regs
[i
+ 0], regs
[i
+ 1], regs
[i
+ 2], regs
[i
+ 3]);
6293 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6294 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6296 /* SW status block */
6298 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6300 tnapi
->hw_status
->status
,
6301 tnapi
->hw_status
->status_tag
,
6302 tnapi
->hw_status
->rx_jumbo_consumer
,
6303 tnapi
->hw_status
->rx_consumer
,
6304 tnapi
->hw_status
->rx_mini_consumer
,
6305 tnapi
->hw_status
->idx
[0].rx_producer
,
6306 tnapi
->hw_status
->idx
[0].tx_consumer
);
6309 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6311 tnapi
->last_tag
, tnapi
->last_irq_tag
,
6312 tnapi
->tx_prod
, tnapi
->tx_cons
, tnapi
->tx_pending
,
6314 tnapi
->prodring
.rx_std_prod_idx
,
6315 tnapi
->prodring
.rx_std_cons_idx
,
6316 tnapi
->prodring
.rx_jmb_prod_idx
,
6317 tnapi
->prodring
.rx_jmb_cons_idx
);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 */
static void tg3_tx_recover(struct tg3 *tp)
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
	/* Tell compiler to fetch tx indices from memory. */
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
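/* Worked example (illustrative, not from the original source): with
 * TG3_TX_RING_SIZE == 512, tx_prod == 5 and tx_cons == 510, the number of
 * descriptors in flight is (5 - 510) & 511 == 7 because the unsigned
 * subtraction wraps, so with tx_pending == 511 the function reports
 * 504 free slots.
 */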
6351 /* Tigon3 never reports partial packet sends. So we do not
6352 * need special logic to handle SKBs that have not had all
6353 * of their frags sent yet, like SunGEM does.
6355 static void tg3_tx(struct tg3_napi
*tnapi
)
6357 struct tg3
*tp
= tnapi
->tp
;
6358 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
6359 u32 sw_idx
= tnapi
->tx_cons
;
6360 struct netdev_queue
*txq
;
6361 int index
= tnapi
- tp
->napi
;
6362 unsigned int pkts_compl
= 0, bytes_compl
= 0;
6364 if (tg3_flag(tp
, ENABLE_TSS
))
6367 txq
= netdev_get_tx_queue(tp
->dev
, index
);
6369 while (sw_idx
!= hw_idx
) {
6370 struct tg3_tx_ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
6371 struct sk_buff
*skb
= ri
->skb
;
6374 if (unlikely(skb
== NULL
)) {
6379 if (tnapi
->tx_ring
[sw_idx
].len_flags
& TXD_FLAG_HWTSTAMP
) {
6380 struct skb_shared_hwtstamps timestamp
;
6381 u64 hwclock
= tr32(TG3_TX_TSTAMP_LSB
);
6382 hwclock
|= (u64
)tr32(TG3_TX_TSTAMP_MSB
) << 32;
6384 tg3_hwclock_to_timestamp(tp
, hwclock
, ×tamp
);
6386 skb_tstamp_tx(skb
, ×tamp
);
6389 pci_unmap_single(tp
->pdev
,
6390 dma_unmap_addr(ri
, mapping
),
6396 while (ri
->fragmented
) {
6397 ri
->fragmented
= false;
6398 sw_idx
= NEXT_TX(sw_idx
);
6399 ri
= &tnapi
->tx_buffers
[sw_idx
];
6402 sw_idx
= NEXT_TX(sw_idx
);
6404 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6405 ri
= &tnapi
->tx_buffers
[sw_idx
];
6406 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
6409 pci_unmap_page(tp
->pdev
,
6410 dma_unmap_addr(ri
, mapping
),
6411 skb_frag_size(&skb_shinfo(skb
)->frags
[i
]),
6414 while (ri
->fragmented
) {
6415 ri
->fragmented
= false;
6416 sw_idx
= NEXT_TX(sw_idx
);
6417 ri
= &tnapi
->tx_buffers
[sw_idx
];
6420 sw_idx
= NEXT_TX(sw_idx
);
6424 bytes_compl
+= skb
->len
;
6428 if (unlikely(tx_bug
)) {
6434 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
6436 tnapi
->tx_cons
= sw_idx
;
6438 /* Need to make the tx_cons update visible to tg3_start_xmit()
6439 * before checking for netif_queue_stopped(). Without the
6440 * memory barrier, there is a small possibility that tg3_start_xmit()
6441 * will miss it and cause the queue to be stopped forever.
6445 if (unlikely(netif_tx_queue_stopped(txq
) &&
6446 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
6447 __netif_tx_lock(txq
, smp_processor_id());
6448 if (netif_tx_queue_stopped(txq
) &&
6449 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
6450 netif_tx_wake_queue(txq
);
6451 __netif_tx_unlock(txq
);
static void tg3_frag_free(bool is_frag, void *data)
		put_page(virt_to_head_page(data));

static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
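/* Note (not from the original source): skb_size recomputed here mirrors the
 * allocation size used by tg3_alloc_rx_data() below -- data payload plus
 * RX offset headroom plus the trailing struct skb_shared_info -- and the
 * "skb_size <= PAGE_SIZE" test selects the matching free path: page-frag
 * allocations are released via put_page(), while larger kmalloc()'d
 * buffers (see the > PAGE_SIZE case in tg3_alloc_rx_data()) take the
 * other branch of tg3_frag_free().
 */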
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
6489 static int tg3_alloc_rx_data(struct tg3
*tp
, struct tg3_rx_prodring_set
*tpr
,
6490 u32 opaque_key
, u32 dest_idx_unmasked
,
6491 unsigned int *frag_size
)
6493 struct tg3_rx_buffer_desc
*desc
;
6494 struct ring_info
*map
;
6497 int skb_size
, data_size
, dest_idx
;
6499 switch (opaque_key
) {
6500 case RXD_OPAQUE_RING_STD
:
6501 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6502 desc
= &tpr
->rx_std
[dest_idx
];
6503 map
= &tpr
->rx_std_buffers
[dest_idx
];
6504 data_size
= tp
->rx_pkt_map_sz
;
6507 case RXD_OPAQUE_RING_JUMBO
:
6508 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6509 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
6510 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
6511 data_size
= TG3_RX_JMB_MAP_SZ
;
6518 /* Do not overwrite any of the map or rp information
6519 * until we are sure we can commit to a new buffer.
6521 * Callers depend upon this behavior and assume that
6522 * we leave everything unchanged if we fail.
6524 skb_size
= SKB_DATA_ALIGN(data_size
+ TG3_RX_OFFSET(tp
)) +
6525 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
6526 if (skb_size
<= PAGE_SIZE
) {
6527 data
= netdev_alloc_frag(skb_size
);
6528 *frag_size
= skb_size
;
6530 data
= kmalloc(skb_size
, GFP_ATOMIC
);
6536 mapping
= pci_map_single(tp
->pdev
,
6537 data
+ TG3_RX_OFFSET(tp
),
6539 PCI_DMA_FROMDEVICE
);
6540 if (unlikely(pci_dma_mapping_error(tp
->pdev
, mapping
))) {
6541 tg3_frag_free(skb_size
<= PAGE_SIZE
, data
);
6546 dma_unmap_addr_set(map
, mapping
, mapping
);
6548 desc
->addr_hi
= ((u64
)mapping
>> 32);
6549 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
6554 /* We only need to move over in the address because the other
6555 * members of the RX descriptor are invariant. See notes above
6556 * tg3_alloc_rx_data for full details.
6558 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
6559 struct tg3_rx_prodring_set
*dpr
,
6560 u32 opaque_key
, int src_idx
,
6561 u32 dest_idx_unmasked
)
6563 struct tg3
*tp
= tnapi
->tp
;
6564 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
6565 struct ring_info
*src_map
, *dest_map
;
6566 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
6569 switch (opaque_key
) {
6570 case RXD_OPAQUE_RING_STD
:
6571 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6572 dest_desc
= &dpr
->rx_std
[dest_idx
];
6573 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
6574 src_desc
= &spr
->rx_std
[src_idx
];
6575 src_map
= &spr
->rx_std_buffers
[src_idx
];
6578 case RXD_OPAQUE_RING_JUMBO
:
6579 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6580 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
6581 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
6582 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
6583 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
6590 dest_map
->data
= src_map
->data
;
6591 dma_unmap_addr_set(dest_map
, mapping
,
6592 dma_unmap_addr(src_map
, mapping
));
6593 dest_desc
->addr_hi
= src_desc
->addr_hi
;
6594 dest_desc
->addr_lo
= src_desc
->addr_lo
;
6596 /* Ensure that the update to the skb happens after the physical
6597 * addresses have been transferred to the new BD location.
6601 src_map
->data
= NULL
;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
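/* Minimal sketch of the split-ring idea described above (illustrative
 * only; these field names are hypothetical, not the real tg3 descriptor
 * layout): the host only ever writes the buffer-post ring and the chip
 * only ever writes the return/status ring, so no cache line is written
 * by both sides.
 */
struct demo_rx_post_bd {		/* host -> chip: buffer to fill */
	u64 addr;			/* DMA address of the data buffer */
	u32 len;			/* size of that buffer */
	u32 opaque;			/* cookie echoed back in the status ring */
};

struct demo_rx_return_bd {		/* chip -> host: what actually arrived */
	u32 opaque;			/* copy of the posted descriptor's cookie */
	u16 len;			/* received frame length */
	u16 err_flags;			/* error / vlan / checksum summary */
};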
6628 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
6630 struct tg3
*tp
= tnapi
->tp
;
6631 u32 work_mask
, rx_std_posted
= 0;
6632 u32 std_prod_idx
, jmb_prod_idx
;
6633 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
6636 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
6638 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6640 * We need to order the read of hw_idx and the read of
6641 * the opaque cookie.
6646 std_prod_idx
= tpr
->rx_std_prod_idx
;
6647 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
6648 while (sw_idx
!= hw_idx
&& budget
> 0) {
6649 struct ring_info
*ri
;
6650 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
6652 struct sk_buff
*skb
;
6653 dma_addr_t dma_addr
;
6654 u32 opaque_key
, desc_idx
, *post_ptr
;
6658 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
6659 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
6660 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
6661 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
6662 dma_addr
= dma_unmap_addr(ri
, mapping
);
6664 post_ptr
= &std_prod_idx
;
6666 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
6667 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
6668 dma_addr
= dma_unmap_addr(ri
, mapping
);
6670 post_ptr
= &jmb_prod_idx
;
6672 goto next_pkt_nopost
;
6674 work_mask
|= opaque_key
;
6676 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
6677 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
6679 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6680 desc_idx
, *post_ptr
);
6682 /* Other statistics kept track of by card. */
6687 prefetch(data
+ TG3_RX_OFFSET(tp
));
6688 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
6691 if ((desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6692 RXD_FLAG_PTPSTAT_PTPV1
||
6693 (desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6694 RXD_FLAG_PTPSTAT_PTPV2
) {
6695 tstamp
= tr32(TG3_RX_TSTAMP_LSB
);
6696 tstamp
|= (u64
)tr32(TG3_RX_TSTAMP_MSB
) << 32;
6699 if (len
> TG3_RX_COPY_THRESH(tp
)) {
6701 unsigned int frag_size
;
6703 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
6704 *post_ptr
, &frag_size
);
6708 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
6709 PCI_DMA_FROMDEVICE
);
6711 skb
= build_skb(data
, frag_size
);
6713 tg3_frag_free(frag_size
!= 0, data
);
6714 goto drop_it_no_recycle
;
6716 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
6717 /* Ensure that the update to the data happens
6718 * after the usage of the old DMA mapping.
6725 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6726 desc_idx
, *post_ptr
);
6728 skb
= netdev_alloc_skb(tp
->dev
,
6729 len
+ TG3_RAW_IP_ALIGN
);
6731 goto drop_it_no_recycle
;
6733 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
6734 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6736 data
+ TG3_RX_OFFSET(tp
),
6738 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6743 tg3_hwclock_to_timestamp(tp
, tstamp
,
6744 skb_hwtstamps(skb
));
6746 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6747 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6748 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6749 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6750 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6752 skb_checksum_none_assert(skb
);
6754 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6756 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6757 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6759 goto drop_it_no_recycle
;
6762 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6763 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6764 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
6765 desc
->err_vlan
& RXD_VLAN_MASK
);
6767 napi_gro_receive(&tnapi
->napi
, skb
);
6775 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6776 tpr
->rx_std_prod_idx
= std_prod_idx
&
6777 tp
->rx_std_ring_mask
;
6778 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6779 tpr
->rx_std_prod_idx
);
6780 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6785 sw_idx
&= tp
->rx_ret_ring_mask
;
6787 /* Refresh hw_idx to see if there is new work */
6788 if (sw_idx
== hw_idx
) {
6789 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6794 /* ACK the status ring. */
6795 tnapi
->rx_rcb_ptr
= sw_idx
;
6796 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6798 /* Refill RX ring(s). */
6799 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6800 /* Sync BD data before updating mailbox */
6803 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6804 tpr
->rx_std_prod_idx
= std_prod_idx
&
6805 tp
->rx_std_ring_mask
;
6806 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6807 tpr
->rx_std_prod_idx
);
6809 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6810 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6811 tp
->rx_jmb_ring_mask
;
6812 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6813 tpr
->rx_jmb_prod_idx
);
6816 } else if (work_mask
) {
6817 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6818 * updated before the producer indices can be updated.
6822 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6823 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6825 if (tnapi
!= &tp
->napi
[1]) {
6826 tp
->rx_refill
= true;
6827 napi_schedule(&tp
->napi
[1].napi
);
6834 static void tg3_poll_link(struct tg3
*tp
)
6836 /* handle link change and other phy events */
6837 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
6838 struct tg3_hw_status
*sblk
= tp
->napi
[0].hw_status
;
6840 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
6841 sblk
->status
= SD_STATUS_UPDATED
|
6842 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
6843 spin_lock(&tp
->lock
);
6844 if (tg3_flag(tp
, USE_PHYLIB
)) {
6846 (MAC_STATUS_SYNC_CHANGED
|
6847 MAC_STATUS_CFG_CHANGED
|
6848 MAC_STATUS_MI_COMPLETION
|
6849 MAC_STATUS_LNKSTATE_CHANGED
));
6852 tg3_setup_phy(tp
, false);
6853 spin_unlock(&tp
->lock
);
static int tg3_rx_prodring_xfer(struct tg3 *tp,
                                struct tg3_rx_prodring_set *dpr,
                                struct tg3_rx_prodring_set *spr)
        u32 si, di, cpycnt, src_prod_idx;

        src_prod_idx = spr->rx_std_prod_idx;

        /* Make sure updates to the rx_std_buffers[] entries and the
         * standard producer index are seen in the correct order.
         */
        if (spr->rx_std_cons_idx == src_prod_idx)

        if (spr->rx_std_cons_idx < src_prod_idx)
                cpycnt = src_prod_idx - spr->rx_std_cons_idx;
                cpycnt = tp->rx_std_ring_mask + 1 - spr->rx_std_cons_idx;

        cpycnt = min(cpycnt, tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

        si = spr->rx_std_cons_idx;
        di = dpr->rx_std_prod_idx;

        for (i = di; i < di + cpycnt; i++) {
                if (dpr->rx_std_buffers[i].data) {

        /* Ensure that updates to the rx_std_buffers ring and the
         * shadowed hardware producer ring from tg3_recycle_skb() are
         * ordered correctly WRT the skb check above.
         */
        memcpy(&dpr->rx_std_buffers[di], &spr->rx_std_buffers[si],
               cpycnt * sizeof(struct ring_info));

        for (i = 0; i < cpycnt; i++, di++, si++) {
                struct tg3_rx_buffer_desc *sbd, *dbd;
                sbd = &spr->rx_std[si];
                dbd = &dpr->rx_std[di];
                dbd->addr_hi = sbd->addr_hi;
                dbd->addr_lo = sbd->addr_lo;

        spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
                               tp->rx_std_ring_mask;
        dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
                               tp->rx_std_ring_mask;

        src_prod_idx = spr->rx_jmb_prod_idx;

        /* Make sure updates to the rx_jmb_buffers[] entries and
         * the jumbo producer index are seen in the correct order.
         */
        if (spr->rx_jmb_cons_idx == src_prod_idx)

        if (spr->rx_jmb_cons_idx < src_prod_idx)
                cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
                cpycnt = tp->rx_jmb_ring_mask + 1 - spr->rx_jmb_cons_idx;

        cpycnt = min(cpycnt, tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

        si = spr->rx_jmb_cons_idx;
        di = dpr->rx_jmb_prod_idx;

        for (i = di; i < di + cpycnt; i++) {
                if (dpr->rx_jmb_buffers[i].data) {

        /* Ensure that updates to the rx_jmb_buffers ring and the
         * shadowed hardware producer ring from tg3_recycle_skb() are
         * ordered correctly WRT the skb check above.
         */
        memcpy(&dpr->rx_jmb_buffers[di], &spr->rx_jmb_buffers[si],
               cpycnt * sizeof(struct ring_info));

        for (i = 0; i < cpycnt; i++, di++, si++) {
                struct tg3_rx_buffer_desc *sbd, *dbd;
                sbd = &spr->rx_jmb[si].std;
                dbd = &dpr->rx_jmb[di].std;
                dbd->addr_hi = sbd->addr_hi;
                dbd->addr_lo = sbd->addr_lo;

        spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
                               tp->rx_jmb_ring_mask;
        dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
                               tp->rx_jmb_ring_mask;
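/* Illustrative note (not in the original source): the producer/consumer
 * indices above wrap with a power-of-two mask.  For example, with a
 * 512-entry standard ring the mask is 511, so copying cpycnt = 4 entries
 * from cons_idx = 510 advances the index to (510 + 4) & 511 = 2.  The
 * mask-based wrap only works because the ring sizes are powers of two.
 */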
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
        struct tg3 *tp = tnapi->tp;

        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))

        if (!tnapi->rx_rcb_prod_idx)

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);

        if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
                struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
                u32 std_prod_idx = dpr->rx_std_prod_idx;
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

                tp->rx_refill = false;
                for (i = 1; i <= tp->rxq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);

                if (std_prod_idx != dpr->rx_std_prod_idx)
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     dpr->rx_std_prod_idx);

                if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     dpr->rx_jmb_prod_idx);

                        tw32_f(HOSTCC_MODE, tp->coal_now);
static inline void tg3_reset_task_schedule(struct tg3 *tp)
        if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
                schedule_work(&tp->reset_task);

static inline void tg3_reset_task_cancel(struct tg3 *tp)
        cancel_work_sync(&tp->reset_task);
        tg3_flag_clear(tp, RESET_TASK_PENDING);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);
static int tg3_poll_msix(struct napi_struct *napi, int budget)
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))

                if (unlikely(work_done >= budget))

                /* tp->last_tag is used in tg3_int_reenable() below
                 * to tell the hw how much work has been processed,
                 * so we must read it before checking for more work.
                 */
                tnapi->last_tag = sblk->status_tag;
                tnapi->last_irq_tag = tnapi->last_tag;

                /* check for RX/TX work to do */
                if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                           *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {

                        /* This test here is not race free, but will reduce
                         * the number of interrupts by looping again.
                         */
                        if (tnapi == &tp->napi[1] && tp->rx_refill)

                        napi_complete(napi);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

                        /* This test here is synchronized by napi_schedule()
                         * and napi_complete() to close the race condition.
                         */
                        if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
                                tw32(HOSTCC_MODE, tp->coalesce_mode |
                                                  HOSTCC_MODE_ENABLE |

        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
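/* Illustrative note (not in the original source): on tagged-status chips the
 * status block carries a tag that the hardware bumps each time it posts new
 * events.  Writing (last_tag << 24) back to the interrupt mailbox above both
 * re-enables interrupts and reports how far the driver has processed, so the
 * chip only raises a new interrupt if work arrived after that tag.
 */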
static void tg3_process_error(struct tg3 *tp)
        bool real_error = false;

        if (tg3_flag(tp, ERROR_PROCESSED))

        /* Check Flow Attention register */
        val = tr32(HOSTCC_FLOW_ATTN);
        if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
                netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");

        if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
                netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");

        if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
                netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");

        tg3_flag_set(tp, ERROR_PROCESSED);
        tg3_reset_task_schedule(tp);
static int tg3_poll(struct napi_struct *napi, int budget)
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;

                if (sblk->status & SD_STATUS_ERROR)
                        tg3_process_error(tp);

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))

                if (unlikely(work_done >= budget))

                if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tnapi->last_tag = sblk->status_tag;
                        tnapi->last_irq_tag = tnapi->last_tag;
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tnapi))) {
                        napi_complete(napi);
                        tg3_int_reenable(tnapi);

        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
static void tg3_napi_disable(struct tg3 *tp)
        for (i = tp->irq_cnt - 1; i >= 0; i--)
                napi_disable(&tp->napi[i].napi);

static void tg3_napi_enable(struct tg3 *tp)
        for (i = 0; i < tp->irq_cnt; i++)
                napi_enable(&tp->napi[i].napi);

static void tg3_napi_init(struct tg3 *tp)
        netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
        for (i = 1; i < tp->irq_cnt; i++)
                netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);

static void tg3_napi_fini(struct tg3 *tp)
        for (i = 0; i < tp->irq_cnt; i++)
                netif_napi_del(&tp->napi[i].napi);
static inline void tg3_netif_stop(struct tg3 *tp)
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_carrier_off(tp->dev);
        netif_tx_disable(tp->dev);

/* tp->lock must be held */
static inline void tg3_netif_start(struct tg3 *tp)
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        netif_carrier_on(tp->dev);

        tg3_napi_enable(tp);
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

static void tg3_irq_quiesce(struct tg3 *tp)
        BUG_ON(tp->irq_sync);

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
        spin_lock_bh(&tp->lock);
                tg3_irq_quiesce(tp);

static inline void tg3_full_unlock(struct tg3 *tp)
        spin_unlock_bh(&tp->lock);
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        prefetch(tnapi->hw_status);
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        prefetch(tnapi->hw_status);
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         */
        tw32_mailbox(tnapi->int_mbox, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_RETVAL(1);
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts. The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))

        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
                /* No work, shared interrupt perhaps? re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,

        return IRQ_RETVAL(handled);
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts. The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream. We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled. Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

        return IRQ_RETVAL(handled);
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tg3_disable_ints(tp);
                return IRQ_RETVAL(1);
        return IRQ_RETVAL(0);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_irq_sync(tp))

        for (i = 0; i < tp->irq_cnt; i++)
                tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);

static void tg3_tx_timeout(struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                netdev_err(dev, "transmit timed out, resetting\n");

        tg3_reset_task_schedule(tp);
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
        u32 base = (u32) mapping & 0xffffffff;

        return (base > 0xffffdcc0) && (base + len + 8 < base);

/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
 * of any 4GB boundaries: 4G, 8G, etc
 */
static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
        if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
                u32 base = (u32) mapping & 0xffffffff;

                return ((base + len + (mss & 0x3fff)) < base);

/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
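/* Worked example (illustrative, not in the original source): in
 * tg3_4g_overflow_test() the low 32 bits of the mapping wrap when the buffer
 * straddles a 4GB boundary.  With base = 0xfffff000 and len = 0x2000,
 * base + len + 8 = 0x00001008 in 32-bit arithmetic, which is less than base,
 * so the test fires.  Similarly, tg3_40bit_overflow_test() flags mappings
 * whose end crosses DMA_BIT_MASK(40) = 0xff_ffff_ffff on affected chips.
 */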
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
                                 dma_addr_t mapping, u32 len, u32 flags,
        txbd->addr_hi = ((u64) mapping >> 32);
        txbd->addr_lo = ((u64) mapping & 0xffffffff);
        txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
        txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
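/* Illustrative note (not in the original source): a send BD packs the 64-bit
 * DMA address into addr_hi/addr_lo, the fragment length and flag bits into
 * len_flags, and the MSS and VLAN tag into vlan_tag.  For example, a
 * 1514-byte frame that ends a packet might be set up roughly as
 * tg3_tx_set_bd(txbd, mapping, 1514, TXD_FLAG_END, 0, 0).
 */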
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
        struct tg3 *tp = tnapi->tp;

        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)

        if (tg3_4g_overflow_test(map, len))

        if (tg3_4g_tso_overflow_test(tp, map, len, mss))

        if (tg3_40bit_overflow_test(tp, map, len))

        if (tp->dma_limit) {
                u32 prvidx = *entry;
                u32 tmp_flag = flags & ~TXD_FLAG_END;
                while (len > tp->dma_limit && *budget) {
                        u32 frag_len = tp->dma_limit;
                        len -= tp->dma_limit;

                        /* Avoid the 8byte DMA problem */
                                len += tp->dma_limit / 2;
                                frag_len = tp->dma_limit / 2;

                        tnapi->tx_buffers[*entry].fragmented = true;

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      frag_len, tmp_flag, mss, vlan);

                        *entry = NEXT_TX(*entry);

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      len, flags, mss, vlan);
                        *entry = NEXT_TX(*entry);

                        tnapi->tx_buffers[prvidx].fragmented = false;

                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                              len, flags, mss, vlan);
                *entry = NEXT_TX(*entry);
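/* Illustrative example (not in the original source; assumes the elided check
 * splits the fragment when 8 bytes or fewer would remain): with
 * tp->dma_limit = 4096, a 4100-byte fragment is not emitted as 4096 + 4,
 * because a trailing DMA of 8 bytes or less trips the hardware bug noted
 * above.  Instead the first BD is shortened to dma_limit / 2 = 2048 so the
 * remainder (2052 bytes) stays safely above the 8-byte threshold.
 */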
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
        struct sk_buff *skb;
        struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),

        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

        for (i = 0; i <= last; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               skb_frag_size(frag), PCI_DMA_TODEVICE);

                while (txb->fragmented) {
                        txb->fragmented = false;
                        entry = NEXT_TX(entry);
                        txb = &tnapi->tx_buffers[entry];
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;

        if (tg3_asic_rev(tp) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);

                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        u32 save_entry = *entry;

                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb(new_skb);
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
        struct sk_buff *segs, *nskb;
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);

        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
                goto tg3_tso_bug_end;

                tg3_start_xmit(nskb, tp->dev);

        return NETDEV_TX_OK;
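/* Illustrative note (not in the original source): when the TSO header would
 * exceed 80 bytes, the skb is handed to skb_gso_segment() with NETIF_F_TSO
 * masked out, so the stack splits it into MSS-sized non-TSO skbs.  Each
 * segment is then resubmitted through tg3_start_xmit(), bypassing the
 * hardware TSO path that triggers the bug.
 */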
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        int i = -1, would_hit_hwbug;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        if (tg3_flag(tp, ENABLE_TSS))

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt. Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either. Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                                   "BUG! Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;

        entry = tnapi->tx_prod;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
                u32 tcp_opt_len, hdr_len;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))

                tcp_opt_len = tcp_optlen(skb);

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

                if (!skb_is_gso_v6(skb)) {
                        iph->tot_len = htons(mss + hdr_len);

                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,

                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         tg3_asic_rev(tp) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        if (tcp_opt_len || iph->ihl > 5) {
                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);

        if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
            tg3_flag(tp, TX_TSTAMP_EN)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                base_flags |= TXD_FLAG_HWTSTAMP;

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping))

        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                would_hit_hwbug = 1;
        } else if (skb_shinfo(skb)->nr_frags > 0) {
                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))

                /* Now loop through additional data
                 * fragments, and queue them.
                 */
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                        if (dma_mapping_error(&tp->pdev->dev, mapping))

                        tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            ((i == last) ? TXD_FLAG_END : 0),
                                would_hit_hwbug = 1;

        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
                                                base_flags, mss, vlan))

        skb_tx_timestamp(skb);
        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating mailbox */

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);

        return NETDEV_TX_OK;

        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;

        return NETDEV_TX_OK;
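/* Illustrative note (not in the original source): the queue is stopped above
 * as soon as fewer than MAX_SKB_FRAGS + 1 descriptors remain, i.e. when the
 * next worst-case skb might not fit, and it is only woken once tg3_tx_avail()
 * rises back above TG3_TX_WAKEUP_THRESH(tnapi).  The gap between the two
 * thresholds keeps the queue from bouncing between stopped and awake on every
 * completed packet.
 */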
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
                tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
                                  MAC_MODE_PORT_MODE_MASK);

                tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

                if (!tg3_flag(tp, 5705_PLUS))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

                tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

                if (tg3_flag(tp, 5705_PLUS) ||
                    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
                    tg3_asic_rev(tp) == ASIC_REV_5700)
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;

        tw32(MAC_MODE, tp->mac_mode);
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, false);

        if (extlpbk && tg3_phy_set_extloopbk(tp))

        bmcr = BMCR_FULLDPLX;
                bmcr |= BMCR_SPEED100;

                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        bmcr |= BMCR_SPEED100;
                        bmcr |= BMCR_SPEED1000;

                        if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                tg3_readphy(tp, MII_CTRL1000, &val);
                                val |= CTL1000_AS_MASTER |
                                       CTL1000_ENABLE_MASTER;
                                tg3_writephy(tp, MII_CTRL1000, val);
                                ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                        MII_TG3_FET_PTEST_TRIM_2;
                                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);

                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            tg3_asic_rev(tp) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                tw32_f(MAC_RX_MODE, tp->rx_mode);

        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (tg3_asic_rev(tp) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);

        tw32(MAC_MODE, mac_mode);
static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
        struct tg3 *tp = netdev_priv(dev);

        if (features & NETIF_F_LOOPBACK) {
                if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)

                spin_lock_bh(&tp->lock);
                tg3_mac_loopback(tp, true);
                netif_carrier_on(tp->dev);
                spin_unlock_bh(&tp->lock);
                netdev_info(dev, "Internal MAC loopback mode enabled.\n");
                if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))

                spin_lock_bh(&tp->lock);
                tg3_mac_loopback(tp, false);
                /* Force link status check */
                tg3_setup_phy(tp, true);
                spin_unlock_bh(&tp->lock);
                netdev_info(dev, "Internal MAC loopback mode disabled.\n");

static netdev_features_t tg3_fix_features(struct net_device *dev,
                                          netdev_features_t features)
        struct tg3 *tp = netdev_priv(dev);

        if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
                features &= ~NETIF_F_ALL_TSO;

static int tg3_set_features(struct net_device *dev, netdev_features_t features)
        netdev_features_t changed = dev->features ^ features;

        if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
                tg3_set_loopback(dev, features);
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
        if (tpr != &tp->napi[0].prodring) {
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
                        tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],

                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                                tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],

        for (i = 0; i <= tp->rx_std_ring_mask; i++)
                tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],

        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once. This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))

        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                               (i << RXD_OPAQUE_INDEX_SHIFT));

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                unsigned int frag_size;

                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        tp->rx_jumbo_pending = i;

        tg3_rx_prodring_free(tp, tpr);
static void tg3_rx_prodring_fini(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
        kfree(tpr->rx_std_buffers);
        tpr->rx_std_buffers = NULL;
        kfree(tpr->rx_jmb_buffers);
        tpr->rx_jmb_buffers = NULL;
                dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
                                  tpr->rx_std, tpr->rx_std_mapping);
                dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
                                  tpr->rx_jmb, tpr->rx_jmb_mapping);

static int tg3_rx_prodring_init(struct tg3 *tp,
                                struct tg3_rx_prodring_set *tpr)
        tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
        if (!tpr->rx_std_buffers)

        tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
                                         TG3_RX_STD_RING_BYTES(tp),
                                         &tpr->rx_std_mapping,

        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
                if (!tpr->rx_jmb_buffers)

                tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
                                                 TG3_RX_JMB_RING_BYTES(tp),
                                                 &tpr->rx_jmb_mapping,

        tg3_rx_prodring_fini(tp, tpr);
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
        for (j = 0; j < tp->irq_cnt; j++) {
                struct tg3_napi *tnapi = &tp->napi[j];

                tg3_rx_prodring_free(tp, &tnapi->prodring);

                if (!tnapi->tx_buffers)

                for (i = 0; i < TG3_TX_RING_SIZE; i++) {
                        struct sk_buff *skb = tnapi->tx_buffers[i].skb;

                        tg3_tx_skb_unmap(tnapi, i,
                                         skb_shinfo(skb)->nr_frags - 1);

                        dev_kfree_skb_any(skb);

                netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver. tp->{tx,}lock are held and thus
 */
static int tg3_init_rings(struct tg3 *tp)
        /* Free up all the SKBs. */

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
                tnapi->hw_status->status = 0;
                tnapi->hw_status->status_tag = 0;
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                        memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

                tnapi->rx_rcb_ptr = 0;
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

                if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
static void tg3_mem_tx_release(struct tg3 *tp)
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                if (tnapi->tx_ring) {
                        dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
                                          tnapi->tx_ring, tnapi->tx_desc_mapping);
                        tnapi->tx_ring = NULL;

                kfree(tnapi->tx_buffers);
                tnapi->tx_buffers = NULL;

static int tg3_mem_tx_acquire(struct tg3 *tp)
        struct tg3_napi *tnapi = &tp->napi[0];

        /* If multivector TSS is enabled, vector 0 does not handle
         * tx interrupts. Don't allocate any resources for it.
         */
        if (tg3_flag(tp, ENABLE_TSS))

        for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
                tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
                                            TG3_TX_RING_SIZE, GFP_KERNEL);
                if (!tnapi->tx_buffers)

                tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
                                                    &tnapi->tx_desc_mapping,
                if (!tnapi->tx_ring)

        tg3_mem_tx_release(tp);
static void tg3_mem_rx_release(struct tg3 *tp)
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tg3_rx_prodring_fini(tp, &tnapi->prodring);

                dma_free_coherent(&tp->pdev->dev,
                                  TG3_RX_RCB_RING_BYTES(tp),
                                  tnapi->rx_rcb_mapping);
                tnapi->rx_rcb = NULL;

static int tg3_mem_rx_acquire(struct tg3 *tp)
        unsigned int i, limit;

        limit = tp->rxq_cnt;

        /* If RSS is enabled, we need a (dummy) producer ring
         * set on vector zero. This is the true hw prodring.
         */
        if (tg3_flag(tp, ENABLE_RSS))

        for (i = 0; i < limit; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                if (tg3_rx_prodring_init(tp, &tnapi->prodring))

                /* If multivector RSS is enabled, vector 0
                 * does not handle rx or tx interrupts.
                 * Don't allocate any resources for it.
                 */
                if (!i && tg3_flag(tp, ENABLE_RSS))

                tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
                                                   TG3_RX_RCB_RING_BYTES(tp),
                                                   &tnapi->rx_rcb_mapping,
                                                   GFP_KERNEL | __GFP_ZERO);

        tg3_mem_rx_release(tp);
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void tg3_free_consistent(struct tg3 *tp)
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                if (tnapi->hw_status) {
                        dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
                                          tnapi->status_mapping);
                        tnapi->hw_status = NULL;

        tg3_mem_rx_release(tp);
        tg3_mem_tx_release(tp);

                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;

/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
        tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
                                          sizeof(struct tg3_hw_stats),
                                          GFP_KERNEL | __GFP_ZERO);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;

                tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
                                                      &tnapi->status_mapping,
                                                      GFP_KERNEL | __GFP_ZERO);
                if (!tnapi->hw_status)

                sblk = tnapi->hw_status;

                if (tg3_flag(tp, ENABLE_RSS)) {
                        u16 *prodptr = NULL;

                        /*
                         * When RSS is enabled, the status block format changes
                         * slightly. The "rx_jumbo_consumer", "reserved",
                         * and "rx_mini_consumer" members get mapped to the
                         * other three rx return ring producer indexes.
                         */
                                prodptr = &sblk->idx[0].rx_producer;
                                prodptr = &sblk->rx_jumbo_consumer;
                                prodptr = &sblk->reserved;
                                prodptr = &sblk->rx_mini_consumer;
                        tnapi->rx_rcb_prod_idx = prodptr;
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;

        if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))

        tg3_free_consistent(tp);
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears. tp->lock is held.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
        if (tg3_flag(tp, 5705_PLUS)) {
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                if (pci_channel_offline(tp->pdev)) {
                        dev_err(&tp->pdev->dev,
                                "tg3_stop_block device offline, "
                                "ofs=%lx enable_bit=%x\n",

                if ((val & enable_bit) == 0)

        if (i == MAX_WAIT_CNT && !silent) {
                dev_err(&tp->pdev->dev,
                        "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
/* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, bool silent)
        tg3_disable_ints(tp);

        if (pci_channel_offline(tp->pdev)) {
                tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
                tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;

        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);

        err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);

        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);

/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
        void (*write_op)(struct tg3 *, u32, u32);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things. So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared. The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        if (tg3_asic_rev(tp) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);

        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));

        if (tg3_asic_rev(tp) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time? It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         */

        /* Flush PCI posted writes. The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above). I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {

                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
                        /* Wait for link training to complete. */
                        for (j = 0; j < 5000; j++)

                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting. Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 |= PCI_EXP_DEVCTL_PAYLOAD;
                pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);

                /* Clear error status */
                pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
                                           PCI_EXP_DEVSTA_CED |
                                           PCI_EXP_DEVSTA_NFED |
                                           PCI_EXP_DEVSTA_FED |
                                           PCI_EXP_DEVSTA_URD);

        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
                tw32(0x5000, 0x400);

        if (tg3_flag(tp, IS_SSB_CORE)) {
                /*
                 * BCM4785: In order to avoid repercussions from using
                 * potentially defective internal ROM, stop the Rx RISC CPU,
                 * which is not required.
                 */
                tg3_halt_cpu(tp, RX_CPU_BASE);

        err = tg3_poll_fw(tp);

        tw32(GRC_MODE, tp->grc_mode);

        if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
                tw32(0xc4, val | (1 << 15));

        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            tg3_asic_rev(tp) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
        tw32_f(MAC_MODE, val);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        if (tg3_flag(tp, PCI_EXPRESS) &&
            tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
            tg3_asic_rev(tp) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                tw32(0x7c00, val | (1 << 25));

        if (tg3_asic_rev(tp) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);

        /* Reprobe ASF enable state. */
        tg3_flag_clear(tp, ENABLE_ASF);
        tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
                           TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);

                        tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
                        if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
                                tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
                        if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
                                tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);

/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        __tg3_set_mac_addr(tp, false);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

                /* Save the stats across chip resets... */
                tg3_get_nstats(tp, &tp->net_stats_prev);
                tg3_get_estats(tp, &tp->estats_prev);

                /* And make sure the next sample is new data */
                memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

static int tg3_set_mac_addr(struct net_device *dev, void *p)
        struct tg3 *tp = netdev_priv(dev);
        struct sockaddr *addr = p;
        bool skip_mac_1 = false;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        if (!netif_running(dev))

        if (tg3_flag(tp, ENABLE_ASF)) {
                u32 addr0_high, addr0_low, addr1_high, addr1_low;

                addr0_high = tr32(MAC_ADDR_0_HIGH);
                addr0_low = tr32(MAC_ADDR_0_LOW);
                addr1_high = tr32(MAC_ADDR_1_HIGH);
                addr1_low = tr32(MAC_ADDR_1_LOW);

                /* Skip MAC addr 1 if ASF is using it. */
                if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
                    !(addr1_high == 0 && addr1_low == 0))

        spin_lock_bh(&tp->lock);
        __tg3_set_mac_addr(tp, skip_mac_1);
        spin_unlock_bh(&tp->lock);
/* tp->lock is held. */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
                           dma_addr_t mapping, u32 maxlen_flags,
                      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
                      ((u64) mapping >> 32));
                      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
                      ((u64) mapping & 0xffffffff));
                      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),

        if (!tg3_flag(tp, 5705_PLUS))
                              (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
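/* Illustrative note (not in the original source): each ring control block in
 * NIC SRAM is a small BDINFO structure, and the elided memory writes above
 * fill its fields at fixed offsets from bdinfo_addr: the 64-bit host ring
 * address (TG3_BDINFO_HOST_ADDR, split into high and low words), the ring
 * length and flags (TG3_BDINFO_MAXLEN_FLAGS), and, on pre-5705 chips, the
 * NIC-local ring address (TG3_BDINFO_NIC_ADDR).
 */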
static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);

                for (; i < tp->txq_cnt; i++) {
                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames);
                        reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames_irq);

        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
        u32 limit = tp->rxq_cnt;

        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);

        for (; i < limit; i++) {
                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);

        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
        tg3_coal_tx_init(tp, ec);
        tg3_coal_rx_init(tp, ec);

        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = ec->stats_block_coalesce_usecs;

                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

                tw32(HOSTCC_STAT_COAL_TICKS, val);
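/* Illustrative note (not in the original source): the per-vector coalescing
 * registers are laid out as a block of 0x18 bytes per additional MSI-X vector
 * starting at the *_VEC1 addresses, which is why each register above is
 * computed as base + i * 0x18.  For example, with i = 1 the ticks register
 * sits 0x18 bytes past HOSTCC_TXCOL_TICKS_VEC1.
 */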
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first. */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5762)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;

		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		if (tnapi->rx_rcb) {
			tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
				       ((tp->rx_ret_ring_mask + 1) <<
					BDINFO_FLAGS_MAXLEN_SHIFT), 0);
			rxrcb += TG3_BDINFO_SIZE;
		}

		stblk += 8;
	}
}
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		 tg3_asic_rev(tp) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg;
	u32 tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;

			reg >>= 1;

			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
	/* accept or reject all multicast frames */
	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
	}
}
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
}
static void tg3_rss_check_indir_tbl(struct tg3 *tp)
{
	int i;

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return;

	if (tp->rxq_cnt == 1) {
		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
		return;
	}

	/* Validate table against current IRQ count */
	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
			break;
	}

	if (i != TG3_RSS_INDIR_TBL_SIZE)
		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
}
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
{
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
	else
		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
}
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
{
	u32 val, rdmac_mode;
	int i, err, limit;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	tg3_disable_ints(tp);

	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);

	if (tg3_flag(tp, INIT_COMPLETE))
		tg3_abort_hw(tp, 1);

	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;

		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);

		tw32_f(TG3_CPMU_EEE_CTRL,
		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);

		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
		      TG3_CPMU_EEEMD_LPI_IN_TX |
		      TG3_CPMU_EEEMD_LPI_IN_RX |
		      TG3_CPMU_EEEMD_EEE_ENABLE;

		if (tg3_asic_rev(tp) != ASIC_REV_5717)
			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;

		if (tg3_flag(tp, ENABLE_APE))
			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;

		tw32_f(TG3_CPMU_EEE_MODE, val);

		tw32_f(TG3_CPMU_EEE_DBTMR1,
		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);

		tw32_f(TG3_CPMU_EEE_DBTMR2,
		       TG3_CPMU_DBTMR2_APE_TX_2047US |
		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
	}

	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
		tg3_phy_pull_config(tp);
		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	}

	err = tg3_chip_reset(tp);
	if (err)
		return err;

	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
		val = tr32(TG3_CPMU_CTRL);
		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
		tw32(TG3_CPMU_CTRL, val);

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);

		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
		val |= CPMU_LNK_AWARE_MACCLK_6_25;
		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);

		val = tr32(TG3_CPMU_HST_ACC);
		val &= ~CPMU_HST_ACC_MACCLK_MASK;
		val |= CPMU_HST_ACC_MACCLK_6_25;
		tw32(TG3_CPMU_HST_ACC, val);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
		       PCIE_PWR_MGMT_L1_THRESH_4MS;
		tw32(PCIE_PWR_MGMT_THRESH, val);

		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);

		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);

		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	if (tg3_flag(tp, L1PLLPD_EN)) {
		u32 grc_mode = tr32(GRC_MODE);

		/* Access the lower 1K of PL PCIE block registers. */
		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);

		tw32(GRC_MODE, grc_mode);
	}

	if (tg3_flag(tp, 57765_CLASS)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
			u32 grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of PL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_PL_LO_PHYCTL5);
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);

			tw32(GRC_MODE, grc_mode);
		}

		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
			u32 grc_mode;

			/* Fix transmit hangs */
			val = tr32(TG3_CPMU_PADRNG_CTL);
			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
			tw32(TG3_CPMU_PADRNG_CTL, val);

			grc_mode = tr32(GRC_MODE);

			/* Access the lower 1K of DL PCIE block registers. */
			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);

			val = tr32(TG3_PCIE_TLDLPL_PORT +
				   TG3_PCIE_DL_LO_FTSMAX);
			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);

			tw32(GRC_MODE, grc_mode);
		}

		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
		val |= CPMU_LSPD_10MB_MACCLK_6_25;
		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
	}

	/* This works around an issue with Athlon chipsets on
	 * B3 tigon3 silicon.  This bit has no effect on any
	 * other revision.  But do not set this on PCI Express
	 * chips and don't even touch the clocks if the CPMU is present.
	 */
	if (!tg3_flag(tp, CPMU_PRESENT)) {
		if (!tg3_flag(tp, PCI_EXPRESS))
			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE)) {
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_RETRY_SAME_DMA;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		val = tr32(TG3PCI_PCISTATE);
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
		tw32(TG3PCI_PCISTATE, val);
	}

	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
		/* Enable some hw fixes. */
		val = tr32(TG3PCI_MSI_DATA);
		val |= (1 << 26) | (1 << 28) | (1 << 29);
		tw32(TG3PCI_MSI_DATA, val);
	}

	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to setup the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	err = tg3_init_rings(tp);
	if (err)
		return err;

	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(TG3PCI_DMA_RW_CTRL) &
		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
		if (!tg3_flag(tp, 57765_CLASS) &&
		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
		    tg3_asic_rev(tp) != ASIC_REV_5762)
			val |= DMA_RWCTRL_TAGGED_STAT_WA;
		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
		   tg3_asic_rev(tp) != ASIC_REV_5761) {
		/* This value is determined during the probe time DMA
		 * engine test, tg3_test_dma.
		 */
		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
			  GRC_MODE_4X_NIC_SEND_RINGS |
			  GRC_MODE_NO_TX_PHDR_CSUM |
			  GRC_MODE_NO_RX_PHDR_CSUM);
	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;

	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
	if (tp->rxptpctl)
		tw32(TG3_RX_PTP_CTL,
		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);

	if (tg3_flag(tp, PTP_CAPABLE))
		val |= GRC_MODE_TIME_SYNC_ENABLE;

	tw32(GRC_MODE, tp->grc_mode | val);

	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
	val = tr32(GRC_MISC_CFG);
	val &= ~0xff;
	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
	tw32(GRC_MISC_CFG, val);

	/* Initialize MBUF/DESC pool. */
	if (tg3_flag(tp, 5750_PLUS)) {
		/* Do nothing. */
	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
		if (tg3_asic_rev(tp) == ASIC_REV_5704)
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
		else
			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
	} else if (tg3_flag(tp, TSO_CAPABLE)) {
		int fw_len;

		fw_len = tp->fw_len;
		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
		tw32(BUFMGR_MB_POOL_ADDR,
		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
		tw32(BUFMGR_MB_POOL_SIZE,
		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
	}

	if (tp->dev->mtu <= ETH_DATA_LEN) {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water);
	} else {
		tw32(BUFMGR_MB_RDMA_LOW_WATER,
		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
		tw32(BUFMGR_MB_MACRX_LOW_WATER,
		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
		tw32(BUFMGR_MB_HIGH_WATER,
		     tp->bufmgr_config.mbuf_high_water_jumbo);
	}
	tw32(BUFMGR_DMA_LOW_WATER,
	     tp->bufmgr_config.dma_low_water);
	tw32(BUFMGR_DMA_HIGH_WATER,
	     tp->bufmgr_config.dma_high_water);

	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
	tw32(BUFMGR_MODE, val);
	for (i = 0; i < 2000; i++) {
		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
			break;
		udelay(10);
	}
	if (i >= 2000) {
		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
		return -ENODEV;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);

	tg3_setup_rxbd_thresholds(tp);
	/* Initialize TG3_BDINFO's at:
	 *  RCVDBDI_STD_BD:	standard eth size rx ring
	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
	 *
	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
	 *                              ring attribute flags
	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
	 *
	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 *
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tpr->rx_std_mapping >> 32));
	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tpr->rx_std_mapping & 0xffffffff));
	if (!tg3_flag(tp, 5717_PLUS))
		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
		     NIC_SRAM_RX_BUFFER_DESC);

	/* Disable the mini ring */
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
		     BDINFO_FLAGS_DISABLED);

	/* Program the jumbo buffer descriptor ring control
	 * blocks on those devices that have them.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {

		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
			     ((u64) tpr->rx_jmb_mapping >> 32));
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
			val = TG3_RX_JMB_RING_SIZE(tp) <<
			      BDINFO_FLAGS_MAXLEN_SHIFT;
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     val | BDINFO_FLAGS_USE_EXT_RECV);
			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
			    tg3_flag(tp, 57765_CLASS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
		} else {
			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
			     BDINFO_FLAGS_DISABLED);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			val = TG3_RX_STD_RING_SIZE(tp);
			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
			val |= (TG3_RX_STD_DMA_SZ << 2);
		} else
			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
	} else
		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;

	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);

	tpr->rx_std_prod_idx = tp->rx_pending;
	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);

	tpr->rx_jmb_prod_idx =
		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);

	tg3_rings_reset(tp);
	/* Initialize MAC address and backoff seed. */
	__tg3_set_mac_addr(tp, false);

	/* MTU + ethernet header + FCS + optional VLAN tag */
	tw32(MAC_RX_MTU_SIZE,
	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	/* The slot time is changed by tg3_setup_phy if we
	 * run at gigabit with half duplex.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT) |
	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	tw32(MAC_TX_LENGTHS, val);

	/* Receive rules. */
	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
	tw32(RCVLPC_CONFIG, 0x0181);

	/* Calculate RDMAC_MODE setting early, we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
		      RDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5717)
		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;

	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_asic_rev(tp) == ASIC_REV_5705) {
			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
		}
	}

	if (tg3_flag(tp, PCI_EXPRESS))
		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		tp->dma_limit = 0;
		if (tp->dev->mtu <= ETH_DATA_LEN) {
			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
		}
	}

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;

	if (tg3_flag(tp, 57765_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;

	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS)) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
		else
			tgtreg = TG3_RDMA_RSRVCTRL_REG;

		val = tr32(tgtreg);
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
		    tg3_asic_rev(tp) == ASIC_REV_5762) {
			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
		}
		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 tgtreg;

		if (tg3_asic_rev(tp) == ASIC_REV_5762)
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
		else
			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;

		val = tr32(tgtreg);
		tw32(tgtreg, val |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
	}

	/* Receive/send statistics. */
	if (tg3_flag(tp, 5750_PLUS)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_DACK_FIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
		   tg3_flag(tp, TSO_CAPABLE)) {
		val = tr32(RCVLPC_STATS_ENABLE);
		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
		tw32(RCVLPC_STATS_ENABLE, val);
	} else {
		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
	}
	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
	tw32(SNDDATAI_STATSENAB, 0xffffff);
	tw32(SNDDATAI_STATSCTRL,
	     (SNDDATAI_SCTRL_ENABLE |
	      SNDDATAI_SCTRL_FASTUPD));

	/* Setup host coalescing engine. */
	tw32(HOSTCC_MODE, 0);
	for (i = 0; i < 2000; i++) {
		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
			break;
		udelay(10);
	}

	__tg3_set_coalesce(tp, &tp->coal);
	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Status/statistics block address.  See tg3_timer,
		 * the tg3_periodic_fetch_stats call there, and
		 * tg3_get_stats to see how this works for 5705/5750 chips.
		 */
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
		     ((u64) tp->stats_mapping >> 32));
		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
		     ((u64) tp->stats_mapping & 0xffffffff));
		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);

		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);

		/* Clear statistics and status block memory areas */
		for (i = NIC_SRAM_STATS_BLK;
		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
		     i += sizeof(u32)) {
			tg3_write_mem(tp, i, 0);
		}
	}

	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);

	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);

	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		/* reset to prevent losing 1st rx packet intermittently */
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}

	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
			MAC_MODE_FHDE_ENABLE;
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	if (!tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
	udelay(40);

	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
	 * If TG3_FLAG_IS_NIC is zero, we should read the
	 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
	if (!tg3_flag(tp, IS_NIC)) {
		u32 gpio_mask;

		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
				     GRC_LCLCTRL_GPIO_OUTPUT3;

		if (tg3_asic_rev(tp) == ASIC_REV_5755)
			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;

		tp->grc_local_ctrl &= ~gpio_mask;
		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;

		/* GPIO1 must be driven high for eeprom write protect */
		if (tg3_flag(tp, EEPROM_WRITE_PROT))
			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
					       GRC_LCLCTRL_GPIO_OUTPUT1);
	}
	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	udelay(100);

	if (tg3_flag(tp, USING_MSIX)) {
		val = tr32(MSGINT_MODE);
		val |= MSGINT_MODE_ENABLE;
		if (tp->irq_cnt > 1)
			val |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
		udelay(40);
	}

	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
	       WDMAC_MODE_LNGREAD_ENAB);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
			/* nothing */
		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
			   !tg3_flag(tp, IS_5788)) {
			val |= WDMAC_MODE_RX_ACCEL;
		}
	}

	/* Enable host coalescing bug fix */
	if (tg3_flag(tp, 5755_PLUS))
		val |= WDMAC_MODE_STATUS_TAG_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		val |= WDMAC_MODE_BURST_ALL_DATA;

	tw32_f(WDMAC_MODE, val);
	udelay(40);

	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
			pcix_cmd |= PCI_X_CMD_READ_2K;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
			pcix_cmd |= PCI_X_CMD_READ_2K;
		}
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	tw32_f(RDMAC_MODE, rdmac_mode);
	udelay(40);
	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720) {
		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
				break;
		}
		if (i < TG3_NUM_RDMA_CHANNELS) {
			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
			val |= tg3_lso_rd_dma_workaround_bit(tp);
			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
		}
	}

	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
	if (!tg3_flag(tp, 5705_PLUS))
		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		tw32(SNDDATAC_MODE,
		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
	else
		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);

	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		val |= RCVDBDI_MODE_LRG_RING_SZ;
	tw32(RCVDBDI_MODE, val);
	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
	if (tg3_flag(tp, ENABLE_TSS))
		val |= SNDBDI_MODE_MULTI_TXQ_EN;
	tw32(SNDBDI_MODE, val);
	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
		err = tg3_load_5701_a0_firmware_fix(tp);
		if (err)
			return err;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		/* Ignore any errors for the firmware download. If download
		 * fails, the device will operate with EEE disabled
		 */
		tg3_load_57766_firmware(tp);
	}

	if (tg3_flag(tp, TSO_CAPABLE)) {
		err = tg3_load_tso_firmware(tp);
		if (err)
			return err;
	}

	tp->tx_mode = TX_MODE_ENABLE;

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;

	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762) {
		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
		tp->tx_mode &= ~val;
		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
	}

	tw32_f(MAC_TX_MODE, tp->tx_mode);
	udelay(100);

	if (tg3_flag(tp, ENABLE_RSS)) {
		tg3_rss_write_indir_tbl(tp);

		/* Setup the "secret" hash key. */
		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
	}

	tp->rx_mode = RX_MODE_ENABLE;
	if (tg3_flag(tp, 5755_PLUS))
		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;

	if (tg3_flag(tp, ENABLE_RSS))
		tp->rx_mode |= RX_MODE_RSS_ENABLE |
			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
			       RX_MODE_RSS_IPV6_HASH_EN |
			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
			       RX_MODE_RSS_IPV4_HASH_EN |
			       RX_MODE_RSS_TCP_IPV4_HASH_EN;

	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	tw32(MAC_LED_CTRL, tp->led_ctrl);

	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
	}
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
			/* Set drive transmission level to 1.2V  */
			/* only if the signal pre-emphasis bit is not set  */
			val = tr32(MAC_SERDES_CFG);
			val &= 0xfffff000;
			val |= 0x880;
			tw32(MAC_SERDES_CFG, val);
		}
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
			tw32(MAC_SERDES_CFG, 0x616000);
	}

	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);

	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		/* Use hardware link auto-negotiation */
		tg3_flag_set(tp, HW_AUTONEG);
	}

	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_asic_rev(tp) == ASIC_REV_5714) {
		u32 tmp;

		tmp = tr32(SERDES_RX_CTRL);
		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
	}

	if (!tg3_flag(tp, USE_PHYLIB)) {
		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;

		err = tg3_setup_phy(tp, false);
		if (err)
			return err;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			u32 tmp;

			/* Clear CRC stats. */
			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
				tg3_writephy(tp, MII_TG3_TEST1,
					     tmp | MII_TG3_TEST1_CRC_EN);
				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
			}
		}
	}

	__tg3_set_rx_mode(tp->dev);

	/* Initialize receive rules. */
	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);

	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
		limit = 8;
	else
		limit = 16;
	if (tg3_flag(tp, ENABLE_ASF))
		limit -= 4;
	switch (limit) {
	case 16:
		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
	case 15:
		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
	case 14:
		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
	case 13:
		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
	case 12:
		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
	case 11:
		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
	case 10:
		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
	case 9:
		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
	case 8:
		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
	case 7:
		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
	case 6:
		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
	case 5:
		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
	case 4:
		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
	case 3:
		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
	case 2:
	case 1:
	default:
		break;
	}

	if (tg3_flag(tp, ENABLE_APE))
		/* Write our heartbeat update interval to APE. */
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
				APE_HOST_HEARTBEAT_INT_DISABLE);

	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);

	return 0;
}
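/* Note (editorial): tg3_reset_hw() above is the single bring-up path; it is
 * reached from tg3_init_hw() at open time and, via tg3_init_hw(), from
 * tg3_restart_hw() after resets and self-tests.
 */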
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
{
	/* Chip may have been just powered on. If so, the boot code may still
	 * be running initialization. Wait for it to finish to avoid races in
	 * accessing the hardware.
	 */
	tg3_enable_register_access(tp);

	tg3_switch_clocks(tp);

	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
{
	int i;

	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;

		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);

		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
			memset(ocir, 0, TG3_OCIR_LEN);
	}
}
/* sysfs attributes for hwmon */
static ssize_t tg3_show_temp(struct device *dev,
			     struct device_attribute *devattr, char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
	u32 temperature;

	spin_lock_bh(&tp->lock);
	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
				sizeof(temperature));
	spin_unlock_bh(&tp->lock);
	return sprintf(buf, "%u\n", temperature);
}


static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_SENSOR_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_CAUTION_OFFSET);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
			  TG3_TEMP_MAX_OFFSET);
static struct attribute *tg3_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_max.dev_attr.attr,
	NULL
};

static const struct attribute_group tg3_group = {
	.attrs = tg3_attributes,
};
static void tg3_hwmon_close(struct tg3 *tp)
{
	if (tp->hwmon_dev) {
		hwmon_device_unregister(tp->hwmon_dev);
		tp->hwmon_dev = NULL;
		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
	}
}
static void tg3_hwmon_open(struct tg3 *tp)
{
	int i, err;
	u32 size = 0;
	struct pci_dev *pdev = tp->pdev;
	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];

	tg3_sd_scan_scratchpad(tp, ocirs);

	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
		if (!ocirs[i].src_data_length)
			continue;

		size += ocirs[i].src_hdr_length;
		size += ocirs[i].src_data_length;
	}

	if (!size)
		return;

	/* Register hwmon sysfs hooks */
	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
	if (err) {
		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
		return;
	}

	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
	if (IS_ERR(tp->hwmon_dev)) {
		tp->hwmon_dev = NULL;
		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
	}
}
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
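/* Note (editorial): TG3_STAT_ADD32() folds a 32-bit hardware counter into a
 * 64-bit software counter.  If adding the new reading makes the low word
 * smaller than the value just added, the addition wrapped and the high word
 * takes the carry: for example, low = 0xfffffff0 plus a reading of 0x20
 * leaves low = 0x10, which is < 0x20, so high is incremented.
 */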
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
		u32 val;

		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
	}

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		/* BCM4785: Flush posted writes from GbE to host memory. */
		tr32(HOSTCC_MODE);
	}

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, false);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (tp->link_up &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!tp->link_up &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					       (tp->mac_mode &
						~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, false);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * the same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
static void tg3_timer_init(struct tg3 *tp)
{
	if (tg3_flag(tp, TAGGED_STATUS) &&
	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
	    !tg3_flag(tp, 57765_CLASS))
		tp->timer_offset = HZ;
	else
		tp->timer_offset = HZ / 10;

	BUG_ON(tp->timer_offset > HZ);

	tp->timer_multiplier = (HZ / tp->timer_offset);
	tp->asf_multiplier = (HZ / tp->timer_offset) *
			     TG3_FW_UPDATE_FREQ_SEC;

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long) tp;
	tp->timer.function = tg3_timer;
}

static void tg3_timer_start(struct tg3 *tp)
{
	tp->asf_counter   = tp->asf_multiplier;
	tp->timer_counter = tp->timer_multiplier;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}

static void tg3_timer_stop(struct tg3 *tp)
{
	del_timer_sync(&tp->timer);
}
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		tg3_timer_stop(tp);
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, true);
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
{
	irq_handler_t fn;
	unsigned long flags;
	char *name;
	struct tg3_napi *tnapi = &tp->napi[irq_num];

	if (tp->irq_cnt == 1)
		name = tp->dev->name;
	else {
		name = &tnapi->irq_lbl[0];
		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
		name[IFNAMSIZ-1] = 0;
	}

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}

	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
}
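/* Note (editorial): with a single vector the IRQ is registered under the
 * netdev name; with MSI-X each vector gets a "<ifname>-<n>" label kept in
 * the per-NAPI irq_lbl buffer so the queues can be told apart, e.g. in
 * /proc/interrupts.
 */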
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, true);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const struct tg3_firmware_hdr *fw_hdr;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
static u32 tg3_irq_count(struct tg3 *tp)
{
	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);

	if (irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
	}

	return irq_cnt;
}
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];

	tp->txq_cnt = tp->txq_req;
	tp->rxq_cnt = tp->rxq_req;
	if (!tp->rxq_cnt)
		tp->rxq_cnt = netif_get_num_default_rss_queues();
	if (tp->rxq_cnt > tp->rxq_max)
		tp->rxq_cnt = tp->rxq_max;

	/* Disable multiple TX rings by default.  Simple round-robin hardware
	 * scheduling of the TX rings can cause starvation of rings with
	 * small packets when other rings have TSO or jumbo packets.
	 */
	if (!tp->txq_req)
		tp->txq_cnt = 1;

	tp->irq_cnt = tg3_irq_count(tp);

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
		tp->rxq_cnt = max(rc - 1, 1);
		if (tp->txq_cnt)
			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt == 1)
		return true;

	tg3_flag_set(tp, ENABLE_RSS);

	if (tp->txq_cnt > 1)
		tg3_flag_set(tp, ENABLE_TSS);

	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
	}

	if (tp->irq_cnt == 1) {
		tp->txq_cnt = 1;
		tp->rxq_cnt = 1;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, USING_MSIX))
		pci_disable_msix(tp->pdev);
	else if (tg3_flag(tp, USING_MSI))
		pci_disable_msi(tp->pdev);
	tg3_flag_clear(tp, USING_MSI);
	tg3_flag_clear(tp, USING_MSIX);
	tg3_flag_clear(tp, ENABLE_RSS);
	tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
		     bool init)
{
	struct net_device *dev = tp->dev;
	int i, err;

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (test_irq && tg3_flag(tp, USING_MSI)) {
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_hwmon_open(tp);

	tg3_full_lock(tp, 0);

	tg3_timer_start(tp);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	if (init)
		tg3_ptp_resume(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);

err_out1:
	tg3_ints_fini(tp);

	tg3_free_consistent(tp);

	return err;
}
static void tg3_stop(struct tg3 *tp)
{
	int i;

	tg3_reset_task_cancel(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_hwmon_close(tp);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
			if (err) {
				netdev_warn(tp->dev, "EEE capability disabled\n");
				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
				netdev_warn(tp->dev, "EEE capability restored\n");
				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
			}
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	tg3_carrier_off(tp);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	err = tg3_start(tp,
			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
			true, true);
	if (err) {
		tg3_frob_aux_power(tp, false);
		pci_set_power_state(tp->pdev, PCI_D3hot);
	}

	if (tg3_flag(tp, PTP_CAPABLE)) {
		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
						   &tp->pdev->dev);
		if (IS_ERR(tp->ptp_clock))
			tp->ptp_clock = NULL;
	}

	return err;
}
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_ptp_fini(tp);

	tg3_stop(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_power_down(tp);

	tg3_carrier_off(tp);

	return 0;
}
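/* The hardware statistics block stores each counter as a high/low pair of
 * 32-bit words; get_stat64() reassembles the 64-bit value, and the
 * tg3_get_estats()/tg3_get_nstats() helpers below add it to the totals
 * saved across the last close/open in tp->estats_prev/net_stats_prev.
 */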
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
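/* On 5700/5701 copper devices the CRC error count is read from the PHY
 * (MII_TG3_RXR_COUNTERS, with counting enabled via MII_TG3_TEST1) and
 * accumulated in tp->phy_crc_errors; all other devices report it through
 * the MAC's rx_fcs_errors statistic.
 */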
static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
		u32 val;

		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
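/* For example, ESTAT_ADD(rx_octets) expands to:
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 * i.e. each reported counter is the pre-close snapshot plus the live
 * hardware value.
 */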
static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);
}
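/* Fold the hardware statistics block into the generic rtnl_link_stats64
 * layout, again adding on top of tp->net_stats_prev so totals survive a
 * close/open cycle.
 */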
static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	stats->rx_crc_errors = old_stats->rx_crc_errors +
		tg3_calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}

static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}

static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
11642 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11644 struct tg3
*tp
= netdev_priv(dev
);
11647 u32 i
, offset
, len
, b_offset
, b_count
;
11650 if (tg3_flag(tp
, NO_NVRAM
))
11653 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11656 offset
= eeprom
->offset
;
11660 eeprom
->magic
= TG3_EEPROM_MAGIC
;
11663 /* adjustments to start on required 4 byte boundary */
11664 b_offset
= offset
& 3;
11665 b_count
= 4 - b_offset
;
11666 if (b_count
> len
) {
11667 /* i.e. offset=1 len=2 */
11670 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
11673 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
11676 eeprom
->len
+= b_count
;
11679 /* read bytes up to the last 4 byte boundary */
11680 pd
= &data
[eeprom
->len
];
11681 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
11682 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
11687 memcpy(pd
+ i
, &val
, 4);
11692 /* read last bytes not ending on 4 byte boundary */
11693 pd
= &data
[eeprom
->len
];
11695 b_offset
= offset
+ len
- b_count
;
11696 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
11699 memcpy(pd
, &val
, b_count
);
11700 eeprom
->len
+= b_count
;
11705 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11707 struct tg3
*tp
= netdev_priv(dev
);
11709 u32 offset
, len
, b_offset
, odd_len
;
11713 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11716 if (tg3_flag(tp
, NO_NVRAM
) ||
11717 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
11720 offset
= eeprom
->offset
;
11723 if ((b_offset
= (offset
& 3))) {
11724 /* adjustments to start on required 4 byte boundary */
11725 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
11736 /* adjustments to end on required 4 byte boundary */
11738 len
= (len
+ 3) & ~3;
11739 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
11745 if (b_offset
|| odd_len
) {
11746 buf
= kmalloc(len
, GFP_KERNEL
);
11750 memcpy(buf
, &start
, 4);
11752 memcpy(buf
+len
-4, &end
, 4);
11753 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
11756 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
11764 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11766 struct tg3
*tp
= netdev_priv(dev
);
11768 if (tg3_flag(tp
, USE_PHYLIB
)) {
11769 struct phy_device
*phydev
;
11770 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11772 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11773 return phy_ethtool_gset(phydev
, cmd
);
11776 cmd
->supported
= (SUPPORTED_Autoneg
);
11778 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11779 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
11780 SUPPORTED_1000baseT_Full
);
11782 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11783 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
11784 SUPPORTED_100baseT_Full
|
11785 SUPPORTED_10baseT_Half
|
11786 SUPPORTED_10baseT_Full
|
11788 cmd
->port
= PORT_TP
;
11790 cmd
->supported
|= SUPPORTED_FIBRE
;
11791 cmd
->port
= PORT_FIBRE
;
11794 cmd
->advertising
= tp
->link_config
.advertising
;
11795 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
11796 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
11797 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11798 cmd
->advertising
|= ADVERTISED_Pause
;
11800 cmd
->advertising
|= ADVERTISED_Pause
|
11801 ADVERTISED_Asym_Pause
;
11803 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11804 cmd
->advertising
|= ADVERTISED_Asym_Pause
;
11807 if (netif_running(dev
) && tp
->link_up
) {
11808 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
11809 cmd
->duplex
= tp
->link_config
.active_duplex
;
11810 cmd
->lp_advertising
= tp
->link_config
.rmt_adv
;
11811 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11812 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
11813 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
11815 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
11818 ethtool_cmd_speed_set(cmd
, SPEED_UNKNOWN
);
11819 cmd
->duplex
= DUPLEX_UNKNOWN
;
11820 cmd
->eth_tp_mdix
= ETH_TP_MDI_INVALID
;
11822 cmd
->phy_address
= tp
->phy_addr
;
11823 cmd
->transceiver
= XCVR_INTERNAL
;
11824 cmd
->autoneg
= tp
->link_config
.autoneg
;
11830 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11832 struct tg3
*tp
= netdev_priv(dev
);
11833 u32 speed
= ethtool_cmd_speed(cmd
);
11835 if (tg3_flag(tp
, USE_PHYLIB
)) {
11836 struct phy_device
*phydev
;
11837 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11839 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11840 return phy_ethtool_sset(phydev
, cmd
);
11843 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
11844 cmd
->autoneg
!= AUTONEG_DISABLE
)
11847 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
11848 cmd
->duplex
!= DUPLEX_FULL
&&
11849 cmd
->duplex
!= DUPLEX_HALF
)
11852 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11853 u32 mask
= ADVERTISED_Autoneg
|
11855 ADVERTISED_Asym_Pause
;
11857 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11858 mask
|= ADVERTISED_1000baseT_Half
|
11859 ADVERTISED_1000baseT_Full
;
11861 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
11862 mask
|= ADVERTISED_100baseT_Half
|
11863 ADVERTISED_100baseT_Full
|
11864 ADVERTISED_10baseT_Half
|
11865 ADVERTISED_10baseT_Full
|
11868 mask
|= ADVERTISED_FIBRE
;
11870 if (cmd
->advertising
& ~mask
)
11873 mask
&= (ADVERTISED_1000baseT_Half
|
11874 ADVERTISED_1000baseT_Full
|
11875 ADVERTISED_100baseT_Half
|
11876 ADVERTISED_100baseT_Full
|
11877 ADVERTISED_10baseT_Half
|
11878 ADVERTISED_10baseT_Full
);
11880 cmd
->advertising
&= mask
;
11882 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
11883 if (speed
!= SPEED_1000
)
11886 if (cmd
->duplex
!= DUPLEX_FULL
)
11889 if (speed
!= SPEED_100
&&
11895 tg3_full_lock(tp
, 0);
11897 tp
->link_config
.autoneg
= cmd
->autoneg
;
11898 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11899 tp
->link_config
.advertising
= (cmd
->advertising
|
11900 ADVERTISED_Autoneg
);
11901 tp
->link_config
.speed
= SPEED_UNKNOWN
;
11902 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
11904 tp
->link_config
.advertising
= 0;
11905 tp
->link_config
.speed
= speed
;
11906 tp
->link_config
.duplex
= cmd
->duplex
;
11909 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
11911 tg3_warn_mgmt_link_flap(tp
);
11913 if (netif_running(dev
))
11914 tg3_setup_phy(tp
, true);
11916 tg3_full_unlock(tp
);
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
		wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;

	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

	spin_lock_bh(&tp->lock);
	if (device_may_wakeup(dp))
		tg3_flag_set(tp, WOL_ENABLE);
	else
		tg3_flag_clear(tp, WOL_ENABLE);
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}

static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
11980 static int tg3_nway_reset(struct net_device
*dev
)
11982 struct tg3
*tp
= netdev_priv(dev
);
11985 if (!netif_running(dev
))
11988 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
11991 tg3_warn_mgmt_link_flap(tp
);
11993 if (tg3_flag(tp
, USE_PHYLIB
)) {
11994 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11996 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
12000 spin_lock_bh(&tp
->lock
);
12002 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
12003 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
12004 ((bmcr
& BMCR_ANENABLE
) ||
12005 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
12006 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
12010 spin_unlock_bh(&tp
->lock
);
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	ering->rx_max_pending = tp->rx_std_ring_mask;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;

	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;

	ering->rx_pending = tp->rx_pending;
	if (tg3_flag(tp, JUMBO_RING_ENABLE))
		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;

	ering->tx_pending = tp->napi[0].tx_pending;
}
12037 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12039 struct tg3
*tp
= netdev_priv(dev
);
12040 int i
, irq_sync
= 0, err
= 0;
12042 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
12043 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
12044 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
12045 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
12046 (tg3_flag(tp
, TSO_BUG
) &&
12047 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
12050 if (netif_running(dev
)) {
12052 tg3_netif_stop(tp
);
12056 tg3_full_lock(tp
, irq_sync
);
12058 tp
->rx_pending
= ering
->rx_pending
;
12060 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
12061 tp
->rx_pending
> 63)
12062 tp
->rx_pending
= 63;
12063 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
12065 for (i
= 0; i
< tp
->irq_max
; i
++)
12066 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
12068 if (netif_running(dev
)) {
12069 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12070 err
= tg3_restart_hw(tp
, false);
12072 tg3_netif_start(tp
);
12075 tg3_full_unlock(tp
);
12077 if (irq_sync
&& !err
)
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);

	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
		epause->rx_pause = 1;
	else
		epause->rx_pause = 0;

	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
		epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
12100 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12102 struct tg3
*tp
= netdev_priv(dev
);
12105 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)
12106 tg3_warn_mgmt_link_flap(tp
);
12108 if (tg3_flag(tp
, USE_PHYLIB
)) {
12110 struct phy_device
*phydev
;
12112 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
12114 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
12115 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
12116 (epause
->rx_pause
!= epause
->tx_pause
)))
12119 tp
->link_config
.flowctrl
= 0;
12120 if (epause
->rx_pause
) {
12121 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12123 if (epause
->tx_pause
) {
12124 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12125 newadv
= ADVERTISED_Pause
;
12127 newadv
= ADVERTISED_Pause
|
12128 ADVERTISED_Asym_Pause
;
12129 } else if (epause
->tx_pause
) {
12130 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12131 newadv
= ADVERTISED_Asym_Pause
;
12135 if (epause
->autoneg
)
12136 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12138 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12140 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
12141 u32 oldadv
= phydev
->advertising
&
12142 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
12143 if (oldadv
!= newadv
) {
12144 phydev
->advertising
&=
12145 ~(ADVERTISED_Pause
|
12146 ADVERTISED_Asym_Pause
);
12147 phydev
->advertising
|= newadv
;
12148 if (phydev
->autoneg
) {
12150 * Always renegotiate the link to
12151 * inform our link partner of our
12152 * flow control settings, even if the
12153 * flow control is forced. Let
12154 * tg3_adjust_link() do the final
12155 * flow control setup.
12157 return phy_start_aneg(phydev
);
12161 if (!epause
->autoneg
)
12162 tg3_setup_flow_control(tp
, 0, 0);
12164 tp
->link_config
.advertising
&=
12165 ~(ADVERTISED_Pause
|
12166 ADVERTISED_Asym_Pause
);
12167 tp
->link_config
.advertising
|= newadv
;
12172 if (netif_running(dev
)) {
12173 tg3_netif_stop(tp
);
12177 tg3_full_lock(tp
, irq_sync
);
12179 if (epause
->autoneg
)
12180 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12182 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12183 if (epause
->rx_pause
)
12184 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12186 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
12187 if (epause
->tx_pause
)
12188 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12190 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
12192 if (netif_running(dev
)) {
12193 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12194 err
= tg3_restart_hw(tp
, false);
12196 tg3_netif_start(tp
);
12199 tg3_full_unlock(tp
);
12202 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			 u32 *rules __always_unused)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!tg3_flag(tp, SUPPORT_MSIX))
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		if (netif_running(tp->dev))
			info->data = tp->rxq_cnt;
		else {
			info->data = num_online_cpus();
			if (info->data > TG3_RSS_MAX_NUM_QS)
				info->data = TG3_RSS_MAX_NUM_QS;
		}

		/* The first interrupt vector only
		 * handles link interrupts.
		 */
		info->data += 1;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
12248 static u32
tg3_get_rxfh_indir_size(struct net_device
*dev
)
12251 struct tg3
*tp
= netdev_priv(dev
);
12253 if (tg3_flag(tp
, SUPPORT_MSIX
))
12254 size
= TG3_RSS_INDIR_TBL_SIZE
;
12259 static int tg3_get_rxfh_indir(struct net_device
*dev
, u32
*indir
)
12261 struct tg3
*tp
= netdev_priv(dev
);
12264 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12265 indir
[i
] = tp
->rss_ind_tbl
[i
];
12270 static int tg3_set_rxfh_indir(struct net_device
*dev
, const u32
*indir
)
12272 struct tg3
*tp
= netdev_priv(dev
);
12275 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12276 tp
->rss_ind_tbl
[i
] = indir
[i
];
12278 if (!netif_running(dev
) || !tg3_flag(tp
, ENABLE_RSS
))
12281 /* It is legal to write the indirection
12282 * table while the device is running.
12284 tg3_full_lock(tp
, 0);
12285 tg3_rss_write_indir_tbl(tp
);
12286 tg3_full_unlock(tp
);
12291 static void tg3_get_channels(struct net_device
*dev
,
12292 struct ethtool_channels
*channel
)
12294 struct tg3
*tp
= netdev_priv(dev
);
12295 u32 deflt_qs
= netif_get_num_default_rss_queues();
12297 channel
->max_rx
= tp
->rxq_max
;
12298 channel
->max_tx
= tp
->txq_max
;
12300 if (netif_running(dev
)) {
12301 channel
->rx_count
= tp
->rxq_cnt
;
12302 channel
->tx_count
= tp
->txq_cnt
;
12305 channel
->rx_count
= tp
->rxq_req
;
12307 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
12310 channel
->tx_count
= tp
->txq_req
;
12312 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
12316 static int tg3_set_channels(struct net_device
*dev
,
12317 struct ethtool_channels
*channel
)
12319 struct tg3
*tp
= netdev_priv(dev
);
12321 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12322 return -EOPNOTSUPP
;
12324 if (channel
->rx_count
> tp
->rxq_max
||
12325 channel
->tx_count
> tp
->txq_max
)
12328 tp
->rxq_req
= channel
->rx_count
;
12329 tp
->txq_req
= channel
->tx_count
;
12331 if (!netif_running(dev
))
12336 tg3_carrier_off(tp
);
12338 tg3_start(tp
, true, false, false);
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
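/* ethtool "identify" (ethtool -p) support: ETHTOOL_ID_ON/OFF force the
 * link and traffic LEDs through MAC_LED_CTRL overrides, and
 * ETHTOOL_ID_INACTIVE restores the saved tp->led_ctrl value.
 */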
12358 static int tg3_set_phys_id(struct net_device
*dev
,
12359 enum ethtool_phys_id_state state
)
12361 struct tg3
*tp
= netdev_priv(dev
);
12363 if (!netif_running(tp
->dev
))
12367 case ETHTOOL_ID_ACTIVE
:
12368 return 1; /* cycle on/off once per second */
12370 case ETHTOOL_ID_ON
:
12371 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12372 LED_CTRL_1000MBPS_ON
|
12373 LED_CTRL_100MBPS_ON
|
12374 LED_CTRL_10MBPS_ON
|
12375 LED_CTRL_TRAFFIC_OVERRIDE
|
12376 LED_CTRL_TRAFFIC_BLINK
|
12377 LED_CTRL_TRAFFIC_LED
);
12380 case ETHTOOL_ID_OFF
:
12381 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12382 LED_CTRL_TRAFFIC_OVERRIDE
);
12385 case ETHTOOL_ID_INACTIVE
:
12386 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
12393 static void tg3_get_ethtool_stats(struct net_device
*dev
,
12394 struct ethtool_stats
*estats
, u64
*tmp_stats
)
12396 struct tg3
*tp
= netdev_priv(dev
);
12399 tg3_get_estats(tp
, (struct tg3_ethtool_stats
*)tmp_stats
);
12401 memset(tmp_stats
, 0, sizeof(struct tg3_ethtool_stats
));
12404 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
12408 u32 offset
= 0, len
= 0;
12411 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
12414 if (magic
== TG3_EEPROM_MAGIC
) {
12415 for (offset
= TG3_NVM_DIR_START
;
12416 offset
< TG3_NVM_DIR_END
;
12417 offset
+= TG3_NVM_DIRENT_SIZE
) {
12418 if (tg3_nvram_read(tp
, offset
, &val
))
12421 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
12422 TG3_NVM_DIRTYPE_EXTVPD
)
12426 if (offset
!= TG3_NVM_DIR_END
) {
12427 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
12428 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
12431 offset
= tg3_nvram_logical_addr(tp
, offset
);
12435 if (!offset
|| !len
) {
12436 offset
= TG3_NVM_VPD_OFF
;
12437 len
= TG3_NVM_VPD_LEN
;
12440 buf
= kmalloc(len
, GFP_KERNEL
);
12444 if (magic
== TG3_EEPROM_MAGIC
) {
12445 for (i
= 0; i
< len
; i
+= 4) {
12446 /* The data is in little-endian format in NVRAM.
12447 * Use the big-endian read routines to preserve
12448 * the byte order as it exists in NVRAM.
12450 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
12456 unsigned int pos
= 0;
12458 ptr
= (u8
*)&buf
[0];
12459 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
12460 cnt
= pci_read_vpd(tp
->pdev
, pos
,
12462 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
12480 #define NVRAM_TEST_SIZE 0x100
12481 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12482 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12483 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12484 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12485 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12486 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12487 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12488 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12490 static int tg3_test_nvram(struct tg3
*tp
)
12492 u32 csum
, magic
, len
;
12494 int i
, j
, k
, err
= 0, size
;
12496 if (tg3_flag(tp
, NO_NVRAM
))
12499 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
12502 if (magic
== TG3_EEPROM_MAGIC
)
12503 size
= NVRAM_TEST_SIZE
;
12504 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
12505 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
12506 TG3_EEPROM_SB_FORMAT_1
) {
12507 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
12508 case TG3_EEPROM_SB_REVISION_0
:
12509 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
12511 case TG3_EEPROM_SB_REVISION_2
:
12512 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
12514 case TG3_EEPROM_SB_REVISION_3
:
12515 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
12517 case TG3_EEPROM_SB_REVISION_4
:
12518 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
12520 case TG3_EEPROM_SB_REVISION_5
:
12521 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
12523 case TG3_EEPROM_SB_REVISION_6
:
12524 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
12531 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
12532 size
= NVRAM_SELFBOOT_HW_SIZE
;
12536 buf
= kmalloc(size
, GFP_KERNEL
);
12541 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
12542 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
12549 /* Selfboot format */
12550 magic
= be32_to_cpu(buf
[0]);
12551 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
12552 TG3_EEPROM_MAGIC_FW
) {
12553 u8
*buf8
= (u8
*) buf
, csum8
= 0;
12555 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
12556 TG3_EEPROM_SB_REVISION_2
) {
12557 /* For rev 2, the csum doesn't include the MBA. */
12558 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
12560 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
12563 for (i
= 0; i
< size
; i
++)
12576 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
12577 TG3_EEPROM_MAGIC_HW
) {
12578 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
12579 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
12580 u8
*buf8
= (u8
*) buf
;
12582 /* Separate the parity bits and the data bytes. */
12583 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
12584 if ((i
== 0) || (i
== 8)) {
12588 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
12589 parity
[k
++] = buf8
[i
] & msk
;
12591 } else if (i
== 16) {
12595 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
12596 parity
[k
++] = buf8
[i
] & msk
;
12599 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
12600 parity
[k
++] = buf8
[i
] & msk
;
12603 data
[j
++] = buf8
[i
];
12607 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
12608 u8 hw8
= hweight8(data
[i
]);
12610 if ((hw8
& 0x1) && parity
[i
])
12612 else if (!(hw8
& 0x1) && !parity
[i
])
12621 /* Bootstrap checksum at offset 0x10 */
12622 csum
= calc_crc((unsigned char *) buf
, 0x10);
12623 if (csum
!= le32_to_cpu(buf
[0x10/4]))
12626 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12627 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
12628 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
12633 buf
= tg3_vpd_readblock(tp
, &len
);
12637 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
12639 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
12643 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
12646 i
+= PCI_VPD_LRDT_TAG_SIZE
;
12647 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
12648 PCI_VPD_RO_KEYWORD_CHKSUM
);
12652 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
12654 for (i
= 0; i
<= j
; i
++)
12655 csum8
+= ((u8
*)buf
)[i
];
12669 #define TG3_SERDES_TIMEOUT_SEC 2
12670 #define TG3_COPPER_TIMEOUT_SEC 6
12672 static int tg3_test_link(struct tg3
*tp
)
12676 if (!netif_running(tp
->dev
))
12679 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
12680 max
= TG3_SERDES_TIMEOUT_SEC
;
12682 max
= TG3_COPPER_TIMEOUT_SEC
;
12684 for (i
= 0; i
< max
; i
++) {
12688 if (msleep_interruptible(1000))
12695 /* Only test the commonly used registers */
12696 static int tg3_test_registers(struct tg3
*tp
)
12698 int i
, is_5705
, is_5750
;
12699 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
12703 #define TG3_FL_5705 0x1
12704 #define TG3_FL_NOT_5705 0x2
12705 #define TG3_FL_NOT_5788 0x4
12706 #define TG3_FL_NOT_5750 0x8
12710 /* MAC Control Registers */
12711 { MAC_MODE
, TG3_FL_NOT_5705
,
12712 0x00000000, 0x00ef6f8c },
12713 { MAC_MODE
, TG3_FL_5705
,
12714 0x00000000, 0x01ef6b8c },
12715 { MAC_STATUS
, TG3_FL_NOT_5705
,
12716 0x03800107, 0x00000000 },
12717 { MAC_STATUS
, TG3_FL_5705
,
12718 0x03800100, 0x00000000 },
12719 { MAC_ADDR_0_HIGH
, 0x0000,
12720 0x00000000, 0x0000ffff },
12721 { MAC_ADDR_0_LOW
, 0x0000,
12722 0x00000000, 0xffffffff },
12723 { MAC_RX_MTU_SIZE
, 0x0000,
12724 0x00000000, 0x0000ffff },
12725 { MAC_TX_MODE
, 0x0000,
12726 0x00000000, 0x00000070 },
12727 { MAC_TX_LENGTHS
, 0x0000,
12728 0x00000000, 0x00003fff },
12729 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
12730 0x00000000, 0x000007fc },
12731 { MAC_RX_MODE
, TG3_FL_5705
,
12732 0x00000000, 0x000007dc },
12733 { MAC_HASH_REG_0
, 0x0000,
12734 0x00000000, 0xffffffff },
12735 { MAC_HASH_REG_1
, 0x0000,
12736 0x00000000, 0xffffffff },
12737 { MAC_HASH_REG_2
, 0x0000,
12738 0x00000000, 0xffffffff },
12739 { MAC_HASH_REG_3
, 0x0000,
12740 0x00000000, 0xffffffff },
12742 /* Receive Data and Receive BD Initiator Control Registers. */
12743 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
12744 0x00000000, 0xffffffff },
12745 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
12746 0x00000000, 0xffffffff },
12747 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
12748 0x00000000, 0x00000003 },
12749 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
12750 0x00000000, 0xffffffff },
12751 { RCVDBDI_STD_BD
+0, 0x0000,
12752 0x00000000, 0xffffffff },
12753 { RCVDBDI_STD_BD
+4, 0x0000,
12754 0x00000000, 0xffffffff },
12755 { RCVDBDI_STD_BD
+8, 0x0000,
12756 0x00000000, 0xffff0002 },
12757 { RCVDBDI_STD_BD
+0xc, 0x0000,
12758 0x00000000, 0xffffffff },
12760 /* Receive BD Initiator Control Registers. */
12761 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
12762 0x00000000, 0xffffffff },
12763 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
12764 0x00000000, 0x000003ff },
12765 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
12766 0x00000000, 0xffffffff },
12768 /* Host Coalescing Control Registers. */
12769 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
12770 0x00000000, 0x00000004 },
12771 { HOSTCC_MODE
, TG3_FL_5705
,
12772 0x00000000, 0x000000f6 },
12773 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
12774 0x00000000, 0xffffffff },
12775 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
12776 0x00000000, 0x000003ff },
12777 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
12778 0x00000000, 0xffffffff },
12779 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
12780 0x00000000, 0x000003ff },
12781 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
12782 0x00000000, 0xffffffff },
12783 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12784 0x00000000, 0x000000ff },
12785 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
12786 0x00000000, 0xffffffff },
12787 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12788 0x00000000, 0x000000ff },
12789 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12790 0x00000000, 0xffffffff },
12791 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12792 0x00000000, 0xffffffff },
12793 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12794 0x00000000, 0xffffffff },
12795 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12796 0x00000000, 0x000000ff },
12797 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12798 0x00000000, 0xffffffff },
12799 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12800 0x00000000, 0x000000ff },
12801 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
12802 0x00000000, 0xffffffff },
12803 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
12804 0x00000000, 0xffffffff },
12805 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
12806 0x00000000, 0xffffffff },
12807 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
12808 0x00000000, 0xffffffff },
12809 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
12810 0x00000000, 0xffffffff },
12811 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
12812 0xffffffff, 0x00000000 },
12813 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
12814 0xffffffff, 0x00000000 },
12816 /* Buffer Manager Control Registers. */
12817 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
12818 0x00000000, 0x007fff80 },
12819 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
12820 0x00000000, 0x007fffff },
12821 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
12822 0x00000000, 0x0000003f },
12823 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
12824 0x00000000, 0x000001ff },
12825 { BUFMGR_MB_HIGH_WATER
, 0x0000,
12826 0x00000000, 0x000001ff },
12827 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
12828 0xffffffff, 0x00000000 },
12829 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
12830 0xffffffff, 0x00000000 },
12832 /* Mailbox Registers */
12833 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
12834 0x00000000, 0x000001ff },
12835 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
12836 0x00000000, 0x000001ff },
12837 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
12838 0x00000000, 0x000007ff },
12839 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
12840 0x00000000, 0x000001ff },
12842 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12845 is_5705
= is_5750
= 0;
12846 if (tg3_flag(tp
, 5705_PLUS
)) {
12848 if (tg3_flag(tp
, 5750_PLUS
))
12852 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
12853 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
12856 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
12859 if (tg3_flag(tp
, IS_5788
) &&
12860 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
12863 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
12866 offset
= (u32
) reg_tbl
[i
].offset
;
12867 read_mask
= reg_tbl
[i
].read_mask
;
12868 write_mask
= reg_tbl
[i
].write_mask
;
12870 /* Save the original register content */
12871 save_val
= tr32(offset
);
12873 /* Determine the read-only value. */
12874 read_val
= save_val
& read_mask
;
12876 /* Write zero to the register, then make sure the read-only bits
12877 * are not changed and the read/write bits are all zeros.
12881 val
= tr32(offset
);
12883 /* Test the read-only and read/write bits. */
12884 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
12887 /* Write ones to all the bits defined by RdMask and WrMask, then
12888 * make sure the read-only bits are not changed and the
12889 * read/write bits are all ones.
12891 tw32(offset
, read_mask
| write_mask
);
12893 val
= tr32(offset
);
12895 /* Test the read-only bits. */
12896 if ((val
& read_mask
) != read_val
)
12899 /* Test the read/write bits. */
12900 if ((val
& write_mask
) != write_mask
)
12903 tw32(offset
, save_val
);
12909 if (netif_msg_hw(tp
))
12910 netdev_err(tp
->dev
,
12911 "Register test failed at offset %x\n", offset
);
12912 tw32(offset
, save_val
);
12916 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
12918 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12922 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
12923 for (j
= 0; j
< len
; j
+= 4) {
12926 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
12927 tg3_read_mem(tp
, offset
+ j
, &val
);
12928 if (val
!= test_pattern
[i
])
12935 static int tg3_test_memory(struct tg3
*tp
)
12937 static struct mem_entry
{
12940 } mem_tbl_570x
[] = {
12941 { 0x00000000, 0x00b50},
12942 { 0x00002000, 0x1c000},
12943 { 0xffffffff, 0x00000}
12944 }, mem_tbl_5705
[] = {
12945 { 0x00000100, 0x0000c},
12946 { 0x00000200, 0x00008},
12947 { 0x00004000, 0x00800},
12948 { 0x00006000, 0x01000},
12949 { 0x00008000, 0x02000},
12950 { 0x00010000, 0x0e000},
12951 { 0xffffffff, 0x00000}
12952 }, mem_tbl_5755
[] = {
12953 { 0x00000200, 0x00008},
12954 { 0x00004000, 0x00800},
12955 { 0x00006000, 0x00800},
12956 { 0x00008000, 0x02000},
12957 { 0x00010000, 0x0c000},
12958 { 0xffffffff, 0x00000}
12959 }, mem_tbl_5906
[] = {
12960 { 0x00000200, 0x00008},
12961 { 0x00004000, 0x00400},
12962 { 0x00006000, 0x00400},
12963 { 0x00008000, 0x01000},
12964 { 0x00010000, 0x01000},
12965 { 0xffffffff, 0x00000}
12966 }, mem_tbl_5717
[] = {
12967 { 0x00000200, 0x00008},
12968 { 0x00010000, 0x0a000},
12969 { 0x00020000, 0x13c00},
12970 { 0xffffffff, 0x00000}
12971 }, mem_tbl_57765
[] = {
12972 { 0x00000200, 0x00008},
12973 { 0x00004000, 0x00800},
12974 { 0x00006000, 0x09800},
12975 { 0x00010000, 0x0a000},
12976 { 0xffffffff, 0x00000}
12978 struct mem_entry
*mem_tbl
;
12982 if (tg3_flag(tp
, 5717_PLUS
))
12983 mem_tbl
= mem_tbl_5717
;
12984 else if (tg3_flag(tp
, 57765_CLASS
) ||
12985 tg3_asic_rev(tp
) == ASIC_REV_5762
)
12986 mem_tbl
= mem_tbl_57765
;
12987 else if (tg3_flag(tp
, 5755_PLUS
))
12988 mem_tbl
= mem_tbl_5755
;
12989 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
12990 mem_tbl
= mem_tbl_5906
;
12991 else if (tg3_flag(tp
, 5705_PLUS
))
12992 mem_tbl
= mem_tbl_5705
;
12994 mem_tbl
= mem_tbl_570x
;
12996 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
12997 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
13005 #define TG3_TSO_MSS 500
13007 #define TG3_TSO_IP_HDR_LEN 20
13008 #define TG3_TSO_TCP_HDR_LEN 20
13009 #define TG3_TSO_TCP_OPT_LEN 12
13011 static const u8 tg3_tso_header
[] = {
13013 0x45, 0x00, 0x00, 0x00,
13014 0x00, 0x00, 0x40, 0x00,
13015 0x40, 0x06, 0x00, 0x00,
13016 0x0a, 0x00, 0x00, 0x01,
13017 0x0a, 0x00, 0x00, 0x02,
13018 0x0d, 0x00, 0xe0, 0x00,
13019 0x00, 0x00, 0x01, 0x00,
13020 0x00, 0x00, 0x02, 0x00,
13021 0x80, 0x10, 0x10, 0x00,
13022 0x14, 0x09, 0x00, 0x00,
13023 0x01, 0x01, 0x08, 0x0a,
13024 0x11, 0x11, 0x11, 0x11,
13025 0x11, 0x11, 0x11, 0x11,
13028 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
13030 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
13031 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
13033 struct sk_buff
*skb
;
13034 u8
*tx_data
, *rx_data
;
13036 int num_pkts
, tx_len
, rx_len
, i
, err
;
13037 struct tg3_rx_buffer_desc
*desc
;
13038 struct tg3_napi
*tnapi
, *rnapi
;
13039 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
13041 tnapi
= &tp
->napi
[0];
13042 rnapi
= &tp
->napi
[0];
13043 if (tp
->irq_cnt
> 1) {
13044 if (tg3_flag(tp
, ENABLE_RSS
))
13045 rnapi
= &tp
->napi
[1];
13046 if (tg3_flag(tp
, ENABLE_TSS
))
13047 tnapi
= &tp
->napi
[1];
13049 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
13054 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
13058 tx_data
= skb_put(skb
, tx_len
);
13059 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
13060 memset(tx_data
+ 6, 0x0, 8);
13062 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
13064 if (tso_loopback
) {
13065 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
13067 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
13068 TG3_TSO_TCP_OPT_LEN
;
13070 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
13071 sizeof(tg3_tso_header
));
13074 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
13075 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
13077 /* Set the total length field in the IP header */
13078 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
13080 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
13081 TXD_FLAG_CPU_POST_DMA
);
13083 if (tg3_flag(tp
, HW_TSO_1
) ||
13084 tg3_flag(tp
, HW_TSO_2
) ||
13085 tg3_flag(tp
, HW_TSO_3
)) {
13087 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
13088 th
= (struct tcphdr
*)&tx_data
[val
];
13091 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
13093 if (tg3_flag(tp
, HW_TSO_3
)) {
13094 mss
|= (hdr_len
& 0xc) << 12;
13095 if (hdr_len
& 0x10)
13096 base_flags
|= 0x00000010;
13097 base_flags
|= (hdr_len
& 0x3e0) << 5;
13098 } else if (tg3_flag(tp
, HW_TSO_2
))
13099 mss
|= hdr_len
<< 9;
13100 else if (tg3_flag(tp
, HW_TSO_1
) ||
13101 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
13102 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
13104 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
13107 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
13110 data_off
= ETH_HLEN
;
13112 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
13113 tx_len
> VLAN_ETH_FRAME_LEN
)
13114 base_flags
|= TXD_FLAG_JMB_PKT
;
13117 for (i
= data_off
; i
< tx_len
; i
++)
13118 tx_data
[i
] = (u8
) (i
& 0xff);
13120 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
13121 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
13122 dev_kfree_skb(skb
);
13126 val
= tnapi
->tx_prod
;
13127 tnapi
->tx_buffers
[val
].skb
= skb
;
13128 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
13130 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13135 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13137 budget
= tg3_tx_avail(tnapi
);
13138 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
13139 base_flags
| TXD_FLAG_END
, mss
, 0)) {
13140 tnapi
->tx_buffers
[val
].skb
= NULL
;
13141 dev_kfree_skb(skb
);
13147 /* Sync BD data before updating mailbox */
13150 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
13151 tr32_mailbox(tnapi
->prodmbox
);
13155 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13156 for (i
= 0; i
< 35; i
++) {
13157 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13162 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
13163 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13164 if ((tx_idx
== tnapi
->tx_prod
) &&
13165 (rx_idx
== (rx_start_idx
+ num_pkts
)))
13169 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
13170 dev_kfree_skb(skb
);
13172 if (tx_idx
!= tnapi
->tx_prod
)
13175 if (rx_idx
!= rx_start_idx
+ num_pkts
)
13179 while (rx_idx
!= rx_start_idx
) {
13180 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
13181 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
13182 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
13184 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
13185 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
13188 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
13191 if (!tso_loopback
) {
13192 if (rx_len
!= tx_len
)
13195 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
13196 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
13199 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
13202 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
13203 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
13204 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
13208 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
13209 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
13210 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
13212 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
13213 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
13214 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
13219 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
13220 PCI_DMA_FROMDEVICE
);
13222 rx_data
+= TG3_RX_OFFSET(tp
);
13223 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
13224 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
13231 /* tg3_free_rings will unmap and free the rx_data */
13236 #define TG3_STD_LOOPBACK_FAILED 1
13237 #define TG3_JMB_LOOPBACK_FAILED 2
13238 #define TG3_TSO_LOOPBACK_FAILED 4
13239 #define TG3_LOOPBACK_FAILED \
13240 (TG3_STD_LOOPBACK_FAILED | \
13241 TG3_JMB_LOOPBACK_FAILED | \
13242 TG3_TSO_LOOPBACK_FAILED)
13244 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
13248 u32 jmb_pkt_sz
= 9000;
13251 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
13253 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
13254 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
13256 if (!netif_running(tp
->dev
)) {
13257 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13258 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13260 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13264 err
= tg3_reset_hw(tp
, true);
13266 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13267 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13269 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13273 if (tg3_flag(tp
, ENABLE_RSS
)) {
13276 /* Reroute all rx packets to the 1st queue */
13277 for (i
= MAC_RSS_INDIR_TBL_0
;
13278 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
13282 /* HW errata - mac loopback fails in some cases on 5780.
13283 * Normal traffic and PHY loopback are not affected by
13284 * errata. Also, the MAC loopback test is deprecated for
13285 * all newer ASIC revisions.
13287 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
13288 !tg3_flag(tp
, CPMU_PRESENT
)) {
13289 tg3_mac_loopback(tp
, true);
13291 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13292 data
[TG3_MAC_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
13294 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13295 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13296 data
[TG3_MAC_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
13298 tg3_mac_loopback(tp
, false);
13301 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
13302 !tg3_flag(tp
, USE_PHYLIB
)) {
13305 tg3_phy_lpbk_set(tp
, 0, false);
13307 /* Wait for link */
13308 for (i
= 0; i
< 100; i
++) {
13309 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
13314 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13315 data
[TG3_PHY_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
13316 if (tg3_flag(tp
, TSO_CAPABLE
) &&
13317 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
13318 data
[TG3_PHY_LOOPB_TEST
] |= TG3_TSO_LOOPBACK_FAILED
;
13319 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13320 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13321 data
[TG3_PHY_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
13324 tg3_phy_lpbk_set(tp
, 0, true);
13326 /* All link indications report up, but the hardware
13327 * isn't really ready for about 20 msec. Double it
13332 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13333 data
[TG3_EXT_LOOPB_TEST
] |=
13334 TG3_STD_LOOPBACK_FAILED
;
13335 if (tg3_flag(tp
, TSO_CAPABLE
) &&
13336 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
13337 data
[TG3_EXT_LOOPB_TEST
] |=
13338 TG3_TSO_LOOPBACK_FAILED
;
13339 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13340 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13341 data
[TG3_EXT_LOOPB_TEST
] |=
13342 TG3_JMB_LOOPBACK_FAILED
;
13345 /* Re-enable gphy autopowerdown. */
13346 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
13347 tg3_phy_toggle_apd(tp
, true);
13350 err
= (data
[TG3_MAC_LOOPB_TEST
] | data
[TG3_PHY_LOOPB_TEST
] |
13351 data
[TG3_EXT_LOOPB_TEST
]) ? -EIO
: 0;
13354 tp
->phy_flags
|= eee_cap
;
13359 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
13362 struct tg3
*tp
= netdev_priv(dev
);
13363 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
13365 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
13366 tg3_power_up(tp
)) {
13367 etest
->flags
|= ETH_TEST_FL_FAILED
;
13368 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
13372 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
13374 if (tg3_test_nvram(tp
) != 0) {
13375 etest
->flags
|= ETH_TEST_FL_FAILED
;
13376 data
[TG3_NVRAM_TEST
] = 1;
13378 if (!doextlpbk
&& tg3_test_link(tp
)) {
13379 etest
->flags
|= ETH_TEST_FL_FAILED
;
13380 data
[TG3_LINK_TEST
] = 1;
13382 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
13383 int err
, err2
= 0, irq_sync
= 0;
13385 if (netif_running(dev
)) {
13387 tg3_netif_stop(tp
);
13391 tg3_full_lock(tp
, irq_sync
);
13392 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
13393 err
= tg3_nvram_lock(tp
);
13394 tg3_halt_cpu(tp
, RX_CPU_BASE
);
13395 if (!tg3_flag(tp
, 5705_PLUS
))
13396 tg3_halt_cpu(tp
, TX_CPU_BASE
);
13398 tg3_nvram_unlock(tp
);
13400 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
13403 if (tg3_test_registers(tp
) != 0) {
13404 etest
->flags
|= ETH_TEST_FL_FAILED
;
13405 data
[TG3_REGISTER_TEST
] = 1;
13408 if (tg3_test_memory(tp
) != 0) {
13409 etest
->flags
|= ETH_TEST_FL_FAILED
;
13410 data
[TG3_MEMORY_TEST
] = 1;
13414 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
13416 if (tg3_test_loopback(tp
, data
, doextlpbk
))
13417 etest
->flags
|= ETH_TEST_FL_FAILED
;
13419 tg3_full_unlock(tp
);
13421 if (tg3_test_interrupt(tp
) != 0) {
13422 etest
->flags
|= ETH_TEST_FL_FAILED
;
13423 data
[TG3_INTERRUPT_TEST
] = 1;
13426 tg3_full_lock(tp
, 0);
13428 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13429 if (netif_running(dev
)) {
13430 tg3_flag_set(tp
, INIT_COMPLETE
);
13431 err2
= tg3_restart_hw(tp
, true);
13433 tg3_netif_start(tp
);
13436 tg3_full_unlock(tp
);
13438 if (irq_sync
&& !err2
)
13441 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
13442 tg3_power_down(tp
);
13446 static int tg3_hwtstamp_ioctl(struct net_device
*dev
,
13447 struct ifreq
*ifr
, int cmd
)
13449 struct tg3
*tp
= netdev_priv(dev
);
13450 struct hwtstamp_config stmpconf
;
13452 if (!tg3_flag(tp
, PTP_CAPABLE
))
13455 if (copy_from_user(&stmpconf
, ifr
->ifr_data
, sizeof(stmpconf
)))
13458 if (stmpconf
.flags
)
13461 switch (stmpconf
.tx_type
) {
13462 case HWTSTAMP_TX_ON
:
13463 tg3_flag_set(tp
, TX_TSTAMP_EN
);
13465 case HWTSTAMP_TX_OFF
:
13466 tg3_flag_clear(tp
, TX_TSTAMP_EN
);
13472 switch (stmpconf
.rx_filter
) {
13473 case HWTSTAMP_FILTER_NONE
:
13476 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
13477 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13478 TG3_RX_PTP_CTL_ALL_V1_EVENTS
;
13480 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
13481 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13482 TG3_RX_PTP_CTL_SYNC_EVNT
;
13484 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
13485 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13486 TG3_RX_PTP_CTL_DELAY_REQ
;
13488 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
13489 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13490 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13492 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
13493 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13494 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13496 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
13497 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13498 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13500 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
13501 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13502 TG3_RX_PTP_CTL_SYNC_EVNT
;
13504 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
13505 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13506 TG3_RX_PTP_CTL_SYNC_EVNT
;
13508 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
13509 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13510 TG3_RX_PTP_CTL_SYNC_EVNT
;
13512 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
13513 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13514 TG3_RX_PTP_CTL_DELAY_REQ
;
13516 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
13517 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13518 TG3_RX_PTP_CTL_DELAY_REQ
;
13520 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
13521 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13522 TG3_RX_PTP_CTL_DELAY_REQ
;
13528 if (netif_running(dev
) && tp
->rxptpctl
)
13529 tw32(TG3_RX_PTP_CTL
,
13530 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
13532 return copy_to_user(ifr
->ifr_data
, &stmpconf
, sizeof(stmpconf
)) ?
13536 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
13538 struct mii_ioctl_data
*data
= if_mii(ifr
);
13539 struct tg3
*tp
= netdev_priv(dev
);
13542 if (tg3_flag(tp
, USE_PHYLIB
)) {
13543 struct phy_device
*phydev
;
13544 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
13546 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
13547 return phy_mii_ioctl(phydev
, ifr
, cmd
);
13552 data
->phy_id
= tp
->phy_addr
;
13555 case SIOCGMIIREG
: {
13558 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13559 break; /* We have no PHY */
13561 if (!netif_running(dev
))
13564 spin_lock_bh(&tp
->lock
);
13565 err
= __tg3_readphy(tp
, data
->phy_id
& 0x1f,
13566 data
->reg_num
& 0x1f, &mii_regval
);
13567 spin_unlock_bh(&tp
->lock
);
13569 data
->val_out
= mii_regval
;
13575 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13576 break; /* We have no PHY */
13578 if (!netif_running(dev
))
13581 spin_lock_bh(&tp
->lock
);
13582 err
= __tg3_writephy(tp
, data
->phy_id
& 0x1f,
13583 data
->reg_num
& 0x1f, data
->val_in
);
13584 spin_unlock_bh(&tp
->lock
);
13588 case SIOCSHWTSTAMP
:
13589 return tg3_hwtstamp_ioctl(dev
, ifr
, cmd
);
13595 return -EOPNOTSUPP
;
13598 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
13600 struct tg3
*tp
= netdev_priv(dev
);
13602 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	if (!tg3_flag(tp, 5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
	.get_channels		= tg3_get_channels,
	.set_channels		= tg3_set_channels,
	.get_ts_info		= tg3_get_ts_info,
};
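/* Illustration (not part of the visible code): an ethtool_ops table like the
 * one above only takes effect once it is attached to the net_device during
 * probe.  A minimal sketch, assuming a freshly allocated "dev" and an "err"
 * variable in the caller; the exact probe-path wording is an assumption.
 */
#if 0	/* illustration only */
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->netdev_ops  = &tg3_netdev_ops;	/* table defined further below */
	err = register_netdev(dev);
#endif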
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	if (!tp->hw_stats) {
		spin_unlock_bh(&tp->lock);
		return &tp->net_stats_prev;
	}

	tg3_get_nstats(tp, stats);
	spin_unlock_bh(&tp->lock);

	return stats;
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
			       int new_mtu)
{
	dev->mtu = new_mtu;

	if (new_mtu > ETH_DATA_LEN) {
		if (tg3_flag(tp, 5780_CLASS)) {
			netdev_update_features(dev);
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else {
			tg3_flag_set(tp, JUMBO_RING_ENABLE);
		}
	} else {
		if (tg3_flag(tp, 5780_CLASS)) {
			tg3_flag_set(tp, TSO_CAPABLE);
			netdev_update_features(dev);
		}
		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
	}
}
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;
	bool reset_phy = false;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is brought up.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	/* Reset PHY, otherwise the read DMA engine will be in a mode that
	 * breaks all requests to 256 bytes.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		reset_phy = true;

	err = tg3_restart_hw(tp, reset_phy);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return err;
}
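/* Illustration (not part of the driver): tg3_change_mtu() above is what
 * ultimately runs when user space changes the MTU, for example through the
 * classic SIOCSIFMTU ioctl.  Interface name and MTU value below are
 * assumptions; 9000 only applies to JUMBO_CAPABLE parts per TG3_MAX_MTU().
 */
#if 0	/* illustration only, not compiled into the driver */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int set_mtu(int fd, const char *ifname, int mtu)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_mtu = mtu;		/* e.g. 9000 for jumbo frames */

	return ioctl(fd, SIOCSIFMTU, &ifr);
}
#endif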
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
static void tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
static void tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
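/* Illustration (not part of the driver): a tiny worked example of the
 * swab16() step above.  If the selfboot image stores the size-in-KB field as
 * 512 (0x0200), the register-style NVRAM read described in the comment
 * presents it opposite the CPU's endianness, e.g. as 0x0002; one 16-bit
 * byteswap recovers 0x0200 and multiplying by 1024 yields the size in bytes.
 * The raw value used below is made up for the demonstration.
 */
#if 0	/* illustration only, not compiled into the driver */
#include <stdint.h>
#include <stdio.h>

static uint16_t swab16_demo(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint16_t as_read = 0x0002;		/* hypothetical raw low 16 bits */
	uint32_t size = (uint32_t)swab16_demo(as_read) * 1024;

	printf("nvram_size = %u bytes\n", size);	/* 524288, i.e. 512 KB */
	return 0;
}
#endif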
static void tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
static void tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
13995 static void tg3_get_5755_nvram_info(struct tg3
*tp
)
13997 u32 nvcfg1
, protect
= 0;
13999 nvcfg1
= tr32(NVRAM_CFG1
);
14001 /* NVRAM protection for TPM */
14002 if (nvcfg1
& (1 << 27)) {
14003 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14007 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
14009 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
14010 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
14011 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
14012 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
14013 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14014 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14015 tg3_flag_set(tp
, FLASH
);
14016 tp
->nvram_pagesize
= 264;
14017 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
14018 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
14019 tp
->nvram_size
= (protect
? 0x3e200 :
14020 TG3_NVRAM_SIZE_512KB
);
14021 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
14022 tp
->nvram_size
= (protect
? 0x1f200 :
14023 TG3_NVRAM_SIZE_256KB
);
14025 tp
->nvram_size
= (protect
? 0x1f200 :
14026 TG3_NVRAM_SIZE_128KB
);
14028 case FLASH_5752VENDOR_ST_M45PE10
:
14029 case FLASH_5752VENDOR_ST_M45PE20
:
14030 case FLASH_5752VENDOR_ST_M45PE40
:
14031 tp
->nvram_jedecnum
= JEDEC_ST
;
14032 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14033 tg3_flag_set(tp
, FLASH
);
14034 tp
->nvram_pagesize
= 256;
14035 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
14036 tp
->nvram_size
= (protect
?
14037 TG3_NVRAM_SIZE_64KB
:
14038 TG3_NVRAM_SIZE_128KB
);
14039 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
14040 tp
->nvram_size
= (protect
?
14041 TG3_NVRAM_SIZE_64KB
:
14042 TG3_NVRAM_SIZE_256KB
);
14044 tp
->nvram_size
= (protect
?
14045 TG3_NVRAM_SIZE_128KB
:
14046 TG3_NVRAM_SIZE_512KB
);
14051 static void tg3_get_5787_nvram_info(struct tg3
*tp
)
14055 nvcfg1
= tr32(NVRAM_CFG1
);
14057 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14058 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
14059 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
14060 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
14061 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
14062 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14063 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14064 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14066 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14067 tw32(NVRAM_CFG1
, nvcfg1
);
14069 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14070 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
14071 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
14072 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
14073 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14074 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14075 tg3_flag_set(tp
, FLASH
);
14076 tp
->nvram_pagesize
= 264;
14078 case FLASH_5752VENDOR_ST_M45PE10
:
14079 case FLASH_5752VENDOR_ST_M45PE20
:
14080 case FLASH_5752VENDOR_ST_M45PE40
:
14081 tp
->nvram_jedecnum
= JEDEC_ST
;
14082 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14083 tg3_flag_set(tp
, FLASH
);
14084 tp
->nvram_pagesize
= 256;
14089 static void tg3_get_5761_nvram_info(struct tg3
*tp
)
14091 u32 nvcfg1
, protect
= 0;
14093 nvcfg1
= tr32(NVRAM_CFG1
);
14095 /* NVRAM protection for TPM */
14096 if (nvcfg1
& (1 << 27)) {
14097 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14101 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
14103 case FLASH_5761VENDOR_ATMEL_ADB021D
:
14104 case FLASH_5761VENDOR_ATMEL_ADB041D
:
14105 case FLASH_5761VENDOR_ATMEL_ADB081D
:
14106 case FLASH_5761VENDOR_ATMEL_ADB161D
:
14107 case FLASH_5761VENDOR_ATMEL_MDB021D
:
14108 case FLASH_5761VENDOR_ATMEL_MDB041D
:
14109 case FLASH_5761VENDOR_ATMEL_MDB081D
:
14110 case FLASH_5761VENDOR_ATMEL_MDB161D
:
14111 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14112 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14113 tg3_flag_set(tp
, FLASH
);
14114 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14115 tp
->nvram_pagesize
= 256;
14117 case FLASH_5761VENDOR_ST_A_M45PE20
:
14118 case FLASH_5761VENDOR_ST_A_M45PE40
:
14119 case FLASH_5761VENDOR_ST_A_M45PE80
:
14120 case FLASH_5761VENDOR_ST_A_M45PE16
:
14121 case FLASH_5761VENDOR_ST_M_M45PE20
:
14122 case FLASH_5761VENDOR_ST_M_M45PE40
:
14123 case FLASH_5761VENDOR_ST_M_M45PE80
:
14124 case FLASH_5761VENDOR_ST_M_M45PE16
:
14125 tp
->nvram_jedecnum
= JEDEC_ST
;
14126 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14127 tg3_flag_set(tp
, FLASH
);
14128 tp
->nvram_pagesize
= 256;
14133 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
14136 case FLASH_5761VENDOR_ATMEL_ADB161D
:
14137 case FLASH_5761VENDOR_ATMEL_MDB161D
:
14138 case FLASH_5761VENDOR_ST_A_M45PE16
:
14139 case FLASH_5761VENDOR_ST_M_M45PE16
:
14140 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
14142 case FLASH_5761VENDOR_ATMEL_ADB081D
:
14143 case FLASH_5761VENDOR_ATMEL_MDB081D
:
14144 case FLASH_5761VENDOR_ST_A_M45PE80
:
14145 case FLASH_5761VENDOR_ST_M_M45PE80
:
14146 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14148 case FLASH_5761VENDOR_ATMEL_ADB041D
:
14149 case FLASH_5761VENDOR_ATMEL_MDB041D
:
14150 case FLASH_5761VENDOR_ST_A_M45PE40
:
14151 case FLASH_5761VENDOR_ST_M_M45PE40
:
14152 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14154 case FLASH_5761VENDOR_ATMEL_ADB021D
:
14155 case FLASH_5761VENDOR_ATMEL_MDB021D
:
14156 case FLASH_5761VENDOR_ST_A_M45PE20
:
14157 case FLASH_5761VENDOR_ST_M_M45PE20
:
14158 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
14171 static void tg3_get_57780_nvram_info(struct tg3
*tp
)
14175 nvcfg1
= tr32(NVRAM_CFG1
);
14177 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14178 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
14179 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
14180 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14181 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14182 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14184 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14185 tw32(NVRAM_CFG1
, nvcfg1
);
14187 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14188 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
14189 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
14190 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
14191 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
14192 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
14193 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
14194 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14195 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14196 tg3_flag_set(tp
, FLASH
);
14198 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14199 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14200 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
14201 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
14202 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14204 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
14205 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
14206 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14208 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
14209 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
14210 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14214 case FLASH_5752VENDOR_ST_M45PE10
:
14215 case FLASH_5752VENDOR_ST_M45PE20
:
14216 case FLASH_5752VENDOR_ST_M45PE40
:
14217 tp
->nvram_jedecnum
= JEDEC_ST
;
14218 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14219 tg3_flag_set(tp
, FLASH
);
14221 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14222 case FLASH_5752VENDOR_ST_M45PE10
:
14223 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14225 case FLASH_5752VENDOR_ST_M45PE20
:
14226 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14228 case FLASH_5752VENDOR_ST_M45PE40
:
14229 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14234 tg3_flag_set(tp
, NO_NVRAM
);
14238 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14239 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14240 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14244 static void tg3_get_5717_nvram_info(struct tg3
*tp
)
14248 nvcfg1
= tr32(NVRAM_CFG1
);
14250 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14251 case FLASH_5717VENDOR_ATMEL_EEPROM
:
14252 case FLASH_5717VENDOR_MICRO_EEPROM
:
14253 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14254 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14255 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14257 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14258 tw32(NVRAM_CFG1
, nvcfg1
);
14260 case FLASH_5717VENDOR_ATMEL_MDB011D
:
14261 case FLASH_5717VENDOR_ATMEL_ADB011B
:
14262 case FLASH_5717VENDOR_ATMEL_ADB011D
:
14263 case FLASH_5717VENDOR_ATMEL_MDB021D
:
14264 case FLASH_5717VENDOR_ATMEL_ADB021B
:
14265 case FLASH_5717VENDOR_ATMEL_ADB021D
:
14266 case FLASH_5717VENDOR_ATMEL_45USPT
:
14267 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14268 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14269 tg3_flag_set(tp
, FLASH
);
14271 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14272 case FLASH_5717VENDOR_ATMEL_MDB021D
:
14273 /* Detect size with tg3_nvram_get_size() */
14275 case FLASH_5717VENDOR_ATMEL_ADB021B
:
14276 case FLASH_5717VENDOR_ATMEL_ADB021D
:
14277 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14280 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14284 case FLASH_5717VENDOR_ST_M_M25PE10
:
14285 case FLASH_5717VENDOR_ST_A_M25PE10
:
14286 case FLASH_5717VENDOR_ST_M_M45PE10
:
14287 case FLASH_5717VENDOR_ST_A_M45PE10
:
14288 case FLASH_5717VENDOR_ST_M_M25PE20
:
14289 case FLASH_5717VENDOR_ST_A_M25PE20
:
14290 case FLASH_5717VENDOR_ST_M_M45PE20
:
14291 case FLASH_5717VENDOR_ST_A_M45PE20
:
14292 case FLASH_5717VENDOR_ST_25USPT
:
14293 case FLASH_5717VENDOR_ST_45USPT
:
14294 tp
->nvram_jedecnum
= JEDEC_ST
;
14295 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14296 tg3_flag_set(tp
, FLASH
);
14298 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14299 case FLASH_5717VENDOR_ST_M_M25PE20
:
14300 case FLASH_5717VENDOR_ST_M_M45PE20
:
14301 /* Detect size with tg3_nvram_get_size() */
14303 case FLASH_5717VENDOR_ST_A_M25PE20
:
14304 case FLASH_5717VENDOR_ST_A_M45PE20
:
14305 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14308 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14313 tg3_flag_set(tp
, NO_NVRAM
);
14317 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14318 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14319 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14322 static void tg3_get_5720_nvram_info(struct tg3
*tp
)
14324 u32 nvcfg1
, nvmpinstrp
;
14326 nvcfg1
= tr32(NVRAM_CFG1
);
14327 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
14329 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
14330 if (!(nvcfg1
& NVRAM_CFG1_5762VENDOR_MASK
)) {
14331 tg3_flag_set(tp
, NO_NVRAM
);
14335 switch (nvmpinstrp
) {
14336 case FLASH_5762_EEPROM_HD
:
14337 nvmpinstrp
= FLASH_5720_EEPROM_HD
;
14339 case FLASH_5762_EEPROM_LD
:
14340 nvmpinstrp
= FLASH_5720_EEPROM_LD
;
14342 case FLASH_5720VENDOR_M_ST_M45PE20
:
14343 /* This pinstrap supports multiple sizes, so force it
14344 * to read the actual size from location 0xf0.
14346 nvmpinstrp
= FLASH_5720VENDOR_ST_45USPT
;
14351 switch (nvmpinstrp
) {
14352 case FLASH_5720_EEPROM_HD
:
14353 case FLASH_5720_EEPROM_LD
:
14354 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14355 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14357 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14358 tw32(NVRAM_CFG1
, nvcfg1
);
14359 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
14360 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14362 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
14364 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
14365 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
14366 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
14367 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
14368 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
14369 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
14370 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
14371 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
14372 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
14373 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
14374 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
14375 case FLASH_5720VENDOR_ATMEL_45USPT
:
14376 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14377 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14378 tg3_flag_set(tp
, FLASH
);
14380 switch (nvmpinstrp
) {
14381 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
14382 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
14383 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
14384 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14386 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
14387 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
14388 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
14389 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14391 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
14392 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
14393 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14396 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
14397 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14401 case FLASH_5720VENDOR_M_ST_M25PE10
:
14402 case FLASH_5720VENDOR_M_ST_M45PE10
:
14403 case FLASH_5720VENDOR_A_ST_M25PE10
:
14404 case FLASH_5720VENDOR_A_ST_M45PE10
:
14405 case FLASH_5720VENDOR_M_ST_M25PE20
:
14406 case FLASH_5720VENDOR_M_ST_M45PE20
:
14407 case FLASH_5720VENDOR_A_ST_M25PE20
:
14408 case FLASH_5720VENDOR_A_ST_M45PE20
:
14409 case FLASH_5720VENDOR_M_ST_M25PE40
:
14410 case FLASH_5720VENDOR_M_ST_M45PE40
:
14411 case FLASH_5720VENDOR_A_ST_M25PE40
:
14412 case FLASH_5720VENDOR_A_ST_M45PE40
:
14413 case FLASH_5720VENDOR_M_ST_M25PE80
:
14414 case FLASH_5720VENDOR_M_ST_M45PE80
:
14415 case FLASH_5720VENDOR_A_ST_M25PE80
:
14416 case FLASH_5720VENDOR_A_ST_M45PE80
:
14417 case FLASH_5720VENDOR_ST_25USPT
:
14418 case FLASH_5720VENDOR_ST_45USPT
:
14419 tp
->nvram_jedecnum
= JEDEC_ST
;
14420 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14421 tg3_flag_set(tp
, FLASH
);
14423 switch (nvmpinstrp
) {
14424 case FLASH_5720VENDOR_M_ST_M25PE20
:
14425 case FLASH_5720VENDOR_M_ST_M45PE20
:
14426 case FLASH_5720VENDOR_A_ST_M25PE20
:
14427 case FLASH_5720VENDOR_A_ST_M45PE20
:
14428 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14430 case FLASH_5720VENDOR_M_ST_M25PE40
:
14431 case FLASH_5720VENDOR_M_ST_M45PE40
:
14432 case FLASH_5720VENDOR_A_ST_M25PE40
:
14433 case FLASH_5720VENDOR_A_ST_M45PE40
:
14434 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14436 case FLASH_5720VENDOR_M_ST_M25PE80
:
14437 case FLASH_5720VENDOR_M_ST_M45PE80
:
14438 case FLASH_5720VENDOR_A_ST_M25PE80
:
14439 case FLASH_5720VENDOR_A_ST_M45PE80
:
14440 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14443 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
14444 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14449 tg3_flag_set(tp
, NO_NVRAM
);
14453 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14454 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14455 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14457 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
14460 if (tg3_nvram_read(tp
, 0, &val
))
14463 if (val
!= TG3_EEPROM_MAGIC
&&
14464 (val
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
)
14465 tg3_flag_set(tp
, NO_NVRAM
);
14469 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14470 static void tg3_nvram_init(struct tg3
*tp
)
14472 if (tg3_flag(tp
, IS_SSB_CORE
)) {
14473 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14474 tg3_flag_clear(tp
, NVRAM
);
14475 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
14476 tg3_flag_set(tp
, NO_NVRAM
);
14480 tw32_f(GRC_EEPROM_ADDR
,
14481 (EEPROM_ADDR_FSM_RESET
|
14482 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
14483 EEPROM_ADDR_CLKPERD_SHIFT
)));
14487 /* Enable seeprom accesses. */
14488 tw32_f(GRC_LOCAL_CTRL
,
14489 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
14492 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14493 tg3_asic_rev(tp
) != ASIC_REV_5701
) {
14494 tg3_flag_set(tp
, NVRAM
);
14496 if (tg3_nvram_lock(tp
)) {
14497 netdev_warn(tp
->dev
,
14498 "Cannot get nvram lock, %s failed\n",
14502 tg3_enable_nvram_access(tp
);
14504 tp
->nvram_size
= 0;
14506 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
14507 tg3_get_5752_nvram_info(tp
);
14508 else if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
14509 tg3_get_5755_nvram_info(tp
);
14510 else if (tg3_asic_rev(tp
) == ASIC_REV_5787
||
14511 tg3_asic_rev(tp
) == ASIC_REV_5784
||
14512 tg3_asic_rev(tp
) == ASIC_REV_5785
)
14513 tg3_get_5787_nvram_info(tp
);
14514 else if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
14515 tg3_get_5761_nvram_info(tp
);
14516 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
14517 tg3_get_5906_nvram_info(tp
);
14518 else if (tg3_asic_rev(tp
) == ASIC_REV_57780
||
14519 tg3_flag(tp
, 57765_CLASS
))
14520 tg3_get_57780_nvram_info(tp
);
14521 else if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
14522 tg3_asic_rev(tp
) == ASIC_REV_5719
)
14523 tg3_get_5717_nvram_info(tp
);
14524 else if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
14525 tg3_asic_rev(tp
) == ASIC_REV_5762
)
14526 tg3_get_5720_nvram_info(tp
);
14528 tg3_get_nvram_info(tp
);
14530 if (tp
->nvram_size
== 0)
14531 tg3_get_nvram_size(tp
);
14533 tg3_disable_nvram_access(tp
);
14534 tg3_nvram_unlock(tp
);
14537 tg3_flag_clear(tp
, NVRAM
);
14538 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
14540 tg3_get_eeprom_size(tp
);
14544 struct subsys_tbl_ent
{
14545 u16 subsys_vendor
, subsys_devid
;
14549 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
14550 /* Broadcom boards. */
14551 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14552 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6
, TG3_PHY_ID_BCM5401
},
14553 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14554 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5
, TG3_PHY_ID_BCM5701
},
14555 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14556 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6
, TG3_PHY_ID_BCM8002
},
14557 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14558 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9
, 0 },
14559 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14560 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1
, TG3_PHY_ID_BCM5701
},
14561 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14562 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8
, TG3_PHY_ID_BCM5701
},
14563 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14564 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7
, 0 },
14565 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14566 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10
, TG3_PHY_ID_BCM5701
},
14567 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14568 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12
, TG3_PHY_ID_BCM5701
},
14569 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14570 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1
, TG3_PHY_ID_BCM5703
},
14571 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14572 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2
, TG3_PHY_ID_BCM5703
},
14575 { TG3PCI_SUBVENDOR_ID_3COM
,
14576 TG3PCI_SUBDEVICE_ID_3COM_3C996T
, TG3_PHY_ID_BCM5401
},
14577 { TG3PCI_SUBVENDOR_ID_3COM
,
14578 TG3PCI_SUBDEVICE_ID_3COM_3C996BT
, TG3_PHY_ID_BCM5701
},
14579 { TG3PCI_SUBVENDOR_ID_3COM
,
14580 TG3PCI_SUBDEVICE_ID_3COM_3C996SX
, 0 },
14581 { TG3PCI_SUBVENDOR_ID_3COM
,
14582 TG3PCI_SUBDEVICE_ID_3COM_3C1000T
, TG3_PHY_ID_BCM5701
},
14583 { TG3PCI_SUBVENDOR_ID_3COM
,
14584 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01
, TG3_PHY_ID_BCM5701
},
14587 { TG3PCI_SUBVENDOR_ID_DELL
,
14588 TG3PCI_SUBDEVICE_ID_DELL_VIPER
, TG3_PHY_ID_BCM5401
},
14589 { TG3PCI_SUBVENDOR_ID_DELL
,
14590 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR
, TG3_PHY_ID_BCM5401
},
14591 { TG3PCI_SUBVENDOR_ID_DELL
,
14592 TG3PCI_SUBDEVICE_ID_DELL_MERLOT
, TG3_PHY_ID_BCM5411
},
14593 { TG3PCI_SUBVENDOR_ID_DELL
,
14594 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT
, TG3_PHY_ID_BCM5411
},
14596 /* Compaq boards. */
14597 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14598 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE
, TG3_PHY_ID_BCM5701
},
14599 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14600 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2
, TG3_PHY_ID_BCM5701
},
14601 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14602 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING
, 0 },
14603 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14604 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780
, TG3_PHY_ID_BCM5701
},
14605 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14606 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2
, TG3_PHY_ID_BCM5701
},
14609 { TG3PCI_SUBVENDOR_ID_IBM
,
14610 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2
, 0 }
14613 static struct subsys_tbl_ent
*tg3_lookup_by_subsys(struct tg3
*tp
)
14617 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
14618 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
14619 tp
->pdev
->subsystem_vendor
) &&
14620 (subsys_id_to_phy_id
[i
].subsys_devid
==
14621 tp
->pdev
->subsystem_device
))
14622 return &subsys_id_to_phy_id
[i
];
14627 static void tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
14631 tp
->phy_id
= TG3_PHY_ID_INVALID
;
14632 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14634 /* Assume an onboard device and WOL capable by default. */
14635 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14636 tg3_flag_set(tp
, WOL_CAP
);
14638 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
14639 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
14640 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14641 tg3_flag_set(tp
, IS_NIC
);
14643 val
= tr32(VCPU_CFGSHDW
);
14644 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
14645 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14646 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
14647 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
14648 tg3_flag_set(tp
, WOL_ENABLE
);
14649 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14654 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
14655 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
14656 u32 nic_cfg
, led_cfg
;
14657 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
14658 int eeprom_phy_serdes
= 0;
14660 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
14661 tp
->nic_sram_data_cfg
= nic_cfg
;
14663 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
14664 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
14665 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14666 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
14667 tg3_asic_rev(tp
) != ASIC_REV_5703
&&
14668 (ver
> 0) && (ver
< 0x100))
14669 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
14671 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
14672 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
14674 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
14675 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
14676 eeprom_phy_serdes
= 1;
14678 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
14679 if (nic_phy_id
!= 0) {
14680 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
14681 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
14683 eeprom_phy_id
= (id1
>> 16) << 10;
14684 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
14685 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
14689 tp
->phy_id
= eeprom_phy_id
;
14690 if (eeprom_phy_serdes
) {
14691 if (!tg3_flag(tp
, 5705_PLUS
))
14692 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14694 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
14697 if (tg3_flag(tp
, 5750_PLUS
))
14698 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
14699 SHASTA_EXT_LED_MODE_MASK
);
14701 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
14705 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
14706 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14709 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
14710 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14713 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
14714 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
14716 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14717 * read on some older 5700/5701 bootcode.
14719 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
14720 tg3_asic_rev(tp
) == ASIC_REV_5701
)
14721 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14725 case SHASTA_EXT_LED_SHARED
:
14726 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
14727 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
14728 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A1
)
14729 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14730 LED_CTRL_MODE_PHY_2
);
14733 case SHASTA_EXT_LED_MAC
:
14734 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
14737 case SHASTA_EXT_LED_COMBO
:
14738 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
14739 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
)
14740 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14741 LED_CTRL_MODE_PHY_2
);
14746 if ((tg3_asic_rev(tp
) == ASIC_REV_5700
||
14747 tg3_asic_rev(tp
) == ASIC_REV_5701
) &&
14748 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
14749 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14751 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
)
14752 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14754 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
14755 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14756 if ((tp
->pdev
->subsystem_vendor
==
14757 PCI_VENDOR_ID_ARIMA
) &&
14758 (tp
->pdev
->subsystem_device
== 0x205a ||
14759 tp
->pdev
->subsystem_device
== 0x2063))
14760 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14762 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14763 tg3_flag_set(tp
, IS_NIC
);
14766 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
14767 tg3_flag_set(tp
, ENABLE_ASF
);
14768 if (tg3_flag(tp
, 5750_PLUS
))
14769 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
14772 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
14773 tg3_flag(tp
, 5750_PLUS
))
14774 tg3_flag_set(tp
, ENABLE_APE
);
14776 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
14777 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
14778 tg3_flag_clear(tp
, WOL_CAP
);
14780 if (tg3_flag(tp
, WOL_CAP
) &&
14781 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
14782 tg3_flag_set(tp
, WOL_ENABLE
);
14783 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14786 if (cfg2
& (1 << 17))
14787 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
14789 /* serdes signal pre-emphasis in register 0x590 set by */
14790 /* bootcode if bit 18 is set */
14791 if (cfg2
& (1 << 18))
14792 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
14794 if ((tg3_flag(tp
, 57765_PLUS
) ||
14795 (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
14796 tg3_chip_rev(tp
) != CHIPREV_5784_AX
)) &&
14797 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
14798 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
14800 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14803 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
14804 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
14805 !tg3_flag(tp
, 57765_PLUS
) &&
14806 (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
))
14807 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14808 if (cfg3
& NIC_SRAM_LNK_FLAP_AVOID
)
14809 tp
->phy_flags
|= TG3_PHYFLG_KEEP_LINK_ON_PWRDN
;
14810 if (cfg3
& NIC_SRAM_1G_ON_VAUX_OK
)
14811 tp
->phy_flags
|= TG3_PHYFLG_1G_ON_VAUX_OK
;
14814 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
14815 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
14816 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
14817 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
14818 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
14819 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
14822 if (tg3_flag(tp
, WOL_CAP
))
14823 device_set_wakeup_enable(&tp
->pdev
->dev
,
14824 tg3_flag(tp
, WOL_ENABLE
));
14826 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
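/* Illustration (not part of the driver): the shift-and-merge above in
 * isolation.  The 32-bit gphy config straddles two aligned words; its top
 * half sits in the low 16 bits of the first read and its bottom half in the
 * high 16 bits of the second.  The raw values below are made up.
 */
#if 0	/* illustration only, not compiled into the driver */
#include <stdint.h>
#include <assert.h>

static uint32_t merge_otp_halves(uint32_t thalf, uint32_t bhalf)
{
	return ((thalf & 0x0000ffffu) << 16) | (bhalf >> 16);
}

int main(void)
{
	/* Hypothetical raw words: config 0xCAFEBABE split across them. */
	uint32_t thalf = 0x0000CAFEu;	/* top half in low 16 bits */
	uint32_t bhalf = 0xBABE0000u;	/* bottom half in high 16 bits */

	assert(merge_otp_halves(thalf, bhalf) == 0xCAFEBABEu);
	return 0;
}
#endif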
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
}
14938 static int tg3_phy_probe(struct tg3
*tp
)
14940 u32 hw_phy_id_1
, hw_phy_id_2
;
14941 u32 hw_phy_id
, hw_phy_id_masked
;
14944 /* flow control autonegotiation is default behavior */
14945 tg3_flag_set(tp
, PAUSE_AUTONEG
);
14946 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
14948 if (tg3_flag(tp
, ENABLE_APE
)) {
14949 switch (tp
->pci_fn
) {
14951 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY0
;
14954 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY1
;
14957 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY2
;
14960 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY3
;
14965 if (!tg3_flag(tp
, ENABLE_ASF
) &&
14966 !(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
14967 !(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
14968 tp
->phy_flags
&= ~(TG3_PHYFLG_1G_ON_VAUX_OK
|
14969 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
);
14971 if (tg3_flag(tp
, USE_PHYLIB
))
14972 return tg3_phy_init(tp
);
14974 /* Reading the PHY ID register can conflict with ASF
14975 * firmware access to the PHY hardware.
14978 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
14979 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
14981 /* Now read the physical PHY_ID from the chip and verify
14982 * that it is sane. If it doesn't look good, we fall back
14983 * to either the hard-coded table based PHY_ID and failing
14984 * that the value found in the eeprom area.
14986 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
14987 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
14989 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
14990 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
14991 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
14993 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
14996 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
14997 tp
->phy_id
= hw_phy_id
;
14998 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
14999 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
15001 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
15003 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
15004 /* Do nothing, phy ID already set up in
15005 * tg3_get_eeprom_hw_cfg().
15008 struct subsys_tbl_ent
*p
;
15010 /* No eeprom signature? Try the hardcoded
15011 * subsys device table.
15013 p
= tg3_lookup_by_subsys(tp
);
15015 tp
->phy_id
= p
->phy_id
;
15016 } else if (!tg3_flag(tp
, IS_SSB_CORE
)) {
15017 /* For now we saw the IDs 0xbc050cd0,
15018 * 0xbc050f80 and 0xbc050c30 on devices
15019 * connected to an BCM4785 and there are
15020 * probably more. Just assume that the phy is
15021 * supported when it is connected to a SSB core
15028 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
15029 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
15033 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15034 (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15035 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15036 tg3_asic_rev(tp
) == ASIC_REV_57766
||
15037 tg3_asic_rev(tp
) == ASIC_REV_5762
||
15038 (tg3_asic_rev(tp
) == ASIC_REV_5717
&&
15039 tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
) ||
15040 (tg3_asic_rev(tp
) == ASIC_REV_57765
&&
15041 tg3_chip_rev_id(tp
) != CHIPREV_ID_57765_A0
)))
15042 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
15044 tg3_phy_init_link_config(tp
);
15046 if (!(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
15047 !(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15048 !tg3_flag(tp
, ENABLE_APE
) &&
15049 !tg3_flag(tp
, ENABLE_ASF
)) {
15052 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
15053 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
15054 (bmsr
& BMSR_LSTATUS
))
15055 goto skip_phy_reset
;
15057 err
= tg3_phy_reset(tp
);
15061 tg3_phy_set_wirespeed(tp
);
15063 if (!tg3_phy_copper_an_config_ok(tp
, &dummy
)) {
15064 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
15065 tp
->link_config
.flowctrl
);
15067 tg3_writephy(tp
, MII_BMCR
,
15068 BMCR_ANENABLE
| BMCR_ANRESTART
);
15073 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
15074 err
= tg3_init_5401phy_dsp(tp
);
15078 err
= tg3_init_5401phy_dsp(tp
);
15084 static void tg3_read_vpd(struct tg3
*tp
)
15087 unsigned int block_end
, rosize
, len
;
15091 vpd_data
= (u8
*)tg3_vpd_readblock(tp
, &vpdlen
);
15095 i
= pci_vpd_find_tag(vpd_data
, 0, vpdlen
, PCI_VPD_LRDT_RO_DATA
);
15097 goto out_not_found
;
15099 rosize
= pci_vpd_lrdt_size(&vpd_data
[i
]);
15100 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+ rosize
;
15101 i
+= PCI_VPD_LRDT_TAG_SIZE
;
15103 if (block_end
> vpdlen
)
15104 goto out_not_found
;
15106 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15107 PCI_VPD_RO_KEYWORD_MFR_ID
);
15109 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
15111 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15112 if (j
+ len
> block_end
|| len
!= 4 ||
15113 memcmp(&vpd_data
[j
], "1028", 4))
15116 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15117 PCI_VPD_RO_KEYWORD_VENDOR0
);
15121 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
15123 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15124 if (j
+ len
> block_end
)
15127 if (len
>= sizeof(tp
->fw_ver
))
15128 len
= sizeof(tp
->fw_ver
) - 1;
15129 memset(tp
->fw_ver
, 0, sizeof(tp
->fw_ver
));
15130 snprintf(tp
->fw_ver
, sizeof(tp
->fw_ver
), "%.*s bc ", len
,
15135 i
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15136 PCI_VPD_RO_KEYWORD_PARTNO
);
15138 goto out_not_found
;
15140 len
= pci_vpd_info_field_size(&vpd_data
[i
]);
15142 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15143 if (len
> TG3_BPN_SIZE
||
15144 (len
+ i
) > vpdlen
)
15145 goto out_not_found
;
15147 memcpy(tp
->board_part_number
, &vpd_data
[i
], len
);
15151 if (tp
->board_part_number
[0])
15155 if (tg3_asic_rev(tp
) == ASIC_REV_5717
) {
15156 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15157 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
)
15158 strcpy(tp
->board_part_number
, "BCM5717");
15159 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
)
15160 strcpy(tp
->board_part_number
, "BCM5718");
15163 } else if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
15164 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57780
)
15165 strcpy(tp
->board_part_number
, "BCM57780");
15166 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57760
)
15167 strcpy(tp
->board_part_number
, "BCM57760");
15168 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
)
15169 strcpy(tp
->board_part_number
, "BCM57790");
15170 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57788
)
15171 strcpy(tp
->board_part_number
, "BCM57788");
15174 } else if (tg3_asic_rev(tp
) == ASIC_REV_57765
) {
15175 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
)
15176 strcpy(tp
->board_part_number
, "BCM57761");
15177 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
)
15178 strcpy(tp
->board_part_number
, "BCM57765");
15179 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
)
15180 strcpy(tp
->board_part_number
, "BCM57781");
15181 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
)
15182 strcpy(tp
->board_part_number
, "BCM57785");
15183 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
)
15184 strcpy(tp
->board_part_number
, "BCM57791");
15185 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
15186 strcpy(tp
->board_part_number
, "BCM57795");
15189 } else if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
15190 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
)
15191 strcpy(tp
->board_part_number
, "BCM57762");
15192 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
)
15193 strcpy(tp
->board_part_number
, "BCM57766");
15194 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
)
15195 strcpy(tp
->board_part_number
, "BCM57782");
15196 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15197 strcpy(tp
->board_part_number
, "BCM57786");
15200 } else if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15201 strcpy(tp
->board_part_number
, "BCM95906");
15204 strcpy(tp
->board_part_number
, "none");
15208 static int tg3_fw_img_is_valid(struct tg3
*tp
, u32 offset
)
15212 if (tg3_nvram_read(tp
, offset
, &val
) ||
15213 (val
& 0xfc000000) != 0x0c000000 ||
15214 tg3_nvram_read(tp
, offset
+ 4, &val
) ||
15221 static void tg3_read_bc_ver(struct tg3
*tp
)
15223 u32 val
, offset
, start
, ver_offset
;
15225 bool newver
= false;
15227 if (tg3_nvram_read(tp
, 0xc, &offset
) ||
15228 tg3_nvram_read(tp
, 0x4, &start
))
15231 offset
= tg3_nvram_logical_addr(tp
, offset
);
15233 if (tg3_nvram_read(tp
, offset
, &val
))
15236 if ((val
& 0xfc000000) == 0x0c000000) {
15237 if (tg3_nvram_read(tp
, offset
+ 4, &val
))
15244 dst_off
= strlen(tp
->fw_ver
);
15247 if (TG3_VER_SIZE
- dst_off
< 16 ||
15248 tg3_nvram_read(tp
, offset
+ 8, &ver_offset
))
15251 offset
= offset
+ ver_offset
- start
;
15252 for (i
= 0; i
< 16; i
+= 4) {
15254 if (tg3_nvram_read_be32(tp
, offset
+ i
, &v
))
15257 memcpy(tp
->fw_ver
+ dst_off
+ i
, &v
, sizeof(v
));
15262 if (tg3_nvram_read(tp
, TG3_NVM_PTREV_BCVER
, &ver_offset
))
15265 major
= (ver_offset
& TG3_NVM_BCVER_MAJMSK
) >>
15266 TG3_NVM_BCVER_MAJSFT
;
15267 minor
= ver_offset
& TG3_NVM_BCVER_MINMSK
;
15268 snprintf(&tp
->fw_ver
[dst_off
], TG3_VER_SIZE
- dst_off
,
15269 "v%d.%02d", major
, minor
);
15273 static void tg3_read_hwsb_ver(struct tg3
*tp
)
15275 u32 val
, major
, minor
;
15277 /* Use native endian representation */
15278 if (tg3_nvram_read(tp
, TG3_NVM_HWSB_CFG1
, &val
))
15281 major
= (val
& TG3_NVM_HWSB_CFG1_MAJMSK
) >>
15282 TG3_NVM_HWSB_CFG1_MAJSFT
;
15283 minor
= (val
& TG3_NVM_HWSB_CFG1_MINMSK
) >>
15284 TG3_NVM_HWSB_CFG1_MINSFT
;
15286 snprintf(&tp
->fw_ver
[0], 32, "sb v%d.%02d", major
, minor
);
15289 static void tg3_read_sb_ver(struct tg3
*tp
, u32 val
)
15291 u32 offset
, major
, minor
, build
;
15293 strncat(tp
->fw_ver
, "sb", TG3_VER_SIZE
- strlen(tp
->fw_ver
) - 1);
15295 if ((val
& TG3_EEPROM_SB_FORMAT_MASK
) != TG3_EEPROM_SB_FORMAT_1
)
15298 switch (val
& TG3_EEPROM_SB_REVISION_MASK
) {
15299 case TG3_EEPROM_SB_REVISION_0
:
15300 offset
= TG3_EEPROM_SB_F1R0_EDH_OFF
;
15302 case TG3_EEPROM_SB_REVISION_2
:
15303 offset
= TG3_EEPROM_SB_F1R2_EDH_OFF
;
15305 case TG3_EEPROM_SB_REVISION_3
:
15306 offset
= TG3_EEPROM_SB_F1R3_EDH_OFF
;
15308 case TG3_EEPROM_SB_REVISION_4
:
15309 offset
= TG3_EEPROM_SB_F1R4_EDH_OFF
;
15311 case TG3_EEPROM_SB_REVISION_5
:
15312 offset
= TG3_EEPROM_SB_F1R5_EDH_OFF
;
15314 case TG3_EEPROM_SB_REVISION_6
:
15315 offset
= TG3_EEPROM_SB_F1R6_EDH_OFF
;
15321 if (tg3_nvram_read(tp
, offset
, &val
))
15324 build
= (val
& TG3_EEPROM_SB_EDH_BLD_MASK
) >>
15325 TG3_EEPROM_SB_EDH_BLD_SHFT
;
15326 major
= (val
& TG3_EEPROM_SB_EDH_MAJ_MASK
) >>
15327 TG3_EEPROM_SB_EDH_MAJ_SHFT
;
15328 minor
= val
& TG3_EEPROM_SB_EDH_MIN_MASK
;
15330 if (minor
> 99 || build
> 26)
15333 offset
= strlen(tp
->fw_ver
);
15334 snprintf(&tp
->fw_ver
[offset
], TG3_VER_SIZE
- offset
,
15335 " v%d.%02d", major
, minor
);
15338 offset
= strlen(tp
->fw_ver
);
15339 if (offset
< TG3_VER_SIZE
- 1)
15340 tp
->fw_ver
[offset
] = 'a' + build
- 1;
15344 static void tg3_read_mgmtfw_ver(struct tg3
*tp
)
15346 u32 val
, offset
, start
;
15349 for (offset
= TG3_NVM_DIR_START
;
15350 offset
< TG3_NVM_DIR_END
;
15351 offset
+= TG3_NVM_DIRENT_SIZE
) {
15352 if (tg3_nvram_read(tp
, offset
, &val
))
15355 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) == TG3_NVM_DIRTYPE_ASFINI
)
15359 if (offset
== TG3_NVM_DIR_END
)
15362 if (!tg3_flag(tp
, 5705_PLUS
))
15363 start
= 0x08000000;
15364 else if (tg3_nvram_read(tp
, offset
- 4, &start
))
15367 if (tg3_nvram_read(tp
, offset
+ 4, &offset
) ||
15368 !tg3_fw_img_is_valid(tp
, offset
) ||
15369 tg3_nvram_read(tp
, offset
+ 8, &val
))
15372 offset
+= val
- start
;
15374 vlen
= strlen(tp
->fw_ver
);
15376 tp
->fw_ver
[vlen
++] = ',';
15377 tp
->fw_ver
[vlen
++] = ' ';
15379 for (i
= 0; i
< 4; i
++) {
15381 if (tg3_nvram_read_be32(tp
, offset
, &v
))
15384 offset
+= sizeof(v
);
15386 if (vlen
> TG3_VER_SIZE
- sizeof(v
)) {
15387 memcpy(&tp
->fw_ver
[vlen
], &v
, TG3_VER_SIZE
- vlen
);
15391 memcpy(&tp
->fw_ver
[vlen
], &v
, sizeof(v
));
15396 static void tg3_probe_ncsi(struct tg3
*tp
)
15400 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
15401 if (apedata
!= APE_SEG_SIG_MAGIC
)
15404 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
15405 if (!(apedata
& APE_FW_STATUS_READY
))
15408 if (tg3_ape_read32(tp
, TG3_APE_FW_FEATURES
) & TG3_APE_FW_FEATURE_NCSI
)
15409 tg3_flag_set(tp
, APE_HAS_NCSI
);
15412 static void tg3_read_dash_ver(struct tg3
*tp
)
15418 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_VERSION
);
15420 if (tg3_flag(tp
, APE_HAS_NCSI
))
15422 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
)
15427 vlen
= strlen(tp
->fw_ver
);
15429 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " %s v%d.%d.%d.%d",
15431 (apedata
& APE_FW_VERSION_MAJMSK
) >> APE_FW_VERSION_MAJSFT
,
15432 (apedata
& APE_FW_VERSION_MINMSK
) >> APE_FW_VERSION_MINSFT
,
15433 (apedata
& APE_FW_VERSION_REVMSK
) >> APE_FW_VERSION_REVSFT
,
15434 (apedata
& APE_FW_VERSION_BLDMSK
));
15437 static void tg3_read_otp_ver(struct tg3
*tp
)
15441 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
15444 if (!tg3_ape_otp_read(tp
, OTP_ADDRESS_MAGIC0
, &val
) &&
15445 !tg3_ape_otp_read(tp
, OTP_ADDRESS_MAGIC0
+ 4, &val2
) &&
15446 TG3_OTP_MAGIC0_VALID(val
)) {
15447 u64 val64
= (u64
) val
<< 32 | val2
;
15451 for (i
= 0; i
< 7; i
++) {
15452 if ((val64
& 0xff) == 0)
15454 ver
= val64
& 0xff;
15457 vlen
= strlen(tp
->fw_ver
);
15458 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " .%02d", ver
);
15462 static void tg3_read_fw_ver(struct tg3
*tp
)
15465 bool vpd_vers
= false;
15467 if (tp
->fw_ver
[0] != 0)
15470 if (tg3_flag(tp
, NO_NVRAM
)) {
15471 strcat(tp
->fw_ver
, "sb");
15472 tg3_read_otp_ver(tp
);
15476 if (tg3_nvram_read(tp
, 0, &val
))
15479 if (val
== TG3_EEPROM_MAGIC
)
15480 tg3_read_bc_ver(tp
);
15481 else if ((val
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
)
15482 tg3_read_sb_ver(tp
, val
);
15483 else if ((val
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
15484 tg3_read_hwsb_ver(tp
);
15486 if (tg3_flag(tp
, ENABLE_ASF
)) {
15487 if (tg3_flag(tp
, ENABLE_APE
)) {
15488 tg3_probe_ncsi(tp
);
15490 tg3_read_dash_ver(tp
);
15491 } else if (!vpd_vers
) {
15492 tg3_read_mgmtfw_ver(tp
);
15496 tp
->fw_ver
[TG3_VER_SIZE
- 1] = 0;
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
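/* Illustration (not part of the visible code): a quirk table like the one
 * above is typically consulted with pci_dev_present(), which returns true if
 * any listed host bridge is present in the system.  A minimal sketch of how
 * the probe path might use it; the flag names here are assumptions.
 */
#if 0	/* illustration only */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
#endif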
15516 static struct pci_dev
*tg3_find_peer(struct tg3
*tp
)
15518 struct pci_dev
*peer
;
15519 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
15521 for (func
= 0; func
< 8; func
++) {
15522 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
15523 if (peer
&& peer
!= tp
->pdev
)
15527 /* 5704 can be configured in single-port mode, set peer to
15528 * tp->pdev in that case.
15536 * We don't need to keep the refcount elevated; there's no way
15537 * to remove one half of this device without removing the other
15544 static void tg3_detect_asic_rev(struct tg3
*tp
, u32 misc_ctrl_reg
)
15546 tp
->pci_chip_rev_id
= misc_ctrl_reg
>> MISC_HOST_CTRL_CHIPREV_SHIFT
;
15547 if (tg3_asic_rev(tp
) == ASIC_REV_USE_PROD_ID_REG
) {
15550 /* All devices that use the alternate
15551 * ASIC REV location have a CPMU.
15553 tg3_flag_set(tp
, CPMU_PRESENT
);
15555 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15556 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
15557 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
15558 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
15559 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
15560 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
15561 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
15562 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
)
15563 reg
= TG3PCI_GEN2_PRODID_ASICREV
;
15564 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
15565 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
15566 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
15567 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
15568 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
15569 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
15570 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
||
15571 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
||
15572 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
||
15573 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15574 reg
= TG3PCI_GEN15_PRODID_ASICREV
;
15576 reg
= TG3PCI_PRODID_ASICREV
;
15578 pci_read_config_dword(tp
->pdev
, reg
, &tp
->pci_chip_rev_id
);
15581 /* Wrong chip ID in 5752 A0. This code can be removed later
15582 * as A0 is not in production.
15584 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5752_A0_HW
)
15585 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
15587 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_C0
)
15588 tp
->pci_chip_rev_id
= CHIPREV_ID_5720_A0
;
15590 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15591 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15592 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15593 tg3_flag_set(tp
, 5717_PLUS
);
15595 if (tg3_asic_rev(tp
) == ASIC_REV_57765
||
15596 tg3_asic_rev(tp
) == ASIC_REV_57766
)
15597 tg3_flag_set(tp
, 57765_CLASS
);
15599 if (tg3_flag(tp
, 57765_CLASS
) || tg3_flag(tp
, 5717_PLUS
) ||
15600 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15601 tg3_flag_set(tp
, 57765_PLUS
);
15603 /* Intentionally exclude ASIC_REV_5906 */
15604 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
15605 tg3_asic_rev(tp
) == ASIC_REV_5787
||
15606 tg3_asic_rev(tp
) == ASIC_REV_5784
||
15607 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15608 tg3_asic_rev(tp
) == ASIC_REV_5785
||
15609 tg3_asic_rev(tp
) == ASIC_REV_57780
||
15610 tg3_flag(tp
, 57765_PLUS
))
15611 tg3_flag_set(tp
, 5755_PLUS
);
15613 if (tg3_asic_rev(tp
) == ASIC_REV_5780
||
15614 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15615 tg3_flag_set(tp
, 5780_CLASS
);
15617 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
15618 tg3_asic_rev(tp
) == ASIC_REV_5752
||
15619 tg3_asic_rev(tp
) == ASIC_REV_5906
||
15620 tg3_flag(tp
, 5755_PLUS
) ||
15621 tg3_flag(tp
, 5780_CLASS
))
15622 tg3_flag_set(tp
, 5750_PLUS
);
15624 if (tg3_asic_rev(tp
) == ASIC_REV_5705
||
15625 tg3_flag(tp
, 5750_PLUS
))
15626 tg3_flag_set(tp
, 5705_PLUS
);
15629 static bool tg3_10_100_only_device(struct tg3
*tp
,
15630 const struct pci_device_id
*ent
)
15632 u32 grc_misc_cfg
= tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
;
15634 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15635 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
15636 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
15639 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_10_100_ONLY
) {
15640 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
15641 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_5705_10_100
)
static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers. The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space. Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range. This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles. However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
			u32	rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32	vendor;
			u32	device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);
	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;
	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}
	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}
	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}
	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}
	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}
	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);
	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906)
		tp->phy_flags |= TG3_PHYFLG_IS_FET;

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;

	if (tg3_flag(tp, 5705_PLUS) &&
	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
		    tg3_asic_rev(tp) == ASIC_REV_5761) {
			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
		tp->phy_otp = tg3_read_otp_phycfg(tp);
		if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}

	if (tg3_flag(tp, CPMU_PRESENT))
		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;

	tp->coalesce_mode = 0;
	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
	/* Set these bits to enable statistics workaround. */
	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780)
		tg3_flag_set(tp, USE_PHYLIB);

	err = tg3_mdio_init(tp);
	if (err)
		return err;
	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
			GRC_MODE_WORD_SWAP_B2HRX_DATA |
			GRC_MODE_B2HRX_ENABLE |
			GRC_MODE_HTX2B_ENABLE |
			GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;

	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);
	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly.  If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}

	tg3_nvram_init(tp);
	/* If the device has an NVRAM, no need to load patch firmware */
	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
	    !tg3_flag(tp, NO_NVRAM))
		tp->fw_needed = NULL;

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tg3_flag_set(tp, IS_5788);

	if (!tg3_flag(tp, IS_5788) &&
	    tg3_asic_rev(tp) != ASIC_REV_5700)
		tg3_flag_set(tp, TAGGED_STATUS);
	if (tg3_flag(tp, TAGGED_STATUS)) {
		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
				      HOSTCC_MODE_CLRTICK_TXBD);

		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}
	/* Preserve the APE MAC_MODE bits */
	if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;

	if (tg3_10_100_only_device(tp, ent))
		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
	}

	tg3_read_fw_ver(tp);
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5700)
		tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);
	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
	    tg3_flag(tp, PCIX_MODE)) {
		tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}

	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;

	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;

	/* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5755)
		tp->rx_std_max_post = 8;

	if (tg3_flag(tp, ASPM_WORKAROUND))
		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
#ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == 6) {
		memcpy(dev->dev_addr, addr, 6);
		return 0;
	}
	return -ENODEV;
}

static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	return 0;
}
#endif
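
/* Obtain the permanent MAC address: try the OF/SSB sources first, then the
 * SRAM mailbox, then NVRAM, and finally the MAC address registers.
 */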
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	if (tg3_flag(tp, IS_SSB_CORE)) {
		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
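
/* Helper for tg3_calc_dma_bndry(): choose DMA read/write boundary bits for
 * TG3PCI_DMA_RW_CTRL from the host cache line size and the bus type
 * (plain PCI, PCI-X, or PCI Express).
 */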
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
#define TEST_BUFFER_SIZE	0x2000
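
/* Host bridges known to corrupt DMA writes even when the DMA test passes;
 * on systems with one of these we keep the conservative 16-byte write
 * boundary (see tg3_test_dma() below).
 */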
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
		    tg3_asic_rev(tp) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
		    tg3_asic_rev(tp) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    tg3_asic_rev(tp) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
	    tg3_asic_rev(tp) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_asic_rev(tp) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
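
/* Return a human-readable name for the attached PHY, derived from
 * tp->phy_id, for use in probe-time log messages.
 */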
static char *tg3_phy_string(struct tg3 *tp)
{
	switch (tp->phy_id & TG3_PHY_ID_MASK) {
	case TG3_PHY_ID_BCM5400:	return "5400";
	case TG3_PHY_ID_BCM5401:	return "5401";
	case TG3_PHY_ID_BCM5411:	return "5411";
	case TG3_PHY_ID_BCM5701:	return "5701";
	case TG3_PHY_ID_BCM5703:	return "5703";
	case TG3_PHY_ID_BCM5704:	return "5704";
	case TG3_PHY_ID_BCM5705:	return "5705";
	case TG3_PHY_ID_BCM5750:	return "5750";
	case TG3_PHY_ID_BCM5752:	return "5752";
	case TG3_PHY_ID_BCM5714:	return "5714";
	case TG3_PHY_ID_BCM5780:	return "5780";
	case TG3_PHY_ID_BCM5755:	return "5755";
	case TG3_PHY_ID_BCM5787:	return "5787";
	case TG3_PHY_ID_BCM5784:	return "5784";
	case TG3_PHY_ID_BCM5756:	return "5722/5756";
	case TG3_PHY_ID_BCM5906:	return "5906";
	case TG3_PHY_ID_BCM5761:	return "5761";
	case TG3_PHY_ID_BCM5718C:	return "5718C";
	case TG3_PHY_ID_BCM5718S:	return "5718S";
	case TG3_PHY_ID_BCM57765:	return "57765";
	case TG3_PHY_ID_BCM5719C:	return "5719C";
	case TG3_PHY_ID_BCM5720C:	return "5720C";
	case TG3_PHY_ID_BCM5762:	return "5762C";
	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
	case 0:			return "serdes";
	default:		return "unknown";
	}
}
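
/* Format a short description of the host bus (PCI, PCI-X, or PCI Express,
 * plus clock speed and width) into the caller-supplied buffer.
 */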
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
	if (tg3_flag(tp, PCI_EXPRESS)) {
		strcpy(str, "PCI Express");
		return str;
	} else if (tg3_flag(tp, PCIX_MODE)) {
		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;

		strcpy(str, "PCIX:");

		if ((clock_ctrl == 7) ||
		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
			strcat(str, "133MHz");
		else if (clock_ctrl == 0)
			strcat(str, "33MHz");
		else if (clock_ctrl == 2)
			strcat(str, "50MHz");
		else if (clock_ctrl == 4)
			strcat(str, "66MHz");
		else if (clock_ctrl == 6)
			strcat(str, "100MHz");
	} else {
		strcpy(str, "PCI:");
		if (tg3_flag(tp, PCI_HIGH_SPEED))
			strcat(str, "66MHz");
		else
			strcat(str, "33MHz");
	}
	if (tg3_flag(tp, PCI_32BIT))
		strcat(str, ":32-bit");
	else
		strcat(str, ":64-bit");
	return str;
}
static void tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
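
/* PCI probe entry point: map the device BARs, determine chip invariants,
 * configure DMA masks and feature flags, and register the net_device.
 */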
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	if (pdev_is_ssb_gige_core(pdev)) {
		tg3_flag_set(tp, IS_SSB_CORE);
		if (ssb_gige_must_flush_posted_writes(pdev))
			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
		if (ssb_gige_one_dma_at_once(pdev))
			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
		if (ssb_gige_have_roboswitch(pdev))
			tg3_flag_set(tp, ROBOSWITCH);
		if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);
	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);