2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag
, unsigned long *bits
)
75 return test_bit(flag
, bits
);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag
, unsigned long *bits
)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag
, unsigned long *bits
)
85 clear_bit(flag
, bits
);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 132
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "May 21, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version
[] =
220 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION
);
226 MODULE_FIRMWARE(FIRMWARE_TG3
);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO
);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5
);
230 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug
, int, 0);
232 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl
) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
),
257 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
|
258 TG3_DRV_DATA_FLAG_5705_10_100
},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
),
260 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
|
261 TG3_DRV_DATA_FLAG_5705_10_100
},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
),
264 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
|
265 TG3_DRV_DATA_FLAG_5705_10_100
},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750
)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
),
272 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
),
278 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5787M
,
287 PCI_VENDOR_ID_LENOVO
,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M
),
289 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787F
),
292 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5784
)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5764
)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5723
)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761
)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5761E
)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761S
)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5761SE
)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_G
)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5785_F
)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
,
312 PCI_VENDOR_ID_AI
, TG3PCI_SUBDEVICE_ID_ACER_57780_A
),
313 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
,
315 PCI_VENDOR_ID_AI
, TG3PCI_SUBDEVICE_ID_ACER_57780_B
),
316 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57780
)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57760
)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57790
),
320 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57788
)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717
)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5717_C
)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5718
)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57781
)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57785
)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57761
)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57765
)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57791
),
330 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57795
),
332 .driver_data
= TG3_DRV_DATA_FLAG_10_100_ONLY
},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5719
)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5720
)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57762
)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_57766
)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5762
)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5725
)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, TG3PCI_DEVICE_TIGON3_5727
)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
353 static const struct {
354 const char string
[ETH_GSTRING_LEN
];
355 } ethtool_stats_keys
[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string
[ETH_GSTRING_LEN
];
449 } ethtool_test_keys
[] = {
450 [TG3_NVRAM_TEST
] = { "nvram test (online) " },
451 [TG3_LINK_TEST
] = { "link test (online) " },
452 [TG3_REGISTER_TEST
] = { "register test (offline)" },
453 [TG3_MEMORY_TEST
] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST
] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST
] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST
] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST
] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
465 writel(val
, tp
->regs
+ off
);
468 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
470 return readl(tp
->regs
+ off
);
473 static void tg3_ape_write32(struct tg3
*tp
, u32 off
, u32 val
)
475 writel(val
, tp
->aperegs
+ off
);
478 static u32
tg3_ape_read32(struct tg3
*tp
, u32 off
)
480 return readl(tp
->aperegs
+ off
);
483 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
487 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
488 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
489 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
490 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
493 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
495 writel(val
, tp
->regs
+ off
);
496 readl(tp
->regs
+ off
);
499 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
504 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
505 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
506 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
507 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
511 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
515 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
516 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
517 TG3_64BIT_REG_LOW
, val
);
520 if (off
== TG3_RX_STD_PROD_IDX_REG
) {
521 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
522 TG3_64BIT_REG_LOW
, val
);
526 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
527 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
528 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
529 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
536 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
537 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
541 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
546 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
547 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
548 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
549 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
560 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
) || tg3_flag(tp
, ICH_WORKAROUND
))
561 /* Non-posted methods */
562 tp
->write32(tp
, off
, val
);
565 tg3_write32(tp
, off
, val
);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
579 tp
->write32_mbox(tp
, off
, val
);
580 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
) ||
581 (!tg3_flag(tp
, MBOX_WRITE_REORDER
) &&
582 !tg3_flag(tp
, ICH_WORKAROUND
)))
583 tp
->read32_mbox(tp
, off
);
586 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
588 void __iomem
*mbox
= tp
->regs
+ off
;
590 if (tg3_flag(tp
, TXD_MBOX_HWBUG
))
592 if (tg3_flag(tp
, MBOX_WRITE_REORDER
) ||
593 tg3_flag(tp
, FLUSH_POSTED_WRITES
))
597 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
599 return readl(tp
->regs
+ off
+ GRCMBOX_BASE
);
602 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
604 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
622 if (tg3_asic_rev(tp
) == ASIC_REV_5906
&&
623 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
626 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
627 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
628 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
629 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
635 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
640 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
643 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
647 if (tg3_asic_rev(tp
) == ASIC_REV_5906
&&
648 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
653 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
654 if (tg3_flag(tp
, SRAM_USE_CONFIG
)) {
655 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
656 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
662 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
667 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
670 static void tg3_ape_lock_init(struct tg3
*tp
)
675 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
676 regbase
= TG3_APE_LOCK_GRANT
;
678 regbase
= TG3_APE_PER_LOCK_GRANT
;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i
= TG3_APE_LOCK_PHY0
; i
<= TG3_APE_LOCK_GPIO
; i
++) {
683 case TG3_APE_LOCK_PHY0
:
684 case TG3_APE_LOCK_PHY1
:
685 case TG3_APE_LOCK_PHY2
:
686 case TG3_APE_LOCK_PHY3
:
687 bit
= APE_LOCK_GRANT_DRIVER
;
691 bit
= APE_LOCK_GRANT_DRIVER
;
693 bit
= 1 << tp
->pci_fn
;
695 tg3_ape_write32(tp
, regbase
+ 4 * i
, bit
);
700 static int tg3_ape_lock(struct tg3
*tp
, int locknum
)
704 u32 status
, req
, gnt
, bit
;
706 if (!tg3_flag(tp
, ENABLE_APE
))
710 case TG3_APE_LOCK_GPIO
:
711 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
713 case TG3_APE_LOCK_GRC
:
714 case TG3_APE_LOCK_MEM
:
716 bit
= APE_LOCK_REQ_DRIVER
;
718 bit
= 1 << tp
->pci_fn
;
720 case TG3_APE_LOCK_PHY0
:
721 case TG3_APE_LOCK_PHY1
:
722 case TG3_APE_LOCK_PHY2
:
723 case TG3_APE_LOCK_PHY3
:
724 bit
= APE_LOCK_REQ_DRIVER
;
730 if (tg3_asic_rev(tp
) == ASIC_REV_5761
) {
731 req
= TG3_APE_LOCK_REQ
;
732 gnt
= TG3_APE_LOCK_GRANT
;
734 req
= TG3_APE_PER_LOCK_REQ
;
735 gnt
= TG3_APE_PER_LOCK_GRANT
;
740 tg3_ape_write32(tp
, req
+ off
, bit
);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i
= 0; i
< 100; i
++) {
744 status
= tg3_ape_read32(tp
, gnt
+ off
);
747 if (pci_channel_offline(tp
->pdev
))
754 /* Revoke the lock request. */
755 tg3_ape_write32(tp
, gnt
+ off
, bit
);
762 static void tg3_ape_unlock(struct tg3
*tp
, int locknum
)
766 if (!tg3_flag(tp
, ENABLE_APE
))
770 case TG3_APE_LOCK_GPIO
:
771 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
773 case TG3_APE_LOCK_GRC
:
774 case TG3_APE_LOCK_MEM
:
776 bit
= APE_LOCK_GRANT_DRIVER
;
778 bit
= 1 << tp
->pci_fn
;
780 case TG3_APE_LOCK_PHY0
:
781 case TG3_APE_LOCK_PHY1
:
782 case TG3_APE_LOCK_PHY2
:
783 case TG3_APE_LOCK_PHY3
:
784 bit
= APE_LOCK_GRANT_DRIVER
;
790 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
791 gnt
= TG3_APE_LOCK_GRANT
;
793 gnt
= TG3_APE_PER_LOCK_GRANT
;
795 tg3_ape_write32(tp
, gnt
+ 4 * locknum
, bit
);
798 static int tg3_ape_event_lock(struct tg3
*tp
, u32 timeout_us
)
803 if (tg3_ape_lock(tp
, TG3_APE_LOCK_MEM
))
806 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
807 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
810 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
813 timeout_us
-= (timeout_us
> 10) ? 10 : timeout_us
;
816 return timeout_us
? 0 : -EBUSY
;
819 static int tg3_ape_wait_for_event(struct tg3
*tp
, u32 timeout_us
)
823 for (i
= 0; i
< timeout_us
/ 10; i
++) {
824 apedata
= tg3_ape_read32(tp
, TG3_APE_EVENT_STATUS
);
826 if (!(apedata
& APE_EVENT_STATUS_EVENT_PENDING
))
832 return i
== timeout_us
/ 10;
835 static int tg3_ape_scratchpad_read(struct tg3
*tp
, u32
*data
, u32 base_off
,
839 u32 i
, bufoff
, msgoff
, maxlen
, apedata
;
841 if (!tg3_flag(tp
, APE_HAS_NCSI
))
844 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
845 if (apedata
!= APE_SEG_SIG_MAGIC
)
848 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
849 if (!(apedata
& APE_FW_STATUS_READY
))
852 bufoff
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_OFF
) +
854 msgoff
= bufoff
+ 2 * sizeof(u32
);
855 maxlen
= tg3_ape_read32(tp
, TG3_APE_SEG_MSG_BUF_LEN
);
860 /* Cap xfer sizes to scratchpad limits. */
861 length
= (len
> maxlen
) ? maxlen
: len
;
864 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
865 if (!(apedata
& APE_FW_STATUS_READY
))
868 /* Wait for up to 1 msec for APE to service previous event. */
869 err
= tg3_ape_event_lock(tp
, 1000);
873 apedata
= APE_EVENT_STATUS_DRIVER_EVNT
|
874 APE_EVENT_STATUS_SCRTCHPD_READ
|
875 APE_EVENT_STATUS_EVENT_PENDING
;
876 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
, apedata
);
878 tg3_ape_write32(tp
, bufoff
, base_off
);
879 tg3_ape_write32(tp
, bufoff
+ sizeof(u32
), length
);
881 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
882 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
886 if (tg3_ape_wait_for_event(tp
, 30000))
889 for (i
= 0; length
; i
+= 4, length
-= 4) {
890 u32 val
= tg3_ape_read32(tp
, msgoff
+ i
);
891 memcpy(data
, &val
, sizeof(u32
));
899 static int tg3_ape_send_event(struct tg3
*tp
, u32 event
)
904 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
905 if (apedata
!= APE_SEG_SIG_MAGIC
)
908 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
909 if (!(apedata
& APE_FW_STATUS_READY
))
912 /* Wait for up to 1 millisecond for APE to service previous event. */
913 err
= tg3_ape_event_lock(tp
, 1000);
917 tg3_ape_write32(tp
, TG3_APE_EVENT_STATUS
,
918 event
| APE_EVENT_STATUS_EVENT_PENDING
);
920 tg3_ape_unlock(tp
, TG3_APE_LOCK_MEM
);
921 tg3_ape_write32(tp
, TG3_APE_EVENT
, APE_EVENT_1
);
926 static void tg3_ape_driver_state_change(struct tg3
*tp
, int kind
)
931 if (!tg3_flag(tp
, ENABLE_APE
))
935 case RESET_KIND_INIT
:
936 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
,
937 APE_HOST_SEG_SIG_MAGIC
);
938 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_LEN
,
939 APE_HOST_SEG_LEN_MAGIC
);
940 apedata
= tg3_ape_read32(tp
, TG3_APE_HOST_INIT_COUNT
);
941 tg3_ape_write32(tp
, TG3_APE_HOST_INIT_COUNT
, ++apedata
);
942 tg3_ape_write32(tp
, TG3_APE_HOST_DRIVER_ID
,
943 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM
, TG3_MIN_NUM
));
944 tg3_ape_write32(tp
, TG3_APE_HOST_BEHAVIOR
,
945 APE_HOST_BEHAV_NO_PHYLOCK
);
946 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
,
947 TG3_APE_HOST_DRVR_STATE_START
);
949 event
= APE_EVENT_STATUS_STATE_START
;
951 case RESET_KIND_SHUTDOWN
:
952 /* With the interface we are currently using,
953 * APE does not track driver state. Wiping
954 * out the HOST SEGMENT SIGNATURE forces
955 * the APE to assume OS absent status.
957 tg3_ape_write32(tp
, TG3_APE_HOST_SEG_SIG
, 0x0);
959 if (device_may_wakeup(&tp
->pdev
->dev
) &&
960 tg3_flag(tp
, WOL_ENABLE
)) {
961 tg3_ape_write32(tp
, TG3_APE_HOST_WOL_SPEED
,
962 TG3_APE_HOST_WOL_SPEED_AUTO
);
963 apedata
= TG3_APE_HOST_DRVR_STATE_WOL
;
965 apedata
= TG3_APE_HOST_DRVR_STATE_UNLOAD
;
967 tg3_ape_write32(tp
, TG3_APE_HOST_DRVR_STATE
, apedata
);
969 event
= APE_EVENT_STATUS_STATE_UNLOAD
;
971 case RESET_KIND_SUSPEND
:
972 event
= APE_EVENT_STATUS_STATE_SUSPEND
;
978 event
|= APE_EVENT_STATUS_DRIVER_EVNT
| APE_EVENT_STATUS_STATE_CHNGE
;
980 tg3_ape_send_event(tp
, event
);
983 static void tg3_disable_ints(struct tg3
*tp
)
987 tw32(TG3PCI_MISC_HOST_CTRL
,
988 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
989 for (i
= 0; i
< tp
->irq_max
; i
++)
990 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 0x00000001);
993 static void tg3_enable_ints(struct tg3
*tp
)
1000 tw32(TG3PCI_MISC_HOST_CTRL
,
1001 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
1003 tp
->coal_now
= tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
;
1004 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
1005 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
1007 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1008 if (tg3_flag(tp
, 1SHOT_MSI
))
1009 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1011 tp
->coal_now
|= tnapi
->coal_now
;
1014 /* Force an initial interrupt */
1015 if (!tg3_flag(tp
, TAGGED_STATUS
) &&
1016 (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
))
1017 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
1019 tw32(HOSTCC_MODE
, tp
->coal_now
);
1021 tp
->coal_now
&= ~(tp
->napi
[0].coal_now
| tp
->napi
[1].coal_now
);
1024 static inline unsigned int tg3_has_work(struct tg3_napi
*tnapi
)
1026 struct tg3
*tp
= tnapi
->tp
;
1027 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
1028 unsigned int work_exists
= 0;
1030 /* check for phy events */
1031 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
1032 if (sblk
->status
& SD_STATUS_LINK_CHG
)
1036 /* check for TX work to do */
1037 if (sblk
->idx
[0].tx_consumer
!= tnapi
->tx_cons
)
1040 /* check for RX work to do */
1041 if (tnapi
->rx_rcb_prod_idx
&&
1042 *(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
1049 * similar to tg3_enable_ints, but it accurately determines whether there
1050 * is new work pending and can return without flushing the PIO write
1051 * which reenables interrupts
1053 static void tg3_int_reenable(struct tg3_napi
*tnapi
)
1055 struct tg3
*tp
= tnapi
->tp
;
1057 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
1060 /* When doing tagged status, this work check is unnecessary.
1061 * The last_tag we write above tells the chip which piece of
1062 * work we've completed.
1064 if (!tg3_flag(tp
, TAGGED_STATUS
) && tg3_has_work(tnapi
))
1065 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
1066 HOSTCC_MODE_ENABLE
| tnapi
->coal_now
);
1069 static void tg3_switch_clocks(struct tg3
*tp
)
1072 u32 orig_clock_ctrl
;
1074 if (tg3_flag(tp
, CPMU_PRESENT
) || tg3_flag(tp
, 5780_CLASS
))
1077 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
1079 orig_clock_ctrl
= clock_ctrl
;
1080 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
1081 CLOCK_CTRL_CLKRUN_OENABLE
|
1083 tp
->pci_clock_ctrl
= clock_ctrl
;
1085 if (tg3_flag(tp
, 5705_PLUS
)) {
1086 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1088 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
1090 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1093 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1096 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
1099 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
1102 #define PHY_BUSY_LOOPS 5000
1104 static int __tg3_readphy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1111 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1113 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1117 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1121 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1122 MI_COM_PHY_ADDR_MASK
);
1123 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1124 MI_COM_REG_ADDR_MASK
);
1125 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
1127 tw32_f(MAC_MI_COM
, frame_val
);
1129 loops
= PHY_BUSY_LOOPS
;
1130 while (loops
!= 0) {
1132 frame_val
= tr32(MAC_MI_COM
);
1134 if ((frame_val
& MI_COM_BUSY
) == 0) {
1136 frame_val
= tr32(MAC_MI_COM
);
1144 *val
= frame_val
& MI_COM_DATA_MASK
;
1148 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1149 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1153 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1158 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
1160 return __tg3_readphy(tp
, tp
->phy_addr
, reg
, val
);
1163 static int __tg3_writephy(struct tg3
*tp
, unsigned int phy_addr
, int reg
,
1170 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
1171 (reg
== MII_CTRL1000
|| reg
== MII_TG3_AUX_CTRL
))
1174 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1176 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
1180 tg3_ape_lock(tp
, tp
->phy_ape_lock
);
1182 frame_val
= ((phy_addr
<< MI_COM_PHY_ADDR_SHIFT
) &
1183 MI_COM_PHY_ADDR_MASK
);
1184 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
1185 MI_COM_REG_ADDR_MASK
);
1186 frame_val
|= (val
& MI_COM_DATA_MASK
);
1187 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
1189 tw32_f(MAC_MI_COM
, frame_val
);
1191 loops
= PHY_BUSY_LOOPS
;
1192 while (loops
!= 0) {
1194 frame_val
= tr32(MAC_MI_COM
);
1195 if ((frame_val
& MI_COM_BUSY
) == 0) {
1197 frame_val
= tr32(MAC_MI_COM
);
1207 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
1208 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1212 tg3_ape_unlock(tp
, tp
->phy_ape_lock
);
1217 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
1219 return __tg3_writephy(tp
, tp
->phy_addr
, reg
, val
);
1222 static int tg3_phy_cl45_write(struct tg3
*tp
, u32 devad
, u32 addr
, u32 val
)
1226 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1230 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1234 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1235 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1239 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1245 static int tg3_phy_cl45_read(struct tg3
*tp
, u32 devad
, u32 addr
, u32
*val
)
1249 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
, devad
);
1253 err
= tg3_writephy(tp
, MII_TG3_MMD_ADDRESS
, addr
);
1257 err
= tg3_writephy(tp
, MII_TG3_MMD_CTRL
,
1258 MII_TG3_MMD_CTRL_DATA_NOINC
| devad
);
1262 err
= tg3_readphy(tp
, MII_TG3_MMD_ADDRESS
, val
);
1268 static int tg3_phydsp_read(struct tg3
*tp
, u32 reg
, u32
*val
)
1272 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1274 err
= tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1279 static int tg3_phydsp_write(struct tg3
*tp
, u32 reg
, u32 val
)
1283 err
= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, reg
);
1285 err
= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, val
);
1290 static int tg3_phy_auxctl_read(struct tg3
*tp
, int reg
, u32
*val
)
1294 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
1295 (reg
<< MII_TG3_AUXCTL_MISC_RDSEL_SHIFT
) |
1296 MII_TG3_AUXCTL_SHDWSEL_MISC
);
1298 err
= tg3_readphy(tp
, MII_TG3_AUX_CTRL
, val
);
1303 static int tg3_phy_auxctl_write(struct tg3
*tp
, int reg
, u32 set
)
1305 if (reg
== MII_TG3_AUXCTL_SHDWSEL_MISC
)
1306 set
|= MII_TG3_AUXCTL_MISC_WREN
;
1308 return tg3_writephy(tp
, MII_TG3_AUX_CTRL
, set
| reg
);
1311 static int tg3_phy_toggle_auxctl_smdsp(struct tg3
*tp
, bool enable
)
1316 err
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
1322 val
|= MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1324 val
&= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA
;
1326 err
= tg3_phy_auxctl_write((tp
), MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
1327 val
| MII_TG3_AUXCTL_ACTL_TX_6DB
);
1332 static int tg3_bmcr_reset(struct tg3
*tp
)
1337 /* OK, reset it, and poll the BMCR_RESET bit until it
1338 * clears or we time out.
1340 phy_control
= BMCR_RESET
;
1341 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
1347 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
1351 if ((phy_control
& BMCR_RESET
) == 0) {
1363 static int tg3_mdio_read(struct mii_bus
*bp
, int mii_id
, int reg
)
1365 struct tg3
*tp
= bp
->priv
;
1368 spin_lock_bh(&tp
->lock
);
1370 if (tg3_readphy(tp
, reg
, &val
))
1373 spin_unlock_bh(&tp
->lock
);
1378 static int tg3_mdio_write(struct mii_bus
*bp
, int mii_id
, int reg
, u16 val
)
1380 struct tg3
*tp
= bp
->priv
;
1383 spin_lock_bh(&tp
->lock
);
1385 if (tg3_writephy(tp
, reg
, val
))
1388 spin_unlock_bh(&tp
->lock
);
1393 static int tg3_mdio_reset(struct mii_bus
*bp
)
1398 static void tg3_mdio_config_5785(struct tg3
*tp
)
1401 struct phy_device
*phydev
;
1403 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1404 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1405 case PHY_ID_BCM50610
:
1406 case PHY_ID_BCM50610M
:
1407 val
= MAC_PHYCFG2_50610_LED_MODES
;
1409 case PHY_ID_BCMAC131
:
1410 val
= MAC_PHYCFG2_AC131_LED_MODES
;
1412 case PHY_ID_RTL8211C
:
1413 val
= MAC_PHYCFG2_RTL8211C_LED_MODES
;
1415 case PHY_ID_RTL8201E
:
1416 val
= MAC_PHYCFG2_RTL8201E_LED_MODES
;
1422 if (phydev
->interface
!= PHY_INTERFACE_MODE_RGMII
) {
1423 tw32(MAC_PHYCFG2
, val
);
1425 val
= tr32(MAC_PHYCFG1
);
1426 val
&= ~(MAC_PHYCFG1_RGMII_INT
|
1427 MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
);
1428 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
;
1429 tw32(MAC_PHYCFG1
, val
);
1434 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1435 val
|= MAC_PHYCFG2_EMODE_MASK_MASK
|
1436 MAC_PHYCFG2_FMODE_MASK_MASK
|
1437 MAC_PHYCFG2_GMODE_MASK_MASK
|
1438 MAC_PHYCFG2_ACT_MASK_MASK
|
1439 MAC_PHYCFG2_QUAL_MASK_MASK
|
1440 MAC_PHYCFG2_INBAND_ENABLE
;
1442 tw32(MAC_PHYCFG2
, val
);
1444 val
= tr32(MAC_PHYCFG1
);
1445 val
&= ~(MAC_PHYCFG1_RXCLK_TO_MASK
| MAC_PHYCFG1_TXCLK_TO_MASK
|
1446 MAC_PHYCFG1_RGMII_EXT_RX_DEC
| MAC_PHYCFG1_RGMII_SND_STAT_EN
);
1447 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1448 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1449 val
|= MAC_PHYCFG1_RGMII_EXT_RX_DEC
;
1450 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1451 val
|= MAC_PHYCFG1_RGMII_SND_STAT_EN
;
1453 val
|= MAC_PHYCFG1_RXCLK_TIMEOUT
| MAC_PHYCFG1_TXCLK_TIMEOUT
|
1454 MAC_PHYCFG1_RGMII_INT
| MAC_PHYCFG1_TXC_DRV
;
1455 tw32(MAC_PHYCFG1
, val
);
1457 val
= tr32(MAC_EXT_RGMII_MODE
);
1458 val
&= ~(MAC_RGMII_MODE_RX_INT_B
|
1459 MAC_RGMII_MODE_RX_QUALITY
|
1460 MAC_RGMII_MODE_RX_ACTIVITY
|
1461 MAC_RGMII_MODE_RX_ENG_DET
|
1462 MAC_RGMII_MODE_TX_ENABLE
|
1463 MAC_RGMII_MODE_TX_LOWPWR
|
1464 MAC_RGMII_MODE_TX_RESET
);
1465 if (!tg3_flag(tp
, RGMII_INBAND_DISABLE
)) {
1466 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1467 val
|= MAC_RGMII_MODE_RX_INT_B
|
1468 MAC_RGMII_MODE_RX_QUALITY
|
1469 MAC_RGMII_MODE_RX_ACTIVITY
|
1470 MAC_RGMII_MODE_RX_ENG_DET
;
1471 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1472 val
|= MAC_RGMII_MODE_TX_ENABLE
|
1473 MAC_RGMII_MODE_TX_LOWPWR
|
1474 MAC_RGMII_MODE_TX_RESET
;
1476 tw32(MAC_EXT_RGMII_MODE
, val
);
1479 static void tg3_mdio_start(struct tg3
*tp
)
1481 tp
->mi_mode
&= ~MAC_MI_MODE_AUTO_POLL
;
1482 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1485 if (tg3_flag(tp
, MDIOBUS_INITED
) &&
1486 tg3_asic_rev(tp
) == ASIC_REV_5785
)
1487 tg3_mdio_config_5785(tp
);
1490 static int tg3_mdio_init(struct tg3
*tp
)
1494 struct phy_device
*phydev
;
1496 if (tg3_flag(tp
, 5717_PLUS
)) {
1499 tp
->phy_addr
= tp
->pci_fn
+ 1;
1501 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
)
1502 is_serdes
= tr32(SG_DIG_STATUS
) & SG_DIG_IS_SERDES
;
1504 is_serdes
= tr32(TG3_CPMU_PHY_STRAP
) &
1505 TG3_CPMU_PHY_STRAP_IS_SERDES
;
1509 tp
->phy_addr
= TG3_PHY_MII_ADDR
;
1513 if (!tg3_flag(tp
, USE_PHYLIB
) || tg3_flag(tp
, MDIOBUS_INITED
))
1516 tp
->mdio_bus
= mdiobus_alloc();
1517 if (tp
->mdio_bus
== NULL
)
1520 tp
->mdio_bus
->name
= "tg3 mdio bus";
1521 snprintf(tp
->mdio_bus
->id
, MII_BUS_ID_SIZE
, "%x",
1522 (tp
->pdev
->bus
->number
<< 8) | tp
->pdev
->devfn
);
1523 tp
->mdio_bus
->priv
= tp
;
1524 tp
->mdio_bus
->parent
= &tp
->pdev
->dev
;
1525 tp
->mdio_bus
->read
= &tg3_mdio_read
;
1526 tp
->mdio_bus
->write
= &tg3_mdio_write
;
1527 tp
->mdio_bus
->reset
= &tg3_mdio_reset
;
1528 tp
->mdio_bus
->phy_mask
= ~(1 << TG3_PHY_MII_ADDR
);
1529 tp
->mdio_bus
->irq
= &tp
->mdio_irq
[0];
1531 for (i
= 0; i
< PHY_MAX_ADDR
; i
++)
1532 tp
->mdio_bus
->irq
[i
] = PHY_POLL
;
1534 /* The bus registration will look for all the PHYs on the mdio bus.
1535 * Unfortunately, it does not ensure the PHY is powered up before
1536 * accessing the PHY ID registers. A chip reset is the
1537 * quickest way to bring the device back to an operational state..
1539 if (tg3_readphy(tp
, MII_BMCR
, ®
) || (reg
& BMCR_PDOWN
))
1542 i
= mdiobus_register(tp
->mdio_bus
);
1544 dev_warn(&tp
->pdev
->dev
, "mdiobus_reg failed (0x%x)\n", i
);
1545 mdiobus_free(tp
->mdio_bus
);
1549 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
1551 if (!phydev
|| !phydev
->drv
) {
1552 dev_warn(&tp
->pdev
->dev
, "No PHY devices\n");
1553 mdiobus_unregister(tp
->mdio_bus
);
1554 mdiobus_free(tp
->mdio_bus
);
1558 switch (phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
) {
1559 case PHY_ID_BCM57780
:
1560 phydev
->interface
= PHY_INTERFACE_MODE_GMII
;
1561 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1563 case PHY_ID_BCM50610
:
1564 case PHY_ID_BCM50610M
:
1565 phydev
->dev_flags
|= PHY_BRCM_CLEAR_RGMII_MODE
|
1566 PHY_BRCM_RX_REFCLK_UNUSED
|
1567 PHY_BRCM_DIS_TXCRXC_NOENRGY
|
1568 PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1569 if (tg3_flag(tp
, RGMII_INBAND_DISABLE
))
1570 phydev
->dev_flags
|= PHY_BRCM_STD_IBND_DISABLE
;
1571 if (tg3_flag(tp
, RGMII_EXT_IBND_RX_EN
))
1572 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_RX_ENABLE
;
1573 if (tg3_flag(tp
, RGMII_EXT_IBND_TX_EN
))
1574 phydev
->dev_flags
|= PHY_BRCM_EXT_IBND_TX_ENABLE
;
1576 case PHY_ID_RTL8211C
:
1577 phydev
->interface
= PHY_INTERFACE_MODE_RGMII
;
1579 case PHY_ID_RTL8201E
:
1580 case PHY_ID_BCMAC131
:
1581 phydev
->interface
= PHY_INTERFACE_MODE_MII
;
1582 phydev
->dev_flags
|= PHY_BRCM_AUTO_PWRDWN_ENABLE
;
1583 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
1587 tg3_flag_set(tp
, MDIOBUS_INITED
);
1589 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
1590 tg3_mdio_config_5785(tp
);
1595 static void tg3_mdio_fini(struct tg3
*tp
)
1597 if (tg3_flag(tp
, MDIOBUS_INITED
)) {
1598 tg3_flag_clear(tp
, MDIOBUS_INITED
);
1599 mdiobus_unregister(tp
->mdio_bus
);
1600 mdiobus_free(tp
->mdio_bus
);
1604 /* tp->lock is held. */
1605 static inline void tg3_generate_fw_event(struct tg3
*tp
)
1609 val
= tr32(GRC_RX_CPU_EVENT
);
1610 val
|= GRC_RX_CPU_DRIVER_EVENT
;
1611 tw32_f(GRC_RX_CPU_EVENT
, val
);
1613 tp
->last_event_jiffies
= jiffies
;
1616 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1618 /* tp->lock is held. */
1619 static void tg3_wait_for_event_ack(struct tg3
*tp
)
1622 unsigned int delay_cnt
;
1625 /* If enough time has passed, no wait is necessary. */
1626 time_remain
= (long)(tp
->last_event_jiffies
+ 1 +
1627 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC
)) -
1629 if (time_remain
< 0)
1632 /* Check if we can shorten the wait time. */
1633 delay_cnt
= jiffies_to_usecs(time_remain
);
1634 if (delay_cnt
> TG3_FW_EVENT_TIMEOUT_USEC
)
1635 delay_cnt
= TG3_FW_EVENT_TIMEOUT_USEC
;
1636 delay_cnt
= (delay_cnt
>> 3) + 1;
1638 for (i
= 0; i
< delay_cnt
; i
++) {
1639 if (!(tr32(GRC_RX_CPU_EVENT
) & GRC_RX_CPU_DRIVER_EVENT
))
1641 if (pci_channel_offline(tp
->pdev
))
1648 /* tp->lock is held. */
1649 static void tg3_phy_gather_ump_data(struct tg3
*tp
, u32
*data
)
1654 if (!tg3_readphy(tp
, MII_BMCR
, ®
))
1656 if (!tg3_readphy(tp
, MII_BMSR
, ®
))
1657 val
|= (reg
& 0xffff);
1661 if (!tg3_readphy(tp
, MII_ADVERTISE
, ®
))
1663 if (!tg3_readphy(tp
, MII_LPA
, ®
))
1664 val
|= (reg
& 0xffff);
1668 if (!(tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)) {
1669 if (!tg3_readphy(tp
, MII_CTRL1000
, ®
))
1671 if (!tg3_readphy(tp
, MII_STAT1000
, ®
))
1672 val
|= (reg
& 0xffff);
1676 if (!tg3_readphy(tp
, MII_PHYADDR
, ®
))
1683 /* tp->lock is held. */
1684 static void tg3_ump_link_report(struct tg3
*tp
)
1688 if (!tg3_flag(tp
, 5780_CLASS
) || !tg3_flag(tp
, ENABLE_ASF
))
1691 tg3_phy_gather_ump_data(tp
, data
);
1693 tg3_wait_for_event_ack(tp
);
1695 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_LINK_UPDATE
);
1696 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 14);
1697 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x0, data
[0]);
1698 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x4, data
[1]);
1699 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0x8, data
[2]);
1700 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
+ 0xc, data
[3]);
1702 tg3_generate_fw_event(tp
);
1705 /* tp->lock is held. */
1706 static void tg3_stop_fw(struct tg3
*tp
)
1708 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
1709 /* Wait for RX cpu to ACK the previous event. */
1710 tg3_wait_for_event_ack(tp
);
1712 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
1714 tg3_generate_fw_event(tp
);
1716 /* Wait for RX cpu to ACK this event. */
1717 tg3_wait_for_event_ack(tp
);
1721 /* tp->lock is held. */
1722 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
1724 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
1725 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
1727 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1729 case RESET_KIND_INIT
:
1730 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1734 case RESET_KIND_SHUTDOWN
:
1735 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1739 case RESET_KIND_SUSPEND
:
1740 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1749 if (kind
== RESET_KIND_INIT
||
1750 kind
== RESET_KIND_SUSPEND
)
1751 tg3_ape_driver_state_change(tp
, kind
);
1754 /* tp->lock is held. */
1755 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
1757 if (tg3_flag(tp
, ASF_NEW_HANDSHAKE
)) {
1759 case RESET_KIND_INIT
:
1760 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1761 DRV_STATE_START_DONE
);
1764 case RESET_KIND_SHUTDOWN
:
1765 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1766 DRV_STATE_UNLOAD_DONE
);
1774 if (kind
== RESET_KIND_SHUTDOWN
)
1775 tg3_ape_driver_state_change(tp
, kind
);
1778 /* tp->lock is held. */
1779 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
1781 if (tg3_flag(tp
, ENABLE_ASF
)) {
1783 case RESET_KIND_INIT
:
1784 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1788 case RESET_KIND_SHUTDOWN
:
1789 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1793 case RESET_KIND_SUSPEND
:
1794 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
1804 static int tg3_poll_fw(struct tg3
*tp
)
1809 if (tg3_flag(tp
, NO_FWARE_REPORTED
))
1812 if (tg3_flag(tp
, IS_SSB_CORE
)) {
1813 /* We don't use firmware. */
1817 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
1818 /* Wait up to 20ms for init done. */
1819 for (i
= 0; i
< 200; i
++) {
1820 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
1822 if (pci_channel_offline(tp
->pdev
))
1830 /* Wait for firmware initialization to complete. */
1831 for (i
= 0; i
< 100000; i
++) {
1832 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
1833 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1835 if (pci_channel_offline(tp
->pdev
)) {
1836 if (!tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1837 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1838 netdev_info(tp
->dev
, "No firmware running\n");
1847 /* Chip might not be fitted with firmware. Some Sun onboard
1848 * parts are configured like that. So don't signal the timeout
1849 * of the above loop as an error, but do report the lack of
1850 * running firmware once.
1852 if (i
>= 100000 && !tg3_flag(tp
, NO_FWARE_REPORTED
)) {
1853 tg3_flag_set(tp
, NO_FWARE_REPORTED
);
1855 netdev_info(tp
->dev
, "No firmware running\n");
1858 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
1859 /* The 57765 A0 needs a little more
1860 * time to do some important work.
1868 static void tg3_link_report(struct tg3
*tp
)
1870 if (!netif_carrier_ok(tp
->dev
)) {
1871 netif_info(tp
, link
, tp
->dev
, "Link is down\n");
1872 tg3_ump_link_report(tp
);
1873 } else if (netif_msg_link(tp
)) {
1874 netdev_info(tp
->dev
, "Link is up at %d Mbps, %s duplex\n",
1875 (tp
->link_config
.active_speed
== SPEED_1000
?
1877 (tp
->link_config
.active_speed
== SPEED_100
?
1879 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1882 netdev_info(tp
->dev
, "Flow control is %s for TX and %s for RX\n",
1883 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_TX
) ?
1885 (tp
->link_config
.active_flowctrl
& FLOW_CTRL_RX
) ?
1888 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)
1889 netdev_info(tp
->dev
, "EEE is %s\n",
1890 tp
->setlpicnt
? "enabled" : "disabled");
1892 tg3_ump_link_report(tp
);
1895 tp
->link_up
= netif_carrier_ok(tp
->dev
);
1898 static u32
tg3_decode_flowctrl_1000T(u32 adv
)
1902 if (adv
& ADVERTISE_PAUSE_CAP
) {
1903 flowctrl
|= FLOW_CTRL_RX
;
1904 if (!(adv
& ADVERTISE_PAUSE_ASYM
))
1905 flowctrl
|= FLOW_CTRL_TX
;
1906 } else if (adv
& ADVERTISE_PAUSE_ASYM
)
1907 flowctrl
|= FLOW_CTRL_TX
;
1912 static u16
tg3_advert_flowctrl_1000X(u8 flow_ctrl
)
1916 if ((flow_ctrl
& FLOW_CTRL_TX
) && (flow_ctrl
& FLOW_CTRL_RX
))
1917 miireg
= ADVERTISE_1000XPAUSE
;
1918 else if (flow_ctrl
& FLOW_CTRL_TX
)
1919 miireg
= ADVERTISE_1000XPSE_ASYM
;
1920 else if (flow_ctrl
& FLOW_CTRL_RX
)
1921 miireg
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1928 static u32
tg3_decode_flowctrl_1000X(u32 adv
)
1932 if (adv
& ADVERTISE_1000XPAUSE
) {
1933 flowctrl
|= FLOW_CTRL_RX
;
1934 if (!(adv
& ADVERTISE_1000XPSE_ASYM
))
1935 flowctrl
|= FLOW_CTRL_TX
;
1936 } else if (adv
& ADVERTISE_1000XPSE_ASYM
)
1937 flowctrl
|= FLOW_CTRL_TX
;
1942 static u8
tg3_resolve_flowctrl_1000X(u16 lcladv
, u16 rmtadv
)
1946 if (lcladv
& rmtadv
& ADVERTISE_1000XPAUSE
) {
1947 cap
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
1948 } else if (lcladv
& rmtadv
& ADVERTISE_1000XPSE_ASYM
) {
1949 if (lcladv
& ADVERTISE_1000XPAUSE
)
1951 if (rmtadv
& ADVERTISE_1000XPAUSE
)
1958 static void tg3_setup_flow_control(struct tg3
*tp
, u32 lcladv
, u32 rmtadv
)
1962 u32 old_rx_mode
= tp
->rx_mode
;
1963 u32 old_tx_mode
= tp
->tx_mode
;
1965 if (tg3_flag(tp
, USE_PHYLIB
))
1966 autoneg
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]->autoneg
;
1968 autoneg
= tp
->link_config
.autoneg
;
1970 if (autoneg
== AUTONEG_ENABLE
&& tg3_flag(tp
, PAUSE_AUTONEG
)) {
1971 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
1972 flowctrl
= tg3_resolve_flowctrl_1000X(lcladv
, rmtadv
);
1974 flowctrl
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1976 flowctrl
= tp
->link_config
.flowctrl
;
1978 tp
->link_config
.active_flowctrl
= flowctrl
;
1980 if (flowctrl
& FLOW_CTRL_RX
)
1981 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1983 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1985 if (old_rx_mode
!= tp
->rx_mode
)
1986 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1988 if (flowctrl
& FLOW_CTRL_TX
)
1989 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1991 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1993 if (old_tx_mode
!= tp
->tx_mode
)
1994 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1997 static void tg3_adjust_link(struct net_device
*dev
)
1999 u8 oldflowctrl
, linkmesg
= 0;
2000 u32 mac_mode
, lcl_adv
, rmt_adv
;
2001 struct tg3
*tp
= netdev_priv(dev
);
2002 struct phy_device
*phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2004 spin_lock_bh(&tp
->lock
);
2006 mac_mode
= tp
->mac_mode
& ~(MAC_MODE_PORT_MODE_MASK
|
2007 MAC_MODE_HALF_DUPLEX
);
2009 oldflowctrl
= tp
->link_config
.active_flowctrl
;
2015 if (phydev
->speed
== SPEED_100
|| phydev
->speed
== SPEED_10
)
2016 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
2017 else if (phydev
->speed
== SPEED_1000
||
2018 tg3_asic_rev(tp
) != ASIC_REV_5785
)
2019 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2021 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
2023 if (phydev
->duplex
== DUPLEX_HALF
)
2024 mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2026 lcl_adv
= mii_advertise_flowctrl(
2027 tp
->link_config
.flowctrl
);
2030 rmt_adv
= LPA_PAUSE_CAP
;
2031 if (phydev
->asym_pause
)
2032 rmt_adv
|= LPA_PAUSE_ASYM
;
2035 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
2037 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2039 if (mac_mode
!= tp
->mac_mode
) {
2040 tp
->mac_mode
= mac_mode
;
2041 tw32_f(MAC_MODE
, tp
->mac_mode
);
2045 if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
2046 if (phydev
->speed
== SPEED_10
)
2048 MAC_MI_STAT_10MBPS_MODE
|
2049 MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2051 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
2054 if (phydev
->speed
== SPEED_1000
&& phydev
->duplex
== DUPLEX_HALF
)
2055 tw32(MAC_TX_LENGTHS
,
2056 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2057 (6 << TX_LENGTHS_IPG_SHIFT
) |
2058 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2060 tw32(MAC_TX_LENGTHS
,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2062 (6 << TX_LENGTHS_IPG_SHIFT
) |
2063 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2065 if (phydev
->link
!= tp
->old_link
||
2066 phydev
->speed
!= tp
->link_config
.active_speed
||
2067 phydev
->duplex
!= tp
->link_config
.active_duplex
||
2068 oldflowctrl
!= tp
->link_config
.active_flowctrl
)
2071 tp
->old_link
= phydev
->link
;
2072 tp
->link_config
.active_speed
= phydev
->speed
;
2073 tp
->link_config
.active_duplex
= phydev
->duplex
;
2075 spin_unlock_bh(&tp
->lock
);
2078 tg3_link_report(tp
);
2081 static int tg3_phy_init(struct tg3
*tp
)
2083 struct phy_device
*phydev
;
2085 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
)
2088 /* Bring the PHY back to a known state. */
2091 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2093 /* Attach the MAC to the PHY. */
2094 phydev
= phy_connect(tp
->dev
, dev_name(&phydev
->dev
),
2095 tg3_adjust_link
, phydev
->interface
);
2096 if (IS_ERR(phydev
)) {
2097 dev_err(&tp
->pdev
->dev
, "Could not attach to PHY\n");
2098 return PTR_ERR(phydev
);
2101 /* Mask with MAC supported features. */
2102 switch (phydev
->interface
) {
2103 case PHY_INTERFACE_MODE_GMII
:
2104 case PHY_INTERFACE_MODE_RGMII
:
2105 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
2106 phydev
->supported
&= (PHY_GBIT_FEATURES
|
2108 SUPPORTED_Asym_Pause
);
2112 case PHY_INTERFACE_MODE_MII
:
2113 phydev
->supported
&= (PHY_BASIC_FEATURES
|
2115 SUPPORTED_Asym_Pause
);
2118 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2122 tp
->phy_flags
|= TG3_PHYFLG_IS_CONNECTED
;
2124 phydev
->advertising
= phydev
->supported
;
2129 static void tg3_phy_start(struct tg3
*tp
)
2131 struct phy_device
*phydev
;
2133 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2136 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
2138 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) {
2139 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
2140 phydev
->speed
= tp
->link_config
.speed
;
2141 phydev
->duplex
= tp
->link_config
.duplex
;
2142 phydev
->autoneg
= tp
->link_config
.autoneg
;
2143 phydev
->advertising
= tp
->link_config
.advertising
;
2148 phy_start_aneg(phydev
);
2151 static void tg3_phy_stop(struct tg3
*tp
)
2153 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
2156 phy_stop(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2159 static void tg3_phy_fini(struct tg3
*tp
)
2161 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
2162 phy_disconnect(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
2163 tp
->phy_flags
&= ~TG3_PHYFLG_IS_CONNECTED
;
2167 static int tg3_phy_set_extloopbk(struct tg3
*tp
)
2172 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
2175 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2176 /* Cannot do read-modify-write on 5401 */
2177 err
= tg3_phy_auxctl_write(tp
,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK
|
2184 err
= tg3_phy_auxctl_read(tp
,
2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2189 val
|= MII_TG3_AUXCTL_ACTL_EXTLOOPBK
;
2190 err
= tg3_phy_auxctl_write(tp
,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, val
);
2197 static void tg3_phy_fet_toggle_apd(struct tg3
*tp
, bool enable
)
2201 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
2204 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2205 phytest
| MII_TG3_FET_SHADOW_EN
);
2206 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, &phy
)) {
2208 phy
|= MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2210 phy
&= ~MII_TG3_FET_SHDW_AUXSTAT2_APD
;
2211 tg3_writephy(tp
, MII_TG3_FET_SHDW_AUXSTAT2
, phy
);
2213 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
2217 static void tg3_phy_toggle_apd(struct tg3
*tp
, bool enable
)
2221 if (!tg3_flag(tp
, 5705_PLUS
) ||
2222 (tg3_flag(tp
, 5717_PLUS
) &&
2223 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)))
2226 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2227 tg3_phy_fet_toggle_apd(tp
, enable
);
2231 reg
= MII_TG3_MISC_SHDW_WREN
|
2232 MII_TG3_MISC_SHDW_SCR5_SEL
|
2233 MII_TG3_MISC_SHDW_SCR5_LPED
|
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM
|
2235 MII_TG3_MISC_SHDW_SCR5_SDTL
|
2236 MII_TG3_MISC_SHDW_SCR5_C125OE
;
2237 if (tg3_asic_rev(tp
) != ASIC_REV_5784
|| !enable
)
2238 reg
|= MII_TG3_MISC_SHDW_SCR5_DLLAPD
;
2240 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
2243 reg
= MII_TG3_MISC_SHDW_WREN
|
2244 MII_TG3_MISC_SHDW_APD_SEL
|
2245 MII_TG3_MISC_SHDW_APD_WKTM_84MS
;
2247 reg
|= MII_TG3_MISC_SHDW_APD_ENABLE
;
2249 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, reg
);
2252 static void tg3_phy_toggle_automdix(struct tg3
*tp
, bool enable
)
2256 if (!tg3_flag(tp
, 5705_PLUS
) ||
2257 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
2260 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
2263 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &ephy
)) {
2264 u32 reg
= MII_TG3_FET_SHDW_MISCCTRL
;
2266 tg3_writephy(tp
, MII_TG3_FET_TEST
,
2267 ephy
| MII_TG3_FET_SHADOW_EN
);
2268 if (!tg3_readphy(tp
, reg
, &phy
)) {
2270 phy
|= MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2272 phy
&= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX
;
2273 tg3_writephy(tp
, reg
, phy
);
2275 tg3_writephy(tp
, MII_TG3_FET_TEST
, ephy
);
2280 ret
= tg3_phy_auxctl_read(tp
,
2281 MII_TG3_AUXCTL_SHDWSEL_MISC
, &phy
);
2284 phy
|= MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2286 phy
&= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX
;
2287 tg3_phy_auxctl_write(tp
,
2288 MII_TG3_AUXCTL_SHDWSEL_MISC
, phy
);
2293 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
2298 if (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
)
2301 ret
= tg3_phy_auxctl_read(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
, &val
);
2303 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_MISC
,
2304 val
| MII_TG3_AUXCTL_MISC_WIRESPD_EN
);
2307 static void tg3_phy_apply_otp(struct tg3
*tp
)
2316 if (tg3_phy_toggle_auxctl_smdsp(tp
, true))
2319 phy
= ((otp
& TG3_OTP_AGCTGT_MASK
) >> TG3_OTP_AGCTGT_SHIFT
);
2320 phy
|= MII_TG3_DSP_TAP1_AGCTGT_DFLT
;
2321 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP1
, phy
);
2323 phy
= ((otp
& TG3_OTP_HPFFLTR_MASK
) >> TG3_OTP_HPFFLTR_SHIFT
) |
2324 ((otp
& TG3_OTP_HPFOVER_MASK
) >> TG3_OTP_HPFOVER_SHIFT
);
2325 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH0
, phy
);
2327 phy
= ((otp
& TG3_OTP_LPFDIS_MASK
) >> TG3_OTP_LPFDIS_SHIFT
);
2328 phy
|= MII_TG3_DSP_AADJ1CH3_ADCCKADJ
;
2329 tg3_phydsp_write(tp
, MII_TG3_DSP_AADJ1CH3
, phy
);
2331 phy
= ((otp
& TG3_OTP_VDAC_MASK
) >> TG3_OTP_VDAC_SHIFT
);
2332 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP75
, phy
);
2334 phy
= ((otp
& TG3_OTP_10BTAMP_MASK
) >> TG3_OTP_10BTAMP_SHIFT
);
2335 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP96
, phy
);
2337 phy
= ((otp
& TG3_OTP_ROFF_MASK
) >> TG3_OTP_ROFF_SHIFT
) |
2338 ((otp
& TG3_OTP_RCOFF_MASK
) >> TG3_OTP_RCOFF_SHIFT
);
2339 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP97
, phy
);
2341 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2344 static void tg3_phy_eee_adjust(struct tg3
*tp
, bool current_link_up
)
2348 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
2353 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
2355 tp
->link_config
.active_duplex
== DUPLEX_FULL
&&
2356 (tp
->link_config
.active_speed
== SPEED_100
||
2357 tp
->link_config
.active_speed
== SPEED_1000
)) {
2360 if (tp
->link_config
.active_speed
== SPEED_1000
)
2361 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_16_5_US
;
2363 eeectl
= TG3_CPMU_EEE_CTRL_EXIT_36_US
;
2365 tw32(TG3_CPMU_EEE_CTRL
, eeectl
);
2367 tg3_phy_cl45_read(tp
, MDIO_MMD_AN
,
2368 TG3_CL45_D7_EEERES_STAT
, &val
);
2370 if (val
== TG3_CL45_D7_EEERES_STAT_LP_1000T
||
2371 val
== TG3_CL45_D7_EEERES_STAT_LP_100TX
)
2375 if (!tp
->setlpicnt
) {
2376 if (current_link_up
&&
2377 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2378 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, 0x0000);
2379 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2382 val
= tr32(TG3_CPMU_EEE_MODE
);
2383 tw32(TG3_CPMU_EEE_MODE
, val
& ~TG3_CPMU_EEEMD_LPI_ENABLE
);
2387 static void tg3_phy_eee_enable(struct tg3
*tp
)
2391 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2392 (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2393 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2394 tg3_flag(tp
, 57765_CLASS
)) &&
2395 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2396 val
= MII_TG3_DSP_TAP26_ALNOKO
|
2397 MII_TG3_DSP_TAP26_RMRXSTO
;
2398 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
2399 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2402 val
= tr32(TG3_CPMU_EEE_MODE
);
2403 tw32(TG3_CPMU_EEE_MODE
, val
| TG3_CPMU_EEEMD_LPI_ENABLE
);
2406 static int tg3_wait_macro_done(struct tg3
*tp
)
2413 if (!tg3_readphy(tp
, MII_TG3_DSP_CONTROL
, &tmp32
)) {
2414 if ((tmp32
& 0x1000) == 0)
2424 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
2426 static const u32 test_pat
[4][6] = {
2427 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2434 for (chan
= 0; chan
< 4; chan
++) {
2437 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2438 (chan
* 0x2000) | 0x0200);
2439 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2441 for (i
= 0; i
< 6; i
++)
2442 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
2445 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2446 if (tg3_wait_macro_done(tp
)) {
2451 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2452 (chan
* 0x2000) | 0x0200);
2453 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0082);
2454 if (tg3_wait_macro_done(tp
)) {
2459 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0802);
2460 if (tg3_wait_macro_done(tp
)) {
2465 for (i
= 0; i
< 6; i
+= 2) {
2468 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
2469 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
2470 tg3_wait_macro_done(tp
)) {
2476 if (low
!= test_pat
[chan
][i
] ||
2477 high
!= test_pat
[chan
][i
+1]) {
2478 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
2479 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
2480 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
2490 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
2494 for (chan
= 0; chan
< 4; chan
++) {
2497 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
2498 (chan
* 0x2000) | 0x0200);
2499 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0002);
2500 for (i
= 0; i
< 6; i
++)
2501 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
2502 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0202);
2503 if (tg3_wait_macro_done(tp
))
2510 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
2512 u32 reg32
, phy9_orig
;
2513 int retries
, do_phy_reset
, err
;
2519 err
= tg3_bmcr_reset(tp
);
2525 /* Disable transmitter and interrupt. */
2526 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
2530 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2532 /* Set full-duplex, 1000 mbps. */
2533 tg3_writephy(tp
, MII_BMCR
,
2534 BMCR_FULLDPLX
| BMCR_SPEED1000
);
2536 /* Set to master mode. */
2537 if (tg3_readphy(tp
, MII_CTRL1000
, &phy9_orig
))
2540 tg3_writephy(tp
, MII_CTRL1000
,
2541 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
2543 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
2547 /* Block the PHY control access. */
2548 tg3_phydsp_write(tp
, 0x8005, 0x0800);
2550 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
2553 } while (--retries
);
2555 err
= tg3_phy_reset_chanpat(tp
);
2559 tg3_phydsp_write(tp
, 0x8005, 0x0000);
2561 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
2562 tg3_writephy(tp
, MII_TG3_DSP_CONTROL
, 0x0000);
2564 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2566 tg3_writephy(tp
, MII_CTRL1000
, phy9_orig
);
2568 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
2570 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
2577 static void tg3_carrier_off(struct tg3
*tp
)
2579 netif_carrier_off(tp
->dev
);
2580 tp
->link_up
= false;
2583 static void tg3_warn_mgmt_link_flap(struct tg3
*tp
)
2585 if (tg3_flag(tp
, ENABLE_ASF
))
2586 netdev_warn(tp
->dev
,
2587 "Management side-band traffic will be interrupted during phy settings change\n");
2590 /* This will reset the tigon3 PHY if there is no valid
2591 * link unless the FORCE argument is non-zero.
2593 static int tg3_phy_reset(struct tg3
*tp
)
2598 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2599 val
= tr32(GRC_MISC_CFG
);
2600 tw32_f(GRC_MISC_CFG
, val
& ~GRC_MISC_CFG_EPHY_IDDQ
);
2603 err
= tg3_readphy(tp
, MII_BMSR
, &val
);
2604 err
|= tg3_readphy(tp
, MII_BMSR
, &val
);
2608 if (netif_running(tp
->dev
) && tp
->link_up
) {
2609 netif_carrier_off(tp
->dev
);
2610 tg3_link_report(tp
);
2613 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
2614 tg3_asic_rev(tp
) == ASIC_REV_5704
||
2615 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
2616 err
= tg3_phy_reset_5703_4_5(tp
);
2623 if (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
2624 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) {
2625 cpmuctrl
= tr32(TG3_CPMU_CTRL
);
2626 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
)
2628 cpmuctrl
& ~CPMU_CTRL_GPHY_10MB_RXONLY
);
2631 err
= tg3_bmcr_reset(tp
);
2635 if (cpmuctrl
& CPMU_CTRL_GPHY_10MB_RXONLY
) {
2636 val
= MII_TG3_DSP_EXP8_AEDW
| MII_TG3_DSP_EXP8_REJ2MHz
;
2637 tg3_phydsp_write(tp
, MII_TG3_DSP_EXP8
, val
);
2639 tw32(TG3_CPMU_CTRL
, cpmuctrl
);
2642 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
2643 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
2644 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
2645 if ((val
& CPMU_LSPD_1000MB_MACCLK_MASK
) ==
2646 CPMU_LSPD_1000MB_MACCLK_12_5
) {
2647 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
2649 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
2653 if (tg3_flag(tp
, 5717_PLUS
) &&
2654 (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
))
2657 tg3_phy_apply_otp(tp
);
2659 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
2660 tg3_phy_toggle_apd(tp
, true);
2662 tg3_phy_toggle_apd(tp
, false);
2665 if ((tp
->phy_flags
& TG3_PHYFLG_ADC_BUG
) &&
2666 !tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2667 tg3_phydsp_write(tp
, 0x201f, 0x2aaa);
2668 tg3_phydsp_write(tp
, 0x000a, 0x0323);
2669 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2672 if (tp
->phy_flags
& TG3_PHYFLG_5704_A0_BUG
) {
2673 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2674 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
2677 if (tp
->phy_flags
& TG3_PHYFLG_BER_BUG
) {
2678 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2679 tg3_phydsp_write(tp
, 0x000a, 0x310b);
2680 tg3_phydsp_write(tp
, 0x201f, 0x9506);
2681 tg3_phydsp_write(tp
, 0x401f, 0x14e2);
2682 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2684 } else if (tp
->phy_flags
& TG3_PHYFLG_JITTER_BUG
) {
2685 if (!tg3_phy_toggle_auxctl_smdsp(tp
, true)) {
2686 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
2687 if (tp
->phy_flags
& TG3_PHYFLG_ADJUST_TRIM
) {
2688 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x110b);
2689 tg3_writephy(tp
, MII_TG3_TEST1
,
2690 MII_TG3_TEST1_TRIM_EN
| 0x4);
2692 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
2694 tg3_phy_toggle_auxctl_smdsp(tp
, false);
2698 /* Set Extended packet length bit (bit 14) on all chips that */
2699 /* support jumbo frames */
2700 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
2701 /* Cannot do read-modify-write on 5401 */
2702 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
2703 } else if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2704 /* Set bit 14 with read-modify-write to preserve other bits */
2705 err
= tg3_phy_auxctl_read(tp
,
2706 MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, &val
);
2708 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
,
2709 val
| MII_TG3_AUXCTL_ACTL_EXTPKTLEN
);
2712 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2713 * jumbo frames transmission.
2715 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
2716 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &val
))
2717 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
2718 val
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
2721 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
2722 /* adjust output voltage */
2723 tg3_writephy(tp
, MII_TG3_FET_PTEST
, 0x12);
2726 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5762_A0
)
2727 tg3_phydsp_write(tp
, 0xffb, 0x4000);
2729 tg3_phy_toggle_automdix(tp
, true);
2730 tg3_phy_set_wirespeed(tp
);
2734 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2736 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2737 TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742 (TG3_GPIO_MSG_DRVR_PRES << 12))
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748 (TG3_GPIO_MSG_NEED_VAUX << 12))
2750 static inline u32
tg3_set_function_status(struct tg3
*tp
, u32 newstat
)
2754 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2755 tg3_asic_rev(tp
) == ASIC_REV_5719
)
2756 status
= tg3_ape_read32(tp
, TG3_APE_GPIO_MSG
);
2758 status
= tr32(TG3_CPMU_DRV_STATUS
);
2760 shift
= TG3_APE_GPIO_MSG_SHIFT
+ 4 * tp
->pci_fn
;
2761 status
&= ~(TG3_GPIO_MSG_MASK
<< shift
);
2762 status
|= (newstat
<< shift
);
2764 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2765 tg3_asic_rev(tp
) == ASIC_REV_5719
)
2766 tg3_ape_write32(tp
, TG3_APE_GPIO_MSG
, status
);
2768 tw32(TG3_CPMU_DRV_STATUS
, status
);
2770 return status
>> TG3_APE_GPIO_MSG_SHIFT
;
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3
*tp
)
2775 if (!tg3_flag(tp
, IS_NIC
))
2778 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2779 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2780 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
2781 if (tg3_ape_lock(tp
, TG3_APE_LOCK_GPIO
))
2784 tg3_set_function_status(tp
, TG3_GPIO_MSG_DRVR_PRES
);
2786 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
,
2787 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2789 tg3_ape_unlock(tp
, TG3_APE_LOCK_GPIO
);
2791 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
,
2792 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3
*tp
)
2802 if (!tg3_flag(tp
, IS_NIC
) ||
2803 tg3_asic_rev(tp
) == ASIC_REV_5700
||
2804 tg3_asic_rev(tp
) == ASIC_REV_5701
)
2807 grc_local_ctrl
= tp
->grc_local_ctrl
| GRC_LCLCTRL_GPIO_OE1
;
2809 tw32_wait_f(GRC_LOCAL_CTRL
,
2810 grc_local_ctrl
| GRC_LCLCTRL_GPIO_OUTPUT1
,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2813 tw32_wait_f(GRC_LOCAL_CTRL
,
2815 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2817 tw32_wait_f(GRC_LOCAL_CTRL
,
2818 grc_local_ctrl
| GRC_LCLCTRL_GPIO_OUTPUT1
,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3
*tp
)
2824 if (!tg3_flag(tp
, IS_NIC
))
2827 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
2828 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
2829 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2830 (GRC_LCLCTRL_GPIO_OE0
|
2831 GRC_LCLCTRL_GPIO_OE1
|
2832 GRC_LCLCTRL_GPIO_OE2
|
2833 GRC_LCLCTRL_GPIO_OUTPUT0
|
2834 GRC_LCLCTRL_GPIO_OUTPUT1
),
2835 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2836 } else if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
2837 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
2838 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839 u32 grc_local_ctrl
= GRC_LCLCTRL_GPIO_OE0
|
2840 GRC_LCLCTRL_GPIO_OE1
|
2841 GRC_LCLCTRL_GPIO_OE2
|
2842 GRC_LCLCTRL_GPIO_OUTPUT0
|
2843 GRC_LCLCTRL_GPIO_OUTPUT1
|
2845 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2848 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT2
;
2849 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2852 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT0
;
2853 tw32_wait_f(GRC_LOCAL_CTRL
, grc_local_ctrl
,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2857 u32 grc_local_ctrl
= 0;
2859 /* Workaround to prevent overdrawing Amps. */
2860 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
2861 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
2862 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
2864 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2867 /* On 5753 and variants, GPIO2 cannot be used. */
2868 no_gpio2
= tp
->nic_sram_data_cfg
&
2869 NIC_SRAM_DATA_CFG_NO_GPIO2
;
2871 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
2872 GRC_LCLCTRL_GPIO_OE1
|
2873 GRC_LCLCTRL_GPIO_OE2
|
2874 GRC_LCLCTRL_GPIO_OUTPUT1
|
2875 GRC_LCLCTRL_GPIO_OUTPUT2
;
2877 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
2878 GRC_LCLCTRL_GPIO_OUTPUT2
);
2880 tw32_wait_f(GRC_LOCAL_CTRL
,
2881 tp
->grc_local_ctrl
| grc_local_ctrl
,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2884 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
2886 tw32_wait_f(GRC_LOCAL_CTRL
,
2887 tp
->grc_local_ctrl
| grc_local_ctrl
,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2891 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
2892 tw32_wait_f(GRC_LOCAL_CTRL
,
2893 tp
->grc_local_ctrl
| grc_local_ctrl
,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY
);
2899 static void tg3_frob_aux_power_5717(struct tg3
*tp
, bool wol_enable
)
2903 /* Serialize power state transitions */
2904 if (tg3_ape_lock(tp
, TG3_APE_LOCK_GPIO
))
2907 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
) || wol_enable
)
2908 msg
= TG3_GPIO_MSG_NEED_VAUX
;
2910 msg
= tg3_set_function_status(tp
, msg
);
2912 if (msg
& TG3_GPIO_MSG_ALL_DRVR_PRES_MASK
)
2915 if (msg
& TG3_GPIO_MSG_ALL_NEED_VAUX_MASK
)
2916 tg3_pwrsrc_switch_to_vaux(tp
);
2918 tg3_pwrsrc_die_with_vmain(tp
);
2921 tg3_ape_unlock(tp
, TG3_APE_LOCK_GPIO
);
2924 static void tg3_frob_aux_power(struct tg3
*tp
, bool include_wol
)
2926 bool need_vaux
= false;
2928 /* The GPIOs do something completely different on 57765. */
2929 if (!tg3_flag(tp
, IS_NIC
) || tg3_flag(tp
, 57765_CLASS
))
2932 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
2933 tg3_asic_rev(tp
) == ASIC_REV_5719
||
2934 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
2935 tg3_frob_aux_power_5717(tp
, include_wol
?
2936 tg3_flag(tp
, WOL_ENABLE
) != 0 : 0);
2940 if (tp
->pdev_peer
&& tp
->pdev_peer
!= tp
->pdev
) {
2941 struct net_device
*dev_peer
;
2943 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
2945 /* remove_one() may have been run on the peer. */
2947 struct tg3
*tp_peer
= netdev_priv(dev_peer
);
2949 if (tg3_flag(tp_peer
, INIT_COMPLETE
))
2952 if ((include_wol
&& tg3_flag(tp_peer
, WOL_ENABLE
)) ||
2953 tg3_flag(tp_peer
, ENABLE_ASF
))
2958 if ((include_wol
&& tg3_flag(tp
, WOL_ENABLE
)) ||
2959 tg3_flag(tp
, ENABLE_ASF
))
2963 tg3_pwrsrc_switch_to_vaux(tp
);
2965 tg3_pwrsrc_die_with_vmain(tp
);
2968 static int tg3_5700_link_polarity(struct tg3
*tp
, u32 speed
)
2970 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
)
2972 else if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
) {
2973 if (speed
!= SPEED_10
)
2975 } else if (speed
== SPEED_10
)
2981 static bool tg3_phy_power_bug(struct tg3
*tp
)
2983 switch (tg3_asic_rev(tp
)) {
2988 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
2997 if ((tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
3006 static bool tg3_phy_led_bug(struct tg3
*tp
)
3008 switch (tg3_asic_rev(tp
)) {
3010 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
3019 static void tg3_power_down_phy(struct tg3
*tp
, bool do_low_power
)
3023 if (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)
3026 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
3027 if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
3028 u32 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
3029 u32 serdes_cfg
= tr32(MAC_SERDES_CFG
);
3032 SG_DIG_USING_HW_AUTONEG
| SG_DIG_SOFT_RESET
;
3033 tw32(SG_DIG_CTRL
, sg_dig_ctrl
);
3034 tw32(MAC_SERDES_CFG
, serdes_cfg
| (1 << 15));
3039 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3041 val
= tr32(GRC_MISC_CFG
);
3042 tw32_f(GRC_MISC_CFG
, val
| GRC_MISC_CFG_EPHY_IDDQ
);
3045 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
3047 if (!tg3_readphy(tp
, MII_TG3_FET_TEST
, &phytest
)) {
3050 tg3_writephy(tp
, MII_ADVERTISE
, 0);
3051 tg3_writephy(tp
, MII_BMCR
,
3052 BMCR_ANENABLE
| BMCR_ANRESTART
);
3054 tg3_writephy(tp
, MII_TG3_FET_TEST
,
3055 phytest
| MII_TG3_FET_SHADOW_EN
);
3056 if (!tg3_readphy(tp
, MII_TG3_FET_SHDW_AUXMODE4
, &phy
)) {
3057 phy
|= MII_TG3_FET_SHDW_AUXMODE4_SBPD
;
3059 MII_TG3_FET_SHDW_AUXMODE4
,
3062 tg3_writephy(tp
, MII_TG3_FET_TEST
, phytest
);
3065 } else if (do_low_power
) {
3066 if (!tg3_phy_led_bug(tp
))
3067 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
3068 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
3070 val
= MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
3071 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE
|
3072 MII_TG3_AUXCTL_PCTL_VREG_11V
;
3073 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, val
);
3076 /* The PHY should not be powered down on some chips because
3079 if (tg3_phy_power_bug(tp
))
3082 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
||
3083 tg3_chip_rev(tp
) == CHIPREV_5761_AX
) {
3084 val
= tr32(TG3_CPMU_LSPD_1000MB_CLK
);
3085 val
&= ~CPMU_LSPD_1000MB_MACCLK_MASK
;
3086 val
|= CPMU_LSPD_1000MB_MACCLK_12_5
;
3087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK
, val
);
3090 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
3093 /* tp->lock is held. */
3094 static int tg3_nvram_lock(struct tg3
*tp
)
3096 if (tg3_flag(tp
, NVRAM
)) {
3099 if (tp
->nvram_lock_cnt
== 0) {
3100 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
3101 for (i
= 0; i
< 8000; i
++) {
3102 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
3107 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
3111 tp
->nvram_lock_cnt
++;
3116 /* tp->lock is held. */
3117 static void tg3_nvram_unlock(struct tg3
*tp
)
3119 if (tg3_flag(tp
, NVRAM
)) {
3120 if (tp
->nvram_lock_cnt
> 0)
3121 tp
->nvram_lock_cnt
--;
3122 if (tp
->nvram_lock_cnt
== 0)
3123 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
3127 /* tp->lock is held. */
3128 static void tg3_enable_nvram_access(struct tg3
*tp
)
3130 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
)) {
3131 u32 nvaccess
= tr32(NVRAM_ACCESS
);
3133 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
3137 /* tp->lock is held. */
3138 static void tg3_disable_nvram_access(struct tg3
*tp
)
3140 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
)) {
3141 u32 nvaccess
= tr32(NVRAM_ACCESS
);
3143 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
3147 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
3148 u32 offset
, u32
*val
)
3153 if (offset
> EEPROM_ADDR_ADDR_MASK
|| (offset
% 4) != 0)
3156 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
3157 EEPROM_ADDR_DEVID_MASK
|
3159 tw32(GRC_EEPROM_ADDR
,
3161 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3162 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
3163 EEPROM_ADDR_ADDR_MASK
) |
3164 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
3166 for (i
= 0; i
< 1000; i
++) {
3167 tmp
= tr32(GRC_EEPROM_ADDR
);
3169 if (tmp
& EEPROM_ADDR_COMPLETE
)
3173 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
3176 tmp
= tr32(GRC_EEPROM_DATA
);
3179 * The data will always be opposite the native endian
3180 * format. Perform a blind byteswap to compensate.
3187 #define NVRAM_CMD_TIMEOUT 10000
3189 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
3193 tw32(NVRAM_CMD
, nvram_cmd
);
3194 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
3196 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
3202 if (i
== NVRAM_CMD_TIMEOUT
)
3208 static u32
tg3_nvram_phys_addr(struct tg3
*tp
, u32 addr
)
3210 if (tg3_flag(tp
, NVRAM
) &&
3211 tg3_flag(tp
, NVRAM_BUFFERED
) &&
3212 tg3_flag(tp
, FLASH
) &&
3213 !tg3_flag(tp
, NO_NVRAM_ADDR_TRANS
) &&
3214 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
3216 addr
= ((addr
/ tp
->nvram_pagesize
) <<
3217 ATMEL_AT45DB0X1B_PAGE_POS
) +
3218 (addr
% tp
->nvram_pagesize
);
3223 static u32
tg3_nvram_logical_addr(struct tg3
*tp
, u32 addr
)
3225 if (tg3_flag(tp
, NVRAM
) &&
3226 tg3_flag(tp
, NVRAM_BUFFERED
) &&
3227 tg3_flag(tp
, FLASH
) &&
3228 !tg3_flag(tp
, NO_NVRAM_ADDR_TRANS
) &&
3229 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
3231 addr
= ((addr
>> ATMEL_AT45DB0X1B_PAGE_POS
) *
3232 tp
->nvram_pagesize
) +
3233 (addr
& ((1 << ATMEL_AT45DB0X1B_PAGE_POS
) - 1));
3238 /* NOTE: Data read in from NVRAM is byteswapped according to
3239 * the byteswapping settings for all other register accesses.
3240 * tg3 devices are BE devices, so on a BE machine, the data
3241 * returned will be exactly as it is seen in NVRAM. On a LE
3242 * machine, the 32-bit value will be byteswapped.
3244 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
3248 if (!tg3_flag(tp
, NVRAM
))
3249 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
3251 offset
= tg3_nvram_phys_addr(tp
, offset
);
3253 if (offset
> NVRAM_ADDR_MSK
)
3256 ret
= tg3_nvram_lock(tp
);
3260 tg3_enable_nvram_access(tp
);
3262 tw32(NVRAM_ADDR
, offset
);
3263 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
3264 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
3267 *val
= tr32(NVRAM_RDDATA
);
3269 tg3_disable_nvram_access(tp
);
3271 tg3_nvram_unlock(tp
);
3276 /* Ensures NVRAM data is in bytestream format. */
3277 static int tg3_nvram_read_be32(struct tg3
*tp
, u32 offset
, __be32
*val
)
3280 int res
= tg3_nvram_read(tp
, offset
, &v
);
3282 *val
= cpu_to_be32(v
);
3286 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
3287 u32 offset
, u32 len
, u8
*buf
)
3292 for (i
= 0; i
< len
; i
+= 4) {
3298 memcpy(&data
, buf
+ i
, 4);
3301 * The SEEPROM interface expects the data to always be opposite
3302 * the native endian format. We accomplish this by reversing
3303 * all the operations that would have been performed on the
3304 * data from a call to tg3_nvram_read_be32().
3306 tw32(GRC_EEPROM_DATA
, swab32(be32_to_cpu(data
)));
3308 val
= tr32(GRC_EEPROM_ADDR
);
3309 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
3311 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
3313 tw32(GRC_EEPROM_ADDR
, val
|
3314 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
3315 (addr
& EEPROM_ADDR_ADDR_MASK
) |
3319 for (j
= 0; j
< 1000; j
++) {
3320 val
= tr32(GRC_EEPROM_ADDR
);
3322 if (val
& EEPROM_ADDR_COMPLETE
)
3326 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
3335 /* offset and length are dword aligned */
3336 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
3340 u32 pagesize
= tp
->nvram_pagesize
;
3341 u32 pagemask
= pagesize
- 1;
3345 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
3351 u32 phy_addr
, page_off
, size
;
3353 phy_addr
= offset
& ~pagemask
;
3355 for (j
= 0; j
< pagesize
; j
+= 4) {
3356 ret
= tg3_nvram_read_be32(tp
, phy_addr
+ j
,
3357 (__be32
*) (tmp
+ j
));
3364 page_off
= offset
& pagemask
;
3371 memcpy(tmp
+ page_off
, buf
, size
);
3373 offset
= offset
+ (pagesize
- page_off
);
3375 tg3_enable_nvram_access(tp
);
3378 * Before we can erase the flash page, we need
3379 * to issue a special "write enable" command.
3381 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3383 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3386 /* Erase the target page */
3387 tw32(NVRAM_ADDR
, phy_addr
);
3389 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
3390 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
3392 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3395 /* Issue another write enable to start the write. */
3396 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3398 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
3401 for (j
= 0; j
< pagesize
; j
+= 4) {
3404 data
= *((__be32
*) (tmp
+ j
));
3406 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3408 tw32(NVRAM_ADDR
, phy_addr
+ j
);
3410 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
3414 nvram_cmd
|= NVRAM_CMD_FIRST
;
3415 else if (j
== (pagesize
- 4))
3416 nvram_cmd
|= NVRAM_CMD_LAST
;
3418 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3426 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3427 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
3440 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
3441 u32 page_off
, phy_addr
, nvram_cmd
;
3444 memcpy(&data
, buf
+ i
, 4);
3445 tw32(NVRAM_WRDATA
, be32_to_cpu(data
));
3447 page_off
= offset
% tp
->nvram_pagesize
;
3449 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
3451 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
3453 if (page_off
== 0 || i
== 0)
3454 nvram_cmd
|= NVRAM_CMD_FIRST
;
3455 if (page_off
== (tp
->nvram_pagesize
- 4))
3456 nvram_cmd
|= NVRAM_CMD_LAST
;
3459 nvram_cmd
|= NVRAM_CMD_LAST
;
3461 if ((nvram_cmd
& NVRAM_CMD_FIRST
) ||
3462 !tg3_flag(tp
, FLASH
) ||
3463 !tg3_flag(tp
, 57765_PLUS
))
3464 tw32(NVRAM_ADDR
, phy_addr
);
3466 if (tg3_asic_rev(tp
) != ASIC_REV_5752
&&
3467 !tg3_flag(tp
, 5755_PLUS
) &&
3468 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
3469 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
3472 cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
3473 ret
= tg3_nvram_exec_cmd(tp
, cmd
);
3477 if (!tg3_flag(tp
, FLASH
)) {
3478 /* We always do complete word writes to eeprom. */
3479 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
3482 ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
);
3489 /* offset and length are dword aligned */
3490 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
3494 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3495 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
3496 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
3500 if (!tg3_flag(tp
, NVRAM
)) {
3501 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
3505 ret
= tg3_nvram_lock(tp
);
3509 tg3_enable_nvram_access(tp
);
3510 if (tg3_flag(tp
, 5750_PLUS
) && !tg3_flag(tp
, PROTECTED_NVRAM
))
3511 tw32(NVRAM_WRITE1
, 0x406);
3513 grc_mode
= tr32(GRC_MODE
);
3514 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
3516 if (tg3_flag(tp
, NVRAM_BUFFERED
) || !tg3_flag(tp
, FLASH
)) {
3517 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
3520 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
3524 grc_mode
= tr32(GRC_MODE
);
3525 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
3527 tg3_disable_nvram_access(tp
);
3528 tg3_nvram_unlock(tp
);
3531 if (tg3_flag(tp
, EEPROM_WRITE_PROT
)) {
3532 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
3539 #define RX_CPU_SCRATCH_BASE 0x30000
3540 #define RX_CPU_SCRATCH_SIZE 0x04000
3541 #define TX_CPU_SCRATCH_BASE 0x34000
3542 #define TX_CPU_SCRATCH_SIZE 0x04000
3544 /* tp->lock is held. */
3545 static int tg3_pause_cpu(struct tg3
*tp
, u32 cpu_base
)
3548 const int iters
= 10000;
3550 for (i
= 0; i
< iters
; i
++) {
3551 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3552 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
3553 if (tr32(cpu_base
+ CPU_MODE
) & CPU_MODE_HALT
)
3555 if (pci_channel_offline(tp
->pdev
))
3559 return (i
== iters
) ? -EBUSY
: 0;
3562 /* tp->lock is held. */
3563 static int tg3_rxcpu_pause(struct tg3
*tp
)
3565 int rc
= tg3_pause_cpu(tp
, RX_CPU_BASE
);
3567 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
3568 tw32_f(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
3574 /* tp->lock is held. */
3575 static int tg3_txcpu_pause(struct tg3
*tp
)
3577 return tg3_pause_cpu(tp
, TX_CPU_BASE
);
3580 /* tp->lock is held. */
3581 static void tg3_resume_cpu(struct tg3
*tp
, u32 cpu_base
)
3583 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3584 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
3587 /* tp->lock is held. */
3588 static void tg3_rxcpu_resume(struct tg3
*tp
)
3590 tg3_resume_cpu(tp
, RX_CPU_BASE
);
3593 /* tp->lock is held. */
3594 static int tg3_halt_cpu(struct tg3
*tp
, u32 cpu_base
)
3598 BUG_ON(cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
));
3600 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
3601 u32 val
= tr32(GRC_VCPU_EXT_CTRL
);
3603 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_HALT_CPU
);
3606 if (cpu_base
== RX_CPU_BASE
) {
3607 rc
= tg3_rxcpu_pause(tp
);
3610 * There is only an Rx CPU for the 5750 derivative in the
3613 if (tg3_flag(tp
, IS_SSB_CORE
))
3616 rc
= tg3_txcpu_pause(tp
);
3620 netdev_err(tp
->dev
, "%s timed out, %s CPU\n",
3621 __func__
, cpu_base
== RX_CPU_BASE
? "RX" : "TX");
3625 /* Clear firmware's nvram arbitration. */
3626 if (tg3_flag(tp
, NVRAM
))
3627 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
3631 static int tg3_fw_data_len(struct tg3
*tp
,
3632 const struct tg3_firmware_hdr
*fw_hdr
)
3636 /* Non fragmented firmware have one firmware header followed by a
3637 * contiguous chunk of data to be written. The length field in that
3638 * header is not the length of data to be written but the complete
3639 * length of the bss. The data length is determined based on
3640 * tp->fw->size minus headers.
3642 * Fragmented firmware have a main header followed by multiple
3643 * fragments. Each fragment is identical to non fragmented firmware
3644 * with a firmware header followed by a contiguous chunk of data. In
3645 * the main header, the length field is unused and set to 0xffffffff.
3646 * In each fragment header the length is the entire size of that
3647 * fragment i.e. fragment data + header length. Data length is
3648 * therefore length field in the header minus TG3_FW_HDR_LEN.
3650 if (tp
->fw_len
== 0xffffffff)
3651 fw_len
= be32_to_cpu(fw_hdr
->len
);
3653 fw_len
= tp
->fw
->size
;
3655 return (fw_len
- TG3_FW_HDR_LEN
) / sizeof(u32
);
3658 /* tp->lock is held. */
3659 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
,
3660 u32 cpu_scratch_base
, int cpu_scratch_size
,
3661 const struct tg3_firmware_hdr
*fw_hdr
)
3664 void (*write_op
)(struct tg3
*, u32
, u32
);
3665 int total_len
= tp
->fw
->size
;
3667 if (cpu_base
== TX_CPU_BASE
&& tg3_flag(tp
, 5705_PLUS
)) {
3669 "%s: Trying to load TX cpu firmware which is 5705\n",
3674 if (tg3_flag(tp
, 5705_PLUS
) && tg3_asic_rev(tp
) != ASIC_REV_57766
)
3675 write_op
= tg3_write_mem
;
3677 write_op
= tg3_write_indirect_reg32
;
3679 if (tg3_asic_rev(tp
) != ASIC_REV_57766
) {
3680 /* It is possible that bootcode is still loading at this point.
3681 * Get the nvram lock first before halting the cpu.
3683 int lock_err
= tg3_nvram_lock(tp
);
3684 err
= tg3_halt_cpu(tp
, cpu_base
);
3686 tg3_nvram_unlock(tp
);
3690 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
3691 write_op(tp
, cpu_scratch_base
+ i
, 0);
3692 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3693 tw32(cpu_base
+ CPU_MODE
,
3694 tr32(cpu_base
+ CPU_MODE
) | CPU_MODE_HALT
);
3696 /* Subtract additional main header for fragmented firmware and
3697 * advance to the first fragment
3699 total_len
-= TG3_FW_HDR_LEN
;
3704 u32
*fw_data
= (u32
*)(fw_hdr
+ 1);
3705 for (i
= 0; i
< tg3_fw_data_len(tp
, fw_hdr
); i
++)
3706 write_op(tp
, cpu_scratch_base
+
3707 (be32_to_cpu(fw_hdr
->base_addr
) & 0xffff) +
3709 be32_to_cpu(fw_data
[i
]));
3711 total_len
-= be32_to_cpu(fw_hdr
->len
);
3713 /* Advance to next fragment */
3714 fw_hdr
= (struct tg3_firmware_hdr
*)
3715 ((void *)fw_hdr
+ be32_to_cpu(fw_hdr
->len
));
3716 } while (total_len
> 0);
3724 /* tp->lock is held. */
3725 static int tg3_pause_cpu_and_set_pc(struct tg3
*tp
, u32 cpu_base
, u32 pc
)
3728 const int iters
= 5;
3730 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3731 tw32_f(cpu_base
+ CPU_PC
, pc
);
3733 for (i
= 0; i
< iters
; i
++) {
3734 if (tr32(cpu_base
+ CPU_PC
) == pc
)
3736 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
3737 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
3738 tw32_f(cpu_base
+ CPU_PC
, pc
);
3742 return (i
== iters
) ? -EBUSY
: 0;
3745 /* tp->lock is held. */
3746 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
3748 const struct tg3_firmware_hdr
*fw_hdr
;
3751 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
3753 /* Firmware blob starts with version numbers, followed by
3754 start address and length. We are setting complete length.
3755 length = end_address_of_bss - start_address_of_text.
3756 Remainder is the blob to be loaded contiguously
3757 from start address. */
3759 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
3760 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
3765 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
3766 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
3771 /* Now startup only the RX cpu. */
3772 err
= tg3_pause_cpu_and_set_pc(tp
, RX_CPU_BASE
,
3773 be32_to_cpu(fw_hdr
->base_addr
));
3775 netdev_err(tp
->dev
, "%s fails to set RX CPU PC, is %08x "
3776 "should be %08x\n", __func__
,
3777 tr32(RX_CPU_BASE
+ CPU_PC
),
3778 be32_to_cpu(fw_hdr
->base_addr
));
3782 tg3_rxcpu_resume(tp
);
3787 static int tg3_validate_rxcpu_state(struct tg3
*tp
)
3789 const int iters
= 1000;
3793 /* Wait for boot code to complete initialization and enter service
3794 * loop. It is then safe to download service patches
3796 for (i
= 0; i
< iters
; i
++) {
3797 if (tr32(RX_CPU_HWBKPT
) == TG3_SBROM_IN_SERVICE_LOOP
)
3804 netdev_err(tp
->dev
, "Boot code not ready for service patches\n");
3808 val
= tg3_read_indirect_reg32(tp
, TG3_57766_FW_HANDSHAKE
);
3810 netdev_warn(tp
->dev
,
3811 "Other patches exist. Not downloading EEE patch\n");
3818 /* tp->lock is held. */
3819 static void tg3_load_57766_firmware(struct tg3
*tp
)
3821 struct tg3_firmware_hdr
*fw_hdr
;
3823 if (!tg3_flag(tp
, NO_NVRAM
))
3826 if (tg3_validate_rxcpu_state(tp
))
3832 /* This firmware blob has a different format than older firmware
3833 * releases as given below. The main difference is we have fragmented
3834 * data to be written to non-contiguous locations.
3836 * In the beginning we have a firmware header identical to other
3837 * firmware which consists of version, base addr and length. The length
3838 * here is unused and set to 0xffffffff.
3840 * This is followed by a series of firmware fragments which are
3841 * individually identical to previous firmware. i.e. they have the
3842 * firmware header and followed by data for that fragment. The version
3843 * field of the individual fragment header is unused.
3846 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
3847 if (be32_to_cpu(fw_hdr
->base_addr
) != TG3_57766_FW_BASE_ADDR
)
3850 if (tg3_rxcpu_pause(tp
))
3853 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3854 tg3_load_firmware_cpu(tp
, 0, TG3_57766_FW_BASE_ADDR
, 0, fw_hdr
);
3856 tg3_rxcpu_resume(tp
);
3859 /* tp->lock is held. */
3860 static int tg3_load_tso_firmware(struct tg3
*tp
)
3862 const struct tg3_firmware_hdr
*fw_hdr
;
3863 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
3866 if (!tg3_flag(tp
, FW_TSO
))
3869 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
3871 /* Firmware blob starts with version numbers, followed by
3872 start address and length. We are setting complete length.
3873 length = end_address_of_bss - start_address_of_text.
3874 Remainder is the blob to be loaded contiguously
3875 from start address. */
3877 cpu_scratch_size
= tp
->fw_len
;
3879 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
3880 cpu_base
= RX_CPU_BASE
;
3881 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
3883 cpu_base
= TX_CPU_BASE
;
3884 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
3885 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
3888 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
3889 cpu_scratch_base
, cpu_scratch_size
,
3894 /* Now startup the cpu. */
3895 err
= tg3_pause_cpu_and_set_pc(tp
, cpu_base
,
3896 be32_to_cpu(fw_hdr
->base_addr
));
3899 "%s fails to set CPU PC, is %08x should be %08x\n",
3900 __func__
, tr32(cpu_base
+ CPU_PC
),
3901 be32_to_cpu(fw_hdr
->base_addr
));
3905 tg3_resume_cpu(tp
, cpu_base
);
3910 /* tp->lock is held. */
3911 static void __tg3_set_mac_addr(struct tg3
*tp
, bool skip_mac_1
)
3913 u32 addr_high
, addr_low
;
3916 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
3917 tp
->dev
->dev_addr
[1]);
3918 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
3919 (tp
->dev
->dev_addr
[3] << 16) |
3920 (tp
->dev
->dev_addr
[4] << 8) |
3921 (tp
->dev
->dev_addr
[5] << 0));
3922 for (i
= 0; i
< 4; i
++) {
3923 if (i
== 1 && skip_mac_1
)
3925 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
3926 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
3929 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
3930 tg3_asic_rev(tp
) == ASIC_REV_5704
) {
3931 for (i
= 0; i
< 12; i
++) {
3932 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
3933 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
3937 addr_high
= (tp
->dev
->dev_addr
[0] +
3938 tp
->dev
->dev_addr
[1] +
3939 tp
->dev
->dev_addr
[2] +
3940 tp
->dev
->dev_addr
[3] +
3941 tp
->dev
->dev_addr
[4] +
3942 tp
->dev
->dev_addr
[5]) &
3943 TX_BACKOFF_SEED_MASK
;
3944 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
3947 static void tg3_enable_register_access(struct tg3
*tp
)
3950 * Make sure register accesses (indirect or otherwise) will function
3953 pci_write_config_dword(tp
->pdev
,
3954 TG3PCI_MISC_HOST_CTRL
, tp
->misc_host_ctrl
);
3957 static int tg3_power_up(struct tg3
*tp
)
3961 tg3_enable_register_access(tp
);
3963 err
= pci_set_power_state(tp
->pdev
, PCI_D0
);
3965 /* Switch out of Vaux if it is a NIC */
3966 tg3_pwrsrc_switch_to_vmain(tp
);
3968 netdev_err(tp
->dev
, "Transition to D0 failed\n");
3974 static int tg3_setup_phy(struct tg3
*, bool);
3976 static int tg3_power_down_prepare(struct tg3
*tp
)
3979 bool device_should_wake
, do_low_power
;
3981 tg3_enable_register_access(tp
);
3983 /* Restore the CLKREQ setting. */
3984 if (tg3_flag(tp
, CLKREQ_BUG
))
3985 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
3986 PCI_EXP_LNKCTL_CLKREQ_EN
);
3988 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
3989 tw32(TG3PCI_MISC_HOST_CTRL
,
3990 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
3992 device_should_wake
= device_may_wakeup(&tp
->pdev
->dev
) &&
3993 tg3_flag(tp
, WOL_ENABLE
);
3995 if (tg3_flag(tp
, USE_PHYLIB
)) {
3996 do_low_power
= false;
3997 if ((tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) &&
3998 !(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
3999 struct phy_device
*phydev
;
4000 u32 phyid
, advertising
;
4002 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
4004 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
4006 tp
->link_config
.speed
= phydev
->speed
;
4007 tp
->link_config
.duplex
= phydev
->duplex
;
4008 tp
->link_config
.autoneg
= phydev
->autoneg
;
4009 tp
->link_config
.advertising
= phydev
->advertising
;
4011 advertising
= ADVERTISED_TP
|
4013 ADVERTISED_Autoneg
|
4014 ADVERTISED_10baseT_Half
;
4016 if (tg3_flag(tp
, ENABLE_ASF
) || device_should_wake
) {
4017 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4019 ADVERTISED_100baseT_Half
|
4020 ADVERTISED_100baseT_Full
|
4021 ADVERTISED_10baseT_Full
;
4023 advertising
|= ADVERTISED_10baseT_Full
;
4026 phydev
->advertising
= advertising
;
4028 phy_start_aneg(phydev
);
4030 phyid
= phydev
->drv
->phy_id
& phydev
->drv
->phy_id_mask
;
4031 if (phyid
!= PHY_ID_BCMAC131
) {
4032 phyid
&= PHY_BCM_OUI_MASK
;
4033 if (phyid
== PHY_BCM_OUI_1
||
4034 phyid
== PHY_BCM_OUI_2
||
4035 phyid
== PHY_BCM_OUI_3
)
4036 do_low_power
= true;
4040 do_low_power
= true;
4042 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
))
4043 tp
->phy_flags
|= TG3_PHYFLG_IS_LOW_POWER
;
4045 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
4046 tg3_setup_phy(tp
, false);
4049 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
4052 val
= tr32(GRC_VCPU_EXT_CTRL
);
4053 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
4054 } else if (!tg3_flag(tp
, ENABLE_ASF
)) {
4058 for (i
= 0; i
< 200; i
++) {
4059 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
4060 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4065 if (tg3_flag(tp
, WOL_CAP
))
4066 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
4067 WOL_DRV_STATE_SHUTDOWN
|
4071 if (device_should_wake
) {
4074 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
4076 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
4077 tg3_phy_auxctl_write(tp
,
4078 MII_TG3_AUXCTL_SHDWSEL_PWRCTL
,
4079 MII_TG3_AUXCTL_PCTL_WOL_EN
|
4080 MII_TG3_AUXCTL_PCTL_100TX_LPWR
|
4081 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC
);
4085 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4086 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4087 else if (tp
->phy_flags
&
4088 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) {
4089 if (tp
->link_config
.active_speed
== SPEED_1000
)
4090 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4092 mac_mode
= MAC_MODE_PORT_MODE_MII
;
4094 mac_mode
= MAC_MODE_PORT_MODE_MII
;
4096 mac_mode
|= tp
->mac_mode
& MAC_MODE_LINK_POLARITY
;
4097 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4098 u32 speed
= tg3_flag(tp
, WOL_SPEED_100MB
) ?
4099 SPEED_100
: SPEED_10
;
4100 if (tg3_5700_link_polarity(tp
, speed
))
4101 mac_mode
|= MAC_MODE_LINK_POLARITY
;
4103 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4106 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4109 if (!tg3_flag(tp
, 5750_PLUS
))
4110 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
4112 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
4113 if ((tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
)) &&
4114 (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)))
4115 mac_mode
|= MAC_MODE_KEEP_FRAME_IN_WOL
;
4117 if (tg3_flag(tp
, ENABLE_APE
))
4118 mac_mode
|= MAC_MODE_APE_TX_EN
|
4119 MAC_MODE_APE_RX_EN
|
4120 MAC_MODE_TDE_ENABLE
;
4122 tw32_f(MAC_MODE
, mac_mode
);
4125 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
4129 if (!tg3_flag(tp
, WOL_SPEED_100MB
) &&
4130 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4131 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
4134 base_val
= tp
->pci_clock_ctrl
;
4135 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
4136 CLOCK_CTRL_TXCLK_DISABLE
);
4138 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
4139 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
4140 } else if (tg3_flag(tp
, 5780_CLASS
) ||
4141 tg3_flag(tp
, CPMU_PRESENT
) ||
4142 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
4144 } else if (!(tg3_flag(tp
, 5750_PLUS
) && tg3_flag(tp
, ENABLE_ASF
))) {
4145 u32 newbits1
, newbits2
;
4147 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4148 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4149 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
4150 CLOCK_CTRL_TXCLK_DISABLE
|
4152 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
4153 } else if (tg3_flag(tp
, 5705_PLUS
)) {
4154 newbits1
= CLOCK_CTRL_625_CORE
;
4155 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
4157 newbits1
= CLOCK_CTRL_ALTCLK
;
4158 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
4161 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
4164 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
4167 if (!tg3_flag(tp
, 5705_PLUS
)) {
4170 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4171 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4172 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
4173 CLOCK_CTRL_TXCLK_DISABLE
|
4174 CLOCK_CTRL_44MHZ_CORE
);
4176 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
4179 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
4180 tp
->pci_clock_ctrl
| newbits3
, 40);
4184 if (!(device_should_wake
) && !tg3_flag(tp
, ENABLE_ASF
))
4185 tg3_power_down_phy(tp
, do_low_power
);
4187 tg3_frob_aux_power(tp
, true);
4189 /* Workaround for unstable PLL clock */
4190 if ((!tg3_flag(tp
, IS_SSB_CORE
)) &&
4191 ((tg3_chip_rev(tp
) == CHIPREV_5750_AX
) ||
4192 (tg3_chip_rev(tp
) == CHIPREV_5750_BX
))) {
4193 u32 val
= tr32(0x7d00);
4195 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4197 if (!tg3_flag(tp
, ENABLE_ASF
)) {
4200 err
= tg3_nvram_lock(tp
);
4201 tg3_halt_cpu(tp
, RX_CPU_BASE
);
4203 tg3_nvram_unlock(tp
);
4207 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
4212 static void tg3_power_down(struct tg3
*tp
)
4214 tg3_power_down_prepare(tp
);
4216 pci_wake_from_d3(tp
->pdev
, tg3_flag(tp
, WOL_ENABLE
));
4217 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
4220 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
4222 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
4223 case MII_TG3_AUX_STAT_10HALF
:
4225 *duplex
= DUPLEX_HALF
;
4228 case MII_TG3_AUX_STAT_10FULL
:
4230 *duplex
= DUPLEX_FULL
;
4233 case MII_TG3_AUX_STAT_100HALF
:
4235 *duplex
= DUPLEX_HALF
;
4238 case MII_TG3_AUX_STAT_100FULL
:
4240 *duplex
= DUPLEX_FULL
;
4243 case MII_TG3_AUX_STAT_1000HALF
:
4244 *speed
= SPEED_1000
;
4245 *duplex
= DUPLEX_HALF
;
4248 case MII_TG3_AUX_STAT_1000FULL
:
4249 *speed
= SPEED_1000
;
4250 *duplex
= DUPLEX_FULL
;
4254 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4255 *speed
= (val
& MII_TG3_AUX_STAT_100
) ? SPEED_100
:
4257 *duplex
= (val
& MII_TG3_AUX_STAT_FULL
) ? DUPLEX_FULL
:
4261 *speed
= SPEED_UNKNOWN
;
4262 *duplex
= DUPLEX_UNKNOWN
;
4267 static int tg3_phy_autoneg_cfg(struct tg3
*tp
, u32 advertise
, u32 flowctrl
)
4272 new_adv
= ADVERTISE_CSMA
;
4273 new_adv
|= ethtool_adv_to_mii_adv_t(advertise
) & ADVERTISE_ALL
;
4274 new_adv
|= mii_advertise_flowctrl(flowctrl
);
4276 err
= tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
4280 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4281 new_adv
= ethtool_adv_to_mii_ctrl1000_t(advertise
);
4283 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4284 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)
4285 new_adv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4287 err
= tg3_writephy(tp
, MII_CTRL1000
, new_adv
);
4292 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4295 tw32(TG3_CPMU_EEE_MODE
,
4296 tr32(TG3_CPMU_EEE_MODE
) & ~TG3_CPMU_EEEMD_LPI_ENABLE
);
4298 err
= tg3_phy_toggle_auxctl_smdsp(tp
, true);
4303 /* Advertise 100-BaseTX EEE ability */
4304 if (advertise
& ADVERTISED_100baseT_Full
)
4305 val
|= MDIO_AN_EEE_ADV_100TX
;
4306 /* Advertise 1000-BaseT EEE ability */
4307 if (advertise
& ADVERTISED_1000baseT_Full
)
4308 val
|= MDIO_AN_EEE_ADV_1000T
;
4309 err
= tg3_phy_cl45_write(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, val
);
4313 switch (tg3_asic_rev(tp
)) {
4315 case ASIC_REV_57765
:
4316 case ASIC_REV_57766
:
4318 /* If we advertised any eee advertisements above... */
4320 val
= MII_TG3_DSP_TAP26_ALNOKO
|
4321 MII_TG3_DSP_TAP26_RMRXSTO
|
4322 MII_TG3_DSP_TAP26_OPCSINPT
;
4323 tg3_phydsp_write(tp
, MII_TG3_DSP_TAP26
, val
);
4327 if (!tg3_phydsp_read(tp
, MII_TG3_DSP_CH34TP2
, &val
))
4328 tg3_phydsp_write(tp
, MII_TG3_DSP_CH34TP2
, val
|
4329 MII_TG3_DSP_CH34TP2_HIBW01
);
4332 err2
= tg3_phy_toggle_auxctl_smdsp(tp
, false);
4341 static void tg3_phy_copper_begin(struct tg3
*tp
)
4343 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
||
4344 (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4347 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
4348 !(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)) {
4349 adv
= ADVERTISED_10baseT_Half
|
4350 ADVERTISED_10baseT_Full
;
4351 if (tg3_flag(tp
, WOL_SPEED_100MB
))
4352 adv
|= ADVERTISED_100baseT_Half
|
4353 ADVERTISED_100baseT_Full
;
4354 if (tp
->phy_flags
& TG3_PHYFLG_1G_ON_VAUX_OK
)
4355 adv
|= ADVERTISED_1000baseT_Half
|
4356 ADVERTISED_1000baseT_Full
;
4358 fc
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
4360 adv
= tp
->link_config
.advertising
;
4361 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
4362 adv
&= ~(ADVERTISED_1000baseT_Half
|
4363 ADVERTISED_1000baseT_Full
);
4365 fc
= tp
->link_config
.flowctrl
;
4368 tg3_phy_autoneg_cfg(tp
, adv
, fc
);
4370 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
4371 (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
)) {
4372 /* Normally during power down we want to autonegotiate
4373 * the lowest possible speed for WOL. However, to avoid
4374 * link flap, we leave it untouched.
4379 tg3_writephy(tp
, MII_BMCR
,
4380 BMCR_ANENABLE
| BMCR_ANRESTART
);
4383 u32 bmcr
, orig_bmcr
;
4385 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
4386 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
4388 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
4389 /* With autoneg disabled, 5715 only links up when the
4390 * advertisement register has the configured speed
4393 tg3_writephy(tp
, MII_ADVERTISE
, ADVERTISE_ALL
);
4397 switch (tp
->link_config
.speed
) {
4403 bmcr
|= BMCR_SPEED100
;
4407 bmcr
|= BMCR_SPEED1000
;
4411 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
4412 bmcr
|= BMCR_FULLDPLX
;
4414 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
4415 (bmcr
!= orig_bmcr
)) {
4416 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
4417 for (i
= 0; i
< 1500; i
++) {
4421 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
4422 tg3_readphy(tp
, MII_BMSR
, &tmp
))
4424 if (!(tmp
& BMSR_LSTATUS
)) {
4429 tg3_writephy(tp
, MII_BMCR
, bmcr
);
4435 static int tg3_phy_pull_config(struct tg3
*tp
)
4440 err
= tg3_readphy(tp
, MII_BMCR
, &val
);
4444 if (!(val
& BMCR_ANENABLE
)) {
4445 tp
->link_config
.autoneg
= AUTONEG_DISABLE
;
4446 tp
->link_config
.advertising
= 0;
4447 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
4451 switch (val
& (BMCR_SPEED1000
| BMCR_SPEED100
)) {
4453 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
4456 tp
->link_config
.speed
= SPEED_10
;
4459 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
4462 tp
->link_config
.speed
= SPEED_100
;
4464 case BMCR_SPEED1000
:
4465 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4466 tp
->link_config
.speed
= SPEED_1000
;
4474 if (val
& BMCR_FULLDPLX
)
4475 tp
->link_config
.duplex
= DUPLEX_FULL
;
4477 tp
->link_config
.duplex
= DUPLEX_HALF
;
4479 tp
->link_config
.flowctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
4485 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
4486 tp
->link_config
.advertising
= ADVERTISED_Autoneg
;
4487 tg3_flag_set(tp
, PAUSE_AUTONEG
);
4489 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
4492 err
= tg3_readphy(tp
, MII_ADVERTISE
, &val
);
4496 adv
= mii_adv_to_ethtool_adv_t(val
& ADVERTISE_ALL
);
4497 tp
->link_config
.advertising
|= adv
| ADVERTISED_TP
;
4499 tp
->link_config
.flowctrl
= tg3_decode_flowctrl_1000T(val
);
4501 tp
->link_config
.advertising
|= ADVERTISED_FIBRE
;
4504 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4507 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
4508 err
= tg3_readphy(tp
, MII_CTRL1000
, &val
);
4512 adv
= mii_ctrl1000_to_ethtool_adv_t(val
);
4514 err
= tg3_readphy(tp
, MII_ADVERTISE
, &val
);
4518 adv
= tg3_decode_flowctrl_1000X(val
);
4519 tp
->link_config
.flowctrl
= adv
;
4521 val
&= (ADVERTISE_1000XHALF
| ADVERTISE_1000XFULL
);
4522 adv
= mii_adv_to_ethtool_adv_x(val
);
4525 tp
->link_config
.advertising
|= adv
;
4532 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
4536 /* Turn off tap power management. */
4537 /* Set Extended packet length bit */
4538 err
= tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_AUXCTL
, 0x4c20);
4540 err
|= tg3_phydsp_write(tp
, 0x0012, 0x1804);
4541 err
|= tg3_phydsp_write(tp
, 0x0013, 0x1204);
4542 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0132);
4543 err
|= tg3_phydsp_write(tp
, 0x8006, 0x0232);
4544 err
|= tg3_phydsp_write(tp
, 0x201f, 0x0a20);
4551 static bool tg3_phy_eee_config_ok(struct tg3
*tp
)
4555 u32 advertising
= tp
->link_config
.advertising
;
4557 if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
))
4560 if (tg3_phy_cl45_read(tp
, MDIO_MMD_AN
, MDIO_AN_EEE_ADV
, &val
))
4563 val
&= (MDIO_AN_EEE_ADV_100TX
| MDIO_AN_EEE_ADV_1000T
);
4566 if (advertising
& ADVERTISED_100baseT_Full
)
4567 tgtadv
|= MDIO_AN_EEE_ADV_100TX
;
4568 if (advertising
& ADVERTISED_1000baseT_Full
)
4569 tgtadv
|= MDIO_AN_EEE_ADV_1000T
;
4577 static bool tg3_phy_copper_an_config_ok(struct tg3
*tp
, u32
*lcladv
)
4579 u32 advmsk
, tgtadv
, advertising
;
4581 advertising
= tp
->link_config
.advertising
;
4582 tgtadv
= ethtool_adv_to_mii_adv_t(advertising
) & ADVERTISE_ALL
;
4584 advmsk
= ADVERTISE_ALL
;
4585 if (tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4586 tgtadv
|= mii_advertise_flowctrl(tp
->link_config
.flowctrl
);
4587 advmsk
|= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
4590 if (tg3_readphy(tp
, MII_ADVERTISE
, lcladv
))
4593 if ((*lcladv
& advmsk
) != tgtadv
)
4596 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4599 tgtadv
= ethtool_adv_to_mii_ctrl1000_t(advertising
);
4601 if (tg3_readphy(tp
, MII_CTRL1000
, &tg3_ctrl
))
4605 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4606 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
)) {
4607 tgtadv
|= CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
;
4608 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
|
4609 CTL1000_AS_MASTER
| CTL1000_ENABLE_MASTER
);
4611 tg3_ctrl
&= (ADVERTISE_1000HALF
| ADVERTISE_1000FULL
);
4614 if (tg3_ctrl
!= tgtadv
)
4621 static bool tg3_phy_copper_fetch_rmtadv(struct tg3
*tp
, u32
*rmtadv
)
4625 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)) {
4628 if (tg3_readphy(tp
, MII_STAT1000
, &val
))
4631 lpeth
= mii_stat1000_to_ethtool_lpa_t(val
);
4634 if (tg3_readphy(tp
, MII_LPA
, rmtadv
))
4637 lpeth
|= mii_lpa_to_ethtool_lpa_t(*rmtadv
);
4638 tp
->link_config
.rmt_adv
= lpeth
;
4643 static bool tg3_test_and_report_link_chg(struct tg3
*tp
, bool curr_link_up
)
4645 if (curr_link_up
!= tp
->link_up
) {
4647 netif_carrier_on(tp
->dev
);
4649 netif_carrier_off(tp
->dev
);
4650 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
4651 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
4654 tg3_link_report(tp
);
4661 static void tg3_clear_mac_status(struct tg3
*tp
)
4666 MAC_STATUS_SYNC_CHANGED
|
4667 MAC_STATUS_CFG_CHANGED
|
4668 MAC_STATUS_MI_COMPLETION
|
4669 MAC_STATUS_LNKSTATE_CHANGED
);
4673 static int tg3_setup_copper_phy(struct tg3
*tp
, bool force_reset
)
4675 bool current_link_up
;
4677 u32 lcl_adv
, rmt_adv
;
4682 tg3_clear_mac_status(tp
);
4684 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
4686 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
4690 tg3_phy_auxctl_write(tp
, MII_TG3_AUXCTL_SHDWSEL_PWRCTL
, 0);
4692 /* Some third-party PHYs need to be reset on link going
4695 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
||
4696 tg3_asic_rev(tp
) == ASIC_REV_5704
||
4697 tg3_asic_rev(tp
) == ASIC_REV_5705
) &&
4699 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4700 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4701 !(bmsr
& BMSR_LSTATUS
))
4707 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
4708 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4709 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
4710 !tg3_flag(tp
, INIT_COMPLETE
))
4713 if (!(bmsr
& BMSR_LSTATUS
)) {
4714 err
= tg3_init_5401phy_dsp(tp
);
4718 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4719 for (i
= 0; i
< 1000; i
++) {
4721 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4722 (bmsr
& BMSR_LSTATUS
)) {
4728 if ((tp
->phy_id
& TG3_PHY_ID_REV_MASK
) ==
4729 TG3_PHY_REV_BCM5401_B0
&&
4730 !(bmsr
& BMSR_LSTATUS
) &&
4731 tp
->link_config
.active_speed
== SPEED_1000
) {
4732 err
= tg3_phy_reset(tp
);
4734 err
= tg3_init_5401phy_dsp(tp
);
4739 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
4740 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
) {
4741 /* 5701 {A0,B0} CRC bug workaround */
4742 tg3_writephy(tp
, 0x15, 0x0a75);
4743 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4744 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8d68);
4745 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x8c68);
4748 /* Clear pending interrupts... */
4749 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4750 tg3_readphy(tp
, MII_TG3_ISTAT
, &val
);
4752 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
)
4753 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
4754 else if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
4755 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
4757 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
4758 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
4759 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
4760 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
4761 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
4763 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
4766 current_link_up
= false;
4767 current_speed
= SPEED_UNKNOWN
;
4768 current_duplex
= DUPLEX_UNKNOWN
;
4769 tp
->phy_flags
&= ~TG3_PHYFLG_MDIX_STATE
;
4770 tp
->link_config
.rmt_adv
= 0;
4772 if (tp
->phy_flags
& TG3_PHYFLG_CAPACITIVE_COUPLING
) {
4773 err
= tg3_phy_auxctl_read(tp
,
4774 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4776 if (!err
&& !(val
& (1 << 10))) {
4777 tg3_phy_auxctl_write(tp
,
4778 MII_TG3_AUXCTL_SHDWSEL_MISCTEST
,
4785 for (i
= 0; i
< 100; i
++) {
4786 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4787 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
4788 (bmsr
& BMSR_LSTATUS
))
4793 if (bmsr
& BMSR_LSTATUS
) {
4796 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
4797 for (i
= 0; i
< 2000; i
++) {
4799 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
4804 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
4809 for (i
= 0; i
< 200; i
++) {
4810 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
4811 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
4813 if (bmcr
&& bmcr
!= 0x7fff)
4821 tp
->link_config
.active_speed
= current_speed
;
4822 tp
->link_config
.active_duplex
= current_duplex
;
4824 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
4825 bool eee_config_ok
= tg3_phy_eee_config_ok(tp
);
4827 if ((bmcr
& BMCR_ANENABLE
) &&
4829 tg3_phy_copper_an_config_ok(tp
, &lcl_adv
) &&
4830 tg3_phy_copper_fetch_rmtadv(tp
, &rmt_adv
))
4831 current_link_up
= true;
4833 /* EEE settings changes take effect only after a phy
4834 * reset. If we have skipped a reset due to Link Flap
4835 * Avoidance being enabled, do it now.
4837 if (!eee_config_ok
&&
4838 (tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
4842 if (!(bmcr
& BMCR_ANENABLE
) &&
4843 tp
->link_config
.speed
== current_speed
&&
4844 tp
->link_config
.duplex
== current_duplex
) {
4845 current_link_up
= true;
4849 if (current_link_up
&&
4850 tp
->link_config
.active_duplex
== DUPLEX_FULL
) {
4853 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
4854 reg
= MII_TG3_FET_GEN_STAT
;
4855 bit
= MII_TG3_FET_GEN_STAT_MDIXSTAT
;
4857 reg
= MII_TG3_EXT_STAT
;
4858 bit
= MII_TG3_EXT_STAT_MDIX
;
4861 if (!tg3_readphy(tp
, reg
, &val
) && (val
& bit
))
4862 tp
->phy_flags
|= TG3_PHYFLG_MDIX_STATE
;
4864 tg3_setup_flow_control(tp
, lcl_adv
, rmt_adv
);
4869 if (!current_link_up
|| (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)) {
4870 tg3_phy_copper_begin(tp
);
4872 if (tg3_flag(tp
, ROBOSWITCH
)) {
4873 current_link_up
= true;
4874 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4875 current_speed
= SPEED_1000
;
4876 current_duplex
= DUPLEX_FULL
;
4877 tp
->link_config
.active_speed
= current_speed
;
4878 tp
->link_config
.active_duplex
= current_duplex
;
4881 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
4882 if ((!tg3_readphy(tp
, MII_BMSR
, &bmsr
) && (bmsr
& BMSR_LSTATUS
)) ||
4883 (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
4884 current_link_up
= true;
4887 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
4888 if (current_link_up
) {
4889 if (tp
->link_config
.active_speed
== SPEED_100
||
4890 tp
->link_config
.active_speed
== SPEED_10
)
4891 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4893 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4894 } else if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
4895 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
4897 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
4899 /* In order for the 5750 core in BCM4785 chip to work properly
4900 * in RGMII mode, the Led Control Register must be set up.
4902 if (tg3_flag(tp
, RGMII_MODE
)) {
4903 u32 led_ctrl
= tr32(MAC_LED_CTRL
);
4904 led_ctrl
&= ~(LED_CTRL_1000MBPS_ON
| LED_CTRL_100MBPS_ON
);
4906 if (tp
->link_config
.active_speed
== SPEED_10
)
4907 led_ctrl
|= LED_CTRL_LNKLED_OVERRIDE
;
4908 else if (tp
->link_config
.active_speed
== SPEED_100
)
4909 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4910 LED_CTRL_100MBPS_ON
);
4911 else if (tp
->link_config
.active_speed
== SPEED_1000
)
4912 led_ctrl
|= (LED_CTRL_LNKLED_OVERRIDE
|
4913 LED_CTRL_1000MBPS_ON
);
4915 tw32(MAC_LED_CTRL
, led_ctrl
);
4919 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
4920 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
4921 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
4923 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
4924 if (current_link_up
&&
4925 tg3_5700_link_polarity(tp
, tp
->link_config
.active_speed
))
4926 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
4928 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
4931 /* ??? Without this setting Netgear GA302T PHY does not
4932 * ??? send/receive packets...
4934 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5411
&&
4935 tg3_chip_rev_id(tp
) == CHIPREV_ID_5700_ALTIMA
) {
4936 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
4937 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
4941 tw32_f(MAC_MODE
, tp
->mac_mode
);
4944 tg3_phy_eee_adjust(tp
, current_link_up
);
4946 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
4947 /* Polled via timer. */
4948 tw32_f(MAC_EVENT
, 0);
4950 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
4954 if (tg3_asic_rev(tp
) == ASIC_REV_5700
&&
4956 tp
->link_config
.active_speed
== SPEED_1000
&&
4957 (tg3_flag(tp
, PCIX_MODE
) || tg3_flag(tp
, PCI_HIGH_SPEED
))) {
4960 (MAC_STATUS_SYNC_CHANGED
|
4961 MAC_STATUS_CFG_CHANGED
));
4964 NIC_SRAM_FIRMWARE_MBOX
,
4965 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
4968 /* Prevent send BD corruption. */
4969 if (tg3_flag(tp
, CLKREQ_BUG
)) {
4970 if (tp
->link_config
.active_speed
== SPEED_100
||
4971 tp
->link_config
.active_speed
== SPEED_10
)
4972 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4973 PCI_EXP_LNKCTL_CLKREQ_EN
);
4975 pcie_capability_set_word(tp
->pdev
, PCI_EXP_LNKCTL
,
4976 PCI_EXP_LNKCTL_CLKREQ_EN
);
4979 tg3_test_and_report_link_chg(tp
, current_link_up
);
4984 struct tg3_fiber_aneginfo
{
4986 #define ANEG_STATE_UNKNOWN 0
4987 #define ANEG_STATE_AN_ENABLE 1
4988 #define ANEG_STATE_RESTART_INIT 2
4989 #define ANEG_STATE_RESTART 3
4990 #define ANEG_STATE_DISABLE_LINK_OK 4
4991 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4992 #define ANEG_STATE_ABILITY_DETECT 6
4993 #define ANEG_STATE_ACK_DETECT_INIT 7
4994 #define ANEG_STATE_ACK_DETECT 8
4995 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4996 #define ANEG_STATE_COMPLETE_ACK 10
4997 #define ANEG_STATE_IDLE_DETECT_INIT 11
4998 #define ANEG_STATE_IDLE_DETECT 12
4999 #define ANEG_STATE_LINK_OK 13
5000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5001 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5004 #define MR_AN_ENABLE 0x00000001
5005 #define MR_RESTART_AN 0x00000002
5006 #define MR_AN_COMPLETE 0x00000004
5007 #define MR_PAGE_RX 0x00000008
5008 #define MR_NP_LOADED 0x00000010
5009 #define MR_TOGGLE_TX 0x00000020
5010 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5011 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5012 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5013 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5016 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5017 #define MR_TOGGLE_RX 0x00002000
5018 #define MR_NP_RX 0x00004000
5020 #define MR_LINK_OK 0x80000000
5022 unsigned long link_time
, cur_time
;
5024 u32 ability_match_cfg
;
5025 int ability_match_count
;
5027 char ability_match
, idle_match
, ack_match
;
5029 u32 txconfig
, rxconfig
;
5030 #define ANEG_CFG_NP 0x00000080
5031 #define ANEG_CFG_ACK 0x00000040
5032 #define ANEG_CFG_RF2 0x00000020
5033 #define ANEG_CFG_RF1 0x00000010
5034 #define ANEG_CFG_PS2 0x00000001
5035 #define ANEG_CFG_PS1 0x00008000
5036 #define ANEG_CFG_HD 0x00004000
5037 #define ANEG_CFG_FD 0x00002000
5038 #define ANEG_CFG_INVAL 0x00001f06
5043 #define ANEG_TIMER_ENAB 2
5044 #define ANEG_FAILED -1
5046 #define ANEG_STATE_SETTLE_TIME 10000
5048 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
5049 struct tg3_fiber_aneginfo
*ap
)
5052 unsigned long delta
;
5056 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
5060 ap
->ability_match_cfg
= 0;
5061 ap
->ability_match_count
= 0;
5062 ap
->ability_match
= 0;
5068 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
5069 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
5071 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
5072 ap
->ability_match_cfg
= rx_cfg_reg
;
5073 ap
->ability_match
= 0;
5074 ap
->ability_match_count
= 0;
5076 if (++ap
->ability_match_count
> 1) {
5077 ap
->ability_match
= 1;
5078 ap
->ability_match_cfg
= rx_cfg_reg
;
5081 if (rx_cfg_reg
& ANEG_CFG_ACK
)
5089 ap
->ability_match_cfg
= 0;
5090 ap
->ability_match_count
= 0;
5091 ap
->ability_match
= 0;
5097 ap
->rxconfig
= rx_cfg_reg
;
5100 switch (ap
->state
) {
5101 case ANEG_STATE_UNKNOWN
:
5102 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
5103 ap
->state
= ANEG_STATE_AN_ENABLE
;
5106 case ANEG_STATE_AN_ENABLE
:
5107 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
5108 if (ap
->flags
& MR_AN_ENABLE
) {
5111 ap
->ability_match_cfg
= 0;
5112 ap
->ability_match_count
= 0;
5113 ap
->ability_match
= 0;
5117 ap
->state
= ANEG_STATE_RESTART_INIT
;
5119 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
5123 case ANEG_STATE_RESTART_INIT
:
5124 ap
->link_time
= ap
->cur_time
;
5125 ap
->flags
&= ~(MR_NP_LOADED
);
5127 tw32(MAC_TX_AUTO_NEG
, 0);
5128 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5129 tw32_f(MAC_MODE
, tp
->mac_mode
);
5132 ret
= ANEG_TIMER_ENAB
;
5133 ap
->state
= ANEG_STATE_RESTART
;
5136 case ANEG_STATE_RESTART
:
5137 delta
= ap
->cur_time
- ap
->link_time
;
5138 if (delta
> ANEG_STATE_SETTLE_TIME
)
5139 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
5141 ret
= ANEG_TIMER_ENAB
;
5144 case ANEG_STATE_DISABLE_LINK_OK
:
5148 case ANEG_STATE_ABILITY_DETECT_INIT
:
5149 ap
->flags
&= ~(MR_TOGGLE_TX
);
5150 ap
->txconfig
= ANEG_CFG_FD
;
5151 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5152 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5153 ap
->txconfig
|= ANEG_CFG_PS1
;
5154 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5155 ap
->txconfig
|= ANEG_CFG_PS2
;
5156 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
5157 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5158 tw32_f(MAC_MODE
, tp
->mac_mode
);
5161 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
5164 case ANEG_STATE_ABILITY_DETECT
:
5165 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0)
5166 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
5169 case ANEG_STATE_ACK_DETECT_INIT
:
5170 ap
->txconfig
|= ANEG_CFG_ACK
;
5171 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
5172 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
5173 tw32_f(MAC_MODE
, tp
->mac_mode
);
5176 ap
->state
= ANEG_STATE_ACK_DETECT
;
5179 case ANEG_STATE_ACK_DETECT
:
5180 if (ap
->ack_match
!= 0) {
5181 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
5182 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
5183 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
5185 ap
->state
= ANEG_STATE_AN_ENABLE
;
5187 } else if (ap
->ability_match
!= 0 &&
5188 ap
->rxconfig
== 0) {
5189 ap
->state
= ANEG_STATE_AN_ENABLE
;
5193 case ANEG_STATE_COMPLETE_ACK_INIT
:
5194 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
5198 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
5199 MR_LP_ADV_HALF_DUPLEX
|
5200 MR_LP_ADV_SYM_PAUSE
|
5201 MR_LP_ADV_ASYM_PAUSE
|
5202 MR_LP_ADV_REMOTE_FAULT1
|
5203 MR_LP_ADV_REMOTE_FAULT2
|
5204 MR_LP_ADV_NEXT_PAGE
|
5207 if (ap
->rxconfig
& ANEG_CFG_FD
)
5208 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
5209 if (ap
->rxconfig
& ANEG_CFG_HD
)
5210 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
5211 if (ap
->rxconfig
& ANEG_CFG_PS1
)
5212 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
5213 if (ap
->rxconfig
& ANEG_CFG_PS2
)
5214 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
5215 if (ap
->rxconfig
& ANEG_CFG_RF1
)
5216 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
5217 if (ap
->rxconfig
& ANEG_CFG_RF2
)
5218 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
5219 if (ap
->rxconfig
& ANEG_CFG_NP
)
5220 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
5222 ap
->link_time
= ap
->cur_time
;
5224 ap
->flags
^= (MR_TOGGLE_TX
);
5225 if (ap
->rxconfig
& 0x0008)
5226 ap
->flags
|= MR_TOGGLE_RX
;
5227 if (ap
->rxconfig
& ANEG_CFG_NP
)
5228 ap
->flags
|= MR_NP_RX
;
5229 ap
->flags
|= MR_PAGE_RX
;
5231 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
5232 ret
= ANEG_TIMER_ENAB
;
5235 case ANEG_STATE_COMPLETE_ACK
:
5236 if (ap
->ability_match
!= 0 &&
5237 ap
->rxconfig
== 0) {
5238 ap
->state
= ANEG_STATE_AN_ENABLE
;
5241 delta
= ap
->cur_time
- ap
->link_time
;
5242 if (delta
> ANEG_STATE_SETTLE_TIME
) {
5243 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
5244 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
5246 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
5247 !(ap
->flags
& MR_NP_RX
)) {
5248 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
5256 case ANEG_STATE_IDLE_DETECT_INIT
:
5257 ap
->link_time
= ap
->cur_time
;
5258 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
5259 tw32_f(MAC_MODE
, tp
->mac_mode
);
5262 ap
->state
= ANEG_STATE_IDLE_DETECT
;
5263 ret
= ANEG_TIMER_ENAB
;
5266 case ANEG_STATE_IDLE_DETECT
:
5267 if (ap
->ability_match
!= 0 &&
5268 ap
->rxconfig
== 0) {
5269 ap
->state
= ANEG_STATE_AN_ENABLE
;
5272 delta
= ap
->cur_time
- ap
->link_time
;
5273 if (delta
> ANEG_STATE_SETTLE_TIME
) {
5274 /* XXX another gem from the Broadcom driver :( */
5275 ap
->state
= ANEG_STATE_LINK_OK
;
5279 case ANEG_STATE_LINK_OK
:
5280 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
5284 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
5285 /* ??? unimplemented */
5288 case ANEG_STATE_NEXT_PAGE_WAIT
:
5289 /* ??? unimplemented */
5300 static int fiber_autoneg(struct tg3
*tp
, u32
*txflags
, u32
*rxflags
)
5303 struct tg3_fiber_aneginfo aninfo
;
5304 int status
= ANEG_FAILED
;
5308 tw32_f(MAC_TX_AUTO_NEG
, 0);
5310 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
5311 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
5314 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
5317 memset(&aninfo
, 0, sizeof(aninfo
));
5318 aninfo
.flags
|= MR_AN_ENABLE
;
5319 aninfo
.state
= ANEG_STATE_UNKNOWN
;
5320 aninfo
.cur_time
= 0;
5322 while (++tick
< 195000) {
5323 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
5324 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
5330 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
5331 tw32_f(MAC_MODE
, tp
->mac_mode
);
5334 *txflags
= aninfo
.txconfig
;
5335 *rxflags
= aninfo
.flags
;
5337 if (status
== ANEG_DONE
&&
5338 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
5339 MR_LP_ADV_FULL_DUPLEX
)))
5345 static void tg3_init_bcm8002(struct tg3
*tp
)
5347 u32 mac_status
= tr32(MAC_STATUS
);
5350 /* Reset when initting first time or we have a link. */
5351 if (tg3_flag(tp
, INIT_COMPLETE
) &&
5352 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
5355 /* Set PLL lock range. */
5356 tg3_writephy(tp
, 0x16, 0x8007);
5359 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
5361 /* Wait for reset to complete. */
5362 /* XXX schedule_timeout() ... */
5363 for (i
= 0; i
< 500; i
++)
5366 /* Config mode; select PMA/Ch 1 regs. */
5367 tg3_writephy(tp
, 0x10, 0x8411);
5369 /* Enable auto-lock and comdet, select txclk for tx. */
5370 tg3_writephy(tp
, 0x11, 0x0a10);
5372 tg3_writephy(tp
, 0x18, 0x00a0);
5373 tg3_writephy(tp
, 0x16, 0x41ff);
5375 /* Assert and deassert POR. */
5376 tg3_writephy(tp
, 0x13, 0x0400);
5378 tg3_writephy(tp
, 0x13, 0x0000);
5380 tg3_writephy(tp
, 0x11, 0x0a50);
5382 tg3_writephy(tp
, 0x11, 0x0a10);
5384 /* Wait for signal to stabilize */
5385 /* XXX schedule_timeout() ... */
5386 for (i
= 0; i
< 15000; i
++)
5389 /* Deselect the channel register so we can read the PHYID
5392 tg3_writephy(tp
, 0x10, 0x8011);
5395 static bool tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
5398 bool current_link_up
;
5399 u32 sg_dig_ctrl
, sg_dig_status
;
5400 u32 serdes_cfg
, expected_sg_dig_ctrl
;
5401 int workaround
, port_a
;
5404 expected_sg_dig_ctrl
= 0;
5407 current_link_up
= false;
5409 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A0
&&
5410 tg3_chip_rev_id(tp
) != CHIPREV_ID_5704_A1
) {
5412 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
5415 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5416 /* preserve bits 20-23 for voltage regulator */
5417 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
5420 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
5422 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
5423 if (sg_dig_ctrl
& SG_DIG_USING_HW_AUTONEG
) {
5425 u32 val
= serdes_cfg
;
5431 tw32_f(MAC_SERDES_CFG
, val
);
5434 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5436 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
5437 tg3_setup_flow_control(tp
, 0, 0);
5438 current_link_up
= true;
5443 /* Want auto-negotiation. */
5444 expected_sg_dig_ctrl
= SG_DIG_USING_HW_AUTONEG
| SG_DIG_COMMON_SETUP
;
5446 flowctrl
= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5447 if (flowctrl
& ADVERTISE_1000XPAUSE
)
5448 expected_sg_dig_ctrl
|= SG_DIG_PAUSE_CAP
;
5449 if (flowctrl
& ADVERTISE_1000XPSE_ASYM
)
5450 expected_sg_dig_ctrl
|= SG_DIG_ASYM_PAUSE
;
5452 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
5453 if ((tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
) &&
5454 tp
->serdes_counter
&&
5455 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
5456 MAC_STATUS_RCVD_CFG
)) ==
5457 MAC_STATUS_PCS_SYNCED
)) {
5458 tp
->serdes_counter
--;
5459 current_link_up
= true;
5464 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
5465 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| SG_DIG_SOFT_RESET
);
5467 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
5469 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5470 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5471 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
5472 MAC_STATUS_SIGNAL_DET
)) {
5473 sg_dig_status
= tr32(SG_DIG_STATUS
);
5474 mac_status
= tr32(MAC_STATUS
);
5476 if ((sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
) &&
5477 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
5478 u32 local_adv
= 0, remote_adv
= 0;
5480 if (sg_dig_ctrl
& SG_DIG_PAUSE_CAP
)
5481 local_adv
|= ADVERTISE_1000XPAUSE
;
5482 if (sg_dig_ctrl
& SG_DIG_ASYM_PAUSE
)
5483 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5485 if (sg_dig_status
& SG_DIG_PARTNER_PAUSE_CAPABLE
)
5486 remote_adv
|= LPA_1000XPAUSE
;
5487 if (sg_dig_status
& SG_DIG_PARTNER_ASYM_PAUSE
)
5488 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5490 tp
->link_config
.rmt_adv
=
5491 mii_adv_to_ethtool_adv_x(remote_adv
);
5493 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5494 current_link_up
= true;
5495 tp
->serdes_counter
= 0;
5496 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5497 } else if (!(sg_dig_status
& SG_DIG_AUTONEG_COMPLETE
)) {
5498 if (tp
->serdes_counter
)
5499 tp
->serdes_counter
--;
5502 u32 val
= serdes_cfg
;
5509 tw32_f(MAC_SERDES_CFG
, val
);
5512 tw32_f(SG_DIG_CTRL
, SG_DIG_COMMON_SETUP
);
5515 /* Link parallel detection - link is up */
5516 /* only if we have PCS_SYNC and not */
5517 /* receiving config code words */
5518 mac_status
= tr32(MAC_STATUS
);
5519 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5520 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
5521 tg3_setup_flow_control(tp
, 0, 0);
5522 current_link_up
= true;
5524 TG3_PHYFLG_PARALLEL_DETECT
;
5525 tp
->serdes_counter
=
5526 SERDES_PARALLEL_DET_TIMEOUT
;
5528 goto restart_autoneg
;
5532 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
5533 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5537 return current_link_up
;
5540 static bool tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
5542 bool current_link_up
= false;
5544 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
))
5547 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5548 u32 txflags
, rxflags
;
5551 if (fiber_autoneg(tp
, &txflags
, &rxflags
)) {
5552 u32 local_adv
= 0, remote_adv
= 0;
5554 if (txflags
& ANEG_CFG_PS1
)
5555 local_adv
|= ADVERTISE_1000XPAUSE
;
5556 if (txflags
& ANEG_CFG_PS2
)
5557 local_adv
|= ADVERTISE_1000XPSE_ASYM
;
5559 if (rxflags
& MR_LP_ADV_SYM_PAUSE
)
5560 remote_adv
|= LPA_1000XPAUSE
;
5561 if (rxflags
& MR_LP_ADV_ASYM_PAUSE
)
5562 remote_adv
|= LPA_1000XPAUSE_ASYM
;
5564 tp
->link_config
.rmt_adv
=
5565 mii_adv_to_ethtool_adv_x(remote_adv
);
5567 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5569 current_link_up
= true;
5571 for (i
= 0; i
< 30; i
++) {
5574 (MAC_STATUS_SYNC_CHANGED
|
5575 MAC_STATUS_CFG_CHANGED
));
5577 if ((tr32(MAC_STATUS
) &
5578 (MAC_STATUS_SYNC_CHANGED
|
5579 MAC_STATUS_CFG_CHANGED
)) == 0)
5583 mac_status
= tr32(MAC_STATUS
);
5584 if (!current_link_up
&&
5585 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
5586 !(mac_status
& MAC_STATUS_RCVD_CFG
))
5587 current_link_up
= true;
5589 tg3_setup_flow_control(tp
, 0, 0);
5591 /* Forcing 1000FD link up. */
5592 current_link_up
= true;
5594 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
5597 tw32_f(MAC_MODE
, tp
->mac_mode
);
5602 return current_link_up
;
5605 static int tg3_setup_fiber_phy(struct tg3
*tp
, bool force_reset
)
5608 u16 orig_active_speed
;
5609 u8 orig_active_duplex
;
5611 bool current_link_up
;
5614 orig_pause_cfg
= tp
->link_config
.active_flowctrl
;
5615 orig_active_speed
= tp
->link_config
.active_speed
;
5616 orig_active_duplex
= tp
->link_config
.active_duplex
;
5618 if (!tg3_flag(tp
, HW_AUTONEG
) &&
5620 tg3_flag(tp
, INIT_COMPLETE
)) {
5621 mac_status
= tr32(MAC_STATUS
);
5622 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
5623 MAC_STATUS_SIGNAL_DET
|
5624 MAC_STATUS_CFG_CHANGED
|
5625 MAC_STATUS_RCVD_CFG
);
5626 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
5627 MAC_STATUS_SIGNAL_DET
)) {
5628 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5629 MAC_STATUS_CFG_CHANGED
));
5634 tw32_f(MAC_TX_AUTO_NEG
, 0);
5636 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
5637 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
5638 tw32_f(MAC_MODE
, tp
->mac_mode
);
5641 if (tp
->phy_id
== TG3_PHY_ID_BCM8002
)
5642 tg3_init_bcm8002(tp
);
5644 /* Enable link change event even when serdes polling. */
5645 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5648 current_link_up
= false;
5649 tp
->link_config
.rmt_adv
= 0;
5650 mac_status
= tr32(MAC_STATUS
);
5652 if (tg3_flag(tp
, HW_AUTONEG
))
5653 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
5655 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
5657 tp
->napi
[0].hw_status
->status
=
5658 (SD_STATUS_UPDATED
|
5659 (tp
->napi
[0].hw_status
->status
& ~SD_STATUS_LINK_CHG
));
5661 for (i
= 0; i
< 100; i
++) {
5662 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
5663 MAC_STATUS_CFG_CHANGED
));
5665 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
5666 MAC_STATUS_CFG_CHANGED
|
5667 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
5671 mac_status
= tr32(MAC_STATUS
);
5672 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
5673 current_link_up
= false;
5674 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
5675 tp
->serdes_counter
== 0) {
5676 tw32_f(MAC_MODE
, (tp
->mac_mode
|
5677 MAC_MODE_SEND_CONFIGS
));
5679 tw32_f(MAC_MODE
, tp
->mac_mode
);
5683 if (current_link_up
) {
5684 tp
->link_config
.active_speed
= SPEED_1000
;
5685 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
5686 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5687 LED_CTRL_LNKLED_OVERRIDE
|
5688 LED_CTRL_1000MBPS_ON
));
5690 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
5691 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
5692 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
5693 LED_CTRL_LNKLED_OVERRIDE
|
5694 LED_CTRL_TRAFFIC_OVERRIDE
));
5697 if (!tg3_test_and_report_link_chg(tp
, current_link_up
)) {
5698 u32 now_pause_cfg
= tp
->link_config
.active_flowctrl
;
5699 if (orig_pause_cfg
!= now_pause_cfg
||
5700 orig_active_speed
!= tp
->link_config
.active_speed
||
5701 orig_active_duplex
!= tp
->link_config
.active_duplex
)
5702 tg3_link_report(tp
);
5708 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, bool force_reset
)
5712 u16 current_speed
= SPEED_UNKNOWN
;
5713 u8 current_duplex
= DUPLEX_UNKNOWN
;
5714 bool current_link_up
= false;
5715 u32 local_adv
, remote_adv
, sgsr
;
5717 if ((tg3_asic_rev(tp
) == ASIC_REV_5719
||
5718 tg3_asic_rev(tp
) == ASIC_REV_5720
) &&
5719 !tg3_readphy(tp
, SERDES_TG3_1000X_STATUS
, &sgsr
) &&
5720 (sgsr
& SERDES_TG3_SGMII_MODE
)) {
5725 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
5727 if (!(sgsr
& SERDES_TG3_LINK_UP
)) {
5728 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5730 current_link_up
= true;
5731 if (sgsr
& SERDES_TG3_SPEED_1000
) {
5732 current_speed
= SPEED_1000
;
5733 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5734 } else if (sgsr
& SERDES_TG3_SPEED_100
) {
5735 current_speed
= SPEED_100
;
5736 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
5738 current_speed
= SPEED_10
;
5739 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
5742 if (sgsr
& SERDES_TG3_FULL_DUPLEX
)
5743 current_duplex
= DUPLEX_FULL
;
5745 current_duplex
= DUPLEX_HALF
;
5748 tw32_f(MAC_MODE
, tp
->mac_mode
);
5751 tg3_clear_mac_status(tp
);
5753 goto fiber_setup_done
;
5756 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
5757 tw32_f(MAC_MODE
, tp
->mac_mode
);
5760 tg3_clear_mac_status(tp
);
5765 tp
->link_config
.rmt_adv
= 0;
5767 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5768 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5769 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5770 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5771 bmsr
|= BMSR_LSTATUS
;
5773 bmsr
&= ~BMSR_LSTATUS
;
5776 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5778 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
5779 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5780 /* do nothing, just check for link up at the end */
5781 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
5784 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5785 newadv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
5786 ADVERTISE_1000XPAUSE
|
5787 ADVERTISE_1000XPSE_ASYM
|
5790 newadv
|= tg3_advert_flowctrl_1000X(tp
->link_config
.flowctrl
);
5791 newadv
|= ethtool_adv_to_mii_adv_x(tp
->link_config
.advertising
);
5793 if ((newadv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
5794 tg3_writephy(tp
, MII_ADVERTISE
, newadv
);
5795 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
5796 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5798 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5799 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
5800 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5807 bmcr
&= ~BMCR_SPEED1000
;
5808 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
5810 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
5811 new_bmcr
|= BMCR_FULLDPLX
;
5813 if (new_bmcr
!= bmcr
) {
5814 /* BMCR_SPEED1000 is a reserved bit that needs
5815 * to be set on write.
5817 new_bmcr
|= BMCR_SPEED1000
;
5819 /* Force a linkdown */
5823 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
5824 adv
&= ~(ADVERTISE_1000XFULL
|
5825 ADVERTISE_1000XHALF
|
5827 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
5828 tg3_writephy(tp
, MII_BMCR
, bmcr
|
5832 tg3_carrier_off(tp
);
5834 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
5836 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5837 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
5838 if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
5839 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
5840 bmsr
|= BMSR_LSTATUS
;
5842 bmsr
&= ~BMSR_LSTATUS
;
5844 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5848 if (bmsr
& BMSR_LSTATUS
) {
5849 current_speed
= SPEED_1000
;
5850 current_link_up
= true;
5851 if (bmcr
& BMCR_FULLDPLX
)
5852 current_duplex
= DUPLEX_FULL
;
5854 current_duplex
= DUPLEX_HALF
;
5859 if (bmcr
& BMCR_ANENABLE
) {
5862 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
5863 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
5864 common
= local_adv
& remote_adv
;
5865 if (common
& (ADVERTISE_1000XHALF
|
5866 ADVERTISE_1000XFULL
)) {
5867 if (common
& ADVERTISE_1000XFULL
)
5868 current_duplex
= DUPLEX_FULL
;
5870 current_duplex
= DUPLEX_HALF
;
5872 tp
->link_config
.rmt_adv
=
5873 mii_adv_to_ethtool_adv_x(remote_adv
);
5874 } else if (!tg3_flag(tp
, 5780_CLASS
)) {
5875 /* Link is up via parallel detect */
5877 current_link_up
= false;
5883 if (current_link_up
&& current_duplex
== DUPLEX_FULL
)
5884 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
5886 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
5887 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
5888 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
5890 tw32_f(MAC_MODE
, tp
->mac_mode
);
5893 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
5895 tp
->link_config
.active_speed
= current_speed
;
5896 tp
->link_config
.active_duplex
= current_duplex
;
5898 tg3_test_and_report_link_chg(tp
, current_link_up
);
5902 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
5904 if (tp
->serdes_counter
) {
5905 /* Give autoneg time to complete. */
5906 tp
->serdes_counter
--;
5911 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
5914 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5915 if (bmcr
& BMCR_ANENABLE
) {
5918 /* Select shadow register 0x1f */
5919 tg3_writephy(tp
, MII_TG3_MISC_SHDW
, 0x7c00);
5920 tg3_readphy(tp
, MII_TG3_MISC_SHDW
, &phy1
);
5922 /* Select expansion interrupt status register */
5923 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5924 MII_TG3_DSP_EXP1_INT_STAT
);
5925 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5926 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5928 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
5929 /* We have signal detect and not receiving
5930 * config code words, link is up by parallel
5934 bmcr
&= ~BMCR_ANENABLE
;
5935 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
5936 tg3_writephy(tp
, MII_BMCR
, bmcr
);
5937 tp
->phy_flags
|= TG3_PHYFLG_PARALLEL_DETECT
;
5940 } else if (tp
->link_up
&&
5941 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
5942 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
)) {
5945 /* Select expansion interrupt status register */
5946 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
5947 MII_TG3_DSP_EXP1_INT_STAT
);
5948 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &phy2
);
5952 /* Config code words received, turn on autoneg. */
5953 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
5954 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
5956 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
5962 static int tg3_setup_phy(struct tg3
*tp
, bool force_reset
)
5967 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
5968 err
= tg3_setup_fiber_phy(tp
, force_reset
);
5969 else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
5970 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
5972 err
= tg3_setup_copper_phy(tp
, force_reset
);
5974 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
5977 val
= tr32(TG3_CPMU_CLCK_STAT
) & CPMU_CLCK_STAT_MAC_CLCK_MASK
;
5978 if (val
== CPMU_CLCK_STAT_MAC_CLCK_62_5
)
5980 else if (val
== CPMU_CLCK_STAT_MAC_CLCK_6_25
)
5985 val
= tr32(GRC_MISC_CFG
) & ~GRC_MISC_CFG_PRESCALAR_MASK
;
5986 val
|= (scale
<< GRC_MISC_CFG_PRESCALAR_SHIFT
);
5987 tw32(GRC_MISC_CFG
, val
);
5990 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5991 (6 << TX_LENGTHS_IPG_SHIFT
);
5992 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
5993 tg3_asic_rev(tp
) == ASIC_REV_5762
)
5994 val
|= tr32(MAC_TX_LENGTHS
) &
5995 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
5996 TX_LENGTHS_CNT_DWN_VAL_MSK
);
5998 if (tp
->link_config
.active_speed
== SPEED_1000
&&
5999 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
6000 tw32(MAC_TX_LENGTHS
, val
|
6001 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
));
6003 tw32(MAC_TX_LENGTHS
, val
|
6004 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
6006 if (!tg3_flag(tp
, 5705_PLUS
)) {
6008 tw32(HOSTCC_STAT_COAL_TICKS
,
6009 tp
->coal
.stats_block_coalesce_usecs
);
6011 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
6015 if (tg3_flag(tp
, ASPM_WORKAROUND
)) {
6016 val
= tr32(PCIE_PWR_MGMT_THRESH
);
6018 val
= (val
& ~PCIE_PWR_MGMT_L1_THRESH_MSK
) |
6021 val
|= PCIE_PWR_MGMT_L1_THRESH_MSK
;
6022 tw32(PCIE_PWR_MGMT_THRESH
, val
);
6028 /* tp->lock must be held */
6029 static u64
tg3_refclk_read(struct tg3
*tp
)
6031 u64 stamp
= tr32(TG3_EAV_REF_CLCK_LSB
);
6032 return stamp
| (u64
)tr32(TG3_EAV_REF_CLCK_MSB
) << 32;
6035 /* tp->lock must be held */
6036 static void tg3_refclk_write(struct tg3
*tp
, u64 newval
)
6038 tw32(TG3_EAV_REF_CLCK_CTL
, TG3_EAV_REF_CLCK_CTL_STOP
);
6039 tw32(TG3_EAV_REF_CLCK_LSB
, newval
& 0xffffffff);
6040 tw32(TG3_EAV_REF_CLCK_MSB
, newval
>> 32);
6041 tw32_f(TG3_EAV_REF_CLCK_CTL
, TG3_EAV_REF_CLCK_CTL_RESUME
);
6044 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
);
6045 static inline void tg3_full_unlock(struct tg3
*tp
);
6046 static int tg3_get_ts_info(struct net_device
*dev
, struct ethtool_ts_info
*info
)
6048 struct tg3
*tp
= netdev_priv(dev
);
6050 info
->so_timestamping
= SOF_TIMESTAMPING_TX_SOFTWARE
|
6051 SOF_TIMESTAMPING_RX_SOFTWARE
|
6052 SOF_TIMESTAMPING_SOFTWARE
;
6054 if (tg3_flag(tp
, PTP_CAPABLE
)) {
6055 info
->so_timestamping
|= SOF_TIMESTAMPING_TX_HARDWARE
|
6056 SOF_TIMESTAMPING_RX_HARDWARE
|
6057 SOF_TIMESTAMPING_RAW_HARDWARE
;
6061 info
->phc_index
= ptp_clock_index(tp
->ptp_clock
);
6063 info
->phc_index
= -1;
6065 info
->tx_types
= (1 << HWTSTAMP_TX_OFF
) | (1 << HWTSTAMP_TX_ON
);
6067 info
->rx_filters
= (1 << HWTSTAMP_FILTER_NONE
) |
6068 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT
) |
6069 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT
) |
6070 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT
);
6074 static int tg3_ptp_adjfreq(struct ptp_clock_info
*ptp
, s32 ppb
)
6076 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
6077 bool neg_adj
= false;
6085 /* Frequency adjustment is performed using hardware with a 24 bit
6086 * accumulator and a programmable correction value. On each clk, the
6087 * correction value gets added to the accumulator and when it
6088 * overflows, the time counter is incremented/decremented.
6090 * So conversion from ppb to correction value is
6091 * ppb * (1 << 24) / 1000000000
6093 correction
= div_u64((u64
)ppb
* (1 << 24), 1000000000ULL) &
6094 TG3_EAV_REF_CLK_CORRECT_MASK
;
6096 tg3_full_lock(tp
, 0);
6099 tw32(TG3_EAV_REF_CLK_CORRECT_CTL
,
6100 TG3_EAV_REF_CLK_CORRECT_EN
|
6101 (neg_adj
? TG3_EAV_REF_CLK_CORRECT_NEG
: 0) | correction
);
6103 tw32(TG3_EAV_REF_CLK_CORRECT_CTL
, 0);
6105 tg3_full_unlock(tp
);
6110 static int tg3_ptp_adjtime(struct ptp_clock_info
*ptp
, s64 delta
)
6112 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
6114 tg3_full_lock(tp
, 0);
6115 tp
->ptp_adjust
+= delta
;
6116 tg3_full_unlock(tp
);
6121 static int tg3_ptp_gettime(struct ptp_clock_info
*ptp
, struct timespec
*ts
)
6125 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
6127 tg3_full_lock(tp
, 0);
6128 ns
= tg3_refclk_read(tp
);
6129 ns
+= tp
->ptp_adjust
;
6130 tg3_full_unlock(tp
);
6132 ts
->tv_sec
= div_u64_rem(ns
, 1000000000, &remainder
);
6133 ts
->tv_nsec
= remainder
;
6138 static int tg3_ptp_settime(struct ptp_clock_info
*ptp
,
6139 const struct timespec
*ts
)
6142 struct tg3
*tp
= container_of(ptp
, struct tg3
, ptp_info
);
6144 ns
= timespec_to_ns(ts
);
6146 tg3_full_lock(tp
, 0);
6147 tg3_refclk_write(tp
, ns
);
6149 tg3_full_unlock(tp
);
6154 static int tg3_ptp_enable(struct ptp_clock_info
*ptp
,
6155 struct ptp_clock_request
*rq
, int on
)
6160 static const struct ptp_clock_info tg3_ptp_caps
= {
6161 .owner
= THIS_MODULE
,
6162 .name
= "tg3 clock",
6163 .max_adj
= 250000000,
6168 .adjfreq
= tg3_ptp_adjfreq
,
6169 .adjtime
= tg3_ptp_adjtime
,
6170 .gettime
= tg3_ptp_gettime
,
6171 .settime
= tg3_ptp_settime
,
6172 .enable
= tg3_ptp_enable
,
6175 static void tg3_hwclock_to_timestamp(struct tg3
*tp
, u64 hwclock
,
6176 struct skb_shared_hwtstamps
*timestamp
)
6178 memset(timestamp
, 0, sizeof(struct skb_shared_hwtstamps
));
6179 timestamp
->hwtstamp
= ns_to_ktime((hwclock
& TG3_TSTAMP_MASK
) +
6183 /* tp->lock must be held */
6184 static void tg3_ptp_init(struct tg3
*tp
)
6186 if (!tg3_flag(tp
, PTP_CAPABLE
))
6189 /* Initialize the hardware clock to the system time. */
6190 tg3_refclk_write(tp
, ktime_to_ns(ktime_get_real()));
6192 tp
->ptp_info
= tg3_ptp_caps
;
6195 /* tp->lock must be held */
6196 static void tg3_ptp_resume(struct tg3
*tp
)
6198 if (!tg3_flag(tp
, PTP_CAPABLE
))
6201 tg3_refclk_write(tp
, ktime_to_ns(ktime_get_real()) + tp
->ptp_adjust
);
6205 static void tg3_ptp_fini(struct tg3
*tp
)
6207 if (!tg3_flag(tp
, PTP_CAPABLE
) || !tp
->ptp_clock
)
6210 ptp_clock_unregister(tp
->ptp_clock
);
6211 tp
->ptp_clock
= NULL
;
6215 static inline int tg3_irq_sync(struct tg3
*tp
)
6217 return tp
->irq_sync
;
6220 static inline void tg3_rd32_loop(struct tg3
*tp
, u32
*dst
, u32 off
, u32 len
)
6224 dst
= (u32
*)((u8
*)dst
+ off
);
6225 for (i
= 0; i
< len
; i
+= sizeof(u32
))
6226 *dst
++ = tr32(off
+ i
);
6229 static void tg3_dump_legacy_regs(struct tg3
*tp
, u32
*regs
)
6231 tg3_rd32_loop(tp
, regs
, TG3PCI_VENDOR
, 0xb0);
6232 tg3_rd32_loop(tp
, regs
, MAILBOX_INTERRUPT_0
, 0x200);
6233 tg3_rd32_loop(tp
, regs
, MAC_MODE
, 0x4f0);
6234 tg3_rd32_loop(tp
, regs
, SNDDATAI_MODE
, 0xe0);
6235 tg3_rd32_loop(tp
, regs
, SNDDATAC_MODE
, 0x04);
6236 tg3_rd32_loop(tp
, regs
, SNDBDS_MODE
, 0x80);
6237 tg3_rd32_loop(tp
, regs
, SNDBDI_MODE
, 0x48);
6238 tg3_rd32_loop(tp
, regs
, SNDBDC_MODE
, 0x04);
6239 tg3_rd32_loop(tp
, regs
, RCVLPC_MODE
, 0x20);
6240 tg3_rd32_loop(tp
, regs
, RCVLPC_SELLST_BASE
, 0x15c);
6241 tg3_rd32_loop(tp
, regs
, RCVDBDI_MODE
, 0x0c);
6242 tg3_rd32_loop(tp
, regs
, RCVDBDI_JUMBO_BD
, 0x3c);
6243 tg3_rd32_loop(tp
, regs
, RCVDBDI_BD_PROD_IDX_0
, 0x44);
6244 tg3_rd32_loop(tp
, regs
, RCVDCC_MODE
, 0x04);
6245 tg3_rd32_loop(tp
, regs
, RCVBDI_MODE
, 0x20);
6246 tg3_rd32_loop(tp
, regs
, RCVCC_MODE
, 0x14);
6247 tg3_rd32_loop(tp
, regs
, RCVLSC_MODE
, 0x08);
6248 tg3_rd32_loop(tp
, regs
, MBFREE_MODE
, 0x08);
6249 tg3_rd32_loop(tp
, regs
, HOSTCC_MODE
, 0x100);
6251 if (tg3_flag(tp
, SUPPORT_MSIX
))
6252 tg3_rd32_loop(tp
, regs
, HOSTCC_RXCOL_TICKS_VEC1
, 0x180);
6254 tg3_rd32_loop(tp
, regs
, MEMARB_MODE
, 0x10);
6255 tg3_rd32_loop(tp
, regs
, BUFMGR_MODE
, 0x58);
6256 tg3_rd32_loop(tp
, regs
, RDMAC_MODE
, 0x08);
6257 tg3_rd32_loop(tp
, regs
, WDMAC_MODE
, 0x08);
6258 tg3_rd32_loop(tp
, regs
, RX_CPU_MODE
, 0x04);
6259 tg3_rd32_loop(tp
, regs
, RX_CPU_STATE
, 0x04);
6260 tg3_rd32_loop(tp
, regs
, RX_CPU_PGMCTR
, 0x04);
6261 tg3_rd32_loop(tp
, regs
, RX_CPU_HWBKPT
, 0x04);
6263 if (!tg3_flag(tp
, 5705_PLUS
)) {
6264 tg3_rd32_loop(tp
, regs
, TX_CPU_MODE
, 0x04);
6265 tg3_rd32_loop(tp
, regs
, TX_CPU_STATE
, 0x04);
6266 tg3_rd32_loop(tp
, regs
, TX_CPU_PGMCTR
, 0x04);
6269 tg3_rd32_loop(tp
, regs
, GRCMBOX_INTERRUPT_0
, 0x110);
6270 tg3_rd32_loop(tp
, regs
, FTQ_RESET
, 0x120);
6271 tg3_rd32_loop(tp
, regs
, MSGINT_MODE
, 0x0c);
6272 tg3_rd32_loop(tp
, regs
, DMAC_MODE
, 0x04);
6273 tg3_rd32_loop(tp
, regs
, GRC_MODE
, 0x4c);
6275 if (tg3_flag(tp
, NVRAM
))
6276 tg3_rd32_loop(tp
, regs
, NVRAM_CMD
, 0x24);
6279 static void tg3_dump_state(struct tg3
*tp
)
6284 regs
= kzalloc(TG3_REG_BLK_SIZE
, GFP_ATOMIC
);
6288 if (tg3_flag(tp
, PCI_EXPRESS
)) {
6289 /* Read up to but not including private PCI registers */
6290 for (i
= 0; i
< TG3_PCIE_TLDLPL_PORT
; i
+= sizeof(u32
))
6291 regs
[i
/ sizeof(u32
)] = tr32(i
);
6293 tg3_dump_legacy_regs(tp
, regs
);
6295 for (i
= 0; i
< TG3_REG_BLK_SIZE
/ sizeof(u32
); i
+= 4) {
6296 if (!regs
[i
+ 0] && !regs
[i
+ 1] &&
6297 !regs
[i
+ 2] && !regs
[i
+ 3])
6300 netdev_err(tp
->dev
, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6302 regs
[i
+ 0], regs
[i
+ 1], regs
[i
+ 2], regs
[i
+ 3]);
6307 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
6308 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
6310 /* SW status block */
6312 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6314 tnapi
->hw_status
->status
,
6315 tnapi
->hw_status
->status_tag
,
6316 tnapi
->hw_status
->rx_jumbo_consumer
,
6317 tnapi
->hw_status
->rx_consumer
,
6318 tnapi
->hw_status
->rx_mini_consumer
,
6319 tnapi
->hw_status
->idx
[0].rx_producer
,
6320 tnapi
->hw_status
->idx
[0].tx_consumer
);
6323 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6325 tnapi
->last_tag
, tnapi
->last_irq_tag
,
6326 tnapi
->tx_prod
, tnapi
->tx_cons
, tnapi
->tx_pending
,
6328 tnapi
->prodring
.rx_std_prod_idx
,
6329 tnapi
->prodring
.rx_std_cons_idx
,
6330 tnapi
->prodring
.rx_jmb_prod_idx
,
6331 tnapi
->prodring
.rx_jmb_cons_idx
);
6335 /* This is called whenever we suspect that the system chipset is re-
6336 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6337 * is bogus tx completions. We try to recover by setting the
6338 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6341 static void tg3_tx_recover(struct tg3
*tp
)
6343 BUG_ON(tg3_flag(tp
, MBOX_WRITE_REORDER
) ||
6344 tp
->write32_tx_mbox
== tg3_write_indirect_mbox
);
6346 netdev_warn(tp
->dev
,
6347 "The system may be re-ordering memory-mapped I/O "
6348 "cycles to the network device, attempting to recover. "
6349 "Please report the problem to the driver maintainer "
6350 "and include system chipset information.\n");
6352 spin_lock(&tp
->lock
);
6353 tg3_flag_set(tp
, TX_RECOVERY_PENDING
);
6354 spin_unlock(&tp
->lock
);
6357 static inline u32
tg3_tx_avail(struct tg3_napi
*tnapi
)
6359 /* Tell compiler to fetch tx indices from memory. */
6361 return tnapi
->tx_pending
-
6362 ((tnapi
->tx_prod
- tnapi
->tx_cons
) & (TG3_TX_RING_SIZE
- 1));
6365 /* Tigon3 never reports partial packet sends. So we do not
6366 * need special logic to handle SKBs that have not had all
6367 * of their frags sent yet, like SunGEM does.
6369 static void tg3_tx(struct tg3_napi
*tnapi
)
6371 struct tg3
*tp
= tnapi
->tp
;
6372 u32 hw_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
6373 u32 sw_idx
= tnapi
->tx_cons
;
6374 struct netdev_queue
*txq
;
6375 int index
= tnapi
- tp
->napi
;
6376 unsigned int pkts_compl
= 0, bytes_compl
= 0;
6378 if (tg3_flag(tp
, ENABLE_TSS
))
6381 txq
= netdev_get_tx_queue(tp
->dev
, index
);
6383 while (sw_idx
!= hw_idx
) {
6384 struct tg3_tx_ring_info
*ri
= &tnapi
->tx_buffers
[sw_idx
];
6385 struct sk_buff
*skb
= ri
->skb
;
6388 if (unlikely(skb
== NULL
)) {
6393 if (tnapi
->tx_ring
[sw_idx
].len_flags
& TXD_FLAG_HWTSTAMP
) {
6394 struct skb_shared_hwtstamps timestamp
;
6395 u64 hwclock
= tr32(TG3_TX_TSTAMP_LSB
);
6396 hwclock
|= (u64
)tr32(TG3_TX_TSTAMP_MSB
) << 32;
6398 tg3_hwclock_to_timestamp(tp
, hwclock
, ×tamp
);
6400 skb_tstamp_tx(skb
, ×tamp
);
6403 pci_unmap_single(tp
->pdev
,
6404 dma_unmap_addr(ri
, mapping
),
6410 while (ri
->fragmented
) {
6411 ri
->fragmented
= false;
6412 sw_idx
= NEXT_TX(sw_idx
);
6413 ri
= &tnapi
->tx_buffers
[sw_idx
];
6416 sw_idx
= NEXT_TX(sw_idx
);
6418 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
6419 ri
= &tnapi
->tx_buffers
[sw_idx
];
6420 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
6423 pci_unmap_page(tp
->pdev
,
6424 dma_unmap_addr(ri
, mapping
),
6425 skb_frag_size(&skb_shinfo(skb
)->frags
[i
]),
6428 while (ri
->fragmented
) {
6429 ri
->fragmented
= false;
6430 sw_idx
= NEXT_TX(sw_idx
);
6431 ri
= &tnapi
->tx_buffers
[sw_idx
];
6434 sw_idx
= NEXT_TX(sw_idx
);
6438 bytes_compl
+= skb
->len
;
6442 if (unlikely(tx_bug
)) {
6448 netdev_tx_completed_queue(txq
, pkts_compl
, bytes_compl
);
6450 tnapi
->tx_cons
= sw_idx
;
6452 /* Need to make the tx_cons update visible to tg3_start_xmit()
6453 * before checking for netif_queue_stopped(). Without the
6454 * memory barrier, there is a small possibility that tg3_start_xmit()
6455 * will miss it and cause the queue to be stopped forever.
6459 if (unlikely(netif_tx_queue_stopped(txq
) &&
6460 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))) {
6461 __netif_tx_lock(txq
, smp_processor_id());
6462 if (netif_tx_queue_stopped(txq
) &&
6463 (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
)))
6464 netif_tx_wake_queue(txq
);
6465 __netif_tx_unlock(txq
);
6469 static void tg3_frag_free(bool is_frag
, void *data
)
6472 put_page(virt_to_head_page(data
));
6477 static void tg3_rx_data_free(struct tg3
*tp
, struct ring_info
*ri
, u32 map_sz
)
6479 unsigned int skb_size
= SKB_DATA_ALIGN(map_sz
+ TG3_RX_OFFSET(tp
)) +
6480 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
6485 pci_unmap_single(tp
->pdev
, dma_unmap_addr(ri
, mapping
),
6486 map_sz
, PCI_DMA_FROMDEVICE
);
6487 tg3_frag_free(skb_size
<= PAGE_SIZE
, ri
->data
);
6492 /* Returns size of skb allocated or < 0 on error.
6494 * We only need to fill in the address because the other members
6495 * of the RX descriptor are invariant, see tg3_init_rings.
6497 * Note the purposeful assymetry of cpu vs. chip accesses. For
6498 * posting buffers we only dirty the first cache line of the RX
6499 * descriptor (containing the address). Whereas for the RX status
6500 * buffers the cpu only reads the last cacheline of the RX descriptor
6501 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6503 static int tg3_alloc_rx_data(struct tg3
*tp
, struct tg3_rx_prodring_set
*tpr
,
6504 u32 opaque_key
, u32 dest_idx_unmasked
,
6505 unsigned int *frag_size
)
6507 struct tg3_rx_buffer_desc
*desc
;
6508 struct ring_info
*map
;
6511 int skb_size
, data_size
, dest_idx
;
6513 switch (opaque_key
) {
6514 case RXD_OPAQUE_RING_STD
:
6515 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6516 desc
= &tpr
->rx_std
[dest_idx
];
6517 map
= &tpr
->rx_std_buffers
[dest_idx
];
6518 data_size
= tp
->rx_pkt_map_sz
;
6521 case RXD_OPAQUE_RING_JUMBO
:
6522 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6523 desc
= &tpr
->rx_jmb
[dest_idx
].std
;
6524 map
= &tpr
->rx_jmb_buffers
[dest_idx
];
6525 data_size
= TG3_RX_JMB_MAP_SZ
;
6532 /* Do not overwrite any of the map or rp information
6533 * until we are sure we can commit to a new buffer.
6535 * Callers depend upon this behavior and assume that
6536 * we leave everything unchanged if we fail.
6538 skb_size
= SKB_DATA_ALIGN(data_size
+ TG3_RX_OFFSET(tp
)) +
6539 SKB_DATA_ALIGN(sizeof(struct skb_shared_info
));
6540 if (skb_size
<= PAGE_SIZE
) {
6541 data
= netdev_alloc_frag(skb_size
);
6542 *frag_size
= skb_size
;
6544 data
= kmalloc(skb_size
, GFP_ATOMIC
);
6550 mapping
= pci_map_single(tp
->pdev
,
6551 data
+ TG3_RX_OFFSET(tp
),
6553 PCI_DMA_FROMDEVICE
);
6554 if (unlikely(pci_dma_mapping_error(tp
->pdev
, mapping
))) {
6555 tg3_frag_free(skb_size
<= PAGE_SIZE
, data
);
6560 dma_unmap_addr_set(map
, mapping
, mapping
);
6562 desc
->addr_hi
= ((u64
)mapping
>> 32);
6563 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
6568 /* We only need to move over in the address because the other
6569 * members of the RX descriptor are invariant. See notes above
6570 * tg3_alloc_rx_data for full details.
6572 static void tg3_recycle_rx(struct tg3_napi
*tnapi
,
6573 struct tg3_rx_prodring_set
*dpr
,
6574 u32 opaque_key
, int src_idx
,
6575 u32 dest_idx_unmasked
)
6577 struct tg3
*tp
= tnapi
->tp
;
6578 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
6579 struct ring_info
*src_map
, *dest_map
;
6580 struct tg3_rx_prodring_set
*spr
= &tp
->napi
[0].prodring
;
6583 switch (opaque_key
) {
6584 case RXD_OPAQUE_RING_STD
:
6585 dest_idx
= dest_idx_unmasked
& tp
->rx_std_ring_mask
;
6586 dest_desc
= &dpr
->rx_std
[dest_idx
];
6587 dest_map
= &dpr
->rx_std_buffers
[dest_idx
];
6588 src_desc
= &spr
->rx_std
[src_idx
];
6589 src_map
= &spr
->rx_std_buffers
[src_idx
];
6592 case RXD_OPAQUE_RING_JUMBO
:
6593 dest_idx
= dest_idx_unmasked
& tp
->rx_jmb_ring_mask
;
6594 dest_desc
= &dpr
->rx_jmb
[dest_idx
].std
;
6595 dest_map
= &dpr
->rx_jmb_buffers
[dest_idx
];
6596 src_desc
= &spr
->rx_jmb
[src_idx
].std
;
6597 src_map
= &spr
->rx_jmb_buffers
[src_idx
];
6604 dest_map
->data
= src_map
->data
;
6605 dma_unmap_addr_set(dest_map
, mapping
,
6606 dma_unmap_addr(src_map
, mapping
));
6607 dest_desc
->addr_hi
= src_desc
->addr_hi
;
6608 dest_desc
->addr_lo
= src_desc
->addr_lo
;
6610 /* Ensure that the update to the skb happens after the physical
6611 * addresses have been transferred to the new BD location.
6615 src_map
->data
= NULL
;
6618 /* The RX ring scheme is composed of multiple rings which post fresh
6619 * buffers to the chip, and one special ring the chip uses to report
6620 * status back to the host.
6622 * The special ring reports the status of received packets to the
6623 * host. The chip does not write into the original descriptor the
6624 * RX buffer was obtained from. The chip simply takes the original
6625 * descriptor as provided by the host, updates the status and length
6626 * field, then writes this into the next status ring entry.
6628 * Each ring the host uses to post buffers to the chip is described
6629 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6630 * it is first placed into the on-chip ram. When the packet's length
6631 * is known, it walks down the TG3_BDINFO entries to select the ring.
6632 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6633 * which is within the range of the new packet's length is chosen.
6635 * The "separate ring for rx status" scheme may sound queer, but it makes
6636 * sense from a cache coherency perspective. If only the host writes
6637 * to the buffer post rings, and only the chip writes to the rx status
6638 * rings, then cache lines never move beyond shared-modified state.
6639 * If both the host and chip were to write into the same ring, cache line
6640 * eviction could occur since both entities want it in an exclusive state.
6642 static int tg3_rx(struct tg3_napi
*tnapi
, int budget
)
6644 struct tg3
*tp
= tnapi
->tp
;
6645 u32 work_mask
, rx_std_posted
= 0;
6646 u32 std_prod_idx
, jmb_prod_idx
;
6647 u32 sw_idx
= tnapi
->rx_rcb_ptr
;
6650 struct tg3_rx_prodring_set
*tpr
= &tnapi
->prodring
;
6652 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6654 * We need to order the read of hw_idx and the read of
6655 * the opaque cookie.
6660 std_prod_idx
= tpr
->rx_std_prod_idx
;
6661 jmb_prod_idx
= tpr
->rx_jmb_prod_idx
;
6662 while (sw_idx
!= hw_idx
&& budget
> 0) {
6663 struct ring_info
*ri
;
6664 struct tg3_rx_buffer_desc
*desc
= &tnapi
->rx_rcb
[sw_idx
];
6666 struct sk_buff
*skb
;
6667 dma_addr_t dma_addr
;
6668 u32 opaque_key
, desc_idx
, *post_ptr
;
6672 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
6673 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
6674 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
6675 ri
= &tp
->napi
[0].prodring
.rx_std_buffers
[desc_idx
];
6676 dma_addr
= dma_unmap_addr(ri
, mapping
);
6678 post_ptr
= &std_prod_idx
;
6680 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
6681 ri
= &tp
->napi
[0].prodring
.rx_jmb_buffers
[desc_idx
];
6682 dma_addr
= dma_unmap_addr(ri
, mapping
);
6684 post_ptr
= &jmb_prod_idx
;
6686 goto next_pkt_nopost
;
6688 work_mask
|= opaque_key
;
6690 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
6691 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
6693 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6694 desc_idx
, *post_ptr
);
6696 /* Other statistics kept track of by card. */
6701 prefetch(data
+ TG3_RX_OFFSET(tp
));
6702 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) -
6705 if ((desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6706 RXD_FLAG_PTPSTAT_PTPV1
||
6707 (desc
->type_flags
& RXD_FLAG_PTPSTAT_MASK
) ==
6708 RXD_FLAG_PTPSTAT_PTPV2
) {
6709 tstamp
= tr32(TG3_RX_TSTAMP_LSB
);
6710 tstamp
|= (u64
)tr32(TG3_RX_TSTAMP_MSB
) << 32;
6713 if (len
> TG3_RX_COPY_THRESH(tp
)) {
6715 unsigned int frag_size
;
6717 skb_size
= tg3_alloc_rx_data(tp
, tpr
, opaque_key
,
6718 *post_ptr
, &frag_size
);
6722 pci_unmap_single(tp
->pdev
, dma_addr
, skb_size
,
6723 PCI_DMA_FROMDEVICE
);
6725 /* Ensure that the update to the data happens
6726 * after the usage of the old DMA mapping.
6732 skb
= build_skb(data
, frag_size
);
6734 tg3_frag_free(frag_size
!= 0, data
);
6735 goto drop_it_no_recycle
;
6737 skb_reserve(skb
, TG3_RX_OFFSET(tp
));
6739 tg3_recycle_rx(tnapi
, tpr
, opaque_key
,
6740 desc_idx
, *post_ptr
);
6742 skb
= netdev_alloc_skb(tp
->dev
,
6743 len
+ TG3_RAW_IP_ALIGN
);
6745 goto drop_it_no_recycle
;
6747 skb_reserve(skb
, TG3_RAW_IP_ALIGN
);
6748 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6750 data
+ TG3_RX_OFFSET(tp
),
6752 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
6757 tg3_hwclock_to_timestamp(tp
, tstamp
,
6758 skb_hwtstamps(skb
));
6760 if ((tp
->dev
->features
& NETIF_F_RXCSUM
) &&
6761 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
6762 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
6763 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
6764 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
6766 skb_checksum_none_assert(skb
);
6768 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
6770 if (len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
6771 skb
->protocol
!= htons(ETH_P_8021Q
)) {
6773 goto drop_it_no_recycle
;
6776 if (desc
->type_flags
& RXD_FLAG_VLAN
&&
6777 !(tp
->rx_mode
& RX_MODE_KEEP_VLAN_TAG
))
6778 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
6779 desc
->err_vlan
& RXD_VLAN_MASK
);
6781 napi_gro_receive(&tnapi
->napi
, skb
);
6789 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
6790 tpr
->rx_std_prod_idx
= std_prod_idx
&
6791 tp
->rx_std_ring_mask
;
6792 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6793 tpr
->rx_std_prod_idx
);
6794 work_mask
&= ~RXD_OPAQUE_RING_STD
;
6799 sw_idx
&= tp
->rx_ret_ring_mask
;
6801 /* Refresh hw_idx to see if there is new work */
6802 if (sw_idx
== hw_idx
) {
6803 hw_idx
= *(tnapi
->rx_rcb_prod_idx
);
6808 /* ACK the status ring. */
6809 tnapi
->rx_rcb_ptr
= sw_idx
;
6810 tw32_rx_mbox(tnapi
->consmbox
, sw_idx
);
6812 /* Refill RX ring(s). */
6813 if (!tg3_flag(tp
, ENABLE_RSS
)) {
6814 /* Sync BD data before updating mailbox */
6817 if (work_mask
& RXD_OPAQUE_RING_STD
) {
6818 tpr
->rx_std_prod_idx
= std_prod_idx
&
6819 tp
->rx_std_ring_mask
;
6820 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
6821 tpr
->rx_std_prod_idx
);
6823 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
6824 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
&
6825 tp
->rx_jmb_ring_mask
;
6826 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
6827 tpr
->rx_jmb_prod_idx
);
6830 } else if (work_mask
) {
6831 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6832 * updated before the producer indices can be updated.
6836 tpr
->rx_std_prod_idx
= std_prod_idx
& tp
->rx_std_ring_mask
;
6837 tpr
->rx_jmb_prod_idx
= jmb_prod_idx
& tp
->rx_jmb_ring_mask
;
6839 if (tnapi
!= &tp
->napi
[1]) {
6840 tp
->rx_refill
= true;
6841 napi_schedule(&tp
->napi
[1].napi
);
6848 static void tg3_poll_link(struct tg3
*tp
)
6850 /* handle link change and other phy events */
6851 if (!(tg3_flag(tp
, USE_LINKCHG_REG
) || tg3_flag(tp
, POLL_SERDES
))) {
6852 struct tg3_hw_status
*sblk
= tp
->napi
[0].hw_status
;
6854 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
6855 sblk
->status
= SD_STATUS_UPDATED
|
6856 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
6857 spin_lock(&tp
->lock
);
6858 if (tg3_flag(tp
, USE_PHYLIB
)) {
6860 (MAC_STATUS_SYNC_CHANGED
|
6861 MAC_STATUS_CFG_CHANGED
|
6862 MAC_STATUS_MI_COMPLETION
|
6863 MAC_STATUS_LNKSTATE_CHANGED
));
6866 tg3_setup_phy(tp
, false);
6867 spin_unlock(&tp
->lock
);
6872 static int tg3_rx_prodring_xfer(struct tg3
*tp
,
6873 struct tg3_rx_prodring_set
*dpr
,
6874 struct tg3_rx_prodring_set
*spr
)
6876 u32 si
, di
, cpycnt
, src_prod_idx
;
6880 src_prod_idx
= spr
->rx_std_prod_idx
;
6882 /* Make sure updates to the rx_std_buffers[] entries and the
6883 * standard producer index are seen in the correct order.
6887 if (spr
->rx_std_cons_idx
== src_prod_idx
)
6890 if (spr
->rx_std_cons_idx
< src_prod_idx
)
6891 cpycnt
= src_prod_idx
- spr
->rx_std_cons_idx
;
6893 cpycnt
= tp
->rx_std_ring_mask
+ 1 -
6894 spr
->rx_std_cons_idx
;
6896 cpycnt
= min(cpycnt
,
6897 tp
->rx_std_ring_mask
+ 1 - dpr
->rx_std_prod_idx
);
6899 si
= spr
->rx_std_cons_idx
;
6900 di
= dpr
->rx_std_prod_idx
;
6902 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6903 if (dpr
->rx_std_buffers
[i
].data
) {
6913 /* Ensure that updates to the rx_std_buffers ring and the
6914 * shadowed hardware producer ring from tg3_recycle_skb() are
6915 * ordered correctly WRT the skb check above.
6919 memcpy(&dpr
->rx_std_buffers
[di
],
6920 &spr
->rx_std_buffers
[si
],
6921 cpycnt
* sizeof(struct ring_info
));
6923 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6924 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6925 sbd
= &spr
->rx_std
[si
];
6926 dbd
= &dpr
->rx_std
[di
];
6927 dbd
->addr_hi
= sbd
->addr_hi
;
6928 dbd
->addr_lo
= sbd
->addr_lo
;
6931 spr
->rx_std_cons_idx
= (spr
->rx_std_cons_idx
+ cpycnt
) &
6932 tp
->rx_std_ring_mask
;
6933 dpr
->rx_std_prod_idx
= (dpr
->rx_std_prod_idx
+ cpycnt
) &
6934 tp
->rx_std_ring_mask
;
6938 src_prod_idx
= spr
->rx_jmb_prod_idx
;
6940 /* Make sure updates to the rx_jmb_buffers[] entries and
6941 * the jumbo producer index are seen in the correct order.
6945 if (spr
->rx_jmb_cons_idx
== src_prod_idx
)
6948 if (spr
->rx_jmb_cons_idx
< src_prod_idx
)
6949 cpycnt
= src_prod_idx
- spr
->rx_jmb_cons_idx
;
6951 cpycnt
= tp
->rx_jmb_ring_mask
+ 1 -
6952 spr
->rx_jmb_cons_idx
;
6954 cpycnt
= min(cpycnt
,
6955 tp
->rx_jmb_ring_mask
+ 1 - dpr
->rx_jmb_prod_idx
);
6957 si
= spr
->rx_jmb_cons_idx
;
6958 di
= dpr
->rx_jmb_prod_idx
;
6960 for (i
= di
; i
< di
+ cpycnt
; i
++) {
6961 if (dpr
->rx_jmb_buffers
[i
].data
) {
6971 /* Ensure that updates to the rx_jmb_buffers ring and the
6972 * shadowed hardware producer ring from tg3_recycle_skb() are
6973 * ordered correctly WRT the skb check above.
6977 memcpy(&dpr
->rx_jmb_buffers
[di
],
6978 &spr
->rx_jmb_buffers
[si
],
6979 cpycnt
* sizeof(struct ring_info
));
6981 for (i
= 0; i
< cpycnt
; i
++, di
++, si
++) {
6982 struct tg3_rx_buffer_desc
*sbd
, *dbd
;
6983 sbd
= &spr
->rx_jmb
[si
].std
;
6984 dbd
= &dpr
->rx_jmb
[di
].std
;
6985 dbd
->addr_hi
= sbd
->addr_hi
;
6986 dbd
->addr_lo
= sbd
->addr_lo
;
6989 spr
->rx_jmb_cons_idx
= (spr
->rx_jmb_cons_idx
+ cpycnt
) &
6990 tp
->rx_jmb_ring_mask
;
6991 dpr
->rx_jmb_prod_idx
= (dpr
->rx_jmb_prod_idx
+ cpycnt
) &
6992 tp
->rx_jmb_ring_mask
;
6998 static int tg3_poll_work(struct tg3_napi
*tnapi
, int work_done
, int budget
)
7000 struct tg3
*tp
= tnapi
->tp
;
7002 /* run TX completion thread */
7003 if (tnapi
->hw_status
->idx
[0].tx_consumer
!= tnapi
->tx_cons
) {
7005 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
7009 if (!tnapi
->rx_rcb_prod_idx
)
7012 /* run RX thread, within the bounds set by NAPI.
7013 * All RX "locking" is done by ensuring outside
7014 * code synchronizes with tg3->napi.poll()
7016 if (*(tnapi
->rx_rcb_prod_idx
) != tnapi
->rx_rcb_ptr
)
7017 work_done
+= tg3_rx(tnapi
, budget
- work_done
);
7019 if (tg3_flag(tp
, ENABLE_RSS
) && tnapi
== &tp
->napi
[1]) {
7020 struct tg3_rx_prodring_set
*dpr
= &tp
->napi
[0].prodring
;
7022 u32 std_prod_idx
= dpr
->rx_std_prod_idx
;
7023 u32 jmb_prod_idx
= dpr
->rx_jmb_prod_idx
;
7025 tp
->rx_refill
= false;
7026 for (i
= 1; i
<= tp
->rxq_cnt
; i
++)
7027 err
|= tg3_rx_prodring_xfer(tp
, dpr
,
7028 &tp
->napi
[i
].prodring
);
7032 if (std_prod_idx
!= dpr
->rx_std_prod_idx
)
7033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
,
7034 dpr
->rx_std_prod_idx
);
7036 if (jmb_prod_idx
!= dpr
->rx_jmb_prod_idx
)
7037 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
,
7038 dpr
->rx_jmb_prod_idx
);
7043 tw32_f(HOSTCC_MODE
, tp
->coal_now
);
7049 static inline void tg3_reset_task_schedule(struct tg3
*tp
)
7051 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING
, tp
->tg3_flags
))
7052 schedule_work(&tp
->reset_task
);
7055 static inline void tg3_reset_task_cancel(struct tg3
*tp
)
7057 cancel_work_sync(&tp
->reset_task
);
7058 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
7059 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
7062 static int tg3_poll_msix(struct napi_struct
*napi
, int budget
)
7064 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
7065 struct tg3
*tp
= tnapi
->tp
;
7067 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
7070 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
7072 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
7075 if (unlikely(work_done
>= budget
))
7078 /* tp->last_tag is used in tg3_int_reenable() below
7079 * to tell the hw how much work has been processed,
7080 * so we must read it before checking for more work.
7082 tnapi
->last_tag
= sblk
->status_tag
;
7083 tnapi
->last_irq_tag
= tnapi
->last_tag
;
7086 /* check for RX/TX work to do */
7087 if (likely(sblk
->idx
[0].tx_consumer
== tnapi
->tx_cons
&&
7088 *(tnapi
->rx_rcb_prod_idx
) == tnapi
->rx_rcb_ptr
)) {
7090 /* This test here is not race free, but will reduce
7091 * the number of interrupts by looping again.
7093 if (tnapi
== &tp
->napi
[1] && tp
->rx_refill
)
7096 napi_complete(napi
);
7097 /* Reenable interrupts. */
7098 tw32_mailbox(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
7100 /* This test here is synchronized by napi_schedule()
7101 * and napi_complete() to close the race condition.
7103 if (unlikely(tnapi
== &tp
->napi
[1] && tp
->rx_refill
)) {
7104 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
7105 HOSTCC_MODE_ENABLE
|
7116 /* work_done is guaranteed to be less than budget. */
7117 napi_complete(napi
);
7118 tg3_reset_task_schedule(tp
);
7122 static void tg3_process_error(struct tg3
*tp
)
7125 bool real_error
= false;
7127 if (tg3_flag(tp
, ERROR_PROCESSED
))
7130 /* Check Flow Attention register */
7131 val
= tr32(HOSTCC_FLOW_ATTN
);
7132 if (val
& ~HOSTCC_FLOW_ATTN_MBUF_LWM
) {
7133 netdev_err(tp
->dev
, "FLOW Attention error. Resetting chip.\n");
7137 if (tr32(MSGINT_STATUS
) & ~MSGINT_STATUS_MSI_REQ
) {
7138 netdev_err(tp
->dev
, "MSI Status error. Resetting chip.\n");
7142 if (tr32(RDMAC_STATUS
) || tr32(WDMAC_STATUS
)) {
7143 netdev_err(tp
->dev
, "DMA Status error. Resetting chip.\n");
7152 tg3_flag_set(tp
, ERROR_PROCESSED
);
7153 tg3_reset_task_schedule(tp
);
7156 static int tg3_poll(struct napi_struct
*napi
, int budget
)
7158 struct tg3_napi
*tnapi
= container_of(napi
, struct tg3_napi
, napi
);
7159 struct tg3
*tp
= tnapi
->tp
;
7161 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
7164 if (sblk
->status
& SD_STATUS_ERROR
)
7165 tg3_process_error(tp
);
7169 work_done
= tg3_poll_work(tnapi
, work_done
, budget
);
7171 if (unlikely(tg3_flag(tp
, TX_RECOVERY_PENDING
)))
7174 if (unlikely(work_done
>= budget
))
7177 if (tg3_flag(tp
, TAGGED_STATUS
)) {
7178 /* tp->last_tag is used in tg3_int_reenable() below
7179 * to tell the hw how much work has been processed,
7180 * so we must read it before checking for more work.
7182 tnapi
->last_tag
= sblk
->status_tag
;
7183 tnapi
->last_irq_tag
= tnapi
->last_tag
;
7186 sblk
->status
&= ~SD_STATUS_UPDATED
;
7188 if (likely(!tg3_has_work(tnapi
))) {
7189 napi_complete(napi
);
7190 tg3_int_reenable(tnapi
);
7198 /* work_done is guaranteed to be less than budget. */
7199 napi_complete(napi
);
7200 tg3_reset_task_schedule(tp
);
7204 static void tg3_napi_disable(struct tg3
*tp
)
7208 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--)
7209 napi_disable(&tp
->napi
[i
].napi
);
7212 static void tg3_napi_enable(struct tg3
*tp
)
7216 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7217 napi_enable(&tp
->napi
[i
].napi
);
7220 static void tg3_napi_init(struct tg3
*tp
)
7224 netif_napi_add(tp
->dev
, &tp
->napi
[0].napi
, tg3_poll
, 64);
7225 for (i
= 1; i
< tp
->irq_cnt
; i
++)
7226 netif_napi_add(tp
->dev
, &tp
->napi
[i
].napi
, tg3_poll_msix
, 64);
7229 static void tg3_napi_fini(struct tg3
*tp
)
7233 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7234 netif_napi_del(&tp
->napi
[i
].napi
);
7237 static inline void tg3_netif_stop(struct tg3
*tp
)
7239 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
7240 tg3_napi_disable(tp
);
7241 netif_carrier_off(tp
->dev
);
7242 netif_tx_disable(tp
->dev
);
7245 /* tp->lock must be held */
7246 static inline void tg3_netif_start(struct tg3
*tp
)
7250 /* NOTE: unconditional netif_tx_wake_all_queues is only
7251 * appropriate so long as all callers are assured to
7252 * have free tx slots (such as after tg3_init_hw)
7254 netif_tx_wake_all_queues(tp
->dev
);
7257 netif_carrier_on(tp
->dev
);
7259 tg3_napi_enable(tp
);
7260 tp
->napi
[0].hw_status
->status
|= SD_STATUS_UPDATED
;
7261 tg3_enable_ints(tp
);
7264 static void tg3_irq_quiesce(struct tg3
*tp
)
7268 BUG_ON(tp
->irq_sync
);
7273 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7274 synchronize_irq(tp
->napi
[i
].irq_vec
);
7277 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7278 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7279 * with as well. Most of the time, this is not necessary except when
7280 * shutting down the device.
7282 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
7284 spin_lock_bh(&tp
->lock
);
7286 tg3_irq_quiesce(tp
);
7289 static inline void tg3_full_unlock(struct tg3
*tp
)
7291 spin_unlock_bh(&tp
->lock
);
7294 /* One-shot MSI handler - Chip automatically disables interrupt
7295 * after sending MSI so driver doesn't have to do it.
7297 static irqreturn_t
tg3_msi_1shot(int irq
, void *dev_id
)
7299 struct tg3_napi
*tnapi
= dev_id
;
7300 struct tg3
*tp
= tnapi
->tp
;
7302 prefetch(tnapi
->hw_status
);
7304 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
7306 if (likely(!tg3_irq_sync(tp
)))
7307 napi_schedule(&tnapi
->napi
);
7312 /* MSI ISR - No need to check for interrupt sharing and no need to
7313 * flush status block and interrupt mailbox. PCI ordering rules
7314 * guarantee that MSI will arrive after the status block.
7316 static irqreturn_t
tg3_msi(int irq
, void *dev_id
)
7318 struct tg3_napi
*tnapi
= dev_id
;
7319 struct tg3
*tp
= tnapi
->tp
;
7321 prefetch(tnapi
->hw_status
);
7323 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
7325 * Writing any value to intr-mbox-0 clears PCI INTA# and
7326 * chip-internal interrupt pending events.
7327 * Writing non-zero to intr-mbox-0 additional tells the
7328 * NIC to stop sending us irqs, engaging "in-intr-handler"
7331 tw32_mailbox(tnapi
->int_mbox
, 0x00000001);
7332 if (likely(!tg3_irq_sync(tp
)))
7333 napi_schedule(&tnapi
->napi
);
7335 return IRQ_RETVAL(1);
7338 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
)
7340 struct tg3_napi
*tnapi
= dev_id
;
7341 struct tg3
*tp
= tnapi
->tp
;
7342 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
7343 unsigned int handled
= 1;
7345 /* In INTx mode, it is possible for the interrupt to arrive at
7346 * the CPU before the status block posted prior to the interrupt.
7347 * Reading the PCI State register will confirm whether the
7348 * interrupt is ours and will flush the status block.
7350 if (unlikely(!(sblk
->status
& SD_STATUS_UPDATED
))) {
7351 if (tg3_flag(tp
, CHIP_RESETTING
) ||
7352 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
7359 * Writing any value to intr-mbox-0 clears PCI INTA# and
7360 * chip-internal interrupt pending events.
7361 * Writing non-zero to intr-mbox-0 additional tells the
7362 * NIC to stop sending us irqs, engaging "in-intr-handler"
7365 * Flush the mailbox to de-assert the IRQ immediately to prevent
7366 * spurious interrupts. The flush impacts performance but
7367 * excessive spurious interrupts can be worse in some cases.
7369 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
7370 if (tg3_irq_sync(tp
))
7372 sblk
->status
&= ~SD_STATUS_UPDATED
;
7373 if (likely(tg3_has_work(tnapi
))) {
7374 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
7375 napi_schedule(&tnapi
->napi
);
7377 /* No work, shared interrupt perhaps? re-enable
7378 * interrupts, and flush that PCI write
7380 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
7384 return IRQ_RETVAL(handled
);
7387 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
)
7389 struct tg3_napi
*tnapi
= dev_id
;
7390 struct tg3
*tp
= tnapi
->tp
;
7391 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
7392 unsigned int handled
= 1;
7394 /* In INTx mode, it is possible for the interrupt to arrive at
7395 * the CPU before the status block posted prior to the interrupt.
7396 * Reading the PCI State register will confirm whether the
7397 * interrupt is ours and will flush the status block.
7399 if (unlikely(sblk
->status_tag
== tnapi
->last_irq_tag
)) {
7400 if (tg3_flag(tp
, CHIP_RESETTING
) ||
7401 (tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
7408 * writing any value to intr-mbox-0 clears PCI INTA# and
7409 * chip-internal interrupt pending events.
7410 * writing non-zero to intr-mbox-0 additional tells the
7411 * NIC to stop sending us irqs, engaging "in-intr-handler"
7414 * Flush the mailbox to de-assert the IRQ immediately to prevent
7415 * spurious interrupts. The flush impacts performance but
7416 * excessive spurious interrupts can be worse in some cases.
7418 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
7421 * In a shared interrupt configuration, sometimes other devices'
7422 * interrupts will scream. We record the current status tag here
7423 * so that the above check can report that the screaming interrupts
7424 * are unhandled. Eventually they will be silenced.
7426 tnapi
->last_irq_tag
= sblk
->status_tag
;
7428 if (tg3_irq_sync(tp
))
7431 prefetch(&tnapi
->rx_rcb
[tnapi
->rx_rcb_ptr
]);
7433 napi_schedule(&tnapi
->napi
);
7436 return IRQ_RETVAL(handled
);
7439 /* ISR for interrupt test */
7440 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
)
7442 struct tg3_napi
*tnapi
= dev_id
;
7443 struct tg3
*tp
= tnapi
->tp
;
7444 struct tg3_hw_status
*sblk
= tnapi
->hw_status
;
7446 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
7447 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
7448 tg3_disable_ints(tp
);
7449 return IRQ_RETVAL(1);
7451 return IRQ_RETVAL(0);
7454 #ifdef CONFIG_NET_POLL_CONTROLLER
7455 static void tg3_poll_controller(struct net_device
*dev
)
7458 struct tg3
*tp
= netdev_priv(dev
);
7460 if (tg3_irq_sync(tp
))
7463 for (i
= 0; i
< tp
->irq_cnt
; i
++)
7464 tg3_interrupt(tp
->napi
[i
].irq_vec
, &tp
->napi
[i
]);
7468 static void tg3_tx_timeout(struct net_device
*dev
)
7470 struct tg3
*tp
= netdev_priv(dev
);
7472 if (netif_msg_tx_err(tp
)) {
7473 netdev_err(dev
, "transmit timed out, resetting\n");
7477 tg3_reset_task_schedule(tp
);
7480 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7481 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
7483 u32 base
= (u32
) mapping
& 0xffffffff;
7485 return (base
> 0xffffdcc0) && (base
+ len
+ 8 < base
);
7488 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7489 * of any 4GB boundaries: 4G, 8G, etc
7491 static inline int tg3_4g_tso_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
7494 if (tg3_asic_rev(tp
) == ASIC_REV_5762
&& mss
) {
7495 u32 base
= (u32
) mapping
& 0xffffffff;
7497 return ((base
+ len
+ (mss
& 0x3fff)) < base
);
7502 /* Test for DMA addresses > 40-bit */
7503 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
7506 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7507 if (tg3_flag(tp
, 40BIT_DMA_BUG
))
7508 return ((u64
) mapping
+ len
) > DMA_BIT_MASK(40);
7515 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc
*txbd
,
7516 dma_addr_t mapping
, u32 len
, u32 flags
,
7519 txbd
->addr_hi
= ((u64
) mapping
>> 32);
7520 txbd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
7521 txbd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | (flags
& 0x0000ffff);
7522 txbd
->vlan_tag
= (mss
<< TXD_MSS_SHIFT
) | (vlan
<< TXD_VLAN_TAG_SHIFT
);
7525 static bool tg3_tx_frag_set(struct tg3_napi
*tnapi
, u32
*entry
, u32
*budget
,
7526 dma_addr_t map
, u32 len
, u32 flags
,
7529 struct tg3
*tp
= tnapi
->tp
;
7532 if (tg3_flag(tp
, SHORT_DMA_BUG
) && len
<= 8)
7535 if (tg3_4g_overflow_test(map
, len
))
7538 if (tg3_4g_tso_overflow_test(tp
, map
, len
, mss
))
7541 if (tg3_40bit_overflow_test(tp
, map
, len
))
7544 if (tp
->dma_limit
) {
7545 u32 prvidx
= *entry
;
7546 u32 tmp_flag
= flags
& ~TXD_FLAG_END
;
7547 while (len
> tp
->dma_limit
&& *budget
) {
7548 u32 frag_len
= tp
->dma_limit
;
7549 len
-= tp
->dma_limit
;
7551 /* Avoid the 8byte DMA problem */
7553 len
+= tp
->dma_limit
/ 2;
7554 frag_len
= tp
->dma_limit
/ 2;
7557 tnapi
->tx_buffers
[*entry
].fragmented
= true;
7559 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
7560 frag_len
, tmp_flag
, mss
, vlan
);
7563 *entry
= NEXT_TX(*entry
);
7570 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
7571 len
, flags
, mss
, vlan
);
7573 *entry
= NEXT_TX(*entry
);
7576 tnapi
->tx_buffers
[prvidx
].fragmented
= false;
7580 tg3_tx_set_bd(&tnapi
->tx_ring
[*entry
], map
,
7581 len
, flags
, mss
, vlan
);
7582 *entry
= NEXT_TX(*entry
);
7588 static void tg3_tx_skb_unmap(struct tg3_napi
*tnapi
, u32 entry
, int last
)
7591 struct sk_buff
*skb
;
7592 struct tg3_tx_ring_info
*txb
= &tnapi
->tx_buffers
[entry
];
7597 pci_unmap_single(tnapi
->tp
->pdev
,
7598 dma_unmap_addr(txb
, mapping
),
7602 while (txb
->fragmented
) {
7603 txb
->fragmented
= false;
7604 entry
= NEXT_TX(entry
);
7605 txb
= &tnapi
->tx_buffers
[entry
];
7608 for (i
= 0; i
<= last
; i
++) {
7609 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
7611 entry
= NEXT_TX(entry
);
7612 txb
= &tnapi
->tx_buffers
[entry
];
7614 pci_unmap_page(tnapi
->tp
->pdev
,
7615 dma_unmap_addr(txb
, mapping
),
7616 skb_frag_size(frag
), PCI_DMA_TODEVICE
);
7618 while (txb
->fragmented
) {
7619 txb
->fragmented
= false;
7620 entry
= NEXT_TX(entry
);
7621 txb
= &tnapi
->tx_buffers
[entry
];
7626 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7627 static int tigon3_dma_hwbug_workaround(struct tg3_napi
*tnapi
,
7628 struct sk_buff
**pskb
,
7629 u32
*entry
, u32
*budget
,
7630 u32 base_flags
, u32 mss
, u32 vlan
)
7632 struct tg3
*tp
= tnapi
->tp
;
7633 struct sk_buff
*new_skb
, *skb
= *pskb
;
7634 dma_addr_t new_addr
= 0;
7637 if (tg3_asic_rev(tp
) != ASIC_REV_5701
)
7638 new_skb
= skb_copy(skb
, GFP_ATOMIC
);
7640 int more_headroom
= 4 - ((unsigned long)skb
->data
& 3);
7642 new_skb
= skb_copy_expand(skb
,
7643 skb_headroom(skb
) + more_headroom
,
7644 skb_tailroom(skb
), GFP_ATOMIC
);
7650 /* New SKB is guaranteed to be linear. */
7651 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
7653 /* Make sure the mapping succeeded */
7654 if (pci_dma_mapping_error(tp
->pdev
, new_addr
)) {
7655 dev_kfree_skb(new_skb
);
7658 u32 save_entry
= *entry
;
7660 base_flags
|= TXD_FLAG_END
;
7662 tnapi
->tx_buffers
[*entry
].skb
= new_skb
;
7663 dma_unmap_addr_set(&tnapi
->tx_buffers
[*entry
],
7666 if (tg3_tx_frag_set(tnapi
, entry
, budget
, new_addr
,
7667 new_skb
->len
, base_flags
,
7669 tg3_tx_skb_unmap(tnapi
, save_entry
, -1);
7670 dev_kfree_skb(new_skb
);
7681 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*, struct net_device
*);
7683 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7684 * TSO header is greater than 80 bytes.
7686 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
7688 struct sk_buff
*segs
, *nskb
;
7689 u32 frag_cnt_est
= skb_shinfo(skb
)->gso_segs
* 3;
7691 /* Estimate the number of fragments in the worst case */
7692 if (unlikely(tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)) {
7693 netif_stop_queue(tp
->dev
);
7695 /* netif_tx_stop_queue() must be done before checking
7696 * checking tx index in tg3_tx_avail() below, because in
7697 * tg3_tx(), we update tx index before checking for
7698 * netif_tx_queue_stopped().
7701 if (tg3_tx_avail(&tp
->napi
[0]) <= frag_cnt_est
)
7702 return NETDEV_TX_BUSY
;
7704 netif_wake_queue(tp
->dev
);
7707 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
7709 goto tg3_tso_bug_end
;
7715 tg3_start_xmit(nskb
, tp
->dev
);
7721 return NETDEV_TX_OK
;
7724 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7725 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7727 static netdev_tx_t
tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
7729 struct tg3
*tp
= netdev_priv(dev
);
7730 u32 len
, entry
, base_flags
, mss
, vlan
= 0;
7732 int i
= -1, would_hit_hwbug
;
7734 struct tg3_napi
*tnapi
;
7735 struct netdev_queue
*txq
;
7738 txq
= netdev_get_tx_queue(dev
, skb_get_queue_mapping(skb
));
7739 tnapi
= &tp
->napi
[skb_get_queue_mapping(skb
)];
7740 if (tg3_flag(tp
, ENABLE_TSS
))
7743 budget
= tg3_tx_avail(tnapi
);
7745 /* We are running in BH disabled context with netif_tx_lock
7746 * and TX reclaim runs via tp->napi.poll inside of a software
7747 * interrupt. Furthermore, IRQ processing runs lockless so we have
7748 * no IRQ context deadlocks to worry about either. Rejoice!
7750 if (unlikely(budget
<= (skb_shinfo(skb
)->nr_frags
+ 1))) {
7751 if (!netif_tx_queue_stopped(txq
)) {
7752 netif_tx_stop_queue(txq
);
7754 /* This is a hard error, log it. */
7756 "BUG! Tx Ring full when queue awake!\n");
7758 return NETDEV_TX_BUSY
;
7761 entry
= tnapi
->tx_prod
;
7763 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
7764 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
7766 mss
= skb_shinfo(skb
)->gso_size
;
7769 u32 tcp_opt_len
, hdr_len
;
7771 if (skb_header_cloned(skb
) &&
7772 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
))
7776 tcp_opt_len
= tcp_optlen(skb
);
7778 hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
) - ETH_HLEN
;
7780 if (!skb_is_gso_v6(skb
)) {
7782 iph
->tot_len
= htons(mss
+ hdr_len
);
7785 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
7786 tg3_flag(tp
, TSO_BUG
))
7787 return tg3_tso_bug(tp
, skb
);
7789 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
7790 TXD_FLAG_CPU_POST_DMA
);
7792 if (tg3_flag(tp
, HW_TSO_1
) ||
7793 tg3_flag(tp
, HW_TSO_2
) ||
7794 tg3_flag(tp
, HW_TSO_3
)) {
7795 tcp_hdr(skb
)->check
= 0;
7796 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
7798 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
7803 if (tg3_flag(tp
, HW_TSO_3
)) {
7804 mss
|= (hdr_len
& 0xc) << 12;
7806 base_flags
|= 0x00000010;
7807 base_flags
|= (hdr_len
& 0x3e0) << 5;
7808 } else if (tg3_flag(tp
, HW_TSO_2
))
7809 mss
|= hdr_len
<< 9;
7810 else if (tg3_flag(tp
, HW_TSO_1
) ||
7811 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
7812 if (tcp_opt_len
|| iph
->ihl
> 5) {
7815 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7816 mss
|= (tsflags
<< 11);
7819 if (tcp_opt_len
|| iph
->ihl
> 5) {
7822 tsflags
= (iph
->ihl
- 5) + (tcp_opt_len
>> 2);
7823 base_flags
|= tsflags
<< 12;
7828 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
7829 !mss
&& skb
->len
> VLAN_ETH_FRAME_LEN
)
7830 base_flags
|= TXD_FLAG_JMB_PKT
;
7832 if (vlan_tx_tag_present(skb
)) {
7833 base_flags
|= TXD_FLAG_VLAN
;
7834 vlan
= vlan_tx_tag_get(skb
);
7837 if ((unlikely(skb_shinfo(skb
)->tx_flags
& SKBTX_HW_TSTAMP
)) &&
7838 tg3_flag(tp
, TX_TSTAMP_EN
)) {
7839 skb_shinfo(skb
)->tx_flags
|= SKBTX_IN_PROGRESS
;
7840 base_flags
|= TXD_FLAG_HWTSTAMP
;
7843 len
= skb_headlen(skb
);
7845 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
7846 if (pci_dma_mapping_error(tp
->pdev
, mapping
))
7850 tnapi
->tx_buffers
[entry
].skb
= skb
;
7851 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
, mapping
);
7853 would_hit_hwbug
= 0;
7855 if (tg3_flag(tp
, 5701_DMA_BUG
))
7856 would_hit_hwbug
= 1;
7858 if (tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
, len
, base_flags
|
7859 ((skb_shinfo(skb
)->nr_frags
== 0) ? TXD_FLAG_END
: 0),
7861 would_hit_hwbug
= 1;
7862 } else if (skb_shinfo(skb
)->nr_frags
> 0) {
7865 if (!tg3_flag(tp
, HW_TSO_1
) &&
7866 !tg3_flag(tp
, HW_TSO_2
) &&
7867 !tg3_flag(tp
, HW_TSO_3
))
7870 /* Now loop through additional data
7871 * fragments, and queue them.
7873 last
= skb_shinfo(skb
)->nr_frags
- 1;
7874 for (i
= 0; i
<= last
; i
++) {
7875 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
7877 len
= skb_frag_size(frag
);
7878 mapping
= skb_frag_dma_map(&tp
->pdev
->dev
, frag
, 0,
7879 len
, DMA_TO_DEVICE
);
7881 tnapi
->tx_buffers
[entry
].skb
= NULL
;
7882 dma_unmap_addr_set(&tnapi
->tx_buffers
[entry
], mapping
,
7884 if (dma_mapping_error(&tp
->pdev
->dev
, mapping
))
7888 tg3_tx_frag_set(tnapi
, &entry
, &budget
, mapping
,
7890 ((i
== last
) ? TXD_FLAG_END
: 0),
7892 would_hit_hwbug
= 1;
7898 if (would_hit_hwbug
) {
7899 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, i
);
7901 /* If the workaround fails due to memory/mapping
7902 * failure, silently drop this packet.
7904 entry
= tnapi
->tx_prod
;
7905 budget
= tg3_tx_avail(tnapi
);
7906 if (tigon3_dma_hwbug_workaround(tnapi
, &skb
, &entry
, &budget
,
7907 base_flags
, mss
, vlan
))
7911 skb_tx_timestamp(skb
);
7912 netdev_tx_sent_queue(txq
, skb
->len
);
7914 /* Sync BD data before updating mailbox */
7917 /* Packets are ready, update Tx producer idx local and on card. */
7918 tw32_tx_mbox(tnapi
->prodmbox
, entry
);
7920 tnapi
->tx_prod
= entry
;
7921 if (unlikely(tg3_tx_avail(tnapi
) <= (MAX_SKB_FRAGS
+ 1))) {
7922 netif_tx_stop_queue(txq
);
7924 /* netif_tx_stop_queue() must be done before checking
7925 * checking tx index in tg3_tx_avail() below, because in
7926 * tg3_tx(), we update tx index before checking for
7927 * netif_tx_queue_stopped().
7930 if (tg3_tx_avail(tnapi
) > TG3_TX_WAKEUP_THRESH(tnapi
))
7931 netif_tx_wake_queue(txq
);
7935 return NETDEV_TX_OK
;
7938 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
, --i
);
7939 tnapi
->tx_buffers
[tnapi
->tx_prod
].skb
= NULL
;
7944 return NETDEV_TX_OK
;
7947 static void tg3_mac_loopback(struct tg3
*tp
, bool enable
)
7950 tp
->mac_mode
&= ~(MAC_MODE_HALF_DUPLEX
|
7951 MAC_MODE_PORT_MODE_MASK
);
7953 tp
->mac_mode
|= MAC_MODE_PORT_INT_LPBACK
;
7955 if (!tg3_flag(tp
, 5705_PLUS
))
7956 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
7958 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
7959 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
7961 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
7963 tp
->mac_mode
&= ~MAC_MODE_PORT_INT_LPBACK
;
7965 if (tg3_flag(tp
, 5705_PLUS
) ||
7966 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) ||
7967 tg3_asic_rev(tp
) == ASIC_REV_5700
)
7968 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
7971 tw32(MAC_MODE
, tp
->mac_mode
);
7975 static int tg3_phy_lpbk_set(struct tg3
*tp
, u32 speed
, bool extlpbk
)
7977 u32 val
, bmcr
, mac_mode
, ptest
= 0;
7979 tg3_phy_toggle_apd(tp
, false);
7980 tg3_phy_toggle_automdix(tp
, false);
7982 if (extlpbk
&& tg3_phy_set_extloopbk(tp
))
7985 bmcr
= BMCR_FULLDPLX
;
7990 bmcr
|= BMCR_SPEED100
;
7994 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) {
7996 bmcr
|= BMCR_SPEED100
;
7999 bmcr
|= BMCR_SPEED1000
;
8004 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
8005 tg3_readphy(tp
, MII_CTRL1000
, &val
);
8006 val
|= CTL1000_AS_MASTER
|
8007 CTL1000_ENABLE_MASTER
;
8008 tg3_writephy(tp
, MII_CTRL1000
, val
);
8010 ptest
= MII_TG3_FET_PTEST_TRIM_SEL
|
8011 MII_TG3_FET_PTEST_TRIM_2
;
8012 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
);
8015 bmcr
|= BMCR_LOOPBACK
;
8017 tg3_writephy(tp
, MII_BMCR
, bmcr
);
8019 /* The write needs to be flushed for the FETs */
8020 if (tp
->phy_flags
& TG3_PHYFLG_IS_FET
)
8021 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
8025 if ((tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
8026 tg3_asic_rev(tp
) == ASIC_REV_5785
) {
8027 tg3_writephy(tp
, MII_TG3_FET_PTEST
, ptest
|
8028 MII_TG3_FET_PTEST_FRC_TX_LINK
|
8029 MII_TG3_FET_PTEST_FRC_TX_LOCK
);
8031 /* The write needs to be flushed for the AC131 */
8032 tg3_readphy(tp
, MII_TG3_FET_PTEST
, &val
);
8035 /* Reset to prevent losing 1st rx packet intermittently */
8036 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
8037 tg3_flag(tp
, 5780_CLASS
)) {
8038 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8040 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8043 mac_mode
= tp
->mac_mode
&
8044 ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
8045 if (speed
== SPEED_1000
)
8046 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
8048 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
8050 if (tg3_asic_rev(tp
) == ASIC_REV_5700
) {
8051 u32 masked_phy_id
= tp
->phy_id
& TG3_PHY_ID_MASK
;
8053 if (masked_phy_id
== TG3_PHY_ID_BCM5401
)
8054 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
8055 else if (masked_phy_id
== TG3_PHY_ID_BCM5411
)
8056 mac_mode
|= MAC_MODE_LINK_POLARITY
;
8058 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
8059 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
8062 tw32(MAC_MODE
, mac_mode
);
8068 static void tg3_set_loopback(struct net_device
*dev
, netdev_features_t features
)
8070 struct tg3
*tp
= netdev_priv(dev
);
8072 if (features
& NETIF_F_LOOPBACK
) {
8073 if (tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
)
8076 spin_lock_bh(&tp
->lock
);
8077 tg3_mac_loopback(tp
, true);
8078 netif_carrier_on(tp
->dev
);
8079 spin_unlock_bh(&tp
->lock
);
8080 netdev_info(dev
, "Internal MAC loopback mode enabled.\n");
8082 if (!(tp
->mac_mode
& MAC_MODE_PORT_INT_LPBACK
))
8085 spin_lock_bh(&tp
->lock
);
8086 tg3_mac_loopback(tp
, false);
8087 /* Force link status check */
8088 tg3_setup_phy(tp
, true);
8089 spin_unlock_bh(&tp
->lock
);
8090 netdev_info(dev
, "Internal MAC loopback mode disabled.\n");
8094 static netdev_features_t
tg3_fix_features(struct net_device
*dev
,
8095 netdev_features_t features
)
8097 struct tg3
*tp
= netdev_priv(dev
);
8099 if (dev
->mtu
> ETH_DATA_LEN
&& tg3_flag(tp
, 5780_CLASS
))
8100 features
&= ~NETIF_F_ALL_TSO
;
8105 static int tg3_set_features(struct net_device
*dev
, netdev_features_t features
)
8107 netdev_features_t changed
= dev
->features
^ features
;
8109 if ((changed
& NETIF_F_LOOPBACK
) && netif_running(dev
))
8110 tg3_set_loopback(dev
, features
);
8115 static void tg3_rx_prodring_free(struct tg3
*tp
,
8116 struct tg3_rx_prodring_set
*tpr
)
8120 if (tpr
!= &tp
->napi
[0].prodring
) {
8121 for (i
= tpr
->rx_std_cons_idx
; i
!= tpr
->rx_std_prod_idx
;
8122 i
= (i
+ 1) & tp
->rx_std_ring_mask
)
8123 tg3_rx_data_free(tp
, &tpr
->rx_std_buffers
[i
],
8126 if (tg3_flag(tp
, JUMBO_CAPABLE
)) {
8127 for (i
= tpr
->rx_jmb_cons_idx
;
8128 i
!= tpr
->rx_jmb_prod_idx
;
8129 i
= (i
+ 1) & tp
->rx_jmb_ring_mask
) {
8130 tg3_rx_data_free(tp
, &tpr
->rx_jmb_buffers
[i
],
8138 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++)
8139 tg3_rx_data_free(tp
, &tpr
->rx_std_buffers
[i
],
8142 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
8143 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++)
8144 tg3_rx_data_free(tp
, &tpr
->rx_jmb_buffers
[i
],
8149 /* Initialize rx rings for packet processing.
8151 * The chip has been shut down and the driver detached from
8152 * the networking, so no interrupts or new tx packets will
8153 * end up in the driver. tp->{tx,}lock are held and thus
8156 static int tg3_rx_prodring_alloc(struct tg3
*tp
,
8157 struct tg3_rx_prodring_set
*tpr
)
8159 u32 i
, rx_pkt_dma_sz
;
8161 tpr
->rx_std_cons_idx
= 0;
8162 tpr
->rx_std_prod_idx
= 0;
8163 tpr
->rx_jmb_cons_idx
= 0;
8164 tpr
->rx_jmb_prod_idx
= 0;
8166 if (tpr
!= &tp
->napi
[0].prodring
) {
8167 memset(&tpr
->rx_std_buffers
[0], 0,
8168 TG3_RX_STD_BUFF_RING_SIZE(tp
));
8169 if (tpr
->rx_jmb_buffers
)
8170 memset(&tpr
->rx_jmb_buffers
[0], 0,
8171 TG3_RX_JMB_BUFF_RING_SIZE(tp
));
8175 /* Zero out all descriptors. */
8176 memset(tpr
->rx_std
, 0, TG3_RX_STD_RING_BYTES(tp
));
8178 rx_pkt_dma_sz
= TG3_RX_STD_DMA_SZ
;
8179 if (tg3_flag(tp
, 5780_CLASS
) &&
8180 tp
->dev
->mtu
> ETH_DATA_LEN
)
8181 rx_pkt_dma_sz
= TG3_RX_JMB_DMA_SZ
;
8182 tp
->rx_pkt_map_sz
= TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz
);
8184 /* Initialize invariants of the rings, we only set this
8185 * stuff once. This works because the card does not
8186 * write into the rx buffer posting rings.
8188 for (i
= 0; i
<= tp
->rx_std_ring_mask
; i
++) {
8189 struct tg3_rx_buffer_desc
*rxd
;
8191 rxd
= &tpr
->rx_std
[i
];
8192 rxd
->idx_len
= rx_pkt_dma_sz
<< RXD_LEN_SHIFT
;
8193 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
8194 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
8195 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
8198 /* Now allocate fresh SKBs for each rx ring. */
8199 for (i
= 0; i
< tp
->rx_pending
; i
++) {
8200 unsigned int frag_size
;
8202 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_STD
, i
,
8204 netdev_warn(tp
->dev
,
8205 "Using a smaller RX standard ring. Only "
8206 "%d out of %d buffers were allocated "
8207 "successfully\n", i
, tp
->rx_pending
);
8215 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
8218 memset(tpr
->rx_jmb
, 0, TG3_RX_JMB_RING_BYTES(tp
));
8220 if (!tg3_flag(tp
, JUMBO_RING_ENABLE
))
8223 for (i
= 0; i
<= tp
->rx_jmb_ring_mask
; i
++) {
8224 struct tg3_rx_buffer_desc
*rxd
;
8226 rxd
= &tpr
->rx_jmb
[i
].std
;
8227 rxd
->idx_len
= TG3_RX_JMB_DMA_SZ
<< RXD_LEN_SHIFT
;
8228 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
8230 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
8231 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
8234 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
8235 unsigned int frag_size
;
8237 if (tg3_alloc_rx_data(tp
, tpr
, RXD_OPAQUE_RING_JUMBO
, i
,
8239 netdev_warn(tp
->dev
,
8240 "Using a smaller RX jumbo ring. Only %d "
8241 "out of %d buffers were allocated "
8242 "successfully\n", i
, tp
->rx_jumbo_pending
);
8245 tp
->rx_jumbo_pending
= i
;
8254 tg3_rx_prodring_free(tp
, tpr
);
8258 static void tg3_rx_prodring_fini(struct tg3
*tp
,
8259 struct tg3_rx_prodring_set
*tpr
)
8261 kfree(tpr
->rx_std_buffers
);
8262 tpr
->rx_std_buffers
= NULL
;
8263 kfree(tpr
->rx_jmb_buffers
);
8264 tpr
->rx_jmb_buffers
= NULL
;
8266 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_STD_RING_BYTES(tp
),
8267 tpr
->rx_std
, tpr
->rx_std_mapping
);
8271 dma_free_coherent(&tp
->pdev
->dev
, TG3_RX_JMB_RING_BYTES(tp
),
8272 tpr
->rx_jmb
, tpr
->rx_jmb_mapping
);
8277 static int tg3_rx_prodring_init(struct tg3
*tp
,
8278 struct tg3_rx_prodring_set
*tpr
)
8280 tpr
->rx_std_buffers
= kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp
),
8282 if (!tpr
->rx_std_buffers
)
8285 tpr
->rx_std
= dma_alloc_coherent(&tp
->pdev
->dev
,
8286 TG3_RX_STD_RING_BYTES(tp
),
8287 &tpr
->rx_std_mapping
,
8292 if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
)) {
8293 tpr
->rx_jmb_buffers
= kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp
),
8295 if (!tpr
->rx_jmb_buffers
)
8298 tpr
->rx_jmb
= dma_alloc_coherent(&tp
->pdev
->dev
,
8299 TG3_RX_JMB_RING_BYTES(tp
),
8300 &tpr
->rx_jmb_mapping
,
8309 tg3_rx_prodring_fini(tp
, tpr
);
8313 /* Free up pending packets in all rx/tx rings.
8315 * The chip has been shut down and the driver detached from
8316 * the networking, so no interrupts or new tx packets will
8317 * end up in the driver. tp->{tx,}lock is not held and we are not
8318 * in an interrupt context and thus may sleep.
8320 static void tg3_free_rings(struct tg3
*tp
)
8324 for (j
= 0; j
< tp
->irq_cnt
; j
++) {
8325 struct tg3_napi
*tnapi
= &tp
->napi
[j
];
8327 tg3_rx_prodring_free(tp
, &tnapi
->prodring
);
8329 if (!tnapi
->tx_buffers
)
8332 for (i
= 0; i
< TG3_TX_RING_SIZE
; i
++) {
8333 struct sk_buff
*skb
= tnapi
->tx_buffers
[i
].skb
;
8338 tg3_tx_skb_unmap(tnapi
, i
,
8339 skb_shinfo(skb
)->nr_frags
- 1);
8341 dev_kfree_skb_any(skb
);
8343 netdev_tx_reset_queue(netdev_get_tx_queue(tp
->dev
, j
));
8347 /* Initialize tx/rx rings for packet processing.
8349 * The chip has been shut down and the driver detached from
8350 * the networking, so no interrupts or new tx packets will
8351 * end up in the driver. tp->{tx,}lock are held and thus
8354 static int tg3_init_rings(struct tg3
*tp
)
8358 /* Free up all the SKBs. */
8361 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8362 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8364 tnapi
->last_tag
= 0;
8365 tnapi
->last_irq_tag
= 0;
8366 tnapi
->hw_status
->status
= 0;
8367 tnapi
->hw_status
->status_tag
= 0;
8368 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8373 memset(tnapi
->tx_ring
, 0, TG3_TX_RING_BYTES
);
8375 tnapi
->rx_rcb_ptr
= 0;
8377 memset(tnapi
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
8379 if (tg3_rx_prodring_alloc(tp
, &tnapi
->prodring
)) {
8388 static void tg3_mem_tx_release(struct tg3
*tp
)
8392 for (i
= 0; i
< tp
->irq_max
; i
++) {
8393 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8395 if (tnapi
->tx_ring
) {
8396 dma_free_coherent(&tp
->pdev
->dev
, TG3_TX_RING_BYTES
,
8397 tnapi
->tx_ring
, tnapi
->tx_desc_mapping
);
8398 tnapi
->tx_ring
= NULL
;
8401 kfree(tnapi
->tx_buffers
);
8402 tnapi
->tx_buffers
= NULL
;
8406 static int tg3_mem_tx_acquire(struct tg3
*tp
)
8409 struct tg3_napi
*tnapi
= &tp
->napi
[0];
8411 /* If multivector TSS is enabled, vector 0 does not handle
8412 * tx interrupts. Don't allocate any resources for it.
8414 if (tg3_flag(tp
, ENABLE_TSS
))
8417 for (i
= 0; i
< tp
->txq_cnt
; i
++, tnapi
++) {
8418 tnapi
->tx_buffers
= kzalloc(sizeof(struct tg3_tx_ring_info
) *
8419 TG3_TX_RING_SIZE
, GFP_KERNEL
);
8420 if (!tnapi
->tx_buffers
)
8423 tnapi
->tx_ring
= dma_alloc_coherent(&tp
->pdev
->dev
,
8425 &tnapi
->tx_desc_mapping
,
8427 if (!tnapi
->tx_ring
)
8434 tg3_mem_tx_release(tp
);
8438 static void tg3_mem_rx_release(struct tg3
*tp
)
8442 for (i
= 0; i
< tp
->irq_max
; i
++) {
8443 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8445 tg3_rx_prodring_fini(tp
, &tnapi
->prodring
);
8450 dma_free_coherent(&tp
->pdev
->dev
,
8451 TG3_RX_RCB_RING_BYTES(tp
),
8453 tnapi
->rx_rcb_mapping
);
8454 tnapi
->rx_rcb
= NULL
;
8458 static int tg3_mem_rx_acquire(struct tg3
*tp
)
8460 unsigned int i
, limit
;
8462 limit
= tp
->rxq_cnt
;
8464 /* If RSS is enabled, we need a (dummy) producer ring
8465 * set on vector zero. This is the true hw prodring.
8467 if (tg3_flag(tp
, ENABLE_RSS
))
8470 for (i
= 0; i
< limit
; i
++) {
8471 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8473 if (tg3_rx_prodring_init(tp
, &tnapi
->prodring
))
8476 /* If multivector RSS is enabled, vector 0
8477 * does not handle rx or tx interrupts.
8478 * Don't allocate any resources for it.
8480 if (!i
&& tg3_flag(tp
, ENABLE_RSS
))
8483 tnapi
->rx_rcb
= dma_alloc_coherent(&tp
->pdev
->dev
,
8484 TG3_RX_RCB_RING_BYTES(tp
),
8485 &tnapi
->rx_rcb_mapping
,
8486 GFP_KERNEL
| __GFP_ZERO
);
8494 tg3_mem_rx_release(tp
);
8499 * Must not be invoked with interrupt sources disabled and
8500 * the hardware shutdown down.
8502 static void tg3_free_consistent(struct tg3
*tp
)
8506 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8507 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8509 if (tnapi
->hw_status
) {
8510 dma_free_coherent(&tp
->pdev
->dev
, TG3_HW_STATUS_SIZE
,
8512 tnapi
->status_mapping
);
8513 tnapi
->hw_status
= NULL
;
8517 tg3_mem_rx_release(tp
);
8518 tg3_mem_tx_release(tp
);
8521 dma_free_coherent(&tp
->pdev
->dev
, sizeof(struct tg3_hw_stats
),
8522 tp
->hw_stats
, tp
->stats_mapping
);
8523 tp
->hw_stats
= NULL
;
8528 * Must not be invoked with interrupt sources disabled and
8529 * the hardware shutdown down. Can sleep.
8531 static int tg3_alloc_consistent(struct tg3
*tp
)
8535 tp
->hw_stats
= dma_alloc_coherent(&tp
->pdev
->dev
,
8536 sizeof(struct tg3_hw_stats
),
8538 GFP_KERNEL
| __GFP_ZERO
);
8542 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8543 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8544 struct tg3_hw_status
*sblk
;
8546 tnapi
->hw_status
= dma_alloc_coherent(&tp
->pdev
->dev
,
8548 &tnapi
->status_mapping
,
8549 GFP_KERNEL
| __GFP_ZERO
);
8550 if (!tnapi
->hw_status
)
8553 sblk
= tnapi
->hw_status
;
8555 if (tg3_flag(tp
, ENABLE_RSS
)) {
8556 u16
*prodptr
= NULL
;
8559 * When RSS is enabled, the status block format changes
8560 * slightly. The "rx_jumbo_consumer", "reserved",
8561 * and "rx_mini_consumer" members get mapped to the
8562 * other three rx return ring producer indexes.
8566 prodptr
= &sblk
->idx
[0].rx_producer
;
8569 prodptr
= &sblk
->rx_jumbo_consumer
;
8572 prodptr
= &sblk
->reserved
;
8575 prodptr
= &sblk
->rx_mini_consumer
;
8578 tnapi
->rx_rcb_prod_idx
= prodptr
;
8580 tnapi
->rx_rcb_prod_idx
= &sblk
->idx
[0].rx_producer
;
8584 if (tg3_mem_tx_acquire(tp
) || tg3_mem_rx_acquire(tp
))
8590 tg3_free_consistent(tp
);
8594 #define MAX_WAIT_CNT 1000
8596 /* To stop a block, clear the enable bit and poll till it
8597 * clears. tp->lock is held.
8599 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, bool silent
)
8604 if (tg3_flag(tp
, 5705_PLUS
)) {
8611 /* We can't enable/disable these bits of the
8612 * 5705/5750, just say success.
8625 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8626 if (pci_channel_offline(tp
->pdev
)) {
8627 dev_err(&tp
->pdev
->dev
,
8628 "tg3_stop_block device offline, "
8629 "ofs=%lx enable_bit=%x\n",
8636 if ((val
& enable_bit
) == 0)
8640 if (i
== MAX_WAIT_CNT
&& !silent
) {
8641 dev_err(&tp
->pdev
->dev
,
8642 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8650 /* tp->lock is held. */
8651 static int tg3_abort_hw(struct tg3
*tp
, bool silent
)
8655 tg3_disable_ints(tp
);
8657 if (pci_channel_offline(tp
->pdev
)) {
8658 tp
->rx_mode
&= ~(RX_MODE_ENABLE
| TX_MODE_ENABLE
);
8659 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
8664 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
8665 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8668 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
8669 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
8670 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
8671 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
8672 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
8673 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
8675 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
8676 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
8677 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
8678 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
8679 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
8680 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
8681 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
8683 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
8684 tw32_f(MAC_MODE
, tp
->mac_mode
);
8687 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
8688 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
8690 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
8692 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
8695 if (i
>= MAX_WAIT_CNT
) {
8696 dev_err(&tp
->pdev
->dev
,
8697 "%s timed out, TX_MODE_ENABLE will not clear "
8698 "MAC_TX_MODE=%08x\n", __func__
, tr32(MAC_TX_MODE
));
8702 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
8703 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
8704 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
8706 tw32(FTQ_RESET
, 0xffffffff);
8707 tw32(FTQ_RESET
, 0x00000000);
8709 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
8710 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
8713 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8714 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8715 if (tnapi
->hw_status
)
8716 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
8722 /* Save PCI command register before chip reset */
8723 static void tg3_save_pci_state(struct tg3
*tp
)
8725 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &tp
->pci_cmd
);
8728 /* Restore PCI state after chip reset */
8729 static void tg3_restore_pci_state(struct tg3
*tp
)
8733 /* Re-enable indirect register accesses. */
8734 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
8735 tp
->misc_host_ctrl
);
8737 /* Set MAX PCI retry to zero. */
8738 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
8739 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
&&
8740 tg3_flag(tp
, PCIX_MODE
))
8741 val
|= PCISTATE_RETRY_SAME_DMA
;
8742 /* Allow reads and writes to the APE register and memory space. */
8743 if (tg3_flag(tp
, ENABLE_APE
))
8744 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
8745 PCISTATE_ALLOW_APE_SHMEM_WR
|
8746 PCISTATE_ALLOW_APE_PSPACE_WR
;
8747 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
8749 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, tp
->pci_cmd
);
8751 if (!tg3_flag(tp
, PCI_EXPRESS
)) {
8752 pci_write_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
8753 tp
->pci_cacheline_sz
);
8754 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
8758 /* Make sure PCI-X relaxed ordering bit is clear. */
8759 if (tg3_flag(tp
, PCIX_MODE
)) {
8762 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8764 pcix_cmd
&= ~PCI_X_CMD_ERO
;
8765 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
8769 if (tg3_flag(tp
, 5780_CLASS
)) {
8771 /* Chip reset on 5780 will reset MSI enable bit,
8772 * so need to restore it.
8774 if (tg3_flag(tp
, USING_MSI
)) {
8777 pci_read_config_word(tp
->pdev
,
8778 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8780 pci_write_config_word(tp
->pdev
,
8781 tp
->msi_cap
+ PCI_MSI_FLAGS
,
8782 ctrl
| PCI_MSI_FLAGS_ENABLE
);
8783 val
= tr32(MSGINT_MODE
);
8784 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
8789 /* tp->lock is held. */
8790 static int tg3_chip_reset(struct tg3
*tp
)
8793 void (*write_op
)(struct tg3
*, u32
, u32
);
8798 tg3_ape_lock(tp
, TG3_APE_LOCK_GRC
);
8800 /* No matching tg3_nvram_unlock() after this because
8801 * chip reset below will undo the nvram lock.
8803 tp
->nvram_lock_cnt
= 0;
8805 /* GRC_MISC_CFG core clock reset will clear the memory
8806 * enable bit in PCI register 4 and the MSI enable bit
8807 * on some chips, so we save relevant registers here.
8809 tg3_save_pci_state(tp
);
8811 if (tg3_asic_rev(tp
) == ASIC_REV_5752
||
8812 tg3_flag(tp
, 5755_PLUS
))
8813 tw32(GRC_FASTBOOT_PC
, 0);
8816 * We must avoid the readl() that normally takes place.
8817 * It locks machines, causes machine checks, and other
8818 * fun things. So, temporarily disable the 5701
8819 * hardware workaround, while we do the reset.
8821 write_op
= tp
->write32
;
8822 if (write_op
== tg3_write_flush_reg32
)
8823 tp
->write32
= tg3_write32
;
8825 /* Prevent the irq handler from reading or writing PCI registers
8826 * during chip reset when the memory enable bit in the PCI command
8827 * register may be cleared. The chip does not generate interrupt
8828 * at this time, but the irq handler may still be called due to irq
8829 * sharing or irqpoll.
8831 tg3_flag_set(tp
, CHIP_RESETTING
);
8832 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
8833 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
8834 if (tnapi
->hw_status
) {
8835 tnapi
->hw_status
->status
= 0;
8836 tnapi
->hw_status
->status_tag
= 0;
8838 tnapi
->last_tag
= 0;
8839 tnapi
->last_irq_tag
= 0;
8843 for (i
= 0; i
< tp
->irq_cnt
; i
++)
8844 synchronize_irq(tp
->napi
[i
].irq_vec
);
8846 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
8847 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
8848 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
8852 val
= GRC_MISC_CFG_CORECLK_RESET
;
8854 if (tg3_flag(tp
, PCI_EXPRESS
)) {
8855 /* Force PCIe 1.0a mode */
8856 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
8857 !tg3_flag(tp
, 57765_PLUS
) &&
8858 tr32(TG3_PCIE_PHY_TSTCTL
) ==
8859 (TG3_PCIE_PHY_TSTCTL_PCIE10
| TG3_PCIE_PHY_TSTCTL_PSCRAM
))
8860 tw32(TG3_PCIE_PHY_TSTCTL
, TG3_PCIE_PHY_TSTCTL_PSCRAM
);
8862 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
) {
8863 tw32(GRC_MISC_CFG
, (1 << 29));
8868 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
8869 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
8870 tw32(GRC_VCPU_EXT_CTRL
,
8871 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
8874 /* Manage gphy power for all CPMU absent PCIe devices. */
8875 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, CPMU_PRESENT
))
8876 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
8878 tw32(GRC_MISC_CFG
, val
);
8880 /* restore 5701 hardware bug workaround write method */
8881 tp
->write32
= write_op
;
8883 /* Unfortunately, we have to delay before the PCI read back.
8884 * Some 575X chips even will not respond to a PCI cfg access
8885 * when the reset command is given to the chip.
8887 * How do these hardware designers expect things to work
8888 * properly if the PCI write is posted for a long period
8889 * of time? It is always necessary to have some method by
8890 * which a register read back can occur to push the write
8891 * out which does the reset.
8893 * For most tg3 variants the trick below was working.
8898 /* Flush PCI posted writes. The normal MMIO registers
8899 * are inaccessible at this time so this is the only
8900 * way to make this reliably (actually, this is no longer
8901 * the case, see above). I tried to use indirect
8902 * register read/write but this upset some 5701 variants.
8904 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
8908 if (tg3_flag(tp
, PCI_EXPRESS
) && pci_is_pcie(tp
->pdev
)) {
8911 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
) {
8915 /* Wait for link training to complete. */
8916 for (j
= 0; j
< 5000; j
++)
8919 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
8920 pci_write_config_dword(tp
->pdev
, 0xc4,
8921 cfg_val
| (1 << 15));
8924 /* Clear the "no snoop" and "relaxed ordering" bits. */
8925 val16
= PCI_EXP_DEVCTL_RELAX_EN
| PCI_EXP_DEVCTL_NOSNOOP_EN
;
8927 * Older PCIe devices only support the 128 byte
8928 * MPS setting. Enforce the restriction.
8930 if (!tg3_flag(tp
, CPMU_PRESENT
))
8931 val16
|= PCI_EXP_DEVCTL_PAYLOAD
;
8932 pcie_capability_clear_word(tp
->pdev
, PCI_EXP_DEVCTL
, val16
);
8934 /* Clear error status */
8935 pcie_capability_write_word(tp
->pdev
, PCI_EXP_DEVSTA
,
8936 PCI_EXP_DEVSTA_CED
|
8937 PCI_EXP_DEVSTA_NFED
|
8938 PCI_EXP_DEVSTA_FED
|
8939 PCI_EXP_DEVSTA_URD
);
8942 tg3_restore_pci_state(tp
);
8944 tg3_flag_clear(tp
, CHIP_RESETTING
);
8945 tg3_flag_clear(tp
, ERROR_PROCESSED
);
8948 if (tg3_flag(tp
, 5780_CLASS
))
8949 val
= tr32(MEMARB_MODE
);
8950 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
8952 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A3
) {
8954 tw32(0x5000, 0x400);
8957 if (tg3_flag(tp
, IS_SSB_CORE
)) {
8959 * BCM4785: In order to avoid repercussions from using
8960 * potentially defective internal ROM, stop the Rx RISC CPU,
8961 * which is not required.
8964 tg3_halt_cpu(tp
, RX_CPU_BASE
);
8967 err
= tg3_poll_fw(tp
);
8971 tw32(GRC_MODE
, tp
->grc_mode
);
8973 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
) {
8976 tw32(0xc4, val
| (1 << 15));
8979 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
8980 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
8981 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
8982 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A0
)
8983 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
8984 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
8987 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
8988 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
8990 } else if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
8991 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
8996 tw32_f(MAC_MODE
, val
);
8999 tg3_ape_unlock(tp
, TG3_APE_LOCK_GRC
);
9003 if (tg3_flag(tp
, PCI_EXPRESS
) &&
9004 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
9005 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
9006 !tg3_flag(tp
, 57765_PLUS
)) {
9009 tw32(0x7c00, val
| (1 << 25));
9012 if (tg3_asic_rev(tp
) == ASIC_REV_5720
) {
9013 val
= tr32(TG3_CPMU_CLCK_ORIDE
);
9014 tw32(TG3_CPMU_CLCK_ORIDE
, val
& ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN
);
9017 /* Reprobe ASF enable state. */
9018 tg3_flag_clear(tp
, ENABLE_ASF
);
9019 tp
->phy_flags
&= ~(TG3_PHYFLG_1G_ON_VAUX_OK
|
9020 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
);
9022 tg3_flag_clear(tp
, ASF_NEW_HANDSHAKE
);
9023 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9024 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9027 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9028 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9029 tg3_flag_set(tp
, ENABLE_ASF
);
9030 tp
->last_event_jiffies
= jiffies
;
9031 if (tg3_flag(tp
, 5750_PLUS
))
9032 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
9034 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &nic_cfg
);
9035 if (nic_cfg
& NIC_SRAM_1G_ON_VAUX_OK
)
9036 tp
->phy_flags
|= TG3_PHYFLG_1G_ON_VAUX_OK
;
9037 if (nic_cfg
& NIC_SRAM_LNK_FLAP_AVOID
)
9038 tp
->phy_flags
|= TG3_PHYFLG_KEEP_LINK_ON_PWRDN
;
9045 static void tg3_get_nstats(struct tg3
*, struct rtnl_link_stats64
*);
9046 static void tg3_get_estats(struct tg3
*, struct tg3_ethtool_stats
*);
9048 /* tp->lock is held. */
9049 static int tg3_halt(struct tg3
*tp
, int kind
, bool silent
)
9055 tg3_write_sig_pre_reset(tp
, kind
);
9057 tg3_abort_hw(tp
, silent
);
9058 err
= tg3_chip_reset(tp
);
9060 __tg3_set_mac_addr(tp
, false);
9062 tg3_write_sig_legacy(tp
, kind
);
9063 tg3_write_sig_post_reset(tp
, kind
);
9066 /* Save the stats across chip resets... */
9067 tg3_get_nstats(tp
, &tp
->net_stats_prev
);
9068 tg3_get_estats(tp
, &tp
->estats_prev
);
9070 /* And make sure the next sample is new data */
9071 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
9080 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
9082 struct tg3
*tp
= netdev_priv(dev
);
9083 struct sockaddr
*addr
= p
;
9085 bool skip_mac_1
= false;
9087 if (!is_valid_ether_addr(addr
->sa_data
))
9088 return -EADDRNOTAVAIL
;
9090 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
9092 if (!netif_running(dev
))
9095 if (tg3_flag(tp
, ENABLE_ASF
)) {
9096 u32 addr0_high
, addr0_low
, addr1_high
, addr1_low
;
9098 addr0_high
= tr32(MAC_ADDR_0_HIGH
);
9099 addr0_low
= tr32(MAC_ADDR_0_LOW
);
9100 addr1_high
= tr32(MAC_ADDR_1_HIGH
);
9101 addr1_low
= tr32(MAC_ADDR_1_LOW
);
9103 /* Skip MAC addr 1 if ASF is using it. */
9104 if ((addr0_high
!= addr1_high
|| addr0_low
!= addr1_low
) &&
9105 !(addr1_high
== 0 && addr1_low
== 0))
9108 spin_lock_bh(&tp
->lock
);
9109 __tg3_set_mac_addr(tp
, skip_mac_1
);
9110 spin_unlock_bh(&tp
->lock
);
9115 /* tp->lock is held. */
9116 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
9117 dma_addr_t mapping
, u32 maxlen_flags
,
9121 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
9122 ((u64
) mapping
>> 32));
9124 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
9125 ((u64
) mapping
& 0xffffffff));
9127 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
9130 if (!tg3_flag(tp
, 5705_PLUS
))
9132 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
9137 static void tg3_coal_tx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
9141 if (!tg3_flag(tp
, ENABLE_TSS
)) {
9142 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
9143 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
9144 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
9146 tw32(HOSTCC_TXCOL_TICKS
, 0);
9147 tw32(HOSTCC_TXMAX_FRAMES
, 0);
9148 tw32(HOSTCC_TXCOAL_MAXF_INT
, 0);
9150 for (; i
< tp
->txq_cnt
; i
++) {
9153 reg
= HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18;
9154 tw32(reg
, ec
->tx_coalesce_usecs
);
9155 reg
= HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18;
9156 tw32(reg
, ec
->tx_max_coalesced_frames
);
9157 reg
= HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
9158 tw32(reg
, ec
->tx_max_coalesced_frames_irq
);
9162 for (; i
< tp
->irq_max
- 1; i
++) {
9163 tw32(HOSTCC_TXCOL_TICKS_VEC1
+ i
* 0x18, 0);
9164 tw32(HOSTCC_TXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
9165 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
9169 static void tg3_coal_rx_init(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
9172 u32 limit
= tp
->rxq_cnt
;
9174 if (!tg3_flag(tp
, ENABLE_RSS
)) {
9175 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
9176 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
9177 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
9180 tw32(HOSTCC_RXCOL_TICKS
, 0);
9181 tw32(HOSTCC_RXMAX_FRAMES
, 0);
9182 tw32(HOSTCC_RXCOAL_MAXF_INT
, 0);
9185 for (; i
< limit
; i
++) {
9188 reg
= HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18;
9189 tw32(reg
, ec
->rx_coalesce_usecs
);
9190 reg
= HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18;
9191 tw32(reg
, ec
->rx_max_coalesced_frames
);
9192 reg
= HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18;
9193 tw32(reg
, ec
->rx_max_coalesced_frames_irq
);
9196 for (; i
< tp
->irq_max
- 1; i
++) {
9197 tw32(HOSTCC_RXCOL_TICKS_VEC1
+ i
* 0x18, 0);
9198 tw32(HOSTCC_RXMAX_FRAMES_VEC1
+ i
* 0x18, 0);
9199 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1
+ i
* 0x18, 0);
9203 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
9205 tg3_coal_tx_init(tp
, ec
);
9206 tg3_coal_rx_init(tp
, ec
);
9208 if (!tg3_flag(tp
, 5705_PLUS
)) {
9209 u32 val
= ec
->stats_block_coalesce_usecs
;
9211 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
9212 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
9217 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
9221 /* tp->lock is held. */
9222 static void tg3_rings_reset(struct tg3
*tp
)
9225 u32 stblk
, txrcb
, rxrcb
, limit
;
9226 struct tg3_napi
*tnapi
= &tp
->napi
[0];
9228 /* Disable all transmit rings but the first. */
9229 if (!tg3_flag(tp
, 5705_PLUS
))
9230 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 16;
9231 else if (tg3_flag(tp
, 5717_PLUS
))
9232 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 4;
9233 else if (tg3_flag(tp
, 57765_CLASS
) ||
9234 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9235 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
* 2;
9237 limit
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
9239 for (txrcb
= NIC_SRAM_SEND_RCB
+ TG3_BDINFO_SIZE
;
9240 txrcb
< limit
; txrcb
+= TG3_BDINFO_SIZE
)
9241 tg3_write_mem(tp
, txrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
9242 BDINFO_FLAGS_DISABLED
);
9245 /* Disable all receive return rings but the first. */
9246 if (tg3_flag(tp
, 5717_PLUS
))
9247 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 17;
9248 else if (!tg3_flag(tp
, 5705_PLUS
))
9249 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 16;
9250 else if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
9251 tg3_asic_rev(tp
) == ASIC_REV_5762
||
9252 tg3_flag(tp
, 57765_CLASS
))
9253 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
* 4;
9255 limit
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
9257 for (rxrcb
= NIC_SRAM_RCV_RET_RCB
+ TG3_BDINFO_SIZE
;
9258 rxrcb
< limit
; rxrcb
+= TG3_BDINFO_SIZE
)
9259 tg3_write_mem(tp
, rxrcb
+ TG3_BDINFO_MAXLEN_FLAGS
,
9260 BDINFO_FLAGS_DISABLED
);
9262 /* Disable interrupts */
9263 tw32_mailbox_f(tp
->napi
[0].int_mbox
, 1);
9264 tp
->napi
[0].chk_msi_cnt
= 0;
9265 tp
->napi
[0].last_rx_cons
= 0;
9266 tp
->napi
[0].last_tx_cons
= 0;
9268 /* Zero mailbox registers. */
9269 if (tg3_flag(tp
, SUPPORT_MSIX
)) {
9270 for (i
= 1; i
< tp
->irq_max
; i
++) {
9271 tp
->napi
[i
].tx_prod
= 0;
9272 tp
->napi
[i
].tx_cons
= 0;
9273 if (tg3_flag(tp
, ENABLE_TSS
))
9274 tw32_mailbox(tp
->napi
[i
].prodmbox
, 0);
9275 tw32_rx_mbox(tp
->napi
[i
].consmbox
, 0);
9276 tw32_mailbox_f(tp
->napi
[i
].int_mbox
, 1);
9277 tp
->napi
[i
].chk_msi_cnt
= 0;
9278 tp
->napi
[i
].last_rx_cons
= 0;
9279 tp
->napi
[i
].last_tx_cons
= 0;
9281 if (!tg3_flag(tp
, ENABLE_TSS
))
9282 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
9284 tp
->napi
[0].tx_prod
= 0;
9285 tp
->napi
[0].tx_cons
= 0;
9286 tw32_mailbox(tp
->napi
[0].prodmbox
, 0);
9287 tw32_rx_mbox(tp
->napi
[0].consmbox
, 0);
9290 /* Make sure the NIC-based send BD rings are disabled. */
9291 if (!tg3_flag(tp
, 5705_PLUS
)) {
9292 u32 mbox
= MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
9293 for (i
= 0; i
< 16; i
++)
9294 tw32_tx_mbox(mbox
+ i
* 8, 0);
9297 txrcb
= NIC_SRAM_SEND_RCB
;
9298 rxrcb
= NIC_SRAM_RCV_RET_RCB
;
9300 /* Clear status block in ram. */
9301 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
9303 /* Set status block DMA address */
9304 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9305 ((u64
) tnapi
->status_mapping
>> 32));
9306 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9307 ((u64
) tnapi
->status_mapping
& 0xffffffff));
9309 if (tnapi
->tx_ring
) {
9310 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
9311 (TG3_TX_RING_SIZE
<<
9312 BDINFO_FLAGS_MAXLEN_SHIFT
),
9313 NIC_SRAM_TX_BUFFER_DESC
);
9314 txrcb
+= TG3_BDINFO_SIZE
;
9317 if (tnapi
->rx_rcb
) {
9318 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
9319 (tp
->rx_ret_ring_mask
+ 1) <<
9320 BDINFO_FLAGS_MAXLEN_SHIFT
, 0);
9321 rxrcb
+= TG3_BDINFO_SIZE
;
9324 stblk
= HOSTCC_STATBLCK_RING1
;
9326 for (i
= 1, tnapi
++; i
< tp
->irq_cnt
; i
++, tnapi
++) {
9327 u64 mapping
= (u64
)tnapi
->status_mapping
;
9328 tw32(stblk
+ TG3_64BIT_REG_HIGH
, mapping
>> 32);
9329 tw32(stblk
+ TG3_64BIT_REG_LOW
, mapping
& 0xffffffff);
9331 /* Clear status block in ram. */
9332 memset(tnapi
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
9334 if (tnapi
->tx_ring
) {
9335 tg3_set_bdinfo(tp
, txrcb
, tnapi
->tx_desc_mapping
,
9336 (TG3_TX_RING_SIZE
<<
9337 BDINFO_FLAGS_MAXLEN_SHIFT
),
9338 NIC_SRAM_TX_BUFFER_DESC
);
9339 txrcb
+= TG3_BDINFO_SIZE
;
9342 tg3_set_bdinfo(tp
, rxrcb
, tnapi
->rx_rcb_mapping
,
9343 ((tp
->rx_ret_ring_mask
+ 1) <<
9344 BDINFO_FLAGS_MAXLEN_SHIFT
), 0);
9347 rxrcb
+= TG3_BDINFO_SIZE
;
9351 static void tg3_setup_rxbd_thresholds(struct tg3
*tp
)
9353 u32 val
, bdcache_maxcnt
, host_rep_thresh
, nic_rep_thresh
;
9355 if (!tg3_flag(tp
, 5750_PLUS
) ||
9356 tg3_flag(tp
, 5780_CLASS
) ||
9357 tg3_asic_rev(tp
) == ASIC_REV_5750
||
9358 tg3_asic_rev(tp
) == ASIC_REV_5752
||
9359 tg3_flag(tp
, 57765_PLUS
))
9360 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5700
;
9361 else if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
9362 tg3_asic_rev(tp
) == ASIC_REV_5787
)
9363 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5755
;
9365 bdcache_maxcnt
= TG3_SRAM_RX_STD_BDCACHE_SIZE_5906
;
9367 nic_rep_thresh
= min(bdcache_maxcnt
/ 2, tp
->rx_std_max_post
);
9368 host_rep_thresh
= max_t(u32
, tp
->rx_pending
/ 8, 1);
9370 val
= min(nic_rep_thresh
, host_rep_thresh
);
9371 tw32(RCVBDI_STD_THRESH
, val
);
9373 if (tg3_flag(tp
, 57765_PLUS
))
9374 tw32(STD_REPLENISH_LWM
, bdcache_maxcnt
);
9376 if (!tg3_flag(tp
, JUMBO_CAPABLE
) || tg3_flag(tp
, 5780_CLASS
))
9379 bdcache_maxcnt
= TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700
;
9381 host_rep_thresh
= max_t(u32
, tp
->rx_jumbo_pending
/ 8, 1);
9383 val
= min(bdcache_maxcnt
/ 2, host_rep_thresh
);
9384 tw32(RCVBDI_JUMBO_THRESH
, val
);
9386 if (tg3_flag(tp
, 57765_PLUS
))
9387 tw32(JMB_REPLENISH_LWM
, bdcache_maxcnt
);
9390 static inline u32
calc_crc(unsigned char *buf
, int len
)
9398 for (j
= 0; j
< len
; j
++) {
9401 for (k
= 0; k
< 8; k
++) {
9414 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
9416 /* accept or reject all multicast frames */
9417 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
9418 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
9419 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
9420 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
9423 static void __tg3_set_rx_mode(struct net_device
*dev
)
9425 struct tg3
*tp
= netdev_priv(dev
);
9428 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
9429 RX_MODE_KEEP_VLAN_TAG
);
9431 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9432 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9435 if (!tg3_flag(tp
, ENABLE_ASF
))
9436 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
9439 if (dev
->flags
& IFF_PROMISC
) {
9440 /* Promiscuous mode. */
9441 rx_mode
|= RX_MODE_PROMISC
;
9442 } else if (dev
->flags
& IFF_ALLMULTI
) {
9443 /* Accept all multicast. */
9444 tg3_set_multi(tp
, 1);
9445 } else if (netdev_mc_empty(dev
)) {
9446 /* Reject all multicast. */
9447 tg3_set_multi(tp
, 0);
9449 /* Accept one or more multicast(s). */
9450 struct netdev_hw_addr
*ha
;
9451 u32 mc_filter
[4] = { 0, };
9456 netdev_for_each_mc_addr(ha
, dev
) {
9457 crc
= calc_crc(ha
->addr
, ETH_ALEN
);
9459 regidx
= (bit
& 0x60) >> 5;
9461 mc_filter
[regidx
] |= (1 << bit
);
9464 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
9465 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
9466 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
9467 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
9470 if (rx_mode
!= tp
->rx_mode
) {
9471 tp
->rx_mode
= rx_mode
;
9472 tw32_f(MAC_RX_MODE
, rx_mode
);
9477 static void tg3_rss_init_dflt_indir_tbl(struct tg3
*tp
, u32 qcnt
)
9481 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
9482 tp
->rss_ind_tbl
[i
] = ethtool_rxfh_indir_default(i
, qcnt
);
9485 static void tg3_rss_check_indir_tbl(struct tg3
*tp
)
9489 if (!tg3_flag(tp
, SUPPORT_MSIX
))
9492 if (tp
->rxq_cnt
== 1) {
9493 memset(&tp
->rss_ind_tbl
[0], 0, sizeof(tp
->rss_ind_tbl
));
9497 /* Validate table against current IRQ count */
9498 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++) {
9499 if (tp
->rss_ind_tbl
[i
] >= tp
->rxq_cnt
)
9503 if (i
!= TG3_RSS_INDIR_TBL_SIZE
)
9504 tg3_rss_init_dflt_indir_tbl(tp
, tp
->rxq_cnt
);
9507 static void tg3_rss_write_indir_tbl(struct tg3
*tp
)
9510 u32 reg
= MAC_RSS_INDIR_TBL_0
;
9512 while (i
< TG3_RSS_INDIR_TBL_SIZE
) {
9513 u32 val
= tp
->rss_ind_tbl
[i
];
9515 for (; i
% 8; i
++) {
9517 val
|= tp
->rss_ind_tbl
[i
];
9524 static inline u32
tg3_lso_rd_dma_workaround_bit(struct tg3
*tp
)
9526 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
9527 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719
;
9529 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720
;
9532 /* tp->lock is held. */
9533 static int tg3_reset_hw(struct tg3
*tp
, bool reset_phy
)
9535 u32 val
, rdmac_mode
;
9537 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
9539 tg3_disable_ints(tp
);
9543 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
9545 if (tg3_flag(tp
, INIT_COMPLETE
))
9546 tg3_abort_hw(tp
, 1);
9548 /* Enable MAC control of LPI */
9549 if (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) {
9550 val
= TG3_CPMU_EEE_LNKIDL_PCIE_NL0
|
9551 TG3_CPMU_EEE_LNKIDL_UART_IDL
;
9552 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9553 val
|= TG3_CPMU_EEE_LNKIDL_APE_TX_MT
;
9555 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL
, val
);
9557 tw32_f(TG3_CPMU_EEE_CTRL
,
9558 TG3_CPMU_EEE_CTRL_EXIT_20_1_US
);
9560 val
= TG3_CPMU_EEEMD_ERLY_L1_XIT_DET
|
9561 TG3_CPMU_EEEMD_LPI_IN_TX
|
9562 TG3_CPMU_EEEMD_LPI_IN_RX
|
9563 TG3_CPMU_EEEMD_EEE_ENABLE
;
9565 if (tg3_asic_rev(tp
) != ASIC_REV_5717
)
9566 val
|= TG3_CPMU_EEEMD_SND_IDX_DET_EN
;
9568 if (tg3_flag(tp
, ENABLE_APE
))
9569 val
|= TG3_CPMU_EEEMD_APE_TX_DET_EN
;
9571 tw32_f(TG3_CPMU_EEE_MODE
, val
);
9573 tw32_f(TG3_CPMU_EEE_DBTMR1
,
9574 TG3_CPMU_DBTMR1_PCIEXIT_2047US
|
9575 TG3_CPMU_DBTMR1_LNKIDLE_2047US
);
9577 tw32_f(TG3_CPMU_EEE_DBTMR2
,
9578 TG3_CPMU_DBTMR2_APE_TX_2047US
|
9579 TG3_CPMU_DBTMR2_TXIDXEQ_2047US
);
9582 if ((tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
9583 !(tp
->phy_flags
& TG3_PHYFLG_USER_CONFIGURED
)) {
9584 tg3_phy_pull_config(tp
);
9585 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
9591 err
= tg3_chip_reset(tp
);
9595 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
9597 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
) {
9598 val
= tr32(TG3_CPMU_CTRL
);
9599 val
&= ~(CPMU_CTRL_LINK_AWARE_MODE
| CPMU_CTRL_LINK_IDLE_MODE
);
9600 tw32(TG3_CPMU_CTRL
, val
);
9602 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9603 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9604 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9605 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9607 val
= tr32(TG3_CPMU_LNK_AWARE_PWRMD
);
9608 val
&= ~CPMU_LNK_AWARE_MACCLK_MASK
;
9609 val
|= CPMU_LNK_AWARE_MACCLK_6_25
;
9610 tw32(TG3_CPMU_LNK_AWARE_PWRMD
, val
);
9612 val
= tr32(TG3_CPMU_HST_ACC
);
9613 val
&= ~CPMU_HST_ACC_MACCLK_MASK
;
9614 val
|= CPMU_HST_ACC_MACCLK_6_25
;
9615 tw32(TG3_CPMU_HST_ACC
, val
);
9618 if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
9619 val
= tr32(PCIE_PWR_MGMT_THRESH
) & ~PCIE_PWR_MGMT_L1_THRESH_MSK
;
9620 val
|= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN
|
9621 PCIE_PWR_MGMT_L1_THRESH_4MS
;
9622 tw32(PCIE_PWR_MGMT_THRESH
, val
);
9624 val
= tr32(TG3_PCIE_EIDLE_DELAY
) & ~TG3_PCIE_EIDLE_DELAY_MASK
;
9625 tw32(TG3_PCIE_EIDLE_DELAY
, val
| TG3_PCIE_EIDLE_DELAY_13_CLKS
);
9627 tw32(TG3_CORR_ERR_STAT
, TG3_CORR_ERR_STAT_CLEAR
);
9629 val
= tr32(TG3_PCIE_LNKCTL
) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN
;
9630 tw32(TG3_PCIE_LNKCTL
, val
| TG3_PCIE_LNKCTL_L1_PLL_PD_DIS
);
9633 if (tg3_flag(tp
, L1PLLPD_EN
)) {
9634 u32 grc_mode
= tr32(GRC_MODE
);
9636 /* Access the lower 1K of PL PCIE block registers. */
9637 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9638 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9640 val
= tr32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
);
9641 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL1
,
9642 val
| TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN
);
9644 tw32(GRC_MODE
, grc_mode
);
9647 if (tg3_flag(tp
, 57765_CLASS
)) {
9648 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
) {
9649 u32 grc_mode
= tr32(GRC_MODE
);
9651 /* Access the lower 1K of PL PCIE block registers. */
9652 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9653 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_PL_SEL
);
9655 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
9656 TG3_PCIE_PL_LO_PHYCTL5
);
9657 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_PL_LO_PHYCTL5
,
9658 val
| TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ
);
9660 tw32(GRC_MODE
, grc_mode
);
9663 if (tg3_chip_rev(tp
) != CHIPREV_57765_AX
) {
9666 /* Fix transmit hangs */
9667 val
= tr32(TG3_CPMU_PADRNG_CTL
);
9668 val
|= TG3_CPMU_PADRNG_CTL_RDIV2
;
9669 tw32(TG3_CPMU_PADRNG_CTL
, val
);
9671 grc_mode
= tr32(GRC_MODE
);
9673 /* Access the lower 1K of DL PCIE block registers. */
9674 val
= grc_mode
& ~GRC_MODE_PCIE_PORT_MASK
;
9675 tw32(GRC_MODE
, val
| GRC_MODE_PCIE_DL_SEL
);
9677 val
= tr32(TG3_PCIE_TLDLPL_PORT
+
9678 TG3_PCIE_DL_LO_FTSMAX
);
9679 val
&= ~TG3_PCIE_DL_LO_FTSMAX_MSK
;
9680 tw32(TG3_PCIE_TLDLPL_PORT
+ TG3_PCIE_DL_LO_FTSMAX
,
9681 val
| TG3_PCIE_DL_LO_FTSMAX_VAL
);
9683 tw32(GRC_MODE
, grc_mode
);
9686 val
= tr32(TG3_CPMU_LSPD_10MB_CLK
);
9687 val
&= ~CPMU_LSPD_10MB_MACCLK_MASK
;
9688 val
|= CPMU_LSPD_10MB_MACCLK_6_25
;
9689 tw32(TG3_CPMU_LSPD_10MB_CLK
, val
);
9692 /* This works around an issue with Athlon chipsets on
9693 * B3 tigon3 silicon. This bit has no effect on any
9694 * other revision. But do not set this on PCI Express
9695 * chips and don't even touch the clocks if the CPMU is present.
9697 if (!tg3_flag(tp
, CPMU_PRESENT
)) {
9698 if (!tg3_flag(tp
, PCI_EXPRESS
))
9699 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
9700 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
9703 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
&&
9704 tg3_flag(tp
, PCIX_MODE
)) {
9705 val
= tr32(TG3PCI_PCISTATE
);
9706 val
|= PCISTATE_RETRY_SAME_DMA
;
9707 tw32(TG3PCI_PCISTATE
, val
);
9710 if (tg3_flag(tp
, ENABLE_APE
)) {
9711 /* Allow reads and writes to the
9712 * APE register and memory space.
9714 val
= tr32(TG3PCI_PCISTATE
);
9715 val
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
9716 PCISTATE_ALLOW_APE_SHMEM_WR
|
9717 PCISTATE_ALLOW_APE_PSPACE_WR
;
9718 tw32(TG3PCI_PCISTATE
, val
);
9721 if (tg3_chip_rev(tp
) == CHIPREV_5704_BX
) {
9722 /* Enable some hw fixes. */
9723 val
= tr32(TG3PCI_MSI_DATA
);
9724 val
|= (1 << 26) | (1 << 28) | (1 << 29);
9725 tw32(TG3PCI_MSI_DATA
, val
);
9728 /* Descriptor ring init may make accesses to the
9729 * NIC SRAM area to setup the TX descriptors, so we
9730 * can only do this after the hardware has been
9731 * successfully reset.
9733 err
= tg3_init_rings(tp
);
9737 if (tg3_flag(tp
, 57765_PLUS
)) {
9738 val
= tr32(TG3PCI_DMA_RW_CTRL
) &
9739 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
9740 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_57765_A0
)
9741 val
&= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK
;
9742 if (!tg3_flag(tp
, 57765_CLASS
) &&
9743 tg3_asic_rev(tp
) != ASIC_REV_5717
&&
9744 tg3_asic_rev(tp
) != ASIC_REV_5762
)
9745 val
|= DMA_RWCTRL_TAGGED_STAT_WA
;
9746 tw32(TG3PCI_DMA_RW_CTRL
, val
| tp
->dma_rwctrl
);
9747 } else if (tg3_asic_rev(tp
) != ASIC_REV_5784
&&
9748 tg3_asic_rev(tp
) != ASIC_REV_5761
) {
9749 /* This value is determined during the probe time DMA
9750 * engine test, tg3_test_dma.
9752 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
9755 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
9756 GRC_MODE_4X_NIC_SEND_RINGS
|
9757 GRC_MODE_NO_TX_PHDR_CSUM
|
9758 GRC_MODE_NO_RX_PHDR_CSUM
);
9759 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
9761 /* Pseudo-header checksum is done by hardware logic and not
9762 * the offload processers, so make the chip do the pseudo-
9763 * header checksums on receive. For transmit it is more
9764 * convenient to do the pseudo-header checksum in software
9765 * as Linux does that on transmit for us in all cases.
9767 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
9769 val
= GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
;
9771 tw32(TG3_RX_PTP_CTL
,
9772 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
9774 if (tg3_flag(tp
, PTP_CAPABLE
))
9775 val
|= GRC_MODE_TIME_SYNC_ENABLE
;
9777 tw32(GRC_MODE
, tp
->grc_mode
| val
);
9779 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9780 val
= tr32(GRC_MISC_CFG
);
9782 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
9783 tw32(GRC_MISC_CFG
, val
);
9785 /* Initialize MBUF/DESC pool. */
9786 if (tg3_flag(tp
, 5750_PLUS
)) {
9788 } else if (tg3_asic_rev(tp
) != ASIC_REV_5705
) {
9789 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
9790 if (tg3_asic_rev(tp
) == ASIC_REV_5704
)
9791 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
9793 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
9794 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
9795 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
9796 } else if (tg3_flag(tp
, TSO_CAPABLE
)) {
9799 fw_len
= tp
->fw_len
;
9800 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
9801 tw32(BUFMGR_MB_POOL_ADDR
,
9802 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
9803 tw32(BUFMGR_MB_POOL_SIZE
,
9804 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
9807 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9808 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9809 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
9810 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9811 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
9812 tw32(BUFMGR_MB_HIGH_WATER
,
9813 tp
->bufmgr_config
.mbuf_high_water
);
9815 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
9816 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
9817 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
9818 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
9819 tw32(BUFMGR_MB_HIGH_WATER
,
9820 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
9822 tw32(BUFMGR_DMA_LOW_WATER
,
9823 tp
->bufmgr_config
.dma_low_water
);
9824 tw32(BUFMGR_DMA_HIGH_WATER
,
9825 tp
->bufmgr_config
.dma_high_water
);
9827 val
= BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
;
9828 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
9829 val
|= BUFMGR_MODE_NO_TX_UNDERRUN
;
9830 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
9831 tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9832 tg3_chip_rev_id(tp
) == CHIPREV_ID_5720_A0
)
9833 val
|= BUFMGR_MODE_MBLOW_ATTN_ENAB
;
9834 tw32(BUFMGR_MODE
, val
);
9835 for (i
= 0; i
< 2000; i
++) {
9836 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
9841 netdev_err(tp
->dev
, "%s cannot enable BUFMGR\n", __func__
);
9845 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5906_A1
)
9846 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
9848 tg3_setup_rxbd_thresholds(tp
);
9850 /* Initialize TG3_BDINFO's at:
9851 * RCVDBDI_STD_BD: standard eth size rx ring
9852 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9853 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9856 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9857 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9858 * ring attribute flags
9859 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9861 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9862 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9864 * The size of each ring is fixed in the firmware, but the location is
9867 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9868 ((u64
) tpr
->rx_std_mapping
>> 32));
9869 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9870 ((u64
) tpr
->rx_std_mapping
& 0xffffffff));
9871 if (!tg3_flag(tp
, 5717_PLUS
))
9872 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
9873 NIC_SRAM_RX_BUFFER_DESC
);
9875 /* Disable the mini ring */
9876 if (!tg3_flag(tp
, 5705_PLUS
))
9877 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9878 BDINFO_FLAGS_DISABLED
);
9880 /* Program the jumbo buffer descriptor ring control
9881 * blocks on those devices that have them.
9883 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
9884 (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))) {
9886 if (tg3_flag(tp
, JUMBO_RING_ENABLE
)) {
9887 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
9888 ((u64
) tpr
->rx_jmb_mapping
>> 32));
9889 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
9890 ((u64
) tpr
->rx_jmb_mapping
& 0xffffffff));
9891 val
= TG3_RX_JMB_RING_SIZE(tp
) <<
9892 BDINFO_FLAGS_MAXLEN_SHIFT
;
9893 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9894 val
| BDINFO_FLAGS_USE_EXT_RECV
);
9895 if (!tg3_flag(tp
, USE_JUMBO_BDFLAG
) ||
9896 tg3_flag(tp
, 57765_CLASS
) ||
9897 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9898 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
9899 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
9901 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
9902 BDINFO_FLAGS_DISABLED
);
9905 if (tg3_flag(tp
, 57765_PLUS
)) {
9906 val
= TG3_RX_STD_RING_SIZE(tp
);
9907 val
<<= BDINFO_FLAGS_MAXLEN_SHIFT
;
9908 val
|= (TG3_RX_STD_DMA_SZ
<< 2);
9910 val
= TG3_RX_STD_DMA_SZ
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9912 val
= TG3_RX_STD_MAX_SIZE_5700
<< BDINFO_FLAGS_MAXLEN_SHIFT
;
9914 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
, val
);
9916 tpr
->rx_std_prod_idx
= tp
->rx_pending
;
9917 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG
, tpr
->rx_std_prod_idx
);
9919 tpr
->rx_jmb_prod_idx
=
9920 tg3_flag(tp
, JUMBO_RING_ENABLE
) ? tp
->rx_jumbo_pending
: 0;
9921 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG
, tpr
->rx_jmb_prod_idx
);
9923 tg3_rings_reset(tp
);
9925 /* Initialize MAC address and backoff seed. */
9926 __tg3_set_mac_addr(tp
, false);
9928 /* MTU + ethernet header + FCS + optional VLAN tag */
9929 tw32(MAC_RX_MTU_SIZE
,
9930 tp
->dev
->mtu
+ ETH_HLEN
+ ETH_FCS_LEN
+ VLAN_HLEN
);
9932 /* The slot time is changed by tg3_setup_phy if we
9933 * run at gigabit with half duplex.
9935 val
= (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
9936 (6 << TX_LENGTHS_IPG_SHIFT
) |
9937 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
);
9939 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
9940 tg3_asic_rev(tp
) == ASIC_REV_5762
)
9941 val
|= tr32(MAC_TX_LENGTHS
) &
9942 (TX_LENGTHS_JMB_FRM_LEN_MSK
|
9943 TX_LENGTHS_CNT_DWN_VAL_MSK
);
9945 tw32(MAC_TX_LENGTHS
, val
);
9947 /* Receive rules. */
9948 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
9949 tw32(RCVLPC_CONFIG
, 0x0181);
9951 /* Calculate RDMAC_MODE setting early, we need it to determine
9952 * the RCVLPC_STATE_ENABLE mask.
9954 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
9955 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
9956 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
9957 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
9958 RDMAC_MODE_LNGREAD_ENAB
);
9960 if (tg3_asic_rev(tp
) == ASIC_REV_5717
)
9961 rdmac_mode
|= RDMAC_MODE_MULT_DMA_RD_DIS
;
9963 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
9964 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9965 tg3_asic_rev(tp
) == ASIC_REV_57780
)
9966 rdmac_mode
|= RDMAC_MODE_BD_SBD_CRPT_ENAB
|
9967 RDMAC_MODE_MBUF_RBD_CRPT_ENAB
|
9968 RDMAC_MODE_MBUF_SBD_CRPT_ENAB
;
9970 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
9971 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
9972 if (tg3_flag(tp
, TSO_CAPABLE
) &&
9973 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
9974 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
9975 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
9976 !tg3_flag(tp
, IS_5788
)) {
9977 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9981 if (tg3_flag(tp
, PCI_EXPRESS
))
9982 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
9984 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
9986 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
9987 rdmac_mode
|= RDMAC_MODE_JMB_2K_MMRR
;
9988 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_2K
;
9992 if (tg3_flag(tp
, HW_TSO_1
) ||
9993 tg3_flag(tp
, HW_TSO_2
) ||
9994 tg3_flag(tp
, HW_TSO_3
))
9995 rdmac_mode
|= RDMAC_MODE_IPV4_LSO_EN
;
9997 if (tg3_flag(tp
, 57765_PLUS
) ||
9998 tg3_asic_rev(tp
) == ASIC_REV_5785
||
9999 tg3_asic_rev(tp
) == ASIC_REV_57780
)
10000 rdmac_mode
|= RDMAC_MODE_IPV6_LSO_EN
;
10002 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
10003 tg3_asic_rev(tp
) == ASIC_REV_5762
)
10004 rdmac_mode
|= tr32(RDMAC_MODE
) & RDMAC_MODE_H2BNC_VLAN_DET
;
10006 if (tg3_asic_rev(tp
) == ASIC_REV_5761
||
10007 tg3_asic_rev(tp
) == ASIC_REV_5784
||
10008 tg3_asic_rev(tp
) == ASIC_REV_5785
||
10009 tg3_asic_rev(tp
) == ASIC_REV_57780
||
10010 tg3_flag(tp
, 57765_PLUS
)) {
10013 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
10014 tgtreg
= TG3_RDMA_RSRVCTRL_REG2
;
10016 tgtreg
= TG3_RDMA_RSRVCTRL_REG
;
10018 val
= tr32(tgtreg
);
10019 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
10020 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10021 val
&= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK
|
10022 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK
|
10023 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK
);
10024 val
|= TG3_RDMA_RSRVCTRL_TXMRGN_320B
|
10025 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K
|
10026 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K
;
10028 tw32(tgtreg
, val
| TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX
);
10031 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
10032 tg3_asic_rev(tp
) == ASIC_REV_5720
||
10033 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10036 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
10037 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL2
;
10039 tgtreg
= TG3_LSO_RD_DMA_CRPTEN_CTRL
;
10041 val
= tr32(tgtreg
);
10043 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K
|
10044 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K
);
10047 /* Receive/send statistics. */
10048 if (tg3_flag(tp
, 5750_PLUS
)) {
10049 val
= tr32(RCVLPC_STATS_ENABLE
);
10050 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
10051 tw32(RCVLPC_STATS_ENABLE
, val
);
10052 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
10053 tg3_flag(tp
, TSO_CAPABLE
)) {
10054 val
= tr32(RCVLPC_STATS_ENABLE
);
10055 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
10056 tw32(RCVLPC_STATS_ENABLE
, val
);
10058 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
10060 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
10061 tw32(SNDDATAI_STATSENAB
, 0xffffff);
10062 tw32(SNDDATAI_STATSCTRL
,
10063 (SNDDATAI_SCTRL_ENABLE
|
10064 SNDDATAI_SCTRL_FASTUPD
));
10066 /* Setup host coalescing engine. */
10067 tw32(HOSTCC_MODE
, 0);
10068 for (i
= 0; i
< 2000; i
++) {
10069 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
10074 __tg3_set_coalesce(tp
, &tp
->coal
);
10076 if (!tg3_flag(tp
, 5705_PLUS
)) {
10077 /* Status/statistics block address. See tg3_timer,
10078 * the tg3_periodic_fetch_stats call there, and
10079 * tg3_get_stats to see how this works for 5705/5750 chips.
10081 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
10082 ((u64
) tp
->stats_mapping
>> 32));
10083 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
10084 ((u64
) tp
->stats_mapping
& 0xffffffff));
10085 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
10087 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
10089 /* Clear statistics and status block memory areas */
10090 for (i
= NIC_SRAM_STATS_BLK
;
10091 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
10092 i
+= sizeof(u32
)) {
10093 tg3_write_mem(tp
, i
, 0);
10098 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
10100 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
10101 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
10102 if (!tg3_flag(tp
, 5705_PLUS
))
10103 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
10105 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) {
10106 tp
->phy_flags
&= ~TG3_PHYFLG_PARALLEL_DETECT
;
10107 /* reset to prevent losing 1st rx packet intermittently */
10108 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
10112 tp
->mac_mode
|= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
10113 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
|
10114 MAC_MODE_FHDE_ENABLE
;
10115 if (tg3_flag(tp
, ENABLE_APE
))
10116 tp
->mac_mode
|= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
10117 if (!tg3_flag(tp
, 5705_PLUS
) &&
10118 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
10119 tg3_asic_rev(tp
) != ASIC_REV_5700
)
10120 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
10121 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
10124 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10125 * If TG3_FLAG_IS_NIC is zero, we should read the
10126 * register to preserve the GPIO settings for LOMs. The GPIOs,
10127 * whether used as inputs or outputs, are set by boot code after
10130 if (!tg3_flag(tp
, IS_NIC
)) {
10133 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE1
|
10134 GRC_LCLCTRL_GPIO_OE2
| GRC_LCLCTRL_GPIO_OUTPUT0
|
10135 GRC_LCLCTRL_GPIO_OUTPUT1
| GRC_LCLCTRL_GPIO_OUTPUT2
;
10137 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
10138 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
10139 GRC_LCLCTRL_GPIO_OUTPUT3
;
10141 if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
10142 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
10144 tp
->grc_local_ctrl
&= ~gpio_mask
;
10145 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
10147 /* GPIO1 must be driven high for eeprom write protect */
10148 if (tg3_flag(tp
, EEPROM_WRITE_PROT
))
10149 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
10150 GRC_LCLCTRL_GPIO_OUTPUT1
);
10152 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
10155 if (tg3_flag(tp
, USING_MSIX
)) {
10156 val
= tr32(MSGINT_MODE
);
10157 val
|= MSGINT_MODE_ENABLE
;
10158 if (tp
->irq_cnt
> 1)
10159 val
|= MSGINT_MODE_MULTIVEC_EN
;
10160 if (!tg3_flag(tp
, 1SHOT_MSI
))
10161 val
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
10162 tw32(MSGINT_MODE
, val
);
10165 if (!tg3_flag(tp
, 5705_PLUS
)) {
10166 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
10170 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
10171 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
10172 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
10173 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
10174 WDMAC_MODE_LNGREAD_ENAB
);
10176 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
10177 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
10178 if (tg3_flag(tp
, TSO_CAPABLE
) &&
10179 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A1
||
10180 tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A2
)) {
10182 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
10183 !tg3_flag(tp
, IS_5788
)) {
10184 val
|= WDMAC_MODE_RX_ACCEL
;
10188 /* Enable host coalescing bug fix */
10189 if (tg3_flag(tp
, 5755_PLUS
))
10190 val
|= WDMAC_MODE_STATUS_TAG_FIX
;
10192 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
10193 val
|= WDMAC_MODE_BURST_ALL_DATA
;
10195 tw32_f(WDMAC_MODE
, val
);
10198 if (tg3_flag(tp
, PCIX_MODE
)) {
10201 pci_read_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
10203 if (tg3_asic_rev(tp
) == ASIC_REV_5703
) {
10204 pcix_cmd
&= ~PCI_X_CMD_MAX_READ
;
10205 pcix_cmd
|= PCI_X_CMD_READ_2K
;
10206 } else if (tg3_asic_rev(tp
) == ASIC_REV_5704
) {
10207 pcix_cmd
&= ~(PCI_X_CMD_MAX_SPLIT
| PCI_X_CMD_MAX_READ
);
10208 pcix_cmd
|= PCI_X_CMD_READ_2K
;
10210 pci_write_config_word(tp
->pdev
, tp
->pcix_cap
+ PCI_X_CMD
,
10214 tw32_f(RDMAC_MODE
, rdmac_mode
);
10217 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
10218 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
10219 for (i
= 0; i
< TG3_NUM_RDMA_CHANNELS
; i
++) {
10220 if (tr32(TG3_RDMA_LENGTH
+ (i
<< 2)) > TG3_MAX_MTU(tp
))
10223 if (i
< TG3_NUM_RDMA_CHANNELS
) {
10224 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
10225 val
|= tg3_lso_rd_dma_workaround_bit(tp
);
10226 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
10227 tg3_flag_set(tp
, 5719_5720_RDMA_BUG
);
10231 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
10232 if (!tg3_flag(tp
, 5705_PLUS
))
10233 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
10235 if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
10236 tw32(SNDDATAC_MODE
,
10237 SNDDATAC_MODE_ENABLE
| SNDDATAC_MODE_CDELAY
);
10239 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
10241 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
10242 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
10243 val
= RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
;
10244 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
10245 val
|= RCVDBDI_MODE_LRG_RING_SZ
;
10246 tw32(RCVDBDI_MODE
, val
);
10247 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
10248 if (tg3_flag(tp
, HW_TSO_1
) ||
10249 tg3_flag(tp
, HW_TSO_2
) ||
10250 tg3_flag(tp
, HW_TSO_3
))
10251 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
10252 val
= SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
;
10253 if (tg3_flag(tp
, ENABLE_TSS
))
10254 val
|= SNDBDI_MODE_MULTI_TXQ_EN
;
10255 tw32(SNDBDI_MODE
, val
);
10256 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
10258 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
10259 err
= tg3_load_5701_a0_firmware_fix(tp
);
10264 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
10265 /* Ignore any errors for the firmware download. If download
10266 * fails, the device will operate with EEE disabled
10268 tg3_load_57766_firmware(tp
);
10271 if (tg3_flag(tp
, TSO_CAPABLE
)) {
10272 err
= tg3_load_tso_firmware(tp
);
10277 tp
->tx_mode
= TX_MODE_ENABLE
;
10279 if (tg3_flag(tp
, 5755_PLUS
) ||
10280 tg3_asic_rev(tp
) == ASIC_REV_5906
)
10281 tp
->tx_mode
|= TX_MODE_MBUF_LOCKUP_FIX
;
10283 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
10284 tg3_asic_rev(tp
) == ASIC_REV_5762
) {
10285 val
= TX_MODE_JMB_FRM_LEN
| TX_MODE_CNT_DN_MODE
;
10286 tp
->tx_mode
&= ~val
;
10287 tp
->tx_mode
|= tr32(MAC_TX_MODE
) & val
;
10290 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
10293 if (tg3_flag(tp
, ENABLE_RSS
)) {
10294 tg3_rss_write_indir_tbl(tp
);
10296 /* Setup the "secret" hash key. */
10297 tw32(MAC_RSS_HASH_KEY_0
, 0x5f865437);
10298 tw32(MAC_RSS_HASH_KEY_1
, 0xe4ac62cc);
10299 tw32(MAC_RSS_HASH_KEY_2
, 0x50103a45);
10300 tw32(MAC_RSS_HASH_KEY_3
, 0x36621985);
10301 tw32(MAC_RSS_HASH_KEY_4
, 0xbf14c0e8);
10302 tw32(MAC_RSS_HASH_KEY_5
, 0x1bc27a1e);
10303 tw32(MAC_RSS_HASH_KEY_6
, 0x84f4b556);
10304 tw32(MAC_RSS_HASH_KEY_7
, 0x094ea6fe);
10305 tw32(MAC_RSS_HASH_KEY_8
, 0x7dda01e7);
10306 tw32(MAC_RSS_HASH_KEY_9
, 0xc04d7481);
10309 tp
->rx_mode
= RX_MODE_ENABLE
;
10310 if (tg3_flag(tp
, 5755_PLUS
))
10311 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
10313 if (tg3_flag(tp
, ENABLE_RSS
))
10314 tp
->rx_mode
|= RX_MODE_RSS_ENABLE
|
10315 RX_MODE_RSS_ITBL_HASH_BITS_7
|
10316 RX_MODE_RSS_IPV6_HASH_EN
|
10317 RX_MODE_RSS_TCP_IPV6_HASH_EN
|
10318 RX_MODE_RSS_IPV4_HASH_EN
|
10319 RX_MODE_RSS_TCP_IPV4_HASH_EN
;
10321 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
10324 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
10326 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
10327 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
10328 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
10331 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
10334 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
10335 if ((tg3_asic_rev(tp
) == ASIC_REV_5704
) &&
10336 !(tp
->phy_flags
& TG3_PHYFLG_SERDES_PREEMPHASIS
)) {
10337 /* Set drive transmission level to 1.2V */
10338 /* only if the signal pre-emphasis bit is not set */
10339 val
= tr32(MAC_SERDES_CFG
);
10342 tw32(MAC_SERDES_CFG
, val
);
10344 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
)
10345 tw32(MAC_SERDES_CFG
, 0x616000);
10348 /* Prevent chip from dropping frames when flow control
10351 if (tg3_flag(tp
, 57765_CLASS
))
10355 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, val
);
10357 if (tg3_asic_rev(tp
) == ASIC_REV_5704
&&
10358 (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
10359 /* Use hardware link auto-negotiation */
10360 tg3_flag_set(tp
, HW_AUTONEG
);
10363 if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
10364 tg3_asic_rev(tp
) == ASIC_REV_5714
) {
10367 tmp
= tr32(SERDES_RX_CTRL
);
10368 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
10369 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
10370 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
10371 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
10374 if (!tg3_flag(tp
, USE_PHYLIB
)) {
10375 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
10376 tp
->phy_flags
&= ~TG3_PHYFLG_IS_LOW_POWER
;
10378 err
= tg3_setup_phy(tp
, false);
10382 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
10383 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
)) {
10386 /* Clear CRC stats. */
10387 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &tmp
)) {
10388 tg3_writephy(tp
, MII_TG3_TEST1
,
10389 tmp
| MII_TG3_TEST1_CRC_EN
);
10390 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &tmp
);
10395 __tg3_set_rx_mode(tp
->dev
);
10397 /* Initialize receive rules. */
10398 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
10399 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
10400 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
10401 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
10403 if (tg3_flag(tp
, 5705_PLUS
) && !tg3_flag(tp
, 5780_CLASS
))
10407 if (tg3_flag(tp
, ENABLE_ASF
))
10411 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
10413 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
10415 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
10417 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
10419 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
10421 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
10423 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
10425 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
10427 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
10429 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
10431 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
10433 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
10435 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10437 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10445 if (tg3_flag(tp
, ENABLE_APE
))
10446 /* Write our heartbeat update interval to APE. */
10447 tg3_ape_write32(tp
, TG3_APE_HOST_HEARTBEAT_INT_MS
,
10448 APE_HOST_HEARTBEAT_INT_DISABLE
);
10450 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
10455 /* Called at device open time to get the chip ready for
10456 * packet processing. Invoked with tp->lock held.
10458 static int tg3_init_hw(struct tg3
*tp
, bool reset_phy
)
10460 /* Chip may have been just powered on. If so, the boot code may still
10461 * be running initialization. Wait for it to finish to avoid races in
10462 * accessing the hardware.
10464 tg3_enable_register_access(tp
);
10467 tg3_switch_clocks(tp
);
10469 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
10471 return tg3_reset_hw(tp
, reset_phy
);
10474 static void tg3_sd_scan_scratchpad(struct tg3
*tp
, struct tg3_ocir
*ocir
)
10478 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++, ocir
++) {
10479 u32 off
= i
* TG3_OCIR_LEN
, len
= TG3_OCIR_LEN
;
10481 tg3_ape_scratchpad_read(tp
, (u32
*) ocir
, off
, len
);
10484 if (ocir
->signature
!= TG3_OCIR_SIG_MAGIC
||
10485 !(ocir
->version_flags
& TG3_OCIR_FLAG_ACTIVE
))
10486 memset(ocir
, 0, TG3_OCIR_LEN
);
10490 /* sysfs attributes for hwmon */
10491 static ssize_t
tg3_show_temp(struct device
*dev
,
10492 struct device_attribute
*devattr
, char *buf
)
10494 struct pci_dev
*pdev
= to_pci_dev(dev
);
10495 struct net_device
*netdev
= pci_get_drvdata(pdev
);
10496 struct tg3
*tp
= netdev_priv(netdev
);
10497 struct sensor_device_attribute
*attr
= to_sensor_dev_attr(devattr
);
10500 spin_lock_bh(&tp
->lock
);
10501 tg3_ape_scratchpad_read(tp
, &temperature
, attr
->index
,
10502 sizeof(temperature
));
10503 spin_unlock_bh(&tp
->lock
);
10504 return sprintf(buf
, "%u\n", temperature
);
10508 static SENSOR_DEVICE_ATTR(temp1_input
, S_IRUGO
, tg3_show_temp
, NULL
,
10509 TG3_TEMP_SENSOR_OFFSET
);
10510 static SENSOR_DEVICE_ATTR(temp1_crit
, S_IRUGO
, tg3_show_temp
, NULL
,
10511 TG3_TEMP_CAUTION_OFFSET
);
10512 static SENSOR_DEVICE_ATTR(temp1_max
, S_IRUGO
, tg3_show_temp
, NULL
,
10513 TG3_TEMP_MAX_OFFSET
);
10515 static struct attribute
*tg3_attributes
[] = {
10516 &sensor_dev_attr_temp1_input
.dev_attr
.attr
,
10517 &sensor_dev_attr_temp1_crit
.dev_attr
.attr
,
10518 &sensor_dev_attr_temp1_max
.dev_attr
.attr
,
10522 static const struct attribute_group tg3_group
= {
10523 .attrs
= tg3_attributes
,
10526 static void tg3_hwmon_close(struct tg3
*tp
)
10528 if (tp
->hwmon_dev
) {
10529 hwmon_device_unregister(tp
->hwmon_dev
);
10530 tp
->hwmon_dev
= NULL
;
10531 sysfs_remove_group(&tp
->pdev
->dev
.kobj
, &tg3_group
);
10535 static void tg3_hwmon_open(struct tg3
*tp
)
10539 struct pci_dev
*pdev
= tp
->pdev
;
10540 struct tg3_ocir ocirs
[TG3_SD_NUM_RECS
];
10542 tg3_sd_scan_scratchpad(tp
, ocirs
);
10544 for (i
= 0; i
< TG3_SD_NUM_RECS
; i
++) {
10545 if (!ocirs
[i
].src_data_length
)
10548 size
+= ocirs
[i
].src_hdr_length
;
10549 size
+= ocirs
[i
].src_data_length
;
10555 /* Register hwmon sysfs hooks */
10556 err
= sysfs_create_group(&pdev
->dev
.kobj
, &tg3_group
);
10558 dev_err(&pdev
->dev
, "Cannot create sysfs group, aborting\n");
10562 tp
->hwmon_dev
= hwmon_device_register(&pdev
->dev
);
10563 if (IS_ERR(tp
->hwmon_dev
)) {
10564 tp
->hwmon_dev
= NULL
;
10565 dev_err(&pdev
->dev
, "Cannot register hwmon device, aborting\n");
10566 sysfs_remove_group(&pdev
->dev
.kobj
, &tg3_group
);
10571 #define TG3_STAT_ADD32(PSTAT, REG) \
10572 do { u32 __val = tr32(REG); \
10573 (PSTAT)->low += __val; \
10574 if ((PSTAT)->low < __val) \
10575 (PSTAT)->high += 1; \
10578 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
10580 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
10585 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
10586 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
10587 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
10588 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
10589 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
10590 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
10591 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
10592 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
10593 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
10594 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
10595 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
10596 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
10597 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
10598 if (unlikely(tg3_flag(tp
, 5719_5720_RDMA_BUG
) &&
10599 (sp
->tx_ucast_packets
.low
+ sp
->tx_mcast_packets
.low
+
10600 sp
->tx_bcast_packets
.low
) > TG3_NUM_RDMA_CHANNELS
)) {
10603 val
= tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL
);
10604 val
&= ~tg3_lso_rd_dma_workaround_bit(tp
);
10605 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL
, val
);
10606 tg3_flag_clear(tp
, 5719_5720_RDMA_BUG
);
10609 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
10610 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
10611 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
10612 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
10613 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
10614 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
10615 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
10616 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
10617 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
10618 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
10619 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
10620 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
10621 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
10622 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
10624 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
10625 if (tg3_asic_rev(tp
) != ASIC_REV_5717
&&
10626 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
&&
10627 tg3_chip_rev_id(tp
) != CHIPREV_ID_5720_A0
) {
10628 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
10630 u32 val
= tr32(HOSTCC_FLOW_ATTN
);
10631 val
= (val
& HOSTCC_FLOW_ATTN_MBUF_LWM
) ? 1 : 0;
10633 tw32(HOSTCC_FLOW_ATTN
, HOSTCC_FLOW_ATTN_MBUF_LWM
);
10634 sp
->rx_discards
.low
+= val
;
10635 if (sp
->rx_discards
.low
< val
)
10636 sp
->rx_discards
.high
+= 1;
10638 sp
->mbuf_lwm_thresh_hit
= sp
->rx_discards
;
10640 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
10643 static void tg3_chk_missed_msi(struct tg3
*tp
)
10647 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
10648 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
10650 if (tg3_has_work(tnapi
)) {
10651 if (tnapi
->last_rx_cons
== tnapi
->rx_rcb_ptr
&&
10652 tnapi
->last_tx_cons
== tnapi
->tx_cons
) {
10653 if (tnapi
->chk_msi_cnt
< 1) {
10654 tnapi
->chk_msi_cnt
++;
10660 tnapi
->chk_msi_cnt
= 0;
10661 tnapi
->last_rx_cons
= tnapi
->rx_rcb_ptr
;
10662 tnapi
->last_tx_cons
= tnapi
->tx_cons
;
10666 static void tg3_timer(unsigned long __opaque
)
10668 struct tg3
*tp
= (struct tg3
*) __opaque
;
10670 if (tp
->irq_sync
|| tg3_flag(tp
, RESET_TASK_PENDING
))
10671 goto restart_timer
;
10673 spin_lock(&tp
->lock
);
10675 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
10676 tg3_flag(tp
, 57765_CLASS
))
10677 tg3_chk_missed_msi(tp
);
10679 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
)) {
10680 /* BCM4785: Flush posted writes from GbE to host memory. */
10684 if (!tg3_flag(tp
, TAGGED_STATUS
)) {
10685 /* All of this garbage is because when using non-tagged
10686 * IRQ status the mailbox/status_block protocol the chip
10687 * uses with the cpu is race prone.
10689 if (tp
->napi
[0].hw_status
->status
& SD_STATUS_UPDATED
) {
10690 tw32(GRC_LOCAL_CTRL
,
10691 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
10693 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
10694 HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
);
10697 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
10698 spin_unlock(&tp
->lock
);
10699 tg3_reset_task_schedule(tp
);
10700 goto restart_timer
;
10704 /* This part only runs once per second. */
10705 if (!--tp
->timer_counter
) {
10706 if (tg3_flag(tp
, 5705_PLUS
))
10707 tg3_periodic_fetch_stats(tp
);
10709 if (tp
->setlpicnt
&& !--tp
->setlpicnt
)
10710 tg3_phy_eee_enable(tp
);
10712 if (tg3_flag(tp
, USE_LINKCHG_REG
)) {
10716 mac_stat
= tr32(MAC_STATUS
);
10719 if (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) {
10720 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
10722 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
10726 tg3_setup_phy(tp
, false);
10727 } else if (tg3_flag(tp
, POLL_SERDES
)) {
10728 u32 mac_stat
= tr32(MAC_STATUS
);
10729 int need_setup
= 0;
10732 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
10735 if (!tp
->link_up
&&
10736 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
10737 MAC_STATUS_SIGNAL_DET
))) {
10741 if (!tp
->serdes_counter
) {
10744 ~MAC_MODE_PORT_MODE_MASK
));
10746 tw32_f(MAC_MODE
, tp
->mac_mode
);
10749 tg3_setup_phy(tp
, false);
10751 } else if ((tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
) &&
10752 tg3_flag(tp
, 5780_CLASS
)) {
10753 tg3_serdes_parallel_detect(tp
);
10756 tp
->timer_counter
= tp
->timer_multiplier
;
10759 /* Heartbeat is only sent once every 2 seconds.
10761 * The heartbeat is to tell the ASF firmware that the host
10762 * driver is still alive. In the event that the OS crashes,
10763 * ASF needs to reset the hardware to free up the FIFO space
10764 * that may be filled with rx packets destined for the host.
10765 * If the FIFO is full, ASF will no longer function properly.
10767 * Unintended resets have been reported on real time kernels
10768 * where the timer doesn't run on time. Netpoll will also have
10771 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10772 * to check the ring condition when the heartbeat is expiring
10773 * before doing the reset. This will prevent most unintended
10776 if (!--tp
->asf_counter
) {
10777 if (tg3_flag(tp
, ENABLE_ASF
) && !tg3_flag(tp
, ENABLE_APE
)) {
10778 tg3_wait_for_event_ack(tp
);
10780 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
10781 FWCMD_NICDRV_ALIVE3
);
10782 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
10783 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
,
10784 TG3_FW_UPDATE_TIMEOUT_SEC
);
10786 tg3_generate_fw_event(tp
);
10788 tp
->asf_counter
= tp
->asf_multiplier
;
10791 spin_unlock(&tp
->lock
);
10794 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
10795 add_timer(&tp
->timer
);
10798 static void tg3_timer_init(struct tg3
*tp
)
10800 if (tg3_flag(tp
, TAGGED_STATUS
) &&
10801 tg3_asic_rev(tp
) != ASIC_REV_5717
&&
10802 !tg3_flag(tp
, 57765_CLASS
))
10803 tp
->timer_offset
= HZ
;
10805 tp
->timer_offset
= HZ
/ 10;
10807 BUG_ON(tp
->timer_offset
> HZ
);
10809 tp
->timer_multiplier
= (HZ
/ tp
->timer_offset
);
10810 tp
->asf_multiplier
= (HZ
/ tp
->timer_offset
) *
10811 TG3_FW_UPDATE_FREQ_SEC
;
10813 init_timer(&tp
->timer
);
10814 tp
->timer
.data
= (unsigned long) tp
;
10815 tp
->timer
.function
= tg3_timer
;
10818 static void tg3_timer_start(struct tg3
*tp
)
10820 tp
->asf_counter
= tp
->asf_multiplier
;
10821 tp
->timer_counter
= tp
->timer_multiplier
;
10823 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
10824 add_timer(&tp
->timer
);
10827 static void tg3_timer_stop(struct tg3
*tp
)
10829 del_timer_sync(&tp
->timer
);
10832 /* Restart hardware after configuration changes, self-test, etc.
10833 * Invoked with tp->lock held.
10835 static int tg3_restart_hw(struct tg3
*tp
, bool reset_phy
)
10836 __releases(tp
->lock
)
10837 __acquires(tp
->lock
)
10841 err
= tg3_init_hw(tp
, reset_phy
);
10843 netdev_err(tp
->dev
,
10844 "Failed to re-initialize device, aborting\n");
10845 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10846 tg3_full_unlock(tp
);
10847 tg3_timer_stop(tp
);
10849 tg3_napi_enable(tp
);
10850 dev_close(tp
->dev
);
10851 tg3_full_lock(tp
, 0);
10856 static void tg3_reset_task(struct work_struct
*work
)
10858 struct tg3
*tp
= container_of(work
, struct tg3
, reset_task
);
10861 tg3_full_lock(tp
, 0);
10863 if (!netif_running(tp
->dev
)) {
10864 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10865 tg3_full_unlock(tp
);
10869 tg3_full_unlock(tp
);
10873 tg3_netif_stop(tp
);
10875 tg3_full_lock(tp
, 1);
10877 if (tg3_flag(tp
, TX_RECOVERY_PENDING
)) {
10878 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
10879 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
10880 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
10881 tg3_flag_clear(tp
, TX_RECOVERY_PENDING
);
10884 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
10885 err
= tg3_init_hw(tp
, true);
10889 tg3_netif_start(tp
);
10892 tg3_full_unlock(tp
);
10897 tg3_flag_clear(tp
, RESET_TASK_PENDING
);
10900 static int tg3_request_irq(struct tg3
*tp
, int irq_num
)
10903 unsigned long flags
;
10905 struct tg3_napi
*tnapi
= &tp
->napi
[irq_num
];
10907 if (tp
->irq_cnt
== 1)
10908 name
= tp
->dev
->name
;
10910 name
= &tnapi
->irq_lbl
[0];
10911 snprintf(name
, IFNAMSIZ
, "%s-%d", tp
->dev
->name
, irq_num
);
10912 name
[IFNAMSIZ
-1] = 0;
10915 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
10917 if (tg3_flag(tp
, 1SHOT_MSI
))
10918 fn
= tg3_msi_1shot
;
10921 fn
= tg3_interrupt
;
10922 if (tg3_flag(tp
, TAGGED_STATUS
))
10923 fn
= tg3_interrupt_tagged
;
10924 flags
= IRQF_SHARED
;
10927 return request_irq(tnapi
->irq_vec
, fn
, flags
, name
, tnapi
);
10930 static int tg3_test_interrupt(struct tg3
*tp
)
10932 struct tg3_napi
*tnapi
= &tp
->napi
[0];
10933 struct net_device
*dev
= tp
->dev
;
10934 int err
, i
, intr_ok
= 0;
10937 if (!netif_running(dev
))
10940 tg3_disable_ints(tp
);
10942 free_irq(tnapi
->irq_vec
, tnapi
);
10945 * Turn off MSI one shot mode. Otherwise this test has no
10946 * observable way to know whether the interrupt was delivered.
10948 if (tg3_flag(tp
, 57765_PLUS
)) {
10949 val
= tr32(MSGINT_MODE
) | MSGINT_MODE_ONE_SHOT_DISABLE
;
10950 tw32(MSGINT_MODE
, val
);
10953 err
= request_irq(tnapi
->irq_vec
, tg3_test_isr
,
10954 IRQF_SHARED
, dev
->name
, tnapi
);
10958 tnapi
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
10959 tg3_enable_ints(tp
);
10961 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
10964 for (i
= 0; i
< 5; i
++) {
10965 u32 int_mbox
, misc_host_ctrl
;
10967 int_mbox
= tr32_mailbox(tnapi
->int_mbox
);
10968 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
10970 if ((int_mbox
!= 0) ||
10971 (misc_host_ctrl
& MISC_HOST_CTRL_MASK_PCI_INT
)) {
10976 if (tg3_flag(tp
, 57765_PLUS
) &&
10977 tnapi
->hw_status
->status_tag
!= tnapi
->last_tag
)
10978 tw32_mailbox_f(tnapi
->int_mbox
, tnapi
->last_tag
<< 24);
10983 tg3_disable_ints(tp
);
10985 free_irq(tnapi
->irq_vec
, tnapi
);
10987 err
= tg3_request_irq(tp
, 0);
10993 /* Reenable MSI one shot mode. */
10994 if (tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, 1SHOT_MSI
)) {
10995 val
= tr32(MSGINT_MODE
) & ~MSGINT_MODE_ONE_SHOT_DISABLE
;
10996 tw32(MSGINT_MODE
, val
);
11004 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11005 * successfully restored
11007 static int tg3_test_msi(struct tg3
*tp
)
11012 if (!tg3_flag(tp
, USING_MSI
))
11015 /* Turn off SERR reporting in case MSI terminates with Master
11018 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
11019 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
11020 pci_cmd
& ~PCI_COMMAND_SERR
);
11022 err
= tg3_test_interrupt(tp
);
11024 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
11029 /* other failures */
11033 /* MSI test failed, go back to INTx mode */
11034 netdev_warn(tp
->dev
, "No interrupt was generated using MSI. Switching "
11035 "to INTx mode. Please report this failure to the PCI "
11036 "maintainer and include system chipset information\n");
11038 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
11040 pci_disable_msi(tp
->pdev
);
11042 tg3_flag_clear(tp
, USING_MSI
);
11043 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
11045 err
= tg3_request_irq(tp
, 0);
11049 /* Need to reset the chip because the MSI cycle may have terminated
11050 * with Master Abort.
11052 tg3_full_lock(tp
, 1);
11054 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11055 err
= tg3_init_hw(tp
, true);
11057 tg3_full_unlock(tp
);
11060 free_irq(tp
->napi
[0].irq_vec
, &tp
->napi
[0]);
11065 static int tg3_request_firmware(struct tg3
*tp
)
11067 const struct tg3_firmware_hdr
*fw_hdr
;
11069 if (request_firmware(&tp
->fw
, tp
->fw_needed
, &tp
->pdev
->dev
)) {
11070 netdev_err(tp
->dev
, "Failed to load firmware \"%s\"\n",
11075 fw_hdr
= (struct tg3_firmware_hdr
*)tp
->fw
->data
;
11077 /* Firmware blob starts with version numbers, followed by
11078 * start address and _full_ length including BSS sections
11079 * (which must be longer than the actual data, of course
11082 tp
->fw_len
= be32_to_cpu(fw_hdr
->len
); /* includes bss */
11083 if (tp
->fw_len
< (tp
->fw
->size
- TG3_FW_HDR_LEN
)) {
11084 netdev_err(tp
->dev
, "bogus length %d in \"%s\"\n",
11085 tp
->fw_len
, tp
->fw_needed
);
11086 release_firmware(tp
->fw
);
11091 /* We no longer need firmware; we have it. */
11092 tp
->fw_needed
= NULL
;
11096 static u32
tg3_irq_count(struct tg3
*tp
)
11098 u32 irq_cnt
= max(tp
->rxq_cnt
, tp
->txq_cnt
);
11101 /* We want as many rx rings enabled as there are cpus.
11102 * In multiqueue MSI-X mode, the first MSI-X vector
11103 * only deals with link interrupts, etc, so we add
11104 * one to the number of vectors we are requesting.
11106 irq_cnt
= min_t(unsigned, irq_cnt
+ 1, tp
->irq_max
);
11112 static bool tg3_enable_msix(struct tg3
*tp
)
11115 struct msix_entry msix_ent
[TG3_IRQ_MAX_VECS
];
11117 tp
->txq_cnt
= tp
->txq_req
;
11118 tp
->rxq_cnt
= tp
->rxq_req
;
11120 tp
->rxq_cnt
= netif_get_num_default_rss_queues();
11121 if (tp
->rxq_cnt
> tp
->rxq_max
)
11122 tp
->rxq_cnt
= tp
->rxq_max
;
11124 /* Disable multiple TX rings by default. Simple round-robin hardware
11125 * scheduling of the TX rings can cause starvation of rings with
11126 * small packets when other rings have TSO or jumbo packets.
11131 tp
->irq_cnt
= tg3_irq_count(tp
);
11133 for (i
= 0; i
< tp
->irq_max
; i
++) {
11134 msix_ent
[i
].entry
= i
;
11135 msix_ent
[i
].vector
= 0;
11138 rc
= pci_enable_msix(tp
->pdev
, msix_ent
, tp
->irq_cnt
);
11141 } else if (rc
!= 0) {
11142 if (pci_enable_msix(tp
->pdev
, msix_ent
, rc
))
11144 netdev_notice(tp
->dev
, "Requested %d MSI-X vectors, received %d\n",
11147 tp
->rxq_cnt
= max(rc
- 1, 1);
11149 tp
->txq_cnt
= min(tp
->rxq_cnt
, tp
->txq_max
);
11152 for (i
= 0; i
< tp
->irq_max
; i
++)
11153 tp
->napi
[i
].irq_vec
= msix_ent
[i
].vector
;
11155 if (netif_set_real_num_rx_queues(tp
->dev
, tp
->rxq_cnt
)) {
11156 pci_disable_msix(tp
->pdev
);
11160 if (tp
->irq_cnt
== 1)
11163 tg3_flag_set(tp
, ENABLE_RSS
);
11165 if (tp
->txq_cnt
> 1)
11166 tg3_flag_set(tp
, ENABLE_TSS
);
11168 netif_set_real_num_tx_queues(tp
->dev
, tp
->txq_cnt
);
11173 static void tg3_ints_init(struct tg3
*tp
)
11175 if ((tg3_flag(tp
, SUPPORT_MSI
) || tg3_flag(tp
, SUPPORT_MSIX
)) &&
11176 !tg3_flag(tp
, TAGGED_STATUS
)) {
11177 /* All MSI supporting chips should support tagged
11178 * status. Assert that this is the case.
11180 netdev_warn(tp
->dev
,
11181 "MSI without TAGGED_STATUS? Not using MSI\n");
11185 if (tg3_flag(tp
, SUPPORT_MSIX
) && tg3_enable_msix(tp
))
11186 tg3_flag_set(tp
, USING_MSIX
);
11187 else if (tg3_flag(tp
, SUPPORT_MSI
) && pci_enable_msi(tp
->pdev
) == 0)
11188 tg3_flag_set(tp
, USING_MSI
);
11190 if (tg3_flag(tp
, USING_MSI
) || tg3_flag(tp
, USING_MSIX
)) {
11191 u32 msi_mode
= tr32(MSGINT_MODE
);
11192 if (tg3_flag(tp
, USING_MSIX
) && tp
->irq_cnt
> 1)
11193 msi_mode
|= MSGINT_MODE_MULTIVEC_EN
;
11194 if (!tg3_flag(tp
, 1SHOT_MSI
))
11195 msi_mode
|= MSGINT_MODE_ONE_SHOT_DISABLE
;
11196 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
11199 if (!tg3_flag(tp
, USING_MSIX
)) {
11201 tp
->napi
[0].irq_vec
= tp
->pdev
->irq
;
11204 if (tp
->irq_cnt
== 1) {
11207 netif_set_real_num_tx_queues(tp
->dev
, 1);
11208 netif_set_real_num_rx_queues(tp
->dev
, 1);
11212 static void tg3_ints_fini(struct tg3
*tp
)
11214 if (tg3_flag(tp
, USING_MSIX
))
11215 pci_disable_msix(tp
->pdev
);
11216 else if (tg3_flag(tp
, USING_MSI
))
11217 pci_disable_msi(tp
->pdev
);
11218 tg3_flag_clear(tp
, USING_MSI
);
11219 tg3_flag_clear(tp
, USING_MSIX
);
11220 tg3_flag_clear(tp
, ENABLE_RSS
);
11221 tg3_flag_clear(tp
, ENABLE_TSS
);
11224 static int tg3_start(struct tg3
*tp
, bool reset_phy
, bool test_irq
,
11227 struct net_device
*dev
= tp
->dev
;
11231 * Setup interrupts first so we know how
11232 * many NAPI resources to allocate
11236 tg3_rss_check_indir_tbl(tp
);
11238 /* The placement of this call is tied
11239 * to the setup and use of Host TX descriptors.
11241 err
= tg3_alloc_consistent(tp
);
11247 tg3_napi_enable(tp
);
11249 for (i
= 0; i
< tp
->irq_cnt
; i
++) {
11250 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
11251 err
= tg3_request_irq(tp
, i
);
11253 for (i
--; i
>= 0; i
--) {
11254 tnapi
= &tp
->napi
[i
];
11255 free_irq(tnapi
->irq_vec
, tnapi
);
11261 tg3_full_lock(tp
, 0);
11263 err
= tg3_init_hw(tp
, reset_phy
);
11265 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11266 tg3_free_rings(tp
);
11269 tg3_full_unlock(tp
);
11274 if (test_irq
&& tg3_flag(tp
, USING_MSI
)) {
11275 err
= tg3_test_msi(tp
);
11278 tg3_full_lock(tp
, 0);
11279 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11280 tg3_free_rings(tp
);
11281 tg3_full_unlock(tp
);
11286 if (!tg3_flag(tp
, 57765_PLUS
) && tg3_flag(tp
, USING_MSI
)) {
11287 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
11289 tw32(PCIE_TRANSACTION_CFG
,
11290 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
11296 tg3_hwmon_open(tp
);
11298 tg3_full_lock(tp
, 0);
11300 tg3_timer_start(tp
);
11301 tg3_flag_set(tp
, INIT_COMPLETE
);
11302 tg3_enable_ints(tp
);
11307 tg3_ptp_resume(tp
);
11310 tg3_full_unlock(tp
);
11312 netif_tx_start_all_queues(dev
);
11315 * Reset loopback feature if it was turned on while the device was down
11316 * make sure that it's installed properly now.
11318 if (dev
->features
& NETIF_F_LOOPBACK
)
11319 tg3_set_loopback(dev
, dev
->features
);
11324 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
11325 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
11326 free_irq(tnapi
->irq_vec
, tnapi
);
11330 tg3_napi_disable(tp
);
11332 tg3_free_consistent(tp
);
11340 static void tg3_stop(struct tg3
*tp
)
11344 tg3_reset_task_cancel(tp
);
11345 tg3_netif_stop(tp
);
11347 tg3_timer_stop(tp
);
11349 tg3_hwmon_close(tp
);
11353 tg3_full_lock(tp
, 1);
11355 tg3_disable_ints(tp
);
11357 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11358 tg3_free_rings(tp
);
11359 tg3_flag_clear(tp
, INIT_COMPLETE
);
11361 tg3_full_unlock(tp
);
11363 for (i
= tp
->irq_cnt
- 1; i
>= 0; i
--) {
11364 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
11365 free_irq(tnapi
->irq_vec
, tnapi
);
11372 tg3_free_consistent(tp
);
11375 static int tg3_open(struct net_device
*dev
)
11377 struct tg3
*tp
= netdev_priv(dev
);
11380 if (tp
->fw_needed
) {
11381 err
= tg3_request_firmware(tp
);
11382 if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
11384 netdev_warn(tp
->dev
, "EEE capability disabled\n");
11385 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
11386 } else if (!(tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
)) {
11387 netdev_warn(tp
->dev
, "EEE capability restored\n");
11388 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
11390 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
) {
11394 netdev_warn(tp
->dev
, "TSO capability disabled\n");
11395 tg3_flag_clear(tp
, TSO_CAPABLE
);
11396 } else if (!tg3_flag(tp
, TSO_CAPABLE
)) {
11397 netdev_notice(tp
->dev
, "TSO capability restored\n");
11398 tg3_flag_set(tp
, TSO_CAPABLE
);
11402 tg3_carrier_off(tp
);
11404 err
= tg3_power_up(tp
);
11408 tg3_full_lock(tp
, 0);
11410 tg3_disable_ints(tp
);
11411 tg3_flag_clear(tp
, INIT_COMPLETE
);
11413 tg3_full_unlock(tp
);
11415 err
= tg3_start(tp
,
11416 !(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
),
11419 tg3_frob_aux_power(tp
, false);
11420 pci_set_power_state(tp
->pdev
, PCI_D3hot
);
11423 if (tg3_flag(tp
, PTP_CAPABLE
)) {
11424 tp
->ptp_clock
= ptp_clock_register(&tp
->ptp_info
,
11426 if (IS_ERR(tp
->ptp_clock
))
11427 tp
->ptp_clock
= NULL
;
11433 static int tg3_close(struct net_device
*dev
)
11435 struct tg3
*tp
= netdev_priv(dev
);
11441 /* Clear stats across close / open calls */
11442 memset(&tp
->net_stats_prev
, 0, sizeof(tp
->net_stats_prev
));
11443 memset(&tp
->estats_prev
, 0, sizeof(tp
->estats_prev
));
11445 tg3_power_down(tp
);
11447 tg3_carrier_off(tp
);
11452 static inline u64
get_stat64(tg3_stat64_t
*val
)
11454 return ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
11457 static u64
tg3_calc_crc_errors(struct tg3
*tp
)
11459 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11461 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
11462 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
11463 tg3_asic_rev(tp
) == ASIC_REV_5701
)) {
11466 if (!tg3_readphy(tp
, MII_TG3_TEST1
, &val
)) {
11467 tg3_writephy(tp
, MII_TG3_TEST1
,
11468 val
| MII_TG3_TEST1_CRC_EN
);
11469 tg3_readphy(tp
, MII_TG3_RXR_COUNTERS
, &val
);
11473 tp
->phy_crc_errors
+= val
;
11475 return tp
->phy_crc_errors
;
11478 return get_stat64(&hw_stats
->rx_fcs_errors
);
11481 #define ESTAT_ADD(member) \
11482 estats->member = old_estats->member + \
11483 get_stat64(&hw_stats->member)
11485 static void tg3_get_estats(struct tg3
*tp
, struct tg3_ethtool_stats
*estats
)
11487 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
11488 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11490 ESTAT_ADD(rx_octets
);
11491 ESTAT_ADD(rx_fragments
);
11492 ESTAT_ADD(rx_ucast_packets
);
11493 ESTAT_ADD(rx_mcast_packets
);
11494 ESTAT_ADD(rx_bcast_packets
);
11495 ESTAT_ADD(rx_fcs_errors
);
11496 ESTAT_ADD(rx_align_errors
);
11497 ESTAT_ADD(rx_xon_pause_rcvd
);
11498 ESTAT_ADD(rx_xoff_pause_rcvd
);
11499 ESTAT_ADD(rx_mac_ctrl_rcvd
);
11500 ESTAT_ADD(rx_xoff_entered
);
11501 ESTAT_ADD(rx_frame_too_long_errors
);
11502 ESTAT_ADD(rx_jabbers
);
11503 ESTAT_ADD(rx_undersize_packets
);
11504 ESTAT_ADD(rx_in_length_errors
);
11505 ESTAT_ADD(rx_out_length_errors
);
11506 ESTAT_ADD(rx_64_or_less_octet_packets
);
11507 ESTAT_ADD(rx_65_to_127_octet_packets
);
11508 ESTAT_ADD(rx_128_to_255_octet_packets
);
11509 ESTAT_ADD(rx_256_to_511_octet_packets
);
11510 ESTAT_ADD(rx_512_to_1023_octet_packets
);
11511 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
11512 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
11513 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
11514 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
11515 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
11517 ESTAT_ADD(tx_octets
);
11518 ESTAT_ADD(tx_collisions
);
11519 ESTAT_ADD(tx_xon_sent
);
11520 ESTAT_ADD(tx_xoff_sent
);
11521 ESTAT_ADD(tx_flow_control
);
11522 ESTAT_ADD(tx_mac_errors
);
11523 ESTAT_ADD(tx_single_collisions
);
11524 ESTAT_ADD(tx_mult_collisions
);
11525 ESTAT_ADD(tx_deferred
);
11526 ESTAT_ADD(tx_excessive_collisions
);
11527 ESTAT_ADD(tx_late_collisions
);
11528 ESTAT_ADD(tx_collide_2times
);
11529 ESTAT_ADD(tx_collide_3times
);
11530 ESTAT_ADD(tx_collide_4times
);
11531 ESTAT_ADD(tx_collide_5times
);
11532 ESTAT_ADD(tx_collide_6times
);
11533 ESTAT_ADD(tx_collide_7times
);
11534 ESTAT_ADD(tx_collide_8times
);
11535 ESTAT_ADD(tx_collide_9times
);
11536 ESTAT_ADD(tx_collide_10times
);
11537 ESTAT_ADD(tx_collide_11times
);
11538 ESTAT_ADD(tx_collide_12times
);
11539 ESTAT_ADD(tx_collide_13times
);
11540 ESTAT_ADD(tx_collide_14times
);
11541 ESTAT_ADD(tx_collide_15times
);
11542 ESTAT_ADD(tx_ucast_packets
);
11543 ESTAT_ADD(tx_mcast_packets
);
11544 ESTAT_ADD(tx_bcast_packets
);
11545 ESTAT_ADD(tx_carrier_sense_errors
);
11546 ESTAT_ADD(tx_discards
);
11547 ESTAT_ADD(tx_errors
);
11549 ESTAT_ADD(dma_writeq_full
);
11550 ESTAT_ADD(dma_write_prioq_full
);
11551 ESTAT_ADD(rxbds_empty
);
11552 ESTAT_ADD(rx_discards
);
11553 ESTAT_ADD(rx_errors
);
11554 ESTAT_ADD(rx_threshold_hit
);
11556 ESTAT_ADD(dma_readq_full
);
11557 ESTAT_ADD(dma_read_prioq_full
);
11558 ESTAT_ADD(tx_comp_queue_full
);
11560 ESTAT_ADD(ring_set_send_prod_index
);
11561 ESTAT_ADD(ring_status_update
);
11562 ESTAT_ADD(nic_irqs
);
11563 ESTAT_ADD(nic_avoided_irqs
);
11564 ESTAT_ADD(nic_tx_threshold_hit
);
11566 ESTAT_ADD(mbuf_lwm_thresh_hit
);
11569 static void tg3_get_nstats(struct tg3
*tp
, struct rtnl_link_stats64
*stats
)
11571 struct rtnl_link_stats64
*old_stats
= &tp
->net_stats_prev
;
11572 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
11574 stats
->rx_packets
= old_stats
->rx_packets
+
11575 get_stat64(&hw_stats
->rx_ucast_packets
) +
11576 get_stat64(&hw_stats
->rx_mcast_packets
) +
11577 get_stat64(&hw_stats
->rx_bcast_packets
);
11579 stats
->tx_packets
= old_stats
->tx_packets
+
11580 get_stat64(&hw_stats
->tx_ucast_packets
) +
11581 get_stat64(&hw_stats
->tx_mcast_packets
) +
11582 get_stat64(&hw_stats
->tx_bcast_packets
);
11584 stats
->rx_bytes
= old_stats
->rx_bytes
+
11585 get_stat64(&hw_stats
->rx_octets
);
11586 stats
->tx_bytes
= old_stats
->tx_bytes
+
11587 get_stat64(&hw_stats
->tx_octets
);
11589 stats
->rx_errors
= old_stats
->rx_errors
+
11590 get_stat64(&hw_stats
->rx_errors
);
11591 stats
->tx_errors
= old_stats
->tx_errors
+
11592 get_stat64(&hw_stats
->tx_errors
) +
11593 get_stat64(&hw_stats
->tx_mac_errors
) +
11594 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
11595 get_stat64(&hw_stats
->tx_discards
);
11597 stats
->multicast
= old_stats
->multicast
+
11598 get_stat64(&hw_stats
->rx_mcast_packets
);
11599 stats
->collisions
= old_stats
->collisions
+
11600 get_stat64(&hw_stats
->tx_collisions
);
11602 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
11603 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
11604 get_stat64(&hw_stats
->rx_undersize_packets
);
11606 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
11607 get_stat64(&hw_stats
->rxbds_empty
);
11608 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
11609 get_stat64(&hw_stats
->rx_align_errors
);
11610 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
11611 get_stat64(&hw_stats
->tx_discards
);
11612 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
11613 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
11615 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
11616 tg3_calc_crc_errors(tp
);
11618 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
11619 get_stat64(&hw_stats
->rx_discards
);
11621 stats
->rx_dropped
= tp
->rx_dropped
;
11622 stats
->tx_dropped
= tp
->tx_dropped
;
11625 static int tg3_get_regs_len(struct net_device
*dev
)
11627 return TG3_REG_BLK_SIZE
;
11630 static void tg3_get_regs(struct net_device
*dev
,
11631 struct ethtool_regs
*regs
, void *_p
)
11633 struct tg3
*tp
= netdev_priv(dev
);
11637 memset(_p
, 0, TG3_REG_BLK_SIZE
);
11639 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11642 tg3_full_lock(tp
, 0);
11644 tg3_dump_legacy_regs(tp
, (u32
*)_p
);
11646 tg3_full_unlock(tp
);
11649 static int tg3_get_eeprom_len(struct net_device
*dev
)
11651 struct tg3
*tp
= netdev_priv(dev
);
11653 return tp
->nvram_size
;
11656 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11658 struct tg3
*tp
= netdev_priv(dev
);
11661 u32 i
, offset
, len
, b_offset
, b_count
;
11664 if (tg3_flag(tp
, NO_NVRAM
))
11667 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11670 offset
= eeprom
->offset
;
11674 eeprom
->magic
= TG3_EEPROM_MAGIC
;
11677 /* adjustments to start on required 4 byte boundary */
11678 b_offset
= offset
& 3;
11679 b_count
= 4 - b_offset
;
11680 if (b_count
> len
) {
11681 /* i.e. offset=1 len=2 */
11684 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &val
);
11687 memcpy(data
, ((char *)&val
) + b_offset
, b_count
);
11690 eeprom
->len
+= b_count
;
11693 /* read bytes up to the last 4 byte boundary */
11694 pd
= &data
[eeprom
->len
];
11695 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
11696 ret
= tg3_nvram_read_be32(tp
, offset
+ i
, &val
);
11701 memcpy(pd
+ i
, &val
, 4);
11706 /* read last bytes not ending on 4 byte boundary */
11707 pd
= &data
[eeprom
->len
];
11709 b_offset
= offset
+ len
- b_count
;
11710 ret
= tg3_nvram_read_be32(tp
, b_offset
, &val
);
11713 memcpy(pd
, &val
, b_count
);
11714 eeprom
->len
+= b_count
;
11719 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
11721 struct tg3
*tp
= netdev_priv(dev
);
11723 u32 offset
, len
, b_offset
, odd_len
;
11727 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
11730 if (tg3_flag(tp
, NO_NVRAM
) ||
11731 eeprom
->magic
!= TG3_EEPROM_MAGIC
)
11734 offset
= eeprom
->offset
;
11737 if ((b_offset
= (offset
& 3))) {
11738 /* adjustments to start on required 4 byte boundary */
11739 ret
= tg3_nvram_read_be32(tp
, offset
-b_offset
, &start
);
11750 /* adjustments to end on required 4 byte boundary */
11752 len
= (len
+ 3) & ~3;
11753 ret
= tg3_nvram_read_be32(tp
, offset
+len
-4, &end
);
11759 if (b_offset
|| odd_len
) {
11760 buf
= kmalloc(len
, GFP_KERNEL
);
11764 memcpy(buf
, &start
, 4);
11766 memcpy(buf
+len
-4, &end
, 4);
11767 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
11770 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
11778 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11780 struct tg3
*tp
= netdev_priv(dev
);
11782 if (tg3_flag(tp
, USE_PHYLIB
)) {
11783 struct phy_device
*phydev
;
11784 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11786 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11787 return phy_ethtool_gset(phydev
, cmd
);
11790 cmd
->supported
= (SUPPORTED_Autoneg
);
11792 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11793 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
11794 SUPPORTED_1000baseT_Full
);
11796 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11797 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
11798 SUPPORTED_100baseT_Full
|
11799 SUPPORTED_10baseT_Half
|
11800 SUPPORTED_10baseT_Full
|
11802 cmd
->port
= PORT_TP
;
11804 cmd
->supported
|= SUPPORTED_FIBRE
;
11805 cmd
->port
= PORT_FIBRE
;
11808 cmd
->advertising
= tp
->link_config
.advertising
;
11809 if (tg3_flag(tp
, PAUSE_AUTONEG
)) {
11810 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
) {
11811 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11812 cmd
->advertising
|= ADVERTISED_Pause
;
11814 cmd
->advertising
|= ADVERTISED_Pause
|
11815 ADVERTISED_Asym_Pause
;
11817 } else if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
) {
11818 cmd
->advertising
|= ADVERTISED_Asym_Pause
;
11821 if (netif_running(dev
) && tp
->link_up
) {
11822 ethtool_cmd_speed_set(cmd
, tp
->link_config
.active_speed
);
11823 cmd
->duplex
= tp
->link_config
.active_duplex
;
11824 cmd
->lp_advertising
= tp
->link_config
.rmt_adv
;
11825 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)) {
11826 if (tp
->phy_flags
& TG3_PHYFLG_MDIX_STATE
)
11827 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
11829 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
11832 ethtool_cmd_speed_set(cmd
, SPEED_UNKNOWN
);
11833 cmd
->duplex
= DUPLEX_UNKNOWN
;
11834 cmd
->eth_tp_mdix
= ETH_TP_MDI_INVALID
;
11836 cmd
->phy_address
= tp
->phy_addr
;
11837 cmd
->transceiver
= XCVR_INTERNAL
;
11838 cmd
->autoneg
= tp
->link_config
.autoneg
;
11844 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
11846 struct tg3
*tp
= netdev_priv(dev
);
11847 u32 speed
= ethtool_cmd_speed(cmd
);
11849 if (tg3_flag(tp
, USE_PHYLIB
)) {
11850 struct phy_device
*phydev
;
11851 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
11853 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
11854 return phy_ethtool_sset(phydev
, cmd
);
11857 if (cmd
->autoneg
!= AUTONEG_ENABLE
&&
11858 cmd
->autoneg
!= AUTONEG_DISABLE
)
11861 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
11862 cmd
->duplex
!= DUPLEX_FULL
&&
11863 cmd
->duplex
!= DUPLEX_HALF
)
11866 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11867 u32 mask
= ADVERTISED_Autoneg
|
11869 ADVERTISED_Asym_Pause
;
11871 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
11872 mask
|= ADVERTISED_1000baseT_Half
|
11873 ADVERTISED_1000baseT_Full
;
11875 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
11876 mask
|= ADVERTISED_100baseT_Half
|
11877 ADVERTISED_100baseT_Full
|
11878 ADVERTISED_10baseT_Half
|
11879 ADVERTISED_10baseT_Full
|
11882 mask
|= ADVERTISED_FIBRE
;
11884 if (cmd
->advertising
& ~mask
)
11887 mask
&= (ADVERTISED_1000baseT_Half
|
11888 ADVERTISED_1000baseT_Full
|
11889 ADVERTISED_100baseT_Half
|
11890 ADVERTISED_100baseT_Full
|
11891 ADVERTISED_10baseT_Half
|
11892 ADVERTISED_10baseT_Full
);
11894 cmd
->advertising
&= mask
;
11896 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) {
11897 if (speed
!= SPEED_1000
)
11900 if (cmd
->duplex
!= DUPLEX_FULL
)
11903 if (speed
!= SPEED_100
&&
11909 tg3_full_lock(tp
, 0);
11911 tp
->link_config
.autoneg
= cmd
->autoneg
;
11912 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
11913 tp
->link_config
.advertising
= (cmd
->advertising
|
11914 ADVERTISED_Autoneg
);
11915 tp
->link_config
.speed
= SPEED_UNKNOWN
;
11916 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
11918 tp
->link_config
.advertising
= 0;
11919 tp
->link_config
.speed
= speed
;
11920 tp
->link_config
.duplex
= cmd
->duplex
;
11923 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
11925 tg3_warn_mgmt_link_flap(tp
);
11927 if (netif_running(dev
))
11928 tg3_setup_phy(tp
, true);
11930 tg3_full_unlock(tp
);
11935 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
11937 struct tg3
*tp
= netdev_priv(dev
);
11939 strlcpy(info
->driver
, DRV_MODULE_NAME
, sizeof(info
->driver
));
11940 strlcpy(info
->version
, DRV_MODULE_VERSION
, sizeof(info
->version
));
11941 strlcpy(info
->fw_version
, tp
->fw_ver
, sizeof(info
->fw_version
));
11942 strlcpy(info
->bus_info
, pci_name(tp
->pdev
), sizeof(info
->bus_info
));
11945 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
11947 struct tg3
*tp
= netdev_priv(dev
);
11949 if (tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(&tp
->pdev
->dev
))
11950 wol
->supported
= WAKE_MAGIC
;
11952 wol
->supported
= 0;
11954 if (tg3_flag(tp
, WOL_ENABLE
) && device_can_wakeup(&tp
->pdev
->dev
))
11955 wol
->wolopts
= WAKE_MAGIC
;
11956 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
11959 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
11961 struct tg3
*tp
= netdev_priv(dev
);
11962 struct device
*dp
= &tp
->pdev
->dev
;
11964 if (wol
->wolopts
& ~WAKE_MAGIC
)
11966 if ((wol
->wolopts
& WAKE_MAGIC
) &&
11967 !(tg3_flag(tp
, WOL_CAP
) && device_can_wakeup(dp
)))
11970 device_set_wakeup_enable(dp
, wol
->wolopts
& WAKE_MAGIC
);
11972 spin_lock_bh(&tp
->lock
);
11973 if (device_may_wakeup(dp
))
11974 tg3_flag_set(tp
, WOL_ENABLE
);
11976 tg3_flag_clear(tp
, WOL_ENABLE
);
11977 spin_unlock_bh(&tp
->lock
);
11982 static u32
tg3_get_msglevel(struct net_device
*dev
)
11984 struct tg3
*tp
= netdev_priv(dev
);
11985 return tp
->msg_enable
;
11988 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
11990 struct tg3
*tp
= netdev_priv(dev
);
11991 tp
->msg_enable
= value
;
11994 static int tg3_nway_reset(struct net_device
*dev
)
11996 struct tg3
*tp
= netdev_priv(dev
);
11999 if (!netif_running(dev
))
12002 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
12005 tg3_warn_mgmt_link_flap(tp
);
12007 if (tg3_flag(tp
, USE_PHYLIB
)) {
12008 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
12010 r
= phy_start_aneg(tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
]);
12014 spin_lock_bh(&tp
->lock
);
12016 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
12017 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
12018 ((bmcr
& BMCR_ANENABLE
) ||
12019 (tp
->phy_flags
& TG3_PHYFLG_PARALLEL_DETECT
))) {
12020 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
12024 spin_unlock_bh(&tp
->lock
);
12030 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12032 struct tg3
*tp
= netdev_priv(dev
);
12034 ering
->rx_max_pending
= tp
->rx_std_ring_mask
;
12035 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12036 ering
->rx_jumbo_max_pending
= tp
->rx_jmb_ring_mask
;
12038 ering
->rx_jumbo_max_pending
= 0;
12040 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
12042 ering
->rx_pending
= tp
->rx_pending
;
12043 if (tg3_flag(tp
, JUMBO_RING_ENABLE
))
12044 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
12046 ering
->rx_jumbo_pending
= 0;
12048 ering
->tx_pending
= tp
->napi
[0].tx_pending
;
12051 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
12053 struct tg3
*tp
= netdev_priv(dev
);
12054 int i
, irq_sync
= 0, err
= 0;
12056 if ((ering
->rx_pending
> tp
->rx_std_ring_mask
) ||
12057 (ering
->rx_jumbo_pending
> tp
->rx_jmb_ring_mask
) ||
12058 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1) ||
12059 (ering
->tx_pending
<= MAX_SKB_FRAGS
) ||
12060 (tg3_flag(tp
, TSO_BUG
) &&
12061 (ering
->tx_pending
<= (MAX_SKB_FRAGS
* 3))))
12064 if (netif_running(dev
)) {
12066 tg3_netif_stop(tp
);
12070 tg3_full_lock(tp
, irq_sync
);
12072 tp
->rx_pending
= ering
->rx_pending
;
12074 if (tg3_flag(tp
, MAX_RXPEND_64
) &&
12075 tp
->rx_pending
> 63)
12076 tp
->rx_pending
= 63;
12077 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
12079 for (i
= 0; i
< tp
->irq_max
; i
++)
12080 tp
->napi
[i
].tx_pending
= ering
->tx_pending
;
12082 if (netif_running(dev
)) {
12083 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12084 err
= tg3_restart_hw(tp
, false);
12086 tg3_netif_start(tp
);
12089 tg3_full_unlock(tp
);
12091 if (irq_sync
&& !err
)
12097 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12099 struct tg3
*tp
= netdev_priv(dev
);
12101 epause
->autoneg
= !!tg3_flag(tp
, PAUSE_AUTONEG
);
12103 if (tp
->link_config
.flowctrl
& FLOW_CTRL_RX
)
12104 epause
->rx_pause
= 1;
12106 epause
->rx_pause
= 0;
12108 if (tp
->link_config
.flowctrl
& FLOW_CTRL_TX
)
12109 epause
->tx_pause
= 1;
12111 epause
->tx_pause
= 0;
12114 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
12116 struct tg3
*tp
= netdev_priv(dev
);
12119 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)
12120 tg3_warn_mgmt_link_flap(tp
);
12122 if (tg3_flag(tp
, USE_PHYLIB
)) {
12124 struct phy_device
*phydev
;
12126 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
12128 if (!(phydev
->supported
& SUPPORTED_Pause
) ||
12129 (!(phydev
->supported
& SUPPORTED_Asym_Pause
) &&
12130 (epause
->rx_pause
!= epause
->tx_pause
)))
12133 tp
->link_config
.flowctrl
= 0;
12134 if (epause
->rx_pause
) {
12135 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12137 if (epause
->tx_pause
) {
12138 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12139 newadv
= ADVERTISED_Pause
;
12141 newadv
= ADVERTISED_Pause
|
12142 ADVERTISED_Asym_Pause
;
12143 } else if (epause
->tx_pause
) {
12144 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12145 newadv
= ADVERTISED_Asym_Pause
;
12149 if (epause
->autoneg
)
12150 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12152 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12154 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
12155 u32 oldadv
= phydev
->advertising
&
12156 (ADVERTISED_Pause
| ADVERTISED_Asym_Pause
);
12157 if (oldadv
!= newadv
) {
12158 phydev
->advertising
&=
12159 ~(ADVERTISED_Pause
|
12160 ADVERTISED_Asym_Pause
);
12161 phydev
->advertising
|= newadv
;
12162 if (phydev
->autoneg
) {
12164 * Always renegotiate the link to
12165 * inform our link partner of our
12166 * flow control settings, even if the
12167 * flow control is forced. Let
12168 * tg3_adjust_link() do the final
12169 * flow control setup.
12171 return phy_start_aneg(phydev
);
12175 if (!epause
->autoneg
)
12176 tg3_setup_flow_control(tp
, 0, 0);
12178 tp
->link_config
.advertising
&=
12179 ~(ADVERTISED_Pause
|
12180 ADVERTISED_Asym_Pause
);
12181 tp
->link_config
.advertising
|= newadv
;
12186 if (netif_running(dev
)) {
12187 tg3_netif_stop(tp
);
12191 tg3_full_lock(tp
, irq_sync
);
12193 if (epause
->autoneg
)
12194 tg3_flag_set(tp
, PAUSE_AUTONEG
);
12196 tg3_flag_clear(tp
, PAUSE_AUTONEG
);
12197 if (epause
->rx_pause
)
12198 tp
->link_config
.flowctrl
|= FLOW_CTRL_RX
;
12200 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_RX
;
12201 if (epause
->tx_pause
)
12202 tp
->link_config
.flowctrl
|= FLOW_CTRL_TX
;
12204 tp
->link_config
.flowctrl
&= ~FLOW_CTRL_TX
;
12206 if (netif_running(dev
)) {
12207 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
12208 err
= tg3_restart_hw(tp
, false);
12210 tg3_netif_start(tp
);
12213 tg3_full_unlock(tp
);
12216 tp
->phy_flags
|= TG3_PHYFLG_USER_CONFIGURED
;
12221 static int tg3_get_sset_count(struct net_device
*dev
, int sset
)
12225 return TG3_NUM_TEST
;
12227 return TG3_NUM_STATS
;
12229 return -EOPNOTSUPP
;
12233 static int tg3_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
12234 u32
*rules __always_unused
)
12236 struct tg3
*tp
= netdev_priv(dev
);
12238 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12239 return -EOPNOTSUPP
;
12241 switch (info
->cmd
) {
12242 case ETHTOOL_GRXRINGS
:
12243 if (netif_running(tp
->dev
))
12244 info
->data
= tp
->rxq_cnt
;
12246 info
->data
= num_online_cpus();
12247 if (info
->data
> TG3_RSS_MAX_NUM_QS
)
12248 info
->data
= TG3_RSS_MAX_NUM_QS
;
12251 /* The first interrupt vector only
12252 * handles link interrupts.
12258 return -EOPNOTSUPP
;
12262 static u32
tg3_get_rxfh_indir_size(struct net_device
*dev
)
12265 struct tg3
*tp
= netdev_priv(dev
);
12267 if (tg3_flag(tp
, SUPPORT_MSIX
))
12268 size
= TG3_RSS_INDIR_TBL_SIZE
;
12273 static int tg3_get_rxfh_indir(struct net_device
*dev
, u32
*indir
)
12275 struct tg3
*tp
= netdev_priv(dev
);
12278 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12279 indir
[i
] = tp
->rss_ind_tbl
[i
];
12284 static int tg3_set_rxfh_indir(struct net_device
*dev
, const u32
*indir
)
12286 struct tg3
*tp
= netdev_priv(dev
);
12289 for (i
= 0; i
< TG3_RSS_INDIR_TBL_SIZE
; i
++)
12290 tp
->rss_ind_tbl
[i
] = indir
[i
];
12292 if (!netif_running(dev
) || !tg3_flag(tp
, ENABLE_RSS
))
12295 /* It is legal to write the indirection
12296 * table while the device is running.
12298 tg3_full_lock(tp
, 0);
12299 tg3_rss_write_indir_tbl(tp
);
12300 tg3_full_unlock(tp
);
12305 static void tg3_get_channels(struct net_device
*dev
,
12306 struct ethtool_channels
*channel
)
12308 struct tg3
*tp
= netdev_priv(dev
);
12309 u32 deflt_qs
= netif_get_num_default_rss_queues();
12311 channel
->max_rx
= tp
->rxq_max
;
12312 channel
->max_tx
= tp
->txq_max
;
12314 if (netif_running(dev
)) {
12315 channel
->rx_count
= tp
->rxq_cnt
;
12316 channel
->tx_count
= tp
->txq_cnt
;
12319 channel
->rx_count
= tp
->rxq_req
;
12321 channel
->rx_count
= min(deflt_qs
, tp
->rxq_max
);
12324 channel
->tx_count
= tp
->txq_req
;
12326 channel
->tx_count
= min(deflt_qs
, tp
->txq_max
);
12330 static int tg3_set_channels(struct net_device
*dev
,
12331 struct ethtool_channels
*channel
)
12333 struct tg3
*tp
= netdev_priv(dev
);
12335 if (!tg3_flag(tp
, SUPPORT_MSIX
))
12336 return -EOPNOTSUPP
;
12338 if (channel
->rx_count
> tp
->rxq_max
||
12339 channel
->tx_count
> tp
->txq_max
)
12342 tp
->rxq_req
= channel
->rx_count
;
12343 tp
->txq_req
= channel
->tx_count
;
12345 if (!netif_running(dev
))
12350 tg3_carrier_off(tp
);
12352 tg3_start(tp
, true, false, false);
12357 static void tg3_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
12359 switch (stringset
) {
12361 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
12364 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
12367 WARN_ON(1); /* we need a WARN() */
12372 static int tg3_set_phys_id(struct net_device
*dev
,
12373 enum ethtool_phys_id_state state
)
12375 struct tg3
*tp
= netdev_priv(dev
);
12377 if (!netif_running(tp
->dev
))
12381 case ETHTOOL_ID_ACTIVE
:
12382 return 1; /* cycle on/off once per second */
12384 case ETHTOOL_ID_ON
:
12385 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12386 LED_CTRL_1000MBPS_ON
|
12387 LED_CTRL_100MBPS_ON
|
12388 LED_CTRL_10MBPS_ON
|
12389 LED_CTRL_TRAFFIC_OVERRIDE
|
12390 LED_CTRL_TRAFFIC_BLINK
|
12391 LED_CTRL_TRAFFIC_LED
);
12394 case ETHTOOL_ID_OFF
:
12395 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
12396 LED_CTRL_TRAFFIC_OVERRIDE
);
12399 case ETHTOOL_ID_INACTIVE
:
12400 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
12407 static void tg3_get_ethtool_stats(struct net_device
*dev
,
12408 struct ethtool_stats
*estats
, u64
*tmp_stats
)
12410 struct tg3
*tp
= netdev_priv(dev
);
12413 tg3_get_estats(tp
, (struct tg3_ethtool_stats
*)tmp_stats
);
12415 memset(tmp_stats
, 0, sizeof(struct tg3_ethtool_stats
));
12418 static __be32
*tg3_vpd_readblock(struct tg3
*tp
, u32
*vpdlen
)
12422 u32 offset
= 0, len
= 0;
12425 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &magic
))
12428 if (magic
== TG3_EEPROM_MAGIC
) {
12429 for (offset
= TG3_NVM_DIR_START
;
12430 offset
< TG3_NVM_DIR_END
;
12431 offset
+= TG3_NVM_DIRENT_SIZE
) {
12432 if (tg3_nvram_read(tp
, offset
, &val
))
12435 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) ==
12436 TG3_NVM_DIRTYPE_EXTVPD
)
12440 if (offset
!= TG3_NVM_DIR_END
) {
12441 len
= (val
& TG3_NVM_DIRTYPE_LENMSK
) * 4;
12442 if (tg3_nvram_read(tp
, offset
+ 4, &offset
))
12445 offset
= tg3_nvram_logical_addr(tp
, offset
);
12449 if (!offset
|| !len
) {
12450 offset
= TG3_NVM_VPD_OFF
;
12451 len
= TG3_NVM_VPD_LEN
;
12454 buf
= kmalloc(len
, GFP_KERNEL
);
12458 if (magic
== TG3_EEPROM_MAGIC
) {
12459 for (i
= 0; i
< len
; i
+= 4) {
12460 /* The data is in little-endian format in NVRAM.
12461 * Use the big-endian read routines to preserve
12462 * the byte order as it exists in NVRAM.
12464 if (tg3_nvram_read_be32(tp
, offset
+ i
, &buf
[i
/4]))
12470 unsigned int pos
= 0;
12472 ptr
= (u8
*)&buf
[0];
12473 for (i
= 0; pos
< len
&& i
< 3; i
++, pos
+= cnt
, ptr
+= cnt
) {
12474 cnt
= pci_read_vpd(tp
->pdev
, pos
,
12476 if (cnt
== -ETIMEDOUT
|| cnt
== -EINTR
)
12494 #define NVRAM_TEST_SIZE 0x100
12495 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12496 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12497 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12498 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12499 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12500 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12501 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12502 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12504 static int tg3_test_nvram(struct tg3
*tp
)
12506 u32 csum
, magic
, len
;
12508 int i
, j
, k
, err
= 0, size
;
12510 if (tg3_flag(tp
, NO_NVRAM
))
12513 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
12516 if (magic
== TG3_EEPROM_MAGIC
)
12517 size
= NVRAM_TEST_SIZE
;
12518 else if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
) {
12519 if ((magic
& TG3_EEPROM_SB_FORMAT_MASK
) ==
12520 TG3_EEPROM_SB_FORMAT_1
) {
12521 switch (magic
& TG3_EEPROM_SB_REVISION_MASK
) {
12522 case TG3_EEPROM_SB_REVISION_0
:
12523 size
= NVRAM_SELFBOOT_FORMAT1_0_SIZE
;
12525 case TG3_EEPROM_SB_REVISION_2
:
12526 size
= NVRAM_SELFBOOT_FORMAT1_2_SIZE
;
12528 case TG3_EEPROM_SB_REVISION_3
:
12529 size
= NVRAM_SELFBOOT_FORMAT1_3_SIZE
;
12531 case TG3_EEPROM_SB_REVISION_4
:
12532 size
= NVRAM_SELFBOOT_FORMAT1_4_SIZE
;
12534 case TG3_EEPROM_SB_REVISION_5
:
12535 size
= NVRAM_SELFBOOT_FORMAT1_5_SIZE
;
12537 case TG3_EEPROM_SB_REVISION_6
:
12538 size
= NVRAM_SELFBOOT_FORMAT1_6_SIZE
;
12545 } else if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
12546 size
= NVRAM_SELFBOOT_HW_SIZE
;
12550 buf
= kmalloc(size
, GFP_KERNEL
);
12555 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
12556 err
= tg3_nvram_read_be32(tp
, i
, &buf
[j
]);
12563 /* Selfboot format */
12564 magic
= be32_to_cpu(buf
[0]);
12565 if ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) ==
12566 TG3_EEPROM_MAGIC_FW
) {
12567 u8
*buf8
= (u8
*) buf
, csum8
= 0;
12569 if ((magic
& TG3_EEPROM_SB_REVISION_MASK
) ==
12570 TG3_EEPROM_SB_REVISION_2
) {
12571 /* For rev 2, the csum doesn't include the MBA. */
12572 for (i
= 0; i
< TG3_EEPROM_SB_F1R2_MBA_OFF
; i
++)
12574 for (i
= TG3_EEPROM_SB_F1R2_MBA_OFF
+ 4; i
< size
; i
++)
12577 for (i
= 0; i
< size
; i
++)
12590 if ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) ==
12591 TG3_EEPROM_MAGIC_HW
) {
12592 u8 data
[NVRAM_SELFBOOT_DATA_SIZE
];
12593 u8 parity
[NVRAM_SELFBOOT_DATA_SIZE
];
12594 u8
*buf8
= (u8
*) buf
;
12596 /* Separate the parity bits and the data bytes. */
12597 for (i
= 0, j
= 0, k
= 0; i
< NVRAM_SELFBOOT_HW_SIZE
; i
++) {
12598 if ((i
== 0) || (i
== 8)) {
12602 for (l
= 0, msk
= 0x80; l
< 7; l
++, msk
>>= 1)
12603 parity
[k
++] = buf8
[i
] & msk
;
12605 } else if (i
== 16) {
12609 for (l
= 0, msk
= 0x20; l
< 6; l
++, msk
>>= 1)
12610 parity
[k
++] = buf8
[i
] & msk
;
12613 for (l
= 0, msk
= 0x80; l
< 8; l
++, msk
>>= 1)
12614 parity
[k
++] = buf8
[i
] & msk
;
12617 data
[j
++] = buf8
[i
];
12621 for (i
= 0; i
< NVRAM_SELFBOOT_DATA_SIZE
; i
++) {
12622 u8 hw8
= hweight8(data
[i
]);
12624 if ((hw8
& 0x1) && parity
[i
])
12626 else if (!(hw8
& 0x1) && !parity
[i
])
12635 /* Bootstrap checksum at offset 0x10 */
12636 csum
= calc_crc((unsigned char *) buf
, 0x10);
12637 if (csum
!= le32_to_cpu(buf
[0x10/4]))
12640 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12641 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
12642 if (csum
!= le32_to_cpu(buf
[0xfc/4]))
12647 buf
= tg3_vpd_readblock(tp
, &len
);
12651 i
= pci_vpd_find_tag((u8
*)buf
, 0, len
, PCI_VPD_LRDT_RO_DATA
);
12653 j
= pci_vpd_lrdt_size(&((u8
*)buf
)[i
]);
12657 if (i
+ PCI_VPD_LRDT_TAG_SIZE
+ j
> len
)
12660 i
+= PCI_VPD_LRDT_TAG_SIZE
;
12661 j
= pci_vpd_find_info_keyword((u8
*)buf
, i
, j
,
12662 PCI_VPD_RO_KEYWORD_CHKSUM
);
12666 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
12668 for (i
= 0; i
<= j
; i
++)
12669 csum8
+= ((u8
*)buf
)[i
];
12683 #define TG3_SERDES_TIMEOUT_SEC 2
12684 #define TG3_COPPER_TIMEOUT_SEC 6
12686 static int tg3_test_link(struct tg3
*tp
)
12690 if (!netif_running(tp
->dev
))
12693 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
12694 max
= TG3_SERDES_TIMEOUT_SEC
;
12696 max
= TG3_COPPER_TIMEOUT_SEC
;
12698 for (i
= 0; i
< max
; i
++) {
12702 if (msleep_interruptible(1000))
12709 /* Only test the commonly used registers */
12710 static int tg3_test_registers(struct tg3
*tp
)
12712 int i
, is_5705
, is_5750
;
12713 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
12717 #define TG3_FL_5705 0x1
12718 #define TG3_FL_NOT_5705 0x2
12719 #define TG3_FL_NOT_5788 0x4
12720 #define TG3_FL_NOT_5750 0x8
12724 /* MAC Control Registers */
12725 { MAC_MODE
, TG3_FL_NOT_5705
,
12726 0x00000000, 0x00ef6f8c },
12727 { MAC_MODE
, TG3_FL_5705
,
12728 0x00000000, 0x01ef6b8c },
12729 { MAC_STATUS
, TG3_FL_NOT_5705
,
12730 0x03800107, 0x00000000 },
12731 { MAC_STATUS
, TG3_FL_5705
,
12732 0x03800100, 0x00000000 },
12733 { MAC_ADDR_0_HIGH
, 0x0000,
12734 0x00000000, 0x0000ffff },
12735 { MAC_ADDR_0_LOW
, 0x0000,
12736 0x00000000, 0xffffffff },
12737 { MAC_RX_MTU_SIZE
, 0x0000,
12738 0x00000000, 0x0000ffff },
12739 { MAC_TX_MODE
, 0x0000,
12740 0x00000000, 0x00000070 },
12741 { MAC_TX_LENGTHS
, 0x0000,
12742 0x00000000, 0x00003fff },
12743 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
12744 0x00000000, 0x000007fc },
12745 { MAC_RX_MODE
, TG3_FL_5705
,
12746 0x00000000, 0x000007dc },
12747 { MAC_HASH_REG_0
, 0x0000,
12748 0x00000000, 0xffffffff },
12749 { MAC_HASH_REG_1
, 0x0000,
12750 0x00000000, 0xffffffff },
12751 { MAC_HASH_REG_2
, 0x0000,
12752 0x00000000, 0xffffffff },
12753 { MAC_HASH_REG_3
, 0x0000,
12754 0x00000000, 0xffffffff },
12756 /* Receive Data and Receive BD Initiator Control Registers. */
12757 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
12758 0x00000000, 0xffffffff },
12759 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
12760 0x00000000, 0xffffffff },
12761 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
12762 0x00000000, 0x00000003 },
12763 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
12764 0x00000000, 0xffffffff },
12765 { RCVDBDI_STD_BD
+0, 0x0000,
12766 0x00000000, 0xffffffff },
12767 { RCVDBDI_STD_BD
+4, 0x0000,
12768 0x00000000, 0xffffffff },
12769 { RCVDBDI_STD_BD
+8, 0x0000,
12770 0x00000000, 0xffff0002 },
12771 { RCVDBDI_STD_BD
+0xc, 0x0000,
12772 0x00000000, 0xffffffff },
12774 /* Receive BD Initiator Control Registers. */
12775 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
12776 0x00000000, 0xffffffff },
12777 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
12778 0x00000000, 0x000003ff },
12779 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
12780 0x00000000, 0xffffffff },
12782 /* Host Coalescing Control Registers. */
12783 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
12784 0x00000000, 0x00000004 },
12785 { HOSTCC_MODE
, TG3_FL_5705
,
12786 0x00000000, 0x000000f6 },
12787 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
12788 0x00000000, 0xffffffff },
12789 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
12790 0x00000000, 0x000003ff },
12791 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
12792 0x00000000, 0xffffffff },
12793 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
12794 0x00000000, 0x000003ff },
12795 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
12796 0x00000000, 0xffffffff },
12797 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12798 0x00000000, 0x000000ff },
12799 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
12800 0x00000000, 0xffffffff },
12801 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12802 0x00000000, 0x000000ff },
12803 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12804 0x00000000, 0xffffffff },
12805 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
12806 0x00000000, 0xffffffff },
12807 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12808 0x00000000, 0xffffffff },
12809 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12810 0x00000000, 0x000000ff },
12811 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
12812 0x00000000, 0xffffffff },
12813 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
12814 0x00000000, 0x000000ff },
12815 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
12816 0x00000000, 0xffffffff },
12817 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
12818 0x00000000, 0xffffffff },
12819 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
12820 0x00000000, 0xffffffff },
12821 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
12822 0x00000000, 0xffffffff },
12823 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
12824 0x00000000, 0xffffffff },
12825 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
12826 0xffffffff, 0x00000000 },
12827 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
12828 0xffffffff, 0x00000000 },
12830 /* Buffer Manager Control Registers. */
12831 { BUFMGR_MB_POOL_ADDR
, TG3_FL_NOT_5750
,
12832 0x00000000, 0x007fff80 },
12833 { BUFMGR_MB_POOL_SIZE
, TG3_FL_NOT_5750
,
12834 0x00000000, 0x007fffff },
12835 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
12836 0x00000000, 0x0000003f },
12837 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
12838 0x00000000, 0x000001ff },
12839 { BUFMGR_MB_HIGH_WATER
, 0x0000,
12840 0x00000000, 0x000001ff },
12841 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
12842 0xffffffff, 0x00000000 },
12843 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
12844 0xffffffff, 0x00000000 },
12846 /* Mailbox Registers */
12847 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
12848 0x00000000, 0x000001ff },
12849 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
12850 0x00000000, 0x000001ff },
12851 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
12852 0x00000000, 0x000007ff },
12853 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
12854 0x00000000, 0x000001ff },
12856 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12859 is_5705
= is_5750
= 0;
12860 if (tg3_flag(tp
, 5705_PLUS
)) {
12862 if (tg3_flag(tp
, 5750_PLUS
))
12866 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
12867 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
12870 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
12873 if (tg3_flag(tp
, IS_5788
) &&
12874 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
12877 if (is_5750
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5750
))
12880 offset
= (u32
) reg_tbl
[i
].offset
;
12881 read_mask
= reg_tbl
[i
].read_mask
;
12882 write_mask
= reg_tbl
[i
].write_mask
;
12884 /* Save the original register content */
12885 save_val
= tr32(offset
);
12887 /* Determine the read-only value. */
12888 read_val
= save_val
& read_mask
;
12890 /* Write zero to the register, then make sure the read-only bits
12891 * are not changed and the read/write bits are all zeros.
12895 val
= tr32(offset
);
12897 /* Test the read-only and read/write bits. */
12898 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
12901 /* Write ones to all the bits defined by RdMask and WrMask, then
12902 * make sure the read-only bits are not changed and the
12903 * read/write bits are all ones.
12905 tw32(offset
, read_mask
| write_mask
);
12907 val
= tr32(offset
);
12909 /* Test the read-only bits. */
12910 if ((val
& read_mask
) != read_val
)
12913 /* Test the read/write bits. */
12914 if ((val
& write_mask
) != write_mask
)
12917 tw32(offset
, save_val
);
12923 if (netif_msg_hw(tp
))
12924 netdev_err(tp
->dev
,
12925 "Register test failed at offset %x\n", offset
);
12926 tw32(offset
, save_val
);
12930 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
12932 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12936 for (i
= 0; i
< ARRAY_SIZE(test_pattern
); i
++) {
12937 for (j
= 0; j
< len
; j
+= 4) {
12940 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
12941 tg3_read_mem(tp
, offset
+ j
, &val
);
12942 if (val
!= test_pattern
[i
])
12949 static int tg3_test_memory(struct tg3
*tp
)
12951 static struct mem_entry
{
12954 } mem_tbl_570x
[] = {
12955 { 0x00000000, 0x00b50},
12956 { 0x00002000, 0x1c000},
12957 { 0xffffffff, 0x00000}
12958 }, mem_tbl_5705
[] = {
12959 { 0x00000100, 0x0000c},
12960 { 0x00000200, 0x00008},
12961 { 0x00004000, 0x00800},
12962 { 0x00006000, 0x01000},
12963 { 0x00008000, 0x02000},
12964 { 0x00010000, 0x0e000},
12965 { 0xffffffff, 0x00000}
12966 }, mem_tbl_5755
[] = {
12967 { 0x00000200, 0x00008},
12968 { 0x00004000, 0x00800},
12969 { 0x00006000, 0x00800},
12970 { 0x00008000, 0x02000},
12971 { 0x00010000, 0x0c000},
12972 { 0xffffffff, 0x00000}
12973 }, mem_tbl_5906
[] = {
12974 { 0x00000200, 0x00008},
12975 { 0x00004000, 0x00400},
12976 { 0x00006000, 0x00400},
12977 { 0x00008000, 0x01000},
12978 { 0x00010000, 0x01000},
12979 { 0xffffffff, 0x00000}
12980 }, mem_tbl_5717
[] = {
12981 { 0x00000200, 0x00008},
12982 { 0x00010000, 0x0a000},
12983 { 0x00020000, 0x13c00},
12984 { 0xffffffff, 0x00000}
12985 }, mem_tbl_57765
[] = {
12986 { 0x00000200, 0x00008},
12987 { 0x00004000, 0x00800},
12988 { 0x00006000, 0x09800},
12989 { 0x00010000, 0x0a000},
12990 { 0xffffffff, 0x00000}
12992 struct mem_entry
*mem_tbl
;
12996 if (tg3_flag(tp
, 5717_PLUS
))
12997 mem_tbl
= mem_tbl_5717
;
12998 else if (tg3_flag(tp
, 57765_CLASS
) ||
12999 tg3_asic_rev(tp
) == ASIC_REV_5762
)
13000 mem_tbl
= mem_tbl_57765
;
13001 else if (tg3_flag(tp
, 5755_PLUS
))
13002 mem_tbl
= mem_tbl_5755
;
13003 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
13004 mem_tbl
= mem_tbl_5906
;
13005 else if (tg3_flag(tp
, 5705_PLUS
))
13006 mem_tbl
= mem_tbl_5705
;
13008 mem_tbl
= mem_tbl_570x
;
13010 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
13011 err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
, mem_tbl
[i
].len
);
13019 #define TG3_TSO_MSS 500
13021 #define TG3_TSO_IP_HDR_LEN 20
13022 #define TG3_TSO_TCP_HDR_LEN 20
13023 #define TG3_TSO_TCP_OPT_LEN 12
13025 static const u8 tg3_tso_header
[] = {
13027 0x45, 0x00, 0x00, 0x00,
13028 0x00, 0x00, 0x40, 0x00,
13029 0x40, 0x06, 0x00, 0x00,
13030 0x0a, 0x00, 0x00, 0x01,
13031 0x0a, 0x00, 0x00, 0x02,
13032 0x0d, 0x00, 0xe0, 0x00,
13033 0x00, 0x00, 0x01, 0x00,
13034 0x00, 0x00, 0x02, 0x00,
13035 0x80, 0x10, 0x10, 0x00,
13036 0x14, 0x09, 0x00, 0x00,
13037 0x01, 0x01, 0x08, 0x0a,
13038 0x11, 0x11, 0x11, 0x11,
13039 0x11, 0x11, 0x11, 0x11,
13042 static int tg3_run_loopback(struct tg3
*tp
, u32 pktsz
, bool tso_loopback
)
13044 u32 rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
13045 u32 base_flags
= 0, mss
= 0, desc_idx
, coal_now
, data_off
, val
;
13047 struct sk_buff
*skb
;
13048 u8
*tx_data
, *rx_data
;
13050 int num_pkts
, tx_len
, rx_len
, i
, err
;
13051 struct tg3_rx_buffer_desc
*desc
;
13052 struct tg3_napi
*tnapi
, *rnapi
;
13053 struct tg3_rx_prodring_set
*tpr
= &tp
->napi
[0].prodring
;
13055 tnapi
= &tp
->napi
[0];
13056 rnapi
= &tp
->napi
[0];
13057 if (tp
->irq_cnt
> 1) {
13058 if (tg3_flag(tp
, ENABLE_RSS
))
13059 rnapi
= &tp
->napi
[1];
13060 if (tg3_flag(tp
, ENABLE_TSS
))
13061 tnapi
= &tp
->napi
[1];
13063 coal_now
= tnapi
->coal_now
| rnapi
->coal_now
;
13068 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
13072 tx_data
= skb_put(skb
, tx_len
);
13073 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
13074 memset(tx_data
+ 6, 0x0, 8);
13076 tw32(MAC_RX_MTU_SIZE
, tx_len
+ ETH_FCS_LEN
);
13078 if (tso_loopback
) {
13079 struct iphdr
*iph
= (struct iphdr
*)&tx_data
[ETH_HLEN
];
13081 u32 hdr_len
= TG3_TSO_IP_HDR_LEN
+ TG3_TSO_TCP_HDR_LEN
+
13082 TG3_TSO_TCP_OPT_LEN
;
13084 memcpy(tx_data
+ ETH_ALEN
* 2, tg3_tso_header
,
13085 sizeof(tg3_tso_header
));
13088 val
= tx_len
- ETH_ALEN
* 2 - sizeof(tg3_tso_header
);
13089 num_pkts
= DIV_ROUND_UP(val
, TG3_TSO_MSS
);
13091 /* Set the total length field in the IP header */
13092 iph
->tot_len
= htons((u16
)(mss
+ hdr_len
));
13094 base_flags
= (TXD_FLAG_CPU_PRE_DMA
|
13095 TXD_FLAG_CPU_POST_DMA
);
13097 if (tg3_flag(tp
, HW_TSO_1
) ||
13098 tg3_flag(tp
, HW_TSO_2
) ||
13099 tg3_flag(tp
, HW_TSO_3
)) {
13101 val
= ETH_HLEN
+ TG3_TSO_IP_HDR_LEN
;
13102 th
= (struct tcphdr
*)&tx_data
[val
];
13105 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
13107 if (tg3_flag(tp
, HW_TSO_3
)) {
13108 mss
|= (hdr_len
& 0xc) << 12;
13109 if (hdr_len
& 0x10)
13110 base_flags
|= 0x00000010;
13111 base_flags
|= (hdr_len
& 0x3e0) << 5;
13112 } else if (tg3_flag(tp
, HW_TSO_2
))
13113 mss
|= hdr_len
<< 9;
13114 else if (tg3_flag(tp
, HW_TSO_1
) ||
13115 tg3_asic_rev(tp
) == ASIC_REV_5705
) {
13116 mss
|= (TG3_TSO_TCP_OPT_LEN
<< 9);
13118 base_flags
|= (TG3_TSO_TCP_OPT_LEN
<< 10);
13121 data_off
= ETH_ALEN
* 2 + sizeof(tg3_tso_header
);
13124 data_off
= ETH_HLEN
;
13126 if (tg3_flag(tp
, USE_JUMBO_BDFLAG
) &&
13127 tx_len
> VLAN_ETH_FRAME_LEN
)
13128 base_flags
|= TXD_FLAG_JMB_PKT
;
13131 for (i
= data_off
; i
< tx_len
; i
++)
13132 tx_data
[i
] = (u8
) (i
& 0xff);
13134 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
13135 if (pci_dma_mapping_error(tp
->pdev
, map
)) {
13136 dev_kfree_skb(skb
);
13140 val
= tnapi
->tx_prod
;
13141 tnapi
->tx_buffers
[val
].skb
= skb
;
13142 dma_unmap_addr_set(&tnapi
->tx_buffers
[val
], mapping
, map
);
13144 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13149 rx_start_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13151 budget
= tg3_tx_avail(tnapi
);
13152 if (tg3_tx_frag_set(tnapi
, &val
, &budget
, map
, tx_len
,
13153 base_flags
| TXD_FLAG_END
, mss
, 0)) {
13154 tnapi
->tx_buffers
[val
].skb
= NULL
;
13155 dev_kfree_skb(skb
);
13161 /* Sync BD data before updating mailbox */
13164 tw32_tx_mbox(tnapi
->prodmbox
, tnapi
->tx_prod
);
13165 tr32_mailbox(tnapi
->prodmbox
);
13169 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13170 for (i
= 0; i
< 35; i
++) {
13171 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
13176 tx_idx
= tnapi
->hw_status
->idx
[0].tx_consumer
;
13177 rx_idx
= rnapi
->hw_status
->idx
[0].rx_producer
;
13178 if ((tx_idx
== tnapi
->tx_prod
) &&
13179 (rx_idx
== (rx_start_idx
+ num_pkts
)))
13183 tg3_tx_skb_unmap(tnapi
, tnapi
->tx_prod
- 1, -1);
13184 dev_kfree_skb(skb
);
13186 if (tx_idx
!= tnapi
->tx_prod
)
13189 if (rx_idx
!= rx_start_idx
+ num_pkts
)
13193 while (rx_idx
!= rx_start_idx
) {
13194 desc
= &rnapi
->rx_rcb
[rx_start_idx
++];
13195 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
13196 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
13198 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
13199 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
13202 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
)
13205 if (!tso_loopback
) {
13206 if (rx_len
!= tx_len
)
13209 if (pktsz
<= TG3_RX_STD_DMA_SZ
- ETH_FCS_LEN
) {
13210 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
13213 if (opaque_key
!= RXD_OPAQUE_RING_JUMBO
)
13216 } else if ((desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
13217 (desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
13218 >> RXD_TCPCSUM_SHIFT
!= 0xffff) {
13222 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
13223 rx_data
= tpr
->rx_std_buffers
[desc_idx
].data
;
13224 map
= dma_unmap_addr(&tpr
->rx_std_buffers
[desc_idx
],
13226 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
13227 rx_data
= tpr
->rx_jmb_buffers
[desc_idx
].data
;
13228 map
= dma_unmap_addr(&tpr
->rx_jmb_buffers
[desc_idx
],
13233 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
,
13234 PCI_DMA_FROMDEVICE
);
13236 rx_data
+= TG3_RX_OFFSET(tp
);
13237 for (i
= data_off
; i
< rx_len
; i
++, val
++) {
13238 if (*(rx_data
+ i
) != (u8
) (val
& 0xff))
13245 /* tg3_free_rings will unmap and free the rx_data */
13250 #define TG3_STD_LOOPBACK_FAILED 1
13251 #define TG3_JMB_LOOPBACK_FAILED 2
13252 #define TG3_TSO_LOOPBACK_FAILED 4
13253 #define TG3_LOOPBACK_FAILED \
13254 (TG3_STD_LOOPBACK_FAILED | \
13255 TG3_JMB_LOOPBACK_FAILED | \
13256 TG3_TSO_LOOPBACK_FAILED)
13258 static int tg3_test_loopback(struct tg3
*tp
, u64
*data
, bool do_extlpbk
)
13262 u32 jmb_pkt_sz
= 9000;
13265 jmb_pkt_sz
= tp
->dma_limit
- ETH_HLEN
;
13267 eee_cap
= tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
;
13268 tp
->phy_flags
&= ~TG3_PHYFLG_EEE_CAP
;
13270 if (!netif_running(tp
->dev
)) {
13271 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13272 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13274 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13278 err
= tg3_reset_hw(tp
, true);
13280 data
[TG3_MAC_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13281 data
[TG3_PHY_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13283 data
[TG3_EXT_LOOPB_TEST
] = TG3_LOOPBACK_FAILED
;
13287 if (tg3_flag(tp
, ENABLE_RSS
)) {
13290 /* Reroute all rx packets to the 1st queue */
13291 for (i
= MAC_RSS_INDIR_TBL_0
;
13292 i
< MAC_RSS_INDIR_TBL_0
+ TG3_RSS_INDIR_TBL_SIZE
; i
+= 4)
13296 /* HW errata - mac loopback fails in some cases on 5780.
13297 * Normal traffic and PHY loopback are not affected by
13298 * errata. Also, the MAC loopback test is deprecated for
13299 * all newer ASIC revisions.
13301 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
13302 !tg3_flag(tp
, CPMU_PRESENT
)) {
13303 tg3_mac_loopback(tp
, true);
13305 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13306 data
[TG3_MAC_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
13308 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13309 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13310 data
[TG3_MAC_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
13312 tg3_mac_loopback(tp
, false);
13315 if (!(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) &&
13316 !tg3_flag(tp
, USE_PHYLIB
)) {
13319 tg3_phy_lpbk_set(tp
, 0, false);
13321 /* Wait for link */
13322 for (i
= 0; i
< 100; i
++) {
13323 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
13328 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13329 data
[TG3_PHY_LOOPB_TEST
] |= TG3_STD_LOOPBACK_FAILED
;
13330 if (tg3_flag(tp
, TSO_CAPABLE
) &&
13331 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
13332 data
[TG3_PHY_LOOPB_TEST
] |= TG3_TSO_LOOPBACK_FAILED
;
13333 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13334 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13335 data
[TG3_PHY_LOOPB_TEST
] |= TG3_JMB_LOOPBACK_FAILED
;
13338 tg3_phy_lpbk_set(tp
, 0, true);
13340 /* All link indications report up, but the hardware
13341 * isn't really ready for about 20 msec. Double it
13346 if (tg3_run_loopback(tp
, ETH_FRAME_LEN
, false))
13347 data
[TG3_EXT_LOOPB_TEST
] |=
13348 TG3_STD_LOOPBACK_FAILED
;
13349 if (tg3_flag(tp
, TSO_CAPABLE
) &&
13350 tg3_run_loopback(tp
, ETH_FRAME_LEN
, true))
13351 data
[TG3_EXT_LOOPB_TEST
] |=
13352 TG3_TSO_LOOPBACK_FAILED
;
13353 if (tg3_flag(tp
, JUMBO_RING_ENABLE
) &&
13354 tg3_run_loopback(tp
, jmb_pkt_sz
+ ETH_HLEN
, false))
13355 data
[TG3_EXT_LOOPB_TEST
] |=
13356 TG3_JMB_LOOPBACK_FAILED
;
13359 /* Re-enable gphy autopowerdown. */
13360 if (tp
->phy_flags
& TG3_PHYFLG_ENABLE_APD
)
13361 tg3_phy_toggle_apd(tp
, true);
13364 err
= (data
[TG3_MAC_LOOPB_TEST
] | data
[TG3_PHY_LOOPB_TEST
] |
13365 data
[TG3_EXT_LOOPB_TEST
]) ? -EIO
: 0;
13368 tp
->phy_flags
|= eee_cap
;
13373 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
13376 struct tg3
*tp
= netdev_priv(dev
);
13377 bool doextlpbk
= etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
;
13379 if ((tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
) &&
13380 tg3_power_up(tp
)) {
13381 etest
->flags
|= ETH_TEST_FL_FAILED
;
13382 memset(data
, 1, sizeof(u64
) * TG3_NUM_TEST
);
13386 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
13388 if (tg3_test_nvram(tp
) != 0) {
13389 etest
->flags
|= ETH_TEST_FL_FAILED
;
13390 data
[TG3_NVRAM_TEST
] = 1;
13392 if (!doextlpbk
&& tg3_test_link(tp
)) {
13393 etest
->flags
|= ETH_TEST_FL_FAILED
;
13394 data
[TG3_LINK_TEST
] = 1;
13396 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
13397 int err
, err2
= 0, irq_sync
= 0;
13399 if (netif_running(dev
)) {
13401 tg3_netif_stop(tp
);
13405 tg3_full_lock(tp
, irq_sync
);
13406 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
13407 err
= tg3_nvram_lock(tp
);
13408 tg3_halt_cpu(tp
, RX_CPU_BASE
);
13409 if (!tg3_flag(tp
, 5705_PLUS
))
13410 tg3_halt_cpu(tp
, TX_CPU_BASE
);
13412 tg3_nvram_unlock(tp
);
13414 if (tp
->phy_flags
& TG3_PHYFLG_MII_SERDES
)
13417 if (tg3_test_registers(tp
) != 0) {
13418 etest
->flags
|= ETH_TEST_FL_FAILED
;
13419 data
[TG3_REGISTER_TEST
] = 1;
13422 if (tg3_test_memory(tp
) != 0) {
13423 etest
->flags
|= ETH_TEST_FL_FAILED
;
13424 data
[TG3_MEMORY_TEST
] = 1;
13428 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
13430 if (tg3_test_loopback(tp
, data
, doextlpbk
))
13431 etest
->flags
|= ETH_TEST_FL_FAILED
;
13433 tg3_full_unlock(tp
);
13435 if (tg3_test_interrupt(tp
) != 0) {
13436 etest
->flags
|= ETH_TEST_FL_FAILED
;
13437 data
[TG3_INTERRUPT_TEST
] = 1;
13440 tg3_full_lock(tp
, 0);
13442 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13443 if (netif_running(dev
)) {
13444 tg3_flag_set(tp
, INIT_COMPLETE
);
13445 err2
= tg3_restart_hw(tp
, true);
13447 tg3_netif_start(tp
);
13450 tg3_full_unlock(tp
);
13452 if (irq_sync
&& !err2
)
13455 if (tp
->phy_flags
& TG3_PHYFLG_IS_LOW_POWER
)
13456 tg3_power_down(tp
);
13460 static int tg3_hwtstamp_ioctl(struct net_device
*dev
,
13461 struct ifreq
*ifr
, int cmd
)
13463 struct tg3
*tp
= netdev_priv(dev
);
13464 struct hwtstamp_config stmpconf
;
13466 if (!tg3_flag(tp
, PTP_CAPABLE
))
13469 if (copy_from_user(&stmpconf
, ifr
->ifr_data
, sizeof(stmpconf
)))
13472 if (stmpconf
.flags
)
13475 switch (stmpconf
.tx_type
) {
13476 case HWTSTAMP_TX_ON
:
13477 tg3_flag_set(tp
, TX_TSTAMP_EN
);
13479 case HWTSTAMP_TX_OFF
:
13480 tg3_flag_clear(tp
, TX_TSTAMP_EN
);
13486 switch (stmpconf
.rx_filter
) {
13487 case HWTSTAMP_FILTER_NONE
:
13490 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
13491 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13492 TG3_RX_PTP_CTL_ALL_V1_EVENTS
;
13494 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
13495 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13496 TG3_RX_PTP_CTL_SYNC_EVNT
;
13498 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
13499 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V1_EN
|
13500 TG3_RX_PTP_CTL_DELAY_REQ
;
13502 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
13503 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13504 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13506 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
13507 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13508 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13510 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
13511 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13512 TG3_RX_PTP_CTL_ALL_V2_EVENTS
;
13514 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
13515 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13516 TG3_RX_PTP_CTL_SYNC_EVNT
;
13518 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
13519 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13520 TG3_RX_PTP_CTL_SYNC_EVNT
;
13522 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
13523 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13524 TG3_RX_PTP_CTL_SYNC_EVNT
;
13526 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
13527 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_EN
|
13528 TG3_RX_PTP_CTL_DELAY_REQ
;
13530 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
13531 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN
|
13532 TG3_RX_PTP_CTL_DELAY_REQ
;
13534 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
13535 tp
->rxptpctl
= TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN
|
13536 TG3_RX_PTP_CTL_DELAY_REQ
;
13542 if (netif_running(dev
) && tp
->rxptpctl
)
13543 tw32(TG3_RX_PTP_CTL
,
13544 tp
->rxptpctl
| TG3_RX_PTP_CTL_HWTS_INTERLOCK
);
13546 return copy_to_user(ifr
->ifr_data
, &stmpconf
, sizeof(stmpconf
)) ?
13550 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
13552 struct mii_ioctl_data
*data
= if_mii(ifr
);
13553 struct tg3
*tp
= netdev_priv(dev
);
13556 if (tg3_flag(tp
, USE_PHYLIB
)) {
13557 struct phy_device
*phydev
;
13558 if (!(tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
))
13560 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
13561 return phy_mii_ioctl(phydev
, ifr
, cmd
);
13566 data
->phy_id
= tp
->phy_addr
;
13569 case SIOCGMIIREG
: {
13572 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13573 break; /* We have no PHY */
13575 if (!netif_running(dev
))
13578 spin_lock_bh(&tp
->lock
);
13579 err
= __tg3_readphy(tp
, data
->phy_id
& 0x1f,
13580 data
->reg_num
& 0x1f, &mii_regval
);
13581 spin_unlock_bh(&tp
->lock
);
13583 data
->val_out
= mii_regval
;
13589 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
13590 break; /* We have no PHY */
13592 if (!netif_running(dev
))
13595 spin_lock_bh(&tp
->lock
);
13596 err
= __tg3_writephy(tp
, data
->phy_id
& 0x1f,
13597 data
->reg_num
& 0x1f, data
->val_in
);
13598 spin_unlock_bh(&tp
->lock
);
13602 case SIOCSHWTSTAMP
:
13603 return tg3_hwtstamp_ioctl(dev
, ifr
, cmd
);
13609 return -EOPNOTSUPP
;
13612 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
13614 struct tg3
*tp
= netdev_priv(dev
);
13616 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
13620 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
13622 struct tg3
*tp
= netdev_priv(dev
);
13623 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
13624 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
13626 if (!tg3_flag(tp
, 5705_PLUS
)) {
13627 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
13628 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
13629 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
13630 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
13633 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
13634 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
13635 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
13636 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
13637 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
13638 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
13639 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
13640 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
13641 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
13642 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
13645 /* No rx interrupts will be generated if both are zero */
13646 if ((ec
->rx_coalesce_usecs
== 0) &&
13647 (ec
->rx_max_coalesced_frames
== 0))
13650 /* No tx interrupts will be generated if both are zero */
13651 if ((ec
->tx_coalesce_usecs
== 0) &&
13652 (ec
->tx_max_coalesced_frames
== 0))
13655 /* Only copy relevant parameters, ignore all others. */
13656 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
13657 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
13658 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
13659 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
13660 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
13661 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
13662 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
13663 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
13664 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
13666 if (netif_running(dev
)) {
13667 tg3_full_lock(tp
, 0);
13668 __tg3_set_coalesce(tp
, &tp
->coal
);
13669 tg3_full_unlock(tp
);
13674 static const struct ethtool_ops tg3_ethtool_ops
= {
13675 .get_settings
= tg3_get_settings
,
13676 .set_settings
= tg3_set_settings
,
13677 .get_drvinfo
= tg3_get_drvinfo
,
13678 .get_regs_len
= tg3_get_regs_len
,
13679 .get_regs
= tg3_get_regs
,
13680 .get_wol
= tg3_get_wol
,
13681 .set_wol
= tg3_set_wol
,
13682 .get_msglevel
= tg3_get_msglevel
,
13683 .set_msglevel
= tg3_set_msglevel
,
13684 .nway_reset
= tg3_nway_reset
,
13685 .get_link
= ethtool_op_get_link
,
13686 .get_eeprom_len
= tg3_get_eeprom_len
,
13687 .get_eeprom
= tg3_get_eeprom
,
13688 .set_eeprom
= tg3_set_eeprom
,
13689 .get_ringparam
= tg3_get_ringparam
,
13690 .set_ringparam
= tg3_set_ringparam
,
13691 .get_pauseparam
= tg3_get_pauseparam
,
13692 .set_pauseparam
= tg3_set_pauseparam
,
13693 .self_test
= tg3_self_test
,
13694 .get_strings
= tg3_get_strings
,
13695 .set_phys_id
= tg3_set_phys_id
,
13696 .get_ethtool_stats
= tg3_get_ethtool_stats
,
13697 .get_coalesce
= tg3_get_coalesce
,
13698 .set_coalesce
= tg3_set_coalesce
,
13699 .get_sset_count
= tg3_get_sset_count
,
13700 .get_rxnfc
= tg3_get_rxnfc
,
13701 .get_rxfh_indir_size
= tg3_get_rxfh_indir_size
,
13702 .get_rxfh_indir
= tg3_get_rxfh_indir
,
13703 .set_rxfh_indir
= tg3_set_rxfh_indir
,
13704 .get_channels
= tg3_get_channels
,
13705 .set_channels
= tg3_set_channels
,
13706 .get_ts_info
= tg3_get_ts_info
,
13709 static struct rtnl_link_stats64
*tg3_get_stats64(struct net_device
*dev
,
13710 struct rtnl_link_stats64
*stats
)
13712 struct tg3
*tp
= netdev_priv(dev
);
13714 spin_lock_bh(&tp
->lock
);
13715 if (!tp
->hw_stats
) {
13716 spin_unlock_bh(&tp
->lock
);
13717 return &tp
->net_stats_prev
;
13720 tg3_get_nstats(tp
, stats
);
13721 spin_unlock_bh(&tp
->lock
);
13726 static void tg3_set_rx_mode(struct net_device
*dev
)
13728 struct tg3
*tp
= netdev_priv(dev
);
13730 if (!netif_running(dev
))
13733 tg3_full_lock(tp
, 0);
13734 __tg3_set_rx_mode(dev
);
13735 tg3_full_unlock(tp
);
13738 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
13741 dev
->mtu
= new_mtu
;
13743 if (new_mtu
> ETH_DATA_LEN
) {
13744 if (tg3_flag(tp
, 5780_CLASS
)) {
13745 netdev_update_features(dev
);
13746 tg3_flag_clear(tp
, TSO_CAPABLE
);
13748 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
13751 if (tg3_flag(tp
, 5780_CLASS
)) {
13752 tg3_flag_set(tp
, TSO_CAPABLE
);
13753 netdev_update_features(dev
);
13755 tg3_flag_clear(tp
, JUMBO_RING_ENABLE
);
13759 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
13761 struct tg3
*tp
= netdev_priv(dev
);
13763 bool reset_phy
= false;
13765 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
13768 if (!netif_running(dev
)) {
13769 /* We'll just catch it later when the
13772 tg3_set_mtu(dev
, tp
, new_mtu
);
13778 tg3_netif_stop(tp
);
13780 tg3_full_lock(tp
, 1);
13782 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
13784 tg3_set_mtu(dev
, tp
, new_mtu
);
13786 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13787 * breaks all requests to 256 bytes.
13789 if (tg3_asic_rev(tp
) == ASIC_REV_57766
)
13792 err
= tg3_restart_hw(tp
, reset_phy
);
13795 tg3_netif_start(tp
);
13797 tg3_full_unlock(tp
);
13805 static const struct net_device_ops tg3_netdev_ops
= {
13806 .ndo_open
= tg3_open
,
13807 .ndo_stop
= tg3_close
,
13808 .ndo_start_xmit
= tg3_start_xmit
,
13809 .ndo_get_stats64
= tg3_get_stats64
,
13810 .ndo_validate_addr
= eth_validate_addr
,
13811 .ndo_set_rx_mode
= tg3_set_rx_mode
,
13812 .ndo_set_mac_address
= tg3_set_mac_addr
,
13813 .ndo_do_ioctl
= tg3_ioctl
,
13814 .ndo_tx_timeout
= tg3_tx_timeout
,
13815 .ndo_change_mtu
= tg3_change_mtu
,
13816 .ndo_fix_features
= tg3_fix_features
,
13817 .ndo_set_features
= tg3_set_features
,
13818 #ifdef CONFIG_NET_POLL_CONTROLLER
13819 .ndo_poll_controller
= tg3_poll_controller
,
13823 static void tg3_get_eeprom_size(struct tg3
*tp
)
13825 u32 cursize
, val
, magic
;
13827 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
13829 if (tg3_nvram_read(tp
, 0, &magic
) != 0)
13832 if ((magic
!= TG3_EEPROM_MAGIC
) &&
13833 ((magic
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
) &&
13834 ((magic
& TG3_EEPROM_MAGIC_HW_MSK
) != TG3_EEPROM_MAGIC_HW
))
13838 * Size the chip by reading offsets at increasing powers of two.
13839 * When we encounter our validation signature, we know the addressing
13840 * has wrapped around, and thus have our chip size.
13844 while (cursize
< tp
->nvram_size
) {
13845 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
13854 tp
->nvram_size
= cursize
;
13857 static void tg3_get_nvram_size(struct tg3
*tp
)
13861 if (tg3_flag(tp
, NO_NVRAM
) || tg3_nvram_read(tp
, 0, &val
) != 0)
13864 /* Selfboot format */
13865 if (val
!= TG3_EEPROM_MAGIC
) {
13866 tg3_get_eeprom_size(tp
);
13870 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
13872 /* This is confusing. We want to operate on the
13873 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13874 * call will read from NVRAM and byteswap the data
13875 * according to the byteswapping settings for all
13876 * other register accesses. This ensures the data we
13877 * want will always reside in the lower 16-bits.
13878 * However, the data in NVRAM is in LE format, which
13879 * means the data from the NVRAM read will always be
13880 * opposite the endianness of the CPU. The 16-bit
13881 * byteswap then brings the data to CPU endianness.
13883 tp
->nvram_size
= swab16((u16
)(val
& 0x0000ffff)) * 1024;
13887 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
13890 static void tg3_get_nvram_info(struct tg3
*tp
)
13894 nvcfg1
= tr32(NVRAM_CFG1
);
13895 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
13896 tg3_flag_set(tp
, FLASH
);
13898 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
13899 tw32(NVRAM_CFG1
, nvcfg1
);
13902 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
13903 tg3_flag(tp
, 5780_CLASS
)) {
13904 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
13905 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
13906 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13907 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
13908 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13910 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
13911 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13912 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
13914 case FLASH_VENDOR_ATMEL_EEPROM
:
13915 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13916 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
13917 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13919 case FLASH_VENDOR_ST
:
13920 tp
->nvram_jedecnum
= JEDEC_ST
;
13921 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
13922 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13924 case FLASH_VENDOR_SAIFUN
:
13925 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
13926 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
13928 case FLASH_VENDOR_SST_SMALL
:
13929 case FLASH_VENDOR_SST_LARGE
:
13930 tp
->nvram_jedecnum
= JEDEC_SST
;
13931 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
13935 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13936 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
13937 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13941 static void tg3_nvram_get_pagesize(struct tg3
*tp
, u32 nvmcfg1
)
13943 switch (nvmcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
13944 case FLASH_5752PAGE_SIZE_256
:
13945 tp
->nvram_pagesize
= 256;
13947 case FLASH_5752PAGE_SIZE_512
:
13948 tp
->nvram_pagesize
= 512;
13950 case FLASH_5752PAGE_SIZE_1K
:
13951 tp
->nvram_pagesize
= 1024;
13953 case FLASH_5752PAGE_SIZE_2K
:
13954 tp
->nvram_pagesize
= 2048;
13956 case FLASH_5752PAGE_SIZE_4K
:
13957 tp
->nvram_pagesize
= 4096;
13959 case FLASH_5752PAGE_SIZE_264
:
13960 tp
->nvram_pagesize
= 264;
13962 case FLASH_5752PAGE_SIZE_528
:
13963 tp
->nvram_pagesize
= 528;
13968 static void tg3_get_5752_nvram_info(struct tg3
*tp
)
13972 nvcfg1
= tr32(NVRAM_CFG1
);
13974 /* NVRAM protection for TPM */
13975 if (nvcfg1
& (1 << 27))
13976 tg3_flag_set(tp
, PROTECTED_NVRAM
);
13978 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
13979 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
13980 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
13981 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13982 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13984 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
13985 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
13986 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13987 tg3_flag_set(tp
, FLASH
);
13989 case FLASH_5752VENDOR_ST_M45PE10
:
13990 case FLASH_5752VENDOR_ST_M45PE20
:
13991 case FLASH_5752VENDOR_ST_M45PE40
:
13992 tp
->nvram_jedecnum
= JEDEC_ST
;
13993 tg3_flag_set(tp
, NVRAM_BUFFERED
);
13994 tg3_flag_set(tp
, FLASH
);
13998 if (tg3_flag(tp
, FLASH
)) {
13999 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14001 /* For eeprom, set pagesize to maximum eeprom size */
14002 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14004 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14005 tw32(NVRAM_CFG1
, nvcfg1
);
14009 static void tg3_get_5755_nvram_info(struct tg3
*tp
)
14011 u32 nvcfg1
, protect
= 0;
14013 nvcfg1
= tr32(NVRAM_CFG1
);
14015 /* NVRAM protection for TPM */
14016 if (nvcfg1
& (1 << 27)) {
14017 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14021 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
14023 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
14024 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
14025 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
14026 case FLASH_5755VENDOR_ATMEL_FLASH_5
:
14027 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14028 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14029 tg3_flag_set(tp
, FLASH
);
14030 tp
->nvram_pagesize
= 264;
14031 if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_1
||
14032 nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_5
)
14033 tp
->nvram_size
= (protect
? 0x3e200 :
14034 TG3_NVRAM_SIZE_512KB
);
14035 else if (nvcfg1
== FLASH_5755VENDOR_ATMEL_FLASH_2
)
14036 tp
->nvram_size
= (protect
? 0x1f200 :
14037 TG3_NVRAM_SIZE_256KB
);
14039 tp
->nvram_size
= (protect
? 0x1f200 :
14040 TG3_NVRAM_SIZE_128KB
);
14042 case FLASH_5752VENDOR_ST_M45PE10
:
14043 case FLASH_5752VENDOR_ST_M45PE20
:
14044 case FLASH_5752VENDOR_ST_M45PE40
:
14045 tp
->nvram_jedecnum
= JEDEC_ST
;
14046 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14047 tg3_flag_set(tp
, FLASH
);
14048 tp
->nvram_pagesize
= 256;
14049 if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE10
)
14050 tp
->nvram_size
= (protect
?
14051 TG3_NVRAM_SIZE_64KB
:
14052 TG3_NVRAM_SIZE_128KB
);
14053 else if (nvcfg1
== FLASH_5752VENDOR_ST_M45PE20
)
14054 tp
->nvram_size
= (protect
?
14055 TG3_NVRAM_SIZE_64KB
:
14056 TG3_NVRAM_SIZE_256KB
);
14058 tp
->nvram_size
= (protect
?
14059 TG3_NVRAM_SIZE_128KB
:
14060 TG3_NVRAM_SIZE_512KB
);
14065 static void tg3_get_5787_nvram_info(struct tg3
*tp
)
14069 nvcfg1
= tr32(NVRAM_CFG1
);
14071 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14072 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
14073 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
14074 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
14075 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
14076 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14077 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14078 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14080 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14081 tw32(NVRAM_CFG1
, nvcfg1
);
14083 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14084 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
14085 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
14086 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
14087 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14088 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14089 tg3_flag_set(tp
, FLASH
);
14090 tp
->nvram_pagesize
= 264;
14092 case FLASH_5752VENDOR_ST_M45PE10
:
14093 case FLASH_5752VENDOR_ST_M45PE20
:
14094 case FLASH_5752VENDOR_ST_M45PE40
:
14095 tp
->nvram_jedecnum
= JEDEC_ST
;
14096 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14097 tg3_flag_set(tp
, FLASH
);
14098 tp
->nvram_pagesize
= 256;
14103 static void tg3_get_5761_nvram_info(struct tg3
*tp
)
14105 u32 nvcfg1
, protect
= 0;
14107 nvcfg1
= tr32(NVRAM_CFG1
);
14109 /* NVRAM protection for TPM */
14110 if (nvcfg1
& (1 << 27)) {
14111 tg3_flag_set(tp
, PROTECTED_NVRAM
);
14115 nvcfg1
&= NVRAM_CFG1_5752VENDOR_MASK
;
14117 case FLASH_5761VENDOR_ATMEL_ADB021D
:
14118 case FLASH_5761VENDOR_ATMEL_ADB041D
:
14119 case FLASH_5761VENDOR_ATMEL_ADB081D
:
14120 case FLASH_5761VENDOR_ATMEL_ADB161D
:
14121 case FLASH_5761VENDOR_ATMEL_MDB021D
:
14122 case FLASH_5761VENDOR_ATMEL_MDB041D
:
14123 case FLASH_5761VENDOR_ATMEL_MDB081D
:
14124 case FLASH_5761VENDOR_ATMEL_MDB161D
:
14125 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14126 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14127 tg3_flag_set(tp
, FLASH
);
14128 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14129 tp
->nvram_pagesize
= 256;
14131 case FLASH_5761VENDOR_ST_A_M45PE20
:
14132 case FLASH_5761VENDOR_ST_A_M45PE40
:
14133 case FLASH_5761VENDOR_ST_A_M45PE80
:
14134 case FLASH_5761VENDOR_ST_A_M45PE16
:
14135 case FLASH_5761VENDOR_ST_M_M45PE20
:
14136 case FLASH_5761VENDOR_ST_M_M45PE40
:
14137 case FLASH_5761VENDOR_ST_M_M45PE80
:
14138 case FLASH_5761VENDOR_ST_M_M45PE16
:
14139 tp
->nvram_jedecnum
= JEDEC_ST
;
14140 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14141 tg3_flag_set(tp
, FLASH
);
14142 tp
->nvram_pagesize
= 256;
14147 tp
->nvram_size
= tr32(NVRAM_ADDR_LOCKOUT
);
14150 case FLASH_5761VENDOR_ATMEL_ADB161D
:
14151 case FLASH_5761VENDOR_ATMEL_MDB161D
:
14152 case FLASH_5761VENDOR_ST_A_M45PE16
:
14153 case FLASH_5761VENDOR_ST_M_M45PE16
:
14154 tp
->nvram_size
= TG3_NVRAM_SIZE_2MB
;
14156 case FLASH_5761VENDOR_ATMEL_ADB081D
:
14157 case FLASH_5761VENDOR_ATMEL_MDB081D
:
14158 case FLASH_5761VENDOR_ST_A_M45PE80
:
14159 case FLASH_5761VENDOR_ST_M_M45PE80
:
14160 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14162 case FLASH_5761VENDOR_ATMEL_ADB041D
:
14163 case FLASH_5761VENDOR_ATMEL_MDB041D
:
14164 case FLASH_5761VENDOR_ST_A_M45PE40
:
14165 case FLASH_5761VENDOR_ST_M_M45PE40
:
14166 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14168 case FLASH_5761VENDOR_ATMEL_ADB021D
:
14169 case FLASH_5761VENDOR_ATMEL_MDB021D
:
14170 case FLASH_5761VENDOR_ST_A_M45PE20
:
14171 case FLASH_5761VENDOR_ST_M_M45PE20
:
14172 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14178 static void tg3_get_5906_nvram_info(struct tg3
*tp
)
14180 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14181 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14182 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14185 static void tg3_get_57780_nvram_info(struct tg3
*tp
)
14189 nvcfg1
= tr32(NVRAM_CFG1
);
14191 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14192 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
14193 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
14194 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14195 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14196 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14198 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14199 tw32(NVRAM_CFG1
, nvcfg1
);
14201 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14202 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
14203 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
14204 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
14205 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
14206 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
14207 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
14208 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14209 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14210 tg3_flag_set(tp
, FLASH
);
14212 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14213 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
14214 case FLASH_57780VENDOR_ATMEL_AT45DB011D
:
14215 case FLASH_57780VENDOR_ATMEL_AT45DB011B
:
14216 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14218 case FLASH_57780VENDOR_ATMEL_AT45DB021D
:
14219 case FLASH_57780VENDOR_ATMEL_AT45DB021B
:
14220 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14222 case FLASH_57780VENDOR_ATMEL_AT45DB041D
:
14223 case FLASH_57780VENDOR_ATMEL_AT45DB041B
:
14224 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14228 case FLASH_5752VENDOR_ST_M45PE10
:
14229 case FLASH_5752VENDOR_ST_M45PE20
:
14230 case FLASH_5752VENDOR_ST_M45PE40
:
14231 tp
->nvram_jedecnum
= JEDEC_ST
;
14232 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14233 tg3_flag_set(tp
, FLASH
);
14235 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14236 case FLASH_5752VENDOR_ST_M45PE10
:
14237 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14239 case FLASH_5752VENDOR_ST_M45PE20
:
14240 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14242 case FLASH_5752VENDOR_ST_M45PE40
:
14243 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14248 tg3_flag_set(tp
, NO_NVRAM
);
14252 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14253 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14254 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14258 static void tg3_get_5717_nvram_info(struct tg3
*tp
)
14262 nvcfg1
= tr32(NVRAM_CFG1
);
14264 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14265 case FLASH_5717VENDOR_ATMEL_EEPROM
:
14266 case FLASH_5717VENDOR_MICRO_EEPROM
:
14267 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14268 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14269 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14271 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14272 tw32(NVRAM_CFG1
, nvcfg1
);
14274 case FLASH_5717VENDOR_ATMEL_MDB011D
:
14275 case FLASH_5717VENDOR_ATMEL_ADB011B
:
14276 case FLASH_5717VENDOR_ATMEL_ADB011D
:
14277 case FLASH_5717VENDOR_ATMEL_MDB021D
:
14278 case FLASH_5717VENDOR_ATMEL_ADB021B
:
14279 case FLASH_5717VENDOR_ATMEL_ADB021D
:
14280 case FLASH_5717VENDOR_ATMEL_45USPT
:
14281 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14282 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14283 tg3_flag_set(tp
, FLASH
);
14285 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14286 case FLASH_5717VENDOR_ATMEL_MDB021D
:
14287 /* Detect size with tg3_nvram_get_size() */
14289 case FLASH_5717VENDOR_ATMEL_ADB021B
:
14290 case FLASH_5717VENDOR_ATMEL_ADB021D
:
14291 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14294 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14298 case FLASH_5717VENDOR_ST_M_M25PE10
:
14299 case FLASH_5717VENDOR_ST_A_M25PE10
:
14300 case FLASH_5717VENDOR_ST_M_M45PE10
:
14301 case FLASH_5717VENDOR_ST_A_M45PE10
:
14302 case FLASH_5717VENDOR_ST_M_M25PE20
:
14303 case FLASH_5717VENDOR_ST_A_M25PE20
:
14304 case FLASH_5717VENDOR_ST_M_M45PE20
:
14305 case FLASH_5717VENDOR_ST_A_M45PE20
:
14306 case FLASH_5717VENDOR_ST_25USPT
:
14307 case FLASH_5717VENDOR_ST_45USPT
:
14308 tp
->nvram_jedecnum
= JEDEC_ST
;
14309 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14310 tg3_flag_set(tp
, FLASH
);
14312 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
14313 case FLASH_5717VENDOR_ST_M_M25PE20
:
14314 case FLASH_5717VENDOR_ST_M_M45PE20
:
14315 /* Detect size with tg3_nvram_get_size() */
14317 case FLASH_5717VENDOR_ST_A_M25PE20
:
14318 case FLASH_5717VENDOR_ST_A_M45PE20
:
14319 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14322 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14327 tg3_flag_set(tp
, NO_NVRAM
);
14331 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14332 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14333 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14336 static void tg3_get_5720_nvram_info(struct tg3
*tp
)
14338 u32 nvcfg1
, nvmpinstrp
;
14340 nvcfg1
= tr32(NVRAM_CFG1
);
14341 nvmpinstrp
= nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
;
14343 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
14344 if (!(nvcfg1
& NVRAM_CFG1_5762VENDOR_MASK
)) {
14345 tg3_flag_set(tp
, NO_NVRAM
);
14349 switch (nvmpinstrp
) {
14350 case FLASH_5762_EEPROM_HD
:
14351 nvmpinstrp
= FLASH_5720_EEPROM_HD
;
14353 case FLASH_5762_EEPROM_LD
:
14354 nvmpinstrp
= FLASH_5720_EEPROM_LD
;
14356 case FLASH_5720VENDOR_M_ST_M45PE20
:
14357 /* This pinstrap supports multiple sizes, so force it
14358 * to read the actual size from location 0xf0.
14360 nvmpinstrp
= FLASH_5720VENDOR_ST_45USPT
;
14365 switch (nvmpinstrp
) {
14366 case FLASH_5720_EEPROM_HD
:
14367 case FLASH_5720_EEPROM_LD
:
14368 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14369 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14371 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
14372 tw32(NVRAM_CFG1
, nvcfg1
);
14373 if (nvmpinstrp
== FLASH_5720_EEPROM_HD
)
14374 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
14376 tp
->nvram_pagesize
= ATMEL_AT24C02_CHIP_SIZE
;
14378 case FLASH_5720VENDOR_M_ATMEL_DB011D
:
14379 case FLASH_5720VENDOR_A_ATMEL_DB011B
:
14380 case FLASH_5720VENDOR_A_ATMEL_DB011D
:
14381 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
14382 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
14383 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
14384 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
14385 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
14386 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
14387 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
14388 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
14389 case FLASH_5720VENDOR_ATMEL_45USPT
:
14390 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
14391 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14392 tg3_flag_set(tp
, FLASH
);
14394 switch (nvmpinstrp
) {
14395 case FLASH_5720VENDOR_M_ATMEL_DB021D
:
14396 case FLASH_5720VENDOR_A_ATMEL_DB021B
:
14397 case FLASH_5720VENDOR_A_ATMEL_DB021D
:
14398 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14400 case FLASH_5720VENDOR_M_ATMEL_DB041D
:
14401 case FLASH_5720VENDOR_A_ATMEL_DB041B
:
14402 case FLASH_5720VENDOR_A_ATMEL_DB041D
:
14403 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14405 case FLASH_5720VENDOR_M_ATMEL_DB081D
:
14406 case FLASH_5720VENDOR_A_ATMEL_DB081D
:
14407 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14410 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
14411 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14415 case FLASH_5720VENDOR_M_ST_M25PE10
:
14416 case FLASH_5720VENDOR_M_ST_M45PE10
:
14417 case FLASH_5720VENDOR_A_ST_M25PE10
:
14418 case FLASH_5720VENDOR_A_ST_M45PE10
:
14419 case FLASH_5720VENDOR_M_ST_M25PE20
:
14420 case FLASH_5720VENDOR_M_ST_M45PE20
:
14421 case FLASH_5720VENDOR_A_ST_M25PE20
:
14422 case FLASH_5720VENDOR_A_ST_M45PE20
:
14423 case FLASH_5720VENDOR_M_ST_M25PE40
:
14424 case FLASH_5720VENDOR_M_ST_M45PE40
:
14425 case FLASH_5720VENDOR_A_ST_M25PE40
:
14426 case FLASH_5720VENDOR_A_ST_M45PE40
:
14427 case FLASH_5720VENDOR_M_ST_M25PE80
:
14428 case FLASH_5720VENDOR_M_ST_M45PE80
:
14429 case FLASH_5720VENDOR_A_ST_M25PE80
:
14430 case FLASH_5720VENDOR_A_ST_M45PE80
:
14431 case FLASH_5720VENDOR_ST_25USPT
:
14432 case FLASH_5720VENDOR_ST_45USPT
:
14433 tp
->nvram_jedecnum
= JEDEC_ST
;
14434 tg3_flag_set(tp
, NVRAM_BUFFERED
);
14435 tg3_flag_set(tp
, FLASH
);
14437 switch (nvmpinstrp
) {
14438 case FLASH_5720VENDOR_M_ST_M25PE20
:
14439 case FLASH_5720VENDOR_M_ST_M45PE20
:
14440 case FLASH_5720VENDOR_A_ST_M25PE20
:
14441 case FLASH_5720VENDOR_A_ST_M45PE20
:
14442 tp
->nvram_size
= TG3_NVRAM_SIZE_256KB
;
14444 case FLASH_5720VENDOR_M_ST_M25PE40
:
14445 case FLASH_5720VENDOR_M_ST_M45PE40
:
14446 case FLASH_5720VENDOR_A_ST_M25PE40
:
14447 case FLASH_5720VENDOR_A_ST_M45PE40
:
14448 tp
->nvram_size
= TG3_NVRAM_SIZE_512KB
;
14450 case FLASH_5720VENDOR_M_ST_M25PE80
:
14451 case FLASH_5720VENDOR_M_ST_M45PE80
:
14452 case FLASH_5720VENDOR_A_ST_M25PE80
:
14453 case FLASH_5720VENDOR_A_ST_M45PE80
:
14454 tp
->nvram_size
= TG3_NVRAM_SIZE_1MB
;
14457 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
14458 tp
->nvram_size
= TG3_NVRAM_SIZE_128KB
;
14463 tg3_flag_set(tp
, NO_NVRAM
);
14467 tg3_nvram_get_pagesize(tp
, nvcfg1
);
14468 if (tp
->nvram_pagesize
!= 264 && tp
->nvram_pagesize
!= 528)
14469 tg3_flag_set(tp
, NO_NVRAM_ADDR_TRANS
);
14471 if (tg3_asic_rev(tp
) == ASIC_REV_5762
) {
14474 if (tg3_nvram_read(tp
, 0, &val
))
14477 if (val
!= TG3_EEPROM_MAGIC
&&
14478 (val
& TG3_EEPROM_MAGIC_FW_MSK
) != TG3_EEPROM_MAGIC_FW
)
14479 tg3_flag_set(tp
, NO_NVRAM
);
14483 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14484 static void tg3_nvram_init(struct tg3
*tp
)
14486 if (tg3_flag(tp
, IS_SSB_CORE
)) {
14487 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14488 tg3_flag_clear(tp
, NVRAM
);
14489 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
14490 tg3_flag_set(tp
, NO_NVRAM
);
14494 tw32_f(GRC_EEPROM_ADDR
,
14495 (EEPROM_ADDR_FSM_RESET
|
14496 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
14497 EEPROM_ADDR_CLKPERD_SHIFT
)));
14501 /* Enable seeprom accesses. */
14502 tw32_f(GRC_LOCAL_CTRL
,
14503 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
14506 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14507 tg3_asic_rev(tp
) != ASIC_REV_5701
) {
14508 tg3_flag_set(tp
, NVRAM
);
14510 if (tg3_nvram_lock(tp
)) {
14511 netdev_warn(tp
->dev
,
14512 "Cannot get nvram lock, %s failed\n",
14516 tg3_enable_nvram_access(tp
);
14518 tp
->nvram_size
= 0;
14520 if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
14521 tg3_get_5752_nvram_info(tp
);
14522 else if (tg3_asic_rev(tp
) == ASIC_REV_5755
)
14523 tg3_get_5755_nvram_info(tp
);
14524 else if (tg3_asic_rev(tp
) == ASIC_REV_5787
||
14525 tg3_asic_rev(tp
) == ASIC_REV_5784
||
14526 tg3_asic_rev(tp
) == ASIC_REV_5785
)
14527 tg3_get_5787_nvram_info(tp
);
14528 else if (tg3_asic_rev(tp
) == ASIC_REV_5761
)
14529 tg3_get_5761_nvram_info(tp
);
14530 else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
14531 tg3_get_5906_nvram_info(tp
);
14532 else if (tg3_asic_rev(tp
) == ASIC_REV_57780
||
14533 tg3_flag(tp
, 57765_CLASS
))
14534 tg3_get_57780_nvram_info(tp
);
14535 else if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
14536 tg3_asic_rev(tp
) == ASIC_REV_5719
)
14537 tg3_get_5717_nvram_info(tp
);
14538 else if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
14539 tg3_asic_rev(tp
) == ASIC_REV_5762
)
14540 tg3_get_5720_nvram_info(tp
);
14542 tg3_get_nvram_info(tp
);
14544 if (tp
->nvram_size
== 0)
14545 tg3_get_nvram_size(tp
);
14547 tg3_disable_nvram_access(tp
);
14548 tg3_nvram_unlock(tp
);
14551 tg3_flag_clear(tp
, NVRAM
);
14552 tg3_flag_clear(tp
, NVRAM_BUFFERED
);
14554 tg3_get_eeprom_size(tp
);
14558 struct subsys_tbl_ent
{
14559 u16 subsys_vendor
, subsys_devid
;
14563 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
14564 /* Broadcom boards. */
14565 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14566 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6
, TG3_PHY_ID_BCM5401
},
14567 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14568 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5
, TG3_PHY_ID_BCM5701
},
14569 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14570 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6
, TG3_PHY_ID_BCM8002
},
14571 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14572 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9
, 0 },
14573 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14574 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1
, TG3_PHY_ID_BCM5701
},
14575 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14576 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8
, TG3_PHY_ID_BCM5701
},
14577 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14578 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7
, 0 },
14579 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14580 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10
, TG3_PHY_ID_BCM5701
},
14581 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14582 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12
, TG3_PHY_ID_BCM5701
},
14583 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14584 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1
, TG3_PHY_ID_BCM5703
},
14585 { TG3PCI_SUBVENDOR_ID_BROADCOM
,
14586 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2
, TG3_PHY_ID_BCM5703
},
14589 { TG3PCI_SUBVENDOR_ID_3COM
,
14590 TG3PCI_SUBDEVICE_ID_3COM_3C996T
, TG3_PHY_ID_BCM5401
},
14591 { TG3PCI_SUBVENDOR_ID_3COM
,
14592 TG3PCI_SUBDEVICE_ID_3COM_3C996BT
, TG3_PHY_ID_BCM5701
},
14593 { TG3PCI_SUBVENDOR_ID_3COM
,
14594 TG3PCI_SUBDEVICE_ID_3COM_3C996SX
, 0 },
14595 { TG3PCI_SUBVENDOR_ID_3COM
,
14596 TG3PCI_SUBDEVICE_ID_3COM_3C1000T
, TG3_PHY_ID_BCM5701
},
14597 { TG3PCI_SUBVENDOR_ID_3COM
,
14598 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01
, TG3_PHY_ID_BCM5701
},
14601 { TG3PCI_SUBVENDOR_ID_DELL
,
14602 TG3PCI_SUBDEVICE_ID_DELL_VIPER
, TG3_PHY_ID_BCM5401
},
14603 { TG3PCI_SUBVENDOR_ID_DELL
,
14604 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR
, TG3_PHY_ID_BCM5401
},
14605 { TG3PCI_SUBVENDOR_ID_DELL
,
14606 TG3PCI_SUBDEVICE_ID_DELL_MERLOT
, TG3_PHY_ID_BCM5411
},
14607 { TG3PCI_SUBVENDOR_ID_DELL
,
14608 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT
, TG3_PHY_ID_BCM5411
},
14610 /* Compaq boards. */
14611 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14612 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE
, TG3_PHY_ID_BCM5701
},
14613 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14614 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2
, TG3_PHY_ID_BCM5701
},
14615 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14616 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING
, 0 },
14617 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14618 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780
, TG3_PHY_ID_BCM5701
},
14619 { TG3PCI_SUBVENDOR_ID_COMPAQ
,
14620 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2
, TG3_PHY_ID_BCM5701
},
14623 { TG3PCI_SUBVENDOR_ID_IBM
,
14624 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2
, 0 }
14627 static struct subsys_tbl_ent
*tg3_lookup_by_subsys(struct tg3
*tp
)
14631 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
14632 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
14633 tp
->pdev
->subsystem_vendor
) &&
14634 (subsys_id_to_phy_id
[i
].subsys_devid
==
14635 tp
->pdev
->subsystem_device
))
14636 return &subsys_id_to_phy_id
[i
];
14641 static void tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
14645 tp
->phy_id
= TG3_PHY_ID_INVALID
;
14646 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14648 /* Assume an onboard device and WOL capable by default. */
14649 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14650 tg3_flag_set(tp
, WOL_CAP
);
14652 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
14653 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
)) {
14654 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14655 tg3_flag_set(tp
, IS_NIC
);
14657 val
= tr32(VCPU_CFGSHDW
);
14658 if (val
& VCPU_CFGSHDW_ASPM_DBNC
)
14659 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14660 if ((val
& VCPU_CFGSHDW_WOL_ENABLE
) &&
14661 (val
& VCPU_CFGSHDW_WOL_MAGPKT
)) {
14662 tg3_flag_set(tp
, WOL_ENABLE
);
14663 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14668 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
14669 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
14670 u32 nic_cfg
, led_cfg
;
14671 u32 nic_phy_id
, ver
, cfg2
= 0, cfg4
= 0, eeprom_phy_id
;
14672 int eeprom_phy_serdes
= 0;
14674 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
14675 tp
->nic_sram_data_cfg
= nic_cfg
;
14677 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
14678 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
14679 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
14680 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
14681 tg3_asic_rev(tp
) != ASIC_REV_5703
&&
14682 (ver
> 0) && (ver
< 0x100))
14683 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
14685 if (tg3_asic_rev(tp
) == ASIC_REV_5785
)
14686 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_4
, &cfg4
);
14688 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
14689 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
14690 eeprom_phy_serdes
= 1;
14692 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
14693 if (nic_phy_id
!= 0) {
14694 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
14695 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
14697 eeprom_phy_id
= (id1
>> 16) << 10;
14698 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
14699 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
14703 tp
->phy_id
= eeprom_phy_id
;
14704 if (eeprom_phy_serdes
) {
14705 if (!tg3_flag(tp
, 5705_PLUS
))
14706 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
14708 tp
->phy_flags
|= TG3_PHYFLG_MII_SERDES
;
14711 if (tg3_flag(tp
, 5750_PLUS
))
14712 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
14713 SHASTA_EXT_LED_MODE_MASK
);
14715 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
14719 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
14720 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14723 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
14724 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14727 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
14728 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
14730 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14731 * read on some older 5700/5701 bootcode.
14733 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
14734 tg3_asic_rev(tp
) == ASIC_REV_5701
)
14735 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14739 case SHASTA_EXT_LED_SHARED
:
14740 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
14741 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
&&
14742 tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A1
)
14743 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14744 LED_CTRL_MODE_PHY_2
);
14747 case SHASTA_EXT_LED_MAC
:
14748 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
14751 case SHASTA_EXT_LED_COMBO
:
14752 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
14753 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5750_A0
)
14754 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
14755 LED_CTRL_MODE_PHY_2
);
14760 if ((tg3_asic_rev(tp
) == ASIC_REV_5700
||
14761 tg3_asic_rev(tp
) == ASIC_REV_5701
) &&
14762 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
14763 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
14765 if (tg3_chip_rev(tp
) == CHIPREV_5784_AX
)
14766 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
14768 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
) {
14769 tg3_flag_set(tp
, EEPROM_WRITE_PROT
);
14770 if ((tp
->pdev
->subsystem_vendor
==
14771 PCI_VENDOR_ID_ARIMA
) &&
14772 (tp
->pdev
->subsystem_device
== 0x205a ||
14773 tp
->pdev
->subsystem_device
== 0x2063))
14774 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14776 tg3_flag_clear(tp
, EEPROM_WRITE_PROT
);
14777 tg3_flag_set(tp
, IS_NIC
);
14780 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
14781 tg3_flag_set(tp
, ENABLE_ASF
);
14782 if (tg3_flag(tp
, 5750_PLUS
))
14783 tg3_flag_set(tp
, ASF_NEW_HANDSHAKE
);
14786 if ((nic_cfg
& NIC_SRAM_DATA_CFG_APE_ENABLE
) &&
14787 tg3_flag(tp
, 5750_PLUS
))
14788 tg3_flag_set(tp
, ENABLE_APE
);
14790 if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
&&
14791 !(nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
))
14792 tg3_flag_clear(tp
, WOL_CAP
);
14794 if (tg3_flag(tp
, WOL_CAP
) &&
14795 (nic_cfg
& NIC_SRAM_DATA_CFG_WOL_ENABLE
)) {
14796 tg3_flag_set(tp
, WOL_ENABLE
);
14797 device_set_wakeup_enable(&tp
->pdev
->dev
, true);
14800 if (cfg2
& (1 << 17))
14801 tp
->phy_flags
|= TG3_PHYFLG_CAPACITIVE_COUPLING
;
14803 /* serdes signal pre-emphasis in register 0x590 set by */
14804 /* bootcode if bit 18 is set */
14805 if (cfg2
& (1 << 18))
14806 tp
->phy_flags
|= TG3_PHYFLG_SERDES_PREEMPHASIS
;
14808 if ((tg3_flag(tp
, 57765_PLUS
) ||
14809 (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
14810 tg3_chip_rev(tp
) != CHIPREV_5784_AX
)) &&
14811 (cfg2
& NIC_SRAM_DATA_CFG_2_APD_EN
))
14812 tp
->phy_flags
|= TG3_PHYFLG_ENABLE_APD
;
14814 if (tg3_flag(tp
, PCI_EXPRESS
)) {
14817 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_3
, &cfg3
);
14818 if (tg3_asic_rev(tp
) != ASIC_REV_5785
&&
14819 !tg3_flag(tp
, 57765_PLUS
) &&
14820 (cfg3
& NIC_SRAM_ASPM_DEBOUNCE
))
14821 tg3_flag_set(tp
, ASPM_WORKAROUND
);
14822 if (cfg3
& NIC_SRAM_LNK_FLAP_AVOID
)
14823 tp
->phy_flags
|= TG3_PHYFLG_KEEP_LINK_ON_PWRDN
;
14824 if (cfg3
& NIC_SRAM_1G_ON_VAUX_OK
)
14825 tp
->phy_flags
|= TG3_PHYFLG_1G_ON_VAUX_OK
;
14828 if (cfg4
& NIC_SRAM_RGMII_INBAND_DISABLE
)
14829 tg3_flag_set(tp
, RGMII_INBAND_DISABLE
);
14830 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_RX_EN
)
14831 tg3_flag_set(tp
, RGMII_EXT_IBND_RX_EN
);
14832 if (cfg4
& NIC_SRAM_RGMII_EXT_IBND_TX_EN
)
14833 tg3_flag_set(tp
, RGMII_EXT_IBND_TX_EN
);
14836 if (tg3_flag(tp
, WOL_CAP
))
14837 device_set_wakeup_enable(&tp
->pdev
->dev
,
14838 tg3_flag(tp
, WOL_ENABLE
));
14840 device_set_wakeup_capable(&tp
->pdev
->dev
, false);
14843 static int tg3_ape_otp_read(struct tg3
*tp
, u32 offset
, u32
*val
)
14846 u32 val2
, off
= offset
* 8;
14848 err
= tg3_nvram_lock(tp
);
14852 tg3_ape_write32(tp
, TG3_APE_OTP_ADDR
, off
| APE_OTP_ADDR_CPU_ENABLE
);
14853 tg3_ape_write32(tp
, TG3_APE_OTP_CTRL
, APE_OTP_CTRL_PROG_EN
|
14854 APE_OTP_CTRL_CMD_RD
| APE_OTP_CTRL_START
);
14855 tg3_ape_read32(tp
, TG3_APE_OTP_CTRL
);
14858 for (i
= 0; i
< 100; i
++) {
14859 val2
= tg3_ape_read32(tp
, TG3_APE_OTP_STATUS
);
14860 if (val2
& APE_OTP_STATUS_CMD_DONE
) {
14861 *val
= tg3_ape_read32(tp
, TG3_APE_OTP_RD_DATA
);
14867 tg3_ape_write32(tp
, TG3_APE_OTP_CTRL
, 0);
14869 tg3_nvram_unlock(tp
);
14870 if (val2
& APE_OTP_STATUS_CMD_DONE
)
14876 static int tg3_issue_otp_command(struct tg3
*tp
, u32 cmd
)
14881 tw32(OTP_CTRL
, cmd
| OTP_CTRL_OTP_CMD_START
);
14882 tw32(OTP_CTRL
, cmd
);
14884 /* Wait for up to 1 ms for command to execute. */
14885 for (i
= 0; i
< 100; i
++) {
14886 val
= tr32(OTP_STATUS
);
14887 if (val
& OTP_STATUS_CMD_DONE
)
14892 return (val
& OTP_STATUS_CMD_DONE
) ? 0 : -EBUSY
;
14895 /* Read the gphy configuration from the OTP region of the chip. The gphy
14896 * configuration is a 32-bit value that straddles the alignment boundary.
14897 * We do two 32-bit reads and then shift and merge the results.
14899 static u32
tg3_read_otp_phycfg(struct tg3
*tp
)
14901 u32 bhalf_otp
, thalf_otp
;
14903 tw32(OTP_MODE
, OTP_MODE_OTP_THRU_GRC
);
14905 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_INIT
))
14908 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC1
);
14910 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
14913 thalf_otp
= tr32(OTP_READ_DATA
);
14915 tw32(OTP_ADDRESS
, OTP_ADDRESS_MAGIC2
);
14917 if (tg3_issue_otp_command(tp
, OTP_CTRL_OTP_CMD_READ
))
14920 bhalf_otp
= tr32(OTP_READ_DATA
);
14922 return ((thalf_otp
& 0x0000ffff) << 16) | (bhalf_otp
>> 16);
14925 static void tg3_phy_init_link_config(struct tg3
*tp
)
14927 u32 adv
= ADVERTISED_Autoneg
;
14929 if (!(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
14930 adv
|= ADVERTISED_1000baseT_Half
|
14931 ADVERTISED_1000baseT_Full
;
14933 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
14934 adv
|= ADVERTISED_100baseT_Half
|
14935 ADVERTISED_100baseT_Full
|
14936 ADVERTISED_10baseT_Half
|
14937 ADVERTISED_10baseT_Full
|
14940 adv
|= ADVERTISED_FIBRE
;
14942 tp
->link_config
.advertising
= adv
;
14943 tp
->link_config
.speed
= SPEED_UNKNOWN
;
14944 tp
->link_config
.duplex
= DUPLEX_UNKNOWN
;
14945 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
14946 tp
->link_config
.active_speed
= SPEED_UNKNOWN
;
14947 tp
->link_config
.active_duplex
= DUPLEX_UNKNOWN
;
14952 static int tg3_phy_probe(struct tg3
*tp
)
14954 u32 hw_phy_id_1
, hw_phy_id_2
;
14955 u32 hw_phy_id
, hw_phy_id_masked
;
14958 /* flow control autonegotiation is default behavior */
14959 tg3_flag_set(tp
, PAUSE_AUTONEG
);
14960 tp
->link_config
.flowctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
14962 if (tg3_flag(tp
, ENABLE_APE
)) {
14963 switch (tp
->pci_fn
) {
14965 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY0
;
14968 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY1
;
14971 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY2
;
14974 tp
->phy_ape_lock
= TG3_APE_LOCK_PHY3
;
14979 if (!tg3_flag(tp
, ENABLE_ASF
) &&
14980 !(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
14981 !(tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
))
14982 tp
->phy_flags
&= ~(TG3_PHYFLG_1G_ON_VAUX_OK
|
14983 TG3_PHYFLG_KEEP_LINK_ON_PWRDN
);
14985 if (tg3_flag(tp
, USE_PHYLIB
))
14986 return tg3_phy_init(tp
);
14988 /* Reading the PHY ID register can conflict with ASF
14989 * firmware access to the PHY hardware.
14992 if (tg3_flag(tp
, ENABLE_ASF
) || tg3_flag(tp
, ENABLE_APE
)) {
14993 hw_phy_id
= hw_phy_id_masked
= TG3_PHY_ID_INVALID
;
14995 /* Now read the physical PHY_ID from the chip and verify
14996 * that it is sane. If it doesn't look good, we fall back
14997 * to either the hard-coded table based PHY_ID and failing
14998 * that the value found in the eeprom area.
15000 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
15001 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
15003 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
15004 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
15005 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
15007 hw_phy_id_masked
= hw_phy_id
& TG3_PHY_ID_MASK
;
15010 if (!err
&& TG3_KNOWN_PHY_ID(hw_phy_id_masked
)) {
15011 tp
->phy_id
= hw_phy_id
;
15012 if (hw_phy_id_masked
== TG3_PHY_ID_BCM8002
)
15013 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
15015 tp
->phy_flags
&= ~TG3_PHYFLG_PHY_SERDES
;
15017 if (tp
->phy_id
!= TG3_PHY_ID_INVALID
) {
15018 /* Do nothing, phy ID already set up in
15019 * tg3_get_eeprom_hw_cfg().
15022 struct subsys_tbl_ent
*p
;
15024 /* No eeprom signature? Try the hardcoded
15025 * subsys device table.
15027 p
= tg3_lookup_by_subsys(tp
);
15029 tp
->phy_id
= p
->phy_id
;
15030 } else if (!tg3_flag(tp
, IS_SSB_CORE
)) {
15031 /* For now we saw the IDs 0xbc050cd0,
15032 * 0xbc050f80 and 0xbc050c30 on devices
15033 * connected to an BCM4785 and there are
15034 * probably more. Just assume that the phy is
15035 * supported when it is connected to a SSB core
15042 tp
->phy_id
== TG3_PHY_ID_BCM8002
)
15043 tp
->phy_flags
|= TG3_PHYFLG_PHY_SERDES
;
15047 if (!(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15048 (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15049 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15050 tg3_asic_rev(tp
) == ASIC_REV_57766
||
15051 tg3_asic_rev(tp
) == ASIC_REV_5762
||
15052 (tg3_asic_rev(tp
) == ASIC_REV_5717
&&
15053 tg3_chip_rev_id(tp
) != CHIPREV_ID_5717_A0
) ||
15054 (tg3_asic_rev(tp
) == ASIC_REV_57765
&&
15055 tg3_chip_rev_id(tp
) != CHIPREV_ID_57765_A0
)))
15056 tp
->phy_flags
|= TG3_PHYFLG_EEE_CAP
;
15058 tg3_phy_init_link_config(tp
);
15060 if (!(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
) &&
15061 !(tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
) &&
15062 !tg3_flag(tp
, ENABLE_APE
) &&
15063 !tg3_flag(tp
, ENABLE_ASF
)) {
15066 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
15067 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
15068 (bmsr
& BMSR_LSTATUS
))
15069 goto skip_phy_reset
;
15071 err
= tg3_phy_reset(tp
);
15075 tg3_phy_set_wirespeed(tp
);
15077 if (!tg3_phy_copper_an_config_ok(tp
, &dummy
)) {
15078 tg3_phy_autoneg_cfg(tp
, tp
->link_config
.advertising
,
15079 tp
->link_config
.flowctrl
);
15081 tg3_writephy(tp
, MII_BMCR
,
15082 BMCR_ANENABLE
| BMCR_ANRESTART
);
15087 if ((tp
->phy_id
& TG3_PHY_ID_MASK
) == TG3_PHY_ID_BCM5401
) {
15088 err
= tg3_init_5401phy_dsp(tp
);
15092 err
= tg3_init_5401phy_dsp(tp
);
15098 static void tg3_read_vpd(struct tg3
*tp
)
15101 unsigned int block_end
, rosize
, len
;
15105 vpd_data
= (u8
*)tg3_vpd_readblock(tp
, &vpdlen
);
15109 i
= pci_vpd_find_tag(vpd_data
, 0, vpdlen
, PCI_VPD_LRDT_RO_DATA
);
15111 goto out_not_found
;
15113 rosize
= pci_vpd_lrdt_size(&vpd_data
[i
]);
15114 block_end
= i
+ PCI_VPD_LRDT_TAG_SIZE
+ rosize
;
15115 i
+= PCI_VPD_LRDT_TAG_SIZE
;
15117 if (block_end
> vpdlen
)
15118 goto out_not_found
;
15120 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15121 PCI_VPD_RO_KEYWORD_MFR_ID
);
15123 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
15125 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15126 if (j
+ len
> block_end
|| len
!= 4 ||
15127 memcmp(&vpd_data
[j
], "1028", 4))
15130 j
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15131 PCI_VPD_RO_KEYWORD_VENDOR0
);
15135 len
= pci_vpd_info_field_size(&vpd_data
[j
]);
15137 j
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15138 if (j
+ len
> block_end
)
15141 if (len
>= sizeof(tp
->fw_ver
))
15142 len
= sizeof(tp
->fw_ver
) - 1;
15143 memset(tp
->fw_ver
, 0, sizeof(tp
->fw_ver
));
15144 snprintf(tp
->fw_ver
, sizeof(tp
->fw_ver
), "%.*s bc ", len
,
15149 i
= pci_vpd_find_info_keyword(vpd_data
, i
, rosize
,
15150 PCI_VPD_RO_KEYWORD_PARTNO
);
15152 goto out_not_found
;
15154 len
= pci_vpd_info_field_size(&vpd_data
[i
]);
15156 i
+= PCI_VPD_INFO_FLD_HDR_SIZE
;
15157 if (len
> TG3_BPN_SIZE
||
15158 (len
+ i
) > vpdlen
)
15159 goto out_not_found
;
15161 memcpy(tp
->board_part_number
, &vpd_data
[i
], len
);
15165 if (tp
->board_part_number
[0])
15169 if (tg3_asic_rev(tp
) == ASIC_REV_5717
) {
15170 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15171 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
)
15172 strcpy(tp
->board_part_number
, "BCM5717");
15173 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
)
15174 strcpy(tp
->board_part_number
, "BCM5718");
15177 } else if (tg3_asic_rev(tp
) == ASIC_REV_57780
) {
15178 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57780
)
15179 strcpy(tp
->board_part_number
, "BCM57780");
15180 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57760
)
15181 strcpy(tp
->board_part_number
, "BCM57760");
15182 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57790
)
15183 strcpy(tp
->board_part_number
, "BCM57790");
15184 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57788
)
15185 strcpy(tp
->board_part_number
, "BCM57788");
15188 } else if (tg3_asic_rev(tp
) == ASIC_REV_57765
) {
15189 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
)
15190 strcpy(tp
->board_part_number
, "BCM57761");
15191 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
)
15192 strcpy(tp
->board_part_number
, "BCM57765");
15193 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
)
15194 strcpy(tp
->board_part_number
, "BCM57781");
15195 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
)
15196 strcpy(tp
->board_part_number
, "BCM57785");
15197 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
)
15198 strcpy(tp
->board_part_number
, "BCM57791");
15199 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
)
15200 strcpy(tp
->board_part_number
, "BCM57795");
15203 } else if (tg3_asic_rev(tp
) == ASIC_REV_57766
) {
15204 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
)
15205 strcpy(tp
->board_part_number
, "BCM57762");
15206 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
)
15207 strcpy(tp
->board_part_number
, "BCM57766");
15208 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
)
15209 strcpy(tp
->board_part_number
, "BCM57782");
15210 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15211 strcpy(tp
->board_part_number
, "BCM57786");
15214 } else if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15215 strcpy(tp
->board_part_number
, "BCM95906");
15218 strcpy(tp
->board_part_number
, "none");
15222 static int tg3_fw_img_is_valid(struct tg3
*tp
, u32 offset
)
15226 if (tg3_nvram_read(tp
, offset
, &val
) ||
15227 (val
& 0xfc000000) != 0x0c000000 ||
15228 tg3_nvram_read(tp
, offset
+ 4, &val
) ||
15235 static void tg3_read_bc_ver(struct tg3
*tp
)
15237 u32 val
, offset
, start
, ver_offset
;
15239 bool newver
= false;
15241 if (tg3_nvram_read(tp
, 0xc, &offset
) ||
15242 tg3_nvram_read(tp
, 0x4, &start
))
15245 offset
= tg3_nvram_logical_addr(tp
, offset
);
15247 if (tg3_nvram_read(tp
, offset
, &val
))
15250 if ((val
& 0xfc000000) == 0x0c000000) {
15251 if (tg3_nvram_read(tp
, offset
+ 4, &val
))
15258 dst_off
= strlen(tp
->fw_ver
);
15261 if (TG3_VER_SIZE
- dst_off
< 16 ||
15262 tg3_nvram_read(tp
, offset
+ 8, &ver_offset
))
15265 offset
= offset
+ ver_offset
- start
;
15266 for (i
= 0; i
< 16; i
+= 4) {
15268 if (tg3_nvram_read_be32(tp
, offset
+ i
, &v
))
15271 memcpy(tp
->fw_ver
+ dst_off
+ i
, &v
, sizeof(v
));
15276 if (tg3_nvram_read(tp
, TG3_NVM_PTREV_BCVER
, &ver_offset
))
15279 major
= (ver_offset
& TG3_NVM_BCVER_MAJMSK
) >>
15280 TG3_NVM_BCVER_MAJSFT
;
15281 minor
= ver_offset
& TG3_NVM_BCVER_MINMSK
;
15282 snprintf(&tp
->fw_ver
[dst_off
], TG3_VER_SIZE
- dst_off
,
15283 "v%d.%02d", major
, minor
);
15287 static void tg3_read_hwsb_ver(struct tg3
*tp
)
15289 u32 val
, major
, minor
;
15291 /* Use native endian representation */
15292 if (tg3_nvram_read(tp
, TG3_NVM_HWSB_CFG1
, &val
))
15295 major
= (val
& TG3_NVM_HWSB_CFG1_MAJMSK
) >>
15296 TG3_NVM_HWSB_CFG1_MAJSFT
;
15297 minor
= (val
& TG3_NVM_HWSB_CFG1_MINMSK
) >>
15298 TG3_NVM_HWSB_CFG1_MINSFT
;
15300 snprintf(&tp
->fw_ver
[0], 32, "sb v%d.%02d", major
, minor
);
15303 static void tg3_read_sb_ver(struct tg3
*tp
, u32 val
)
15305 u32 offset
, major
, minor
, build
;
15307 strncat(tp
->fw_ver
, "sb", TG3_VER_SIZE
- strlen(tp
->fw_ver
) - 1);
15309 if ((val
& TG3_EEPROM_SB_FORMAT_MASK
) != TG3_EEPROM_SB_FORMAT_1
)
15312 switch (val
& TG3_EEPROM_SB_REVISION_MASK
) {
15313 case TG3_EEPROM_SB_REVISION_0
:
15314 offset
= TG3_EEPROM_SB_F1R0_EDH_OFF
;
15316 case TG3_EEPROM_SB_REVISION_2
:
15317 offset
= TG3_EEPROM_SB_F1R2_EDH_OFF
;
15319 case TG3_EEPROM_SB_REVISION_3
:
15320 offset
= TG3_EEPROM_SB_F1R3_EDH_OFF
;
15322 case TG3_EEPROM_SB_REVISION_4
:
15323 offset
= TG3_EEPROM_SB_F1R4_EDH_OFF
;
15325 case TG3_EEPROM_SB_REVISION_5
:
15326 offset
= TG3_EEPROM_SB_F1R5_EDH_OFF
;
15328 case TG3_EEPROM_SB_REVISION_6
:
15329 offset
= TG3_EEPROM_SB_F1R6_EDH_OFF
;
15335 if (tg3_nvram_read(tp
, offset
, &val
))
15338 build
= (val
& TG3_EEPROM_SB_EDH_BLD_MASK
) >>
15339 TG3_EEPROM_SB_EDH_BLD_SHFT
;
15340 major
= (val
& TG3_EEPROM_SB_EDH_MAJ_MASK
) >>
15341 TG3_EEPROM_SB_EDH_MAJ_SHFT
;
15342 minor
= val
& TG3_EEPROM_SB_EDH_MIN_MASK
;
15344 if (minor
> 99 || build
> 26)
15347 offset
= strlen(tp
->fw_ver
);
15348 snprintf(&tp
->fw_ver
[offset
], TG3_VER_SIZE
- offset
,
15349 " v%d.%02d", major
, minor
);
15352 offset
= strlen(tp
->fw_ver
);
15353 if (offset
< TG3_VER_SIZE
- 1)
15354 tp
->fw_ver
[offset
] = 'a' + build
- 1;
15358 static void tg3_read_mgmtfw_ver(struct tg3
*tp
)
15360 u32 val
, offset
, start
;
15363 for (offset
= TG3_NVM_DIR_START
;
15364 offset
< TG3_NVM_DIR_END
;
15365 offset
+= TG3_NVM_DIRENT_SIZE
) {
15366 if (tg3_nvram_read(tp
, offset
, &val
))
15369 if ((val
>> TG3_NVM_DIRTYPE_SHIFT
) == TG3_NVM_DIRTYPE_ASFINI
)
15373 if (offset
== TG3_NVM_DIR_END
)
15376 if (!tg3_flag(tp
, 5705_PLUS
))
15377 start
= 0x08000000;
15378 else if (tg3_nvram_read(tp
, offset
- 4, &start
))
15381 if (tg3_nvram_read(tp
, offset
+ 4, &offset
) ||
15382 !tg3_fw_img_is_valid(tp
, offset
) ||
15383 tg3_nvram_read(tp
, offset
+ 8, &val
))
15386 offset
+= val
- start
;
15388 vlen
= strlen(tp
->fw_ver
);
15390 tp
->fw_ver
[vlen
++] = ',';
15391 tp
->fw_ver
[vlen
++] = ' ';
15393 for (i
= 0; i
< 4; i
++) {
15395 if (tg3_nvram_read_be32(tp
, offset
, &v
))
15398 offset
+= sizeof(v
);
15400 if (vlen
> TG3_VER_SIZE
- sizeof(v
)) {
15401 memcpy(&tp
->fw_ver
[vlen
], &v
, TG3_VER_SIZE
- vlen
);
15405 memcpy(&tp
->fw_ver
[vlen
], &v
, sizeof(v
));
15410 static void tg3_probe_ncsi(struct tg3
*tp
)
15414 apedata
= tg3_ape_read32(tp
, TG3_APE_SEG_SIG
);
15415 if (apedata
!= APE_SEG_SIG_MAGIC
)
15418 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_STATUS
);
15419 if (!(apedata
& APE_FW_STATUS_READY
))
15422 if (tg3_ape_read32(tp
, TG3_APE_FW_FEATURES
) & TG3_APE_FW_FEATURE_NCSI
)
15423 tg3_flag_set(tp
, APE_HAS_NCSI
);
15426 static void tg3_read_dash_ver(struct tg3
*tp
)
15432 apedata
= tg3_ape_read32(tp
, TG3_APE_FW_VERSION
);
15434 if (tg3_flag(tp
, APE_HAS_NCSI
))
15436 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
)
15441 vlen
= strlen(tp
->fw_ver
);
15443 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " %s v%d.%d.%d.%d",
15445 (apedata
& APE_FW_VERSION_MAJMSK
) >> APE_FW_VERSION_MAJSFT
,
15446 (apedata
& APE_FW_VERSION_MINMSK
) >> APE_FW_VERSION_MINSFT
,
15447 (apedata
& APE_FW_VERSION_REVMSK
) >> APE_FW_VERSION_REVSFT
,
15448 (apedata
& APE_FW_VERSION_BLDMSK
));
15451 static void tg3_read_otp_ver(struct tg3
*tp
)
15455 if (tg3_asic_rev(tp
) != ASIC_REV_5762
)
15458 if (!tg3_ape_otp_read(tp
, OTP_ADDRESS_MAGIC0
, &val
) &&
15459 !tg3_ape_otp_read(tp
, OTP_ADDRESS_MAGIC0
+ 4, &val2
) &&
15460 TG3_OTP_MAGIC0_VALID(val
)) {
15461 u64 val64
= (u64
) val
<< 32 | val2
;
15465 for (i
= 0; i
< 7; i
++) {
15466 if ((val64
& 0xff) == 0)
15468 ver
= val64
& 0xff;
15471 vlen
= strlen(tp
->fw_ver
);
15472 snprintf(&tp
->fw_ver
[vlen
], TG3_VER_SIZE
- vlen
, " .%02d", ver
);
15476 static void tg3_read_fw_ver(struct tg3
*tp
)
15479 bool vpd_vers
= false;
15481 if (tp
->fw_ver
[0] != 0)
15484 if (tg3_flag(tp
, NO_NVRAM
)) {
15485 strcat(tp
->fw_ver
, "sb");
15486 tg3_read_otp_ver(tp
);
15490 if (tg3_nvram_read(tp
, 0, &val
))
15493 if (val
== TG3_EEPROM_MAGIC
)
15494 tg3_read_bc_ver(tp
);
15495 else if ((val
& TG3_EEPROM_MAGIC_FW_MSK
) == TG3_EEPROM_MAGIC_FW
)
15496 tg3_read_sb_ver(tp
, val
);
15497 else if ((val
& TG3_EEPROM_MAGIC_HW_MSK
) == TG3_EEPROM_MAGIC_HW
)
15498 tg3_read_hwsb_ver(tp
);
15500 if (tg3_flag(tp
, ENABLE_ASF
)) {
15501 if (tg3_flag(tp
, ENABLE_APE
)) {
15502 tg3_probe_ncsi(tp
);
15504 tg3_read_dash_ver(tp
);
15505 } else if (!vpd_vers
) {
15506 tg3_read_mgmtfw_ver(tp
);
15510 tp
->fw_ver
[TG3_VER_SIZE
- 1] = 0;
15513 static inline u32
tg3_rx_ret_ring_size(struct tg3
*tp
)
15515 if (tg3_flag(tp
, LRG_PROD_RING_CAP
))
15516 return TG3_RX_RET_MAX_SIZE_5717
;
15517 else if (tg3_flag(tp
, JUMBO_CAPABLE
) && !tg3_flag(tp
, 5780_CLASS
))
15518 return TG3_RX_RET_MAX_SIZE_5700
;
15520 return TG3_RX_RET_MAX_SIZE_5705
;
15523 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets
) = {
15524 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
15525 { PCI_DEVICE(PCI_VENDOR_ID_AMD
, PCI_DEVICE_ID_AMD_8131_BRIDGE
) },
15526 { PCI_DEVICE(PCI_VENDOR_ID_VIA
, PCI_DEVICE_ID_VIA_8385_0
) },
15530 static struct pci_dev
*tg3_find_peer(struct tg3
*tp
)
15532 struct pci_dev
*peer
;
15533 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
15535 for (func
= 0; func
< 8; func
++) {
15536 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
15537 if (peer
&& peer
!= tp
->pdev
)
15541 /* 5704 can be configured in single-port mode, set peer to
15542 * tp->pdev in that case.
15550 * We don't need to keep the refcount elevated; there's no way
15551 * to remove one half of this device without removing the other
15558 static void tg3_detect_asic_rev(struct tg3
*tp
, u32 misc_ctrl_reg
)
15560 tp
->pci_chip_rev_id
= misc_ctrl_reg
>> MISC_HOST_CTRL_CHIPREV_SHIFT
;
15561 if (tg3_asic_rev(tp
) == ASIC_REV_USE_PROD_ID_REG
) {
15564 /* All devices that use the alternate
15565 * ASIC REV location have a CPMU.
15567 tg3_flag_set(tp
, CPMU_PRESENT
);
15569 if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
15570 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
15571 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
15572 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
15573 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
15574 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
15575 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
15576 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
)
15577 reg
= TG3PCI_GEN2_PRODID_ASICREV
;
15578 else if (tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57781
||
15579 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57785
||
15580 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57761
||
15581 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57765
||
15582 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57791
||
15583 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57795
||
15584 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57762
||
15585 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57766
||
15586 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57782
||
15587 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_57786
)
15588 reg
= TG3PCI_GEN15_PRODID_ASICREV
;
15590 reg
= TG3PCI_PRODID_ASICREV
;
15592 pci_read_config_dword(tp
->pdev
, reg
, &tp
->pci_chip_rev_id
);
15595 /* Wrong chip ID in 5752 A0. This code can be removed later
15596 * as A0 is not in production.
15598 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5752_A0_HW
)
15599 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
15601 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_C0
)
15602 tp
->pci_chip_rev_id
= CHIPREV_ID_5720_A0
;
15604 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15605 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15606 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15607 tg3_flag_set(tp
, 5717_PLUS
);
15609 if (tg3_asic_rev(tp
) == ASIC_REV_57765
||
15610 tg3_asic_rev(tp
) == ASIC_REV_57766
)
15611 tg3_flag_set(tp
, 57765_CLASS
);
15613 if (tg3_flag(tp
, 57765_CLASS
) || tg3_flag(tp
, 5717_PLUS
) ||
15614 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15615 tg3_flag_set(tp
, 57765_PLUS
);
15617 /* Intentionally exclude ASIC_REV_5906 */
15618 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
15619 tg3_asic_rev(tp
) == ASIC_REV_5787
||
15620 tg3_asic_rev(tp
) == ASIC_REV_5784
||
15621 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15622 tg3_asic_rev(tp
) == ASIC_REV_5785
||
15623 tg3_asic_rev(tp
) == ASIC_REV_57780
||
15624 tg3_flag(tp
, 57765_PLUS
))
15625 tg3_flag_set(tp
, 5755_PLUS
);
15627 if (tg3_asic_rev(tp
) == ASIC_REV_5780
||
15628 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15629 tg3_flag_set(tp
, 5780_CLASS
);
15631 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
15632 tg3_asic_rev(tp
) == ASIC_REV_5752
||
15633 tg3_asic_rev(tp
) == ASIC_REV_5906
||
15634 tg3_flag(tp
, 5755_PLUS
) ||
15635 tg3_flag(tp
, 5780_CLASS
))
15636 tg3_flag_set(tp
, 5750_PLUS
);
15638 if (tg3_asic_rev(tp
) == ASIC_REV_5705
||
15639 tg3_flag(tp
, 5750_PLUS
))
15640 tg3_flag_set(tp
, 5705_PLUS
);
15643 static bool tg3_10_100_only_device(struct tg3
*tp
,
15644 const struct pci_device_id
*ent
)
15646 u32 grc_misc_cfg
= tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
;
15648 if ((tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15649 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
15650 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
))
15653 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_10_100_ONLY
) {
15654 if (tg3_asic_rev(tp
) == ASIC_REV_5705
) {
15655 if (ent
->driver_data
& TG3_DRV_DATA_FLAG_5705_10_100
)
15665 static int tg3_get_invariants(struct tg3
*tp
, const struct pci_device_id
*ent
)
15668 u32 pci_state_reg
, grc_misc_cfg
;
15673 /* Force memory write invalidate off. If we leave it on,
15674 * then on 5700_BX chips we have to enable a workaround.
15675 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15676 * to match the cacheline size. The Broadcom driver have this
15677 * workaround but turns MWI off all the times so never uses
15678 * it. This seems to suggest that the workaround is insufficient.
15680 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
15681 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
15682 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
15684 /* Important! -- Make sure register accesses are byteswapped
15685 * correctly. Also, for those chips that require it, make
15686 * sure that indirect register accesses are enabled before
15687 * the first operation.
15689 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15691 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
15692 MISC_HOST_CTRL_CHIPREV
);
15693 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
15694 tp
->misc_host_ctrl
);
15696 tg3_detect_asic_rev(tp
, misc_ctrl_reg
);
15698 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15699 * we need to disable memory and use config. cycles
15700 * only to access all registers. The 5702/03 chips
15701 * can mistakenly decode the special cycles from the
15702 * ICH chipsets as memory write cycles, causing corruption
15703 * of register and memory space. Only certain ICH bridges
15704 * will drive special cycles with non-zero data during the
15705 * address phase which can fall within the 5703's address
15706 * range. This is not an ICH bug as the PCI spec allows
15707 * non-zero address during special cycles. However, only
15708 * these ICH bridges are known to drive non-zero addresses
15709 * during special cycles.
15711 * Since special cycles do not cross PCI bridges, we only
15712 * enable this workaround if the 5703 is on the secondary
15713 * bus of these ICH bridges.
15715 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A1
) ||
15716 (tg3_chip_rev_id(tp
) == CHIPREV_ID_5703_A2
)) {
15717 static struct tg3_dev_id
{
15721 } ich_chipsets
[] = {
15722 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
15724 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
15726 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
15728 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
15732 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
15733 struct pci_dev
*bridge
= NULL
;
15735 while (pci_id
->vendor
!= 0) {
15736 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
15742 if (pci_id
->rev
!= PCI_ANY_ID
) {
15743 if (bridge
->revision
> pci_id
->rev
)
15746 if (bridge
->subordinate
&&
15747 (bridge
->subordinate
->number
==
15748 tp
->pdev
->bus
->number
)) {
15749 tg3_flag_set(tp
, ICH_WORKAROUND
);
15750 pci_dev_put(bridge
);
15756 if (tg3_asic_rev(tp
) == ASIC_REV_5701
) {
15757 static struct tg3_dev_id
{
15760 } bridge_chipsets
[] = {
15761 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_0
},
15762 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_PXH_1
},
15765 struct tg3_dev_id
*pci_id
= &bridge_chipsets
[0];
15766 struct pci_dev
*bridge
= NULL
;
15768 while (pci_id
->vendor
!= 0) {
15769 bridge
= pci_get_device(pci_id
->vendor
,
15776 if (bridge
->subordinate
&&
15777 (bridge
->subordinate
->number
<=
15778 tp
->pdev
->bus
->number
) &&
15779 (bridge
->subordinate
->busn_res
.end
>=
15780 tp
->pdev
->bus
->number
)) {
15781 tg3_flag_set(tp
, 5701_DMA_BUG
);
15782 pci_dev_put(bridge
);
15788 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15789 * DMA addresses > 40-bit. This bridge may have other additional
15790 * 57xx devices behind it in some 4-port NIC designs for example.
15791 * Any tg3 device found behind the bridge will also need the 40-bit
15794 if (tg3_flag(tp
, 5780_CLASS
)) {
15795 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15796 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
15798 struct pci_dev
*bridge
= NULL
;
15801 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
15802 PCI_DEVICE_ID_SERVERWORKS_EPB
,
15804 if (bridge
&& bridge
->subordinate
&&
15805 (bridge
->subordinate
->number
<=
15806 tp
->pdev
->bus
->number
) &&
15807 (bridge
->subordinate
->busn_res
.end
>=
15808 tp
->pdev
->bus
->number
)) {
15809 tg3_flag_set(tp
, 40BIT_DMA_BUG
);
15810 pci_dev_put(bridge
);
15816 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
15817 tg3_asic_rev(tp
) == ASIC_REV_5714
)
15818 tp
->pdev_peer
= tg3_find_peer(tp
);
15820 /* Determine TSO capabilities */
15821 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
)
15822 ; /* Do nothing. HW bug. */
15823 else if (tg3_flag(tp
, 57765_PLUS
))
15824 tg3_flag_set(tp
, HW_TSO_3
);
15825 else if (tg3_flag(tp
, 5755_PLUS
) ||
15826 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15827 tg3_flag_set(tp
, HW_TSO_2
);
15828 else if (tg3_flag(tp
, 5750_PLUS
)) {
15829 tg3_flag_set(tp
, HW_TSO_1
);
15830 tg3_flag_set(tp
, TSO_BUG
);
15831 if (tg3_asic_rev(tp
) == ASIC_REV_5750
&&
15832 tg3_chip_rev_id(tp
) >= CHIPREV_ID_5750_C2
)
15833 tg3_flag_clear(tp
, TSO_BUG
);
15834 } else if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
15835 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
15836 tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) {
15837 tg3_flag_set(tp
, FW_TSO
);
15838 tg3_flag_set(tp
, TSO_BUG
);
15839 if (tg3_asic_rev(tp
) == ASIC_REV_5705
)
15840 tp
->fw_needed
= FIRMWARE_TG3TSO5
;
15842 tp
->fw_needed
= FIRMWARE_TG3TSO
;
15845 /* Selectively allow TSO based on operating conditions */
15846 if (tg3_flag(tp
, HW_TSO_1
) ||
15847 tg3_flag(tp
, HW_TSO_2
) ||
15848 tg3_flag(tp
, HW_TSO_3
) ||
15849 tg3_flag(tp
, FW_TSO
)) {
15850 /* For firmware TSO, assume ASF is disabled.
15851 * We'll disable TSO later if we discover ASF
15852 * is enabled in tg3_get_eeprom_hw_cfg().
15854 tg3_flag_set(tp
, TSO_CAPABLE
);
15856 tg3_flag_clear(tp
, TSO_CAPABLE
);
15857 tg3_flag_clear(tp
, TSO_BUG
);
15858 tp
->fw_needed
= NULL
;
15861 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
)
15862 tp
->fw_needed
= FIRMWARE_TG3
;
15864 if (tg3_asic_rev(tp
) == ASIC_REV_57766
)
15865 tp
->fw_needed
= FIRMWARE_TG357766
;
15869 if (tg3_flag(tp
, 5750_PLUS
)) {
15870 tg3_flag_set(tp
, SUPPORT_MSI
);
15871 if (tg3_chip_rev(tp
) == CHIPREV_5750_AX
||
15872 tg3_chip_rev(tp
) == CHIPREV_5750_BX
||
15873 (tg3_asic_rev(tp
) == ASIC_REV_5714
&&
15874 tg3_chip_rev_id(tp
) <= CHIPREV_ID_5714_A2
&&
15875 tp
->pdev_peer
== tp
->pdev
))
15876 tg3_flag_clear(tp
, SUPPORT_MSI
);
15878 if (tg3_flag(tp
, 5755_PLUS
) ||
15879 tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15880 tg3_flag_set(tp
, 1SHOT_MSI
);
15883 if (tg3_flag(tp
, 57765_PLUS
)) {
15884 tg3_flag_set(tp
, SUPPORT_MSIX
);
15885 tp
->irq_max
= TG3_IRQ_MAX_VECS
;
15891 if (tp
->irq_max
> 1) {
15892 tp
->rxq_max
= TG3_RSS_MAX_NUM_QS
;
15893 tg3_rss_init_dflt_indir_tbl(tp
, TG3_RSS_MAX_NUM_QS
);
15895 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
15896 tg3_asic_rev(tp
) == ASIC_REV_5720
)
15897 tp
->txq_max
= tp
->irq_max
- 1;
15900 if (tg3_flag(tp
, 5755_PLUS
) ||
15901 tg3_asic_rev(tp
) == ASIC_REV_5906
)
15902 tg3_flag_set(tp
, SHORT_DMA_BUG
);
15904 if (tg3_asic_rev(tp
) == ASIC_REV_5719
)
15905 tp
->dma_limit
= TG3_TX_BD_DMA_MAX_4K
;
15907 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
15908 tg3_asic_rev(tp
) == ASIC_REV_5719
||
15909 tg3_asic_rev(tp
) == ASIC_REV_5720
||
15910 tg3_asic_rev(tp
) == ASIC_REV_5762
)
15911 tg3_flag_set(tp
, LRG_PROD_RING_CAP
);
15913 if (tg3_flag(tp
, 57765_PLUS
) &&
15914 tg3_chip_rev_id(tp
) != CHIPREV_ID_5719_A0
)
15915 tg3_flag_set(tp
, USE_JUMBO_BDFLAG
);
15917 if (!tg3_flag(tp
, 5705_PLUS
) ||
15918 tg3_flag(tp
, 5780_CLASS
) ||
15919 tg3_flag(tp
, USE_JUMBO_BDFLAG
))
15920 tg3_flag_set(tp
, JUMBO_CAPABLE
);
15922 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
15925 if (pci_is_pcie(tp
->pdev
)) {
15928 tg3_flag_set(tp
, PCI_EXPRESS
);
15930 pcie_capability_read_word(tp
->pdev
, PCI_EXP_LNKCTL
, &lnkctl
);
15931 if (lnkctl
& PCI_EXP_LNKCTL_CLKREQ_EN
) {
15932 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
15933 tg3_flag_clear(tp
, HW_TSO_2
);
15934 tg3_flag_clear(tp
, TSO_CAPABLE
);
15936 if (tg3_asic_rev(tp
) == ASIC_REV_5784
||
15937 tg3_asic_rev(tp
) == ASIC_REV_5761
||
15938 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A0
||
15939 tg3_chip_rev_id(tp
) == CHIPREV_ID_57780_A1
)
15940 tg3_flag_set(tp
, CLKREQ_BUG
);
15941 } else if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5717_A0
) {
15942 tg3_flag_set(tp
, L1PLLPD_EN
);
15944 } else if (tg3_asic_rev(tp
) == ASIC_REV_5785
) {
15945 /* BCM5785 devices are effectively PCIe devices, and should
15946 * follow PCIe codepaths, but do not have a PCIe capabilities
15949 tg3_flag_set(tp
, PCI_EXPRESS
);
15950 } else if (!tg3_flag(tp
, 5705_PLUS
) ||
15951 tg3_flag(tp
, 5780_CLASS
)) {
15952 tp
->pcix_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_PCIX
);
15953 if (!tp
->pcix_cap
) {
15954 dev_err(&tp
->pdev
->dev
,
15955 "Cannot find PCI-X capability, aborting\n");
15959 if (!(pci_state_reg
& PCISTATE_CONV_PCI_MODE
))
15960 tg3_flag_set(tp
, PCIX_MODE
);
15963 /* If we have an AMD 762 or VIA K8T800 chipset, write
15964 * reordering to the mailbox registers done by the host
15965 * controller can cause major troubles. We read back from
15966 * every mailbox register write to force the writes to be
15967 * posted to the chip in order.
15969 if (pci_dev_present(tg3_write_reorder_chipsets
) &&
15970 !tg3_flag(tp
, PCI_EXPRESS
))
15971 tg3_flag_set(tp
, MBOX_WRITE_REORDER
);
15973 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
,
15974 &tp
->pci_cacheline_sz
);
15975 pci_read_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15976 &tp
->pci_lat_timer
);
15977 if (tg3_asic_rev(tp
) == ASIC_REV_5703
&&
15978 tp
->pci_lat_timer
< 64) {
15979 tp
->pci_lat_timer
= 64;
15980 pci_write_config_byte(tp
->pdev
, PCI_LATENCY_TIMER
,
15981 tp
->pci_lat_timer
);
15984 /* Important! -- It is critical that the PCI-X hw workaround
15985 * situation is decided before the first MMIO register access.
15987 if (tg3_chip_rev(tp
) == CHIPREV_5700_BX
) {
15988 /* 5700 BX chips need to have their TX producer index
15989 * mailboxes written twice to workaround a bug.
15991 tg3_flag_set(tp
, TXD_MBOX_HWBUG
);
15993 /* If we are in PCI-X mode, enable register write workaround.
15995 * The workaround is to use indirect register accesses
15996 * for all chip writes not to mailbox registers.
15998 if (tg3_flag(tp
, PCIX_MODE
)) {
16001 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
16003 /* The chip can have it's power management PCI config
16004 * space registers clobbered due to this bug.
16005 * So explicitly force the chip into D0 here.
16007 pci_read_config_dword(tp
->pdev
,
16008 tp
->pm_cap
+ PCI_PM_CTRL
,
16010 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
16011 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
16012 pci_write_config_dword(tp
->pdev
,
16013 tp
->pm_cap
+ PCI_PM_CTRL
,
16016 /* Also, force SERR#/PERR# in PCI command. */
16017 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
16018 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
16019 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
16023 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
16024 tg3_flag_set(tp
, PCI_HIGH_SPEED
);
16025 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
16026 tg3_flag_set(tp
, PCI_32BIT
);
16028 /* Chip-specific fixup from Broadcom driver */
16029 if ((tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
) &&
16030 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
16031 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
16032 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
16035 /* Default fast path register access methods */
16036 tp
->read32
= tg3_read32
;
16037 tp
->write32
= tg3_write32
;
16038 tp
->read32_mbox
= tg3_read32
;
16039 tp
->write32_mbox
= tg3_write32
;
16040 tp
->write32_tx_mbox
= tg3_write32
;
16041 tp
->write32_rx_mbox
= tg3_write32
;
16043 /* Various workaround register access methods */
16044 if (tg3_flag(tp
, PCIX_TARGET_HWBUG
))
16045 tp
->write32
= tg3_write_indirect_reg32
;
16046 else if (tg3_asic_rev(tp
) == ASIC_REV_5701
||
16047 (tg3_flag(tp
, PCI_EXPRESS
) &&
16048 tg3_chip_rev_id(tp
) == CHIPREV_ID_5750_A0
)) {
16050 * Back to back register writes can cause problems on these
16051 * chips, the workaround is to read back all reg writes
16052 * except those to mailbox regs.
16054 * See tg3_write_indirect_reg32().
16056 tp
->write32
= tg3_write_flush_reg32
;
16059 if (tg3_flag(tp
, TXD_MBOX_HWBUG
) || tg3_flag(tp
, MBOX_WRITE_REORDER
)) {
16060 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
16061 if (tg3_flag(tp
, MBOX_WRITE_REORDER
))
16062 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
16065 if (tg3_flag(tp
, ICH_WORKAROUND
)) {
16066 tp
->read32
= tg3_read_indirect_reg32
;
16067 tp
->write32
= tg3_write_indirect_reg32
;
16068 tp
->read32_mbox
= tg3_read_indirect_mbox
;
16069 tp
->write32_mbox
= tg3_write_indirect_mbox
;
16070 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
16071 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
16076 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
16077 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
16078 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
16080 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
16081 tp
->read32_mbox
= tg3_read32_mbox_5906
;
16082 tp
->write32_mbox
= tg3_write32_mbox_5906
;
16083 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
16084 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
16087 if (tp
->write32
== tg3_write_indirect_reg32
||
16088 (tg3_flag(tp
, PCIX_MODE
) &&
16089 (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16090 tg3_asic_rev(tp
) == ASIC_REV_5701
)))
16091 tg3_flag_set(tp
, SRAM_USE_CONFIG
);
16093 /* The memory arbiter has to be enabled in order for SRAM accesses
16094 * to succeed. Normally on powerup the tg3 chip firmware will make
16095 * sure it is enabled, but other entities such as system netboot
16096 * code might disable it.
16098 val
= tr32(MEMARB_MODE
);
16099 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
16101 tp
->pci_fn
= PCI_FUNC(tp
->pdev
->devfn
) & 3;
16102 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
16103 tg3_flag(tp
, 5780_CLASS
)) {
16104 if (tg3_flag(tp
, PCIX_MODE
)) {
16105 pci_read_config_dword(tp
->pdev
,
16106 tp
->pcix_cap
+ PCI_X_STATUS
,
16108 tp
->pci_fn
= val
& 0x7;
16110 } else if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
16111 tg3_asic_rev(tp
) == ASIC_REV_5719
||
16112 tg3_asic_rev(tp
) == ASIC_REV_5720
) {
16113 tg3_read_mem(tp
, NIC_SRAM_CPMU_STATUS
, &val
);
16114 if ((val
& NIC_SRAM_CPMUSTAT_SIG_MSK
) != NIC_SRAM_CPMUSTAT_SIG
)
16115 val
= tr32(TG3_CPMU_STATUS
);
16117 if (tg3_asic_rev(tp
) == ASIC_REV_5717
)
16118 tp
->pci_fn
= (val
& TG3_CPMU_STATUS_FMSK_5717
) ? 1 : 0;
16120 tp
->pci_fn
= (val
& TG3_CPMU_STATUS_FMSK_5719
) >>
16121 TG3_CPMU_STATUS_FSHFT_5719
;
16124 if (tg3_flag(tp
, FLUSH_POSTED_WRITES
)) {
16125 tp
->write32_tx_mbox
= tg3_write_flush_reg32
;
16126 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
16129 /* Get eeprom hw config before calling tg3_set_power_state().
16130 * In particular, the TG3_FLAG_IS_NIC flag must be
16131 * determined before calling tg3_set_power_state() so that
16132 * we know whether or not to switch out of Vaux power.
16133 * When the flag is set, it means that GPIO1 is used for eeprom
16134 * write protect and also implies that it is a LOM where GPIOs
16135 * are not used to switch power.
16137 tg3_get_eeprom_hw_cfg(tp
);
16139 if (tg3_flag(tp
, FW_TSO
) && tg3_flag(tp
, ENABLE_ASF
)) {
16140 tg3_flag_clear(tp
, TSO_CAPABLE
);
16141 tg3_flag_clear(tp
, TSO_BUG
);
16142 tp
->fw_needed
= NULL
;
16145 if (tg3_flag(tp
, ENABLE_APE
)) {
16146 /* Allow reads and writes to the
16147 * APE register and memory space.
16149 pci_state_reg
|= PCISTATE_ALLOW_APE_CTLSPC_WR
|
16150 PCISTATE_ALLOW_APE_SHMEM_WR
|
16151 PCISTATE_ALLOW_APE_PSPACE_WR
;
16152 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
16155 tg3_ape_lock_init(tp
);
16158 /* Set up tp->grc_local_ctrl before calling
16159 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16160 * will bring 5700's external PHY out of reset.
16161 * It is also used as eeprom write protect on LOMs.
16163 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
16164 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16165 tg3_flag(tp
, EEPROM_WRITE_PROT
))
16166 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
16167 GRC_LCLCTRL_GPIO_OUTPUT1
);
16168 /* Unused GPIO3 must be driven as output on 5752 because there
16169 * are no pull-up resistors on unused GPIO pins.
16171 else if (tg3_asic_rev(tp
) == ASIC_REV_5752
)
16172 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
16174 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
16175 tg3_asic_rev(tp
) == ASIC_REV_57780
||
16176 tg3_flag(tp
, 57765_CLASS
))
16177 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
16179 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
16180 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
) {
16181 /* Turn off the debug UART. */
16182 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
16183 if (tg3_flag(tp
, IS_NIC
))
16184 /* Keep VMain power. */
16185 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
16186 GRC_LCLCTRL_GPIO_OUTPUT0
;
16189 if (tg3_asic_rev(tp
) == ASIC_REV_5762
)
16190 tp
->grc_local_ctrl
|=
16191 tr32(GRC_LOCAL_CTRL
) & GRC_LCLCTRL_GPIO_UART_SEL
;
16193 /* Switch out of Vaux if it is a NIC */
16194 tg3_pwrsrc_switch_to_vmain(tp
);
16196 /* Derive initial jumbo mode from MTU assigned in
16197 * ether_setup() via the alloc_etherdev() call
16199 if (tp
->dev
->mtu
> ETH_DATA_LEN
&& !tg3_flag(tp
, 5780_CLASS
))
16200 tg3_flag_set(tp
, JUMBO_RING_ENABLE
);
16202 /* Determine WakeOnLan speed to use. */
16203 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16204 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
16205 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
||
16206 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B2
) {
16207 tg3_flag_clear(tp
, WOL_SPEED_100MB
);
16209 tg3_flag_set(tp
, WOL_SPEED_100MB
);
16212 if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
16213 tp
->phy_flags
|= TG3_PHYFLG_IS_FET
;
16215 /* A few boards don't want Ethernet@WireSpeed phy feature */
16216 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16217 (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
16218 (tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A0
) &&
16219 (tg3_chip_rev_id(tp
) != CHIPREV_ID_5705_A1
)) ||
16220 (tp
->phy_flags
& TG3_PHYFLG_IS_FET
) ||
16221 (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
))
16222 tp
->phy_flags
|= TG3_PHYFLG_NO_ETH_WIRE_SPEED
;
16224 if (tg3_chip_rev(tp
) == CHIPREV_5703_AX
||
16225 tg3_chip_rev(tp
) == CHIPREV_5704_AX
)
16226 tp
->phy_flags
|= TG3_PHYFLG_ADC_BUG
;
16227 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5704_A0
)
16228 tp
->phy_flags
|= TG3_PHYFLG_5704_A0_BUG
;
16230 if (tg3_flag(tp
, 5705_PLUS
) &&
16231 !(tp
->phy_flags
& TG3_PHYFLG_IS_FET
) &&
16232 tg3_asic_rev(tp
) != ASIC_REV_5785
&&
16233 tg3_asic_rev(tp
) != ASIC_REV_57780
&&
16234 !tg3_flag(tp
, 57765_PLUS
)) {
16235 if (tg3_asic_rev(tp
) == ASIC_REV_5755
||
16236 tg3_asic_rev(tp
) == ASIC_REV_5787
||
16237 tg3_asic_rev(tp
) == ASIC_REV_5784
||
16238 tg3_asic_rev(tp
) == ASIC_REV_5761
) {
16239 if (tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5756
&&
16240 tp
->pdev
->device
!= PCI_DEVICE_ID_TIGON3_5722
)
16241 tp
->phy_flags
|= TG3_PHYFLG_JITTER_BUG
;
16242 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5755M
)
16243 tp
->phy_flags
|= TG3_PHYFLG_ADJUST_TRIM
;
16245 tp
->phy_flags
|= TG3_PHYFLG_BER_BUG
;
16248 if (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
16249 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) {
16250 tp
->phy_otp
= tg3_read_otp_phycfg(tp
);
16251 if (tp
->phy_otp
== 0)
16252 tp
->phy_otp
= TG3_OTP_DEFAULT
;
16255 if (tg3_flag(tp
, CPMU_PRESENT
))
16256 tp
->mi_mode
= MAC_MI_MODE_500KHZ_CONST
;
16258 tp
->mi_mode
= MAC_MI_MODE_BASE
;
16260 tp
->coalesce_mode
= 0;
16261 if (tg3_chip_rev(tp
) != CHIPREV_5700_AX
&&
16262 tg3_chip_rev(tp
) != CHIPREV_5700_BX
)
16263 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
16265 /* Set these bits to enable statistics workaround. */
16266 if (tg3_asic_rev(tp
) == ASIC_REV_5717
||
16267 tg3_chip_rev_id(tp
) == CHIPREV_ID_5719_A0
||
16268 tg3_chip_rev_id(tp
) == CHIPREV_ID_5720_A0
) {
16269 tp
->coalesce_mode
|= HOSTCC_MODE_ATTN
;
16270 tp
->grc_mode
|= GRC_MODE_IRQ_ON_FLOW_ATTN
;
16273 if (tg3_asic_rev(tp
) == ASIC_REV_5785
||
16274 tg3_asic_rev(tp
) == ASIC_REV_57780
)
16275 tg3_flag_set(tp
, USE_PHYLIB
);
16277 err
= tg3_mdio_init(tp
);
16281 /* Initialize data/descriptor byte/word swapping. */
16282 val
= tr32(GRC_MODE
);
16283 if (tg3_asic_rev(tp
) == ASIC_REV_5720
||
16284 tg3_asic_rev(tp
) == ASIC_REV_5762
)
16285 val
&= (GRC_MODE_BYTE_SWAP_B2HRX_DATA
|
16286 GRC_MODE_WORD_SWAP_B2HRX_DATA
|
16287 GRC_MODE_B2HRX_ENABLE
|
16288 GRC_MODE_HTX2B_ENABLE
|
16289 GRC_MODE_HOST_STACKUP
);
16291 val
&= GRC_MODE_HOST_STACKUP
;
16293 tw32(GRC_MODE
, val
| tp
->grc_mode
);
16295 tg3_switch_clocks(tp
);
16297 /* Clear this out for sanity. */
16298 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
16300 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
16302 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
16303 !tg3_flag(tp
, PCIX_TARGET_HWBUG
)) {
16304 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_A0
||
16305 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B0
||
16306 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B2
||
16307 tg3_chip_rev_id(tp
) == CHIPREV_ID_5701_B5
) {
16308 void __iomem
*sram_base
;
16310 /* Write some dummy words into the SRAM status block
16311 * area, see if it reads back correctly. If the return
16312 * value is bad, force enable the PCIX workaround.
16314 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
16316 writel(0x00000000, sram_base
);
16317 writel(0x00000000, sram_base
+ 4);
16318 writel(0xffffffff, sram_base
+ 4);
16319 if (readl(sram_base
) != 0x00000000)
16320 tg3_flag_set(tp
, PCIX_TARGET_HWBUG
);
16325 tg3_nvram_init(tp
);
16327 /* If the device has an NVRAM, no need to load patch firmware */
16328 if (tg3_asic_rev(tp
) == ASIC_REV_57766
&&
16329 !tg3_flag(tp
, NO_NVRAM
))
16330 tp
->fw_needed
= NULL
;
16332 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
16333 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
16335 if (tg3_asic_rev(tp
) == ASIC_REV_5705
&&
16336 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
16337 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
16338 tg3_flag_set(tp
, IS_5788
);
16340 if (!tg3_flag(tp
, IS_5788
) &&
16341 tg3_asic_rev(tp
) != ASIC_REV_5700
)
16342 tg3_flag_set(tp
, TAGGED_STATUS
);
16343 if (tg3_flag(tp
, TAGGED_STATUS
)) {
16344 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
16345 HOSTCC_MODE_CLRTICK_TXBD
);
16347 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
16348 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
16349 tp
->misc_host_ctrl
);
16352 /* Preserve the APE MAC_MODE bits */
16353 if (tg3_flag(tp
, ENABLE_APE
))
16354 tp
->mac_mode
= MAC_MODE_APE_TX_EN
| MAC_MODE_APE_RX_EN
;
16358 if (tg3_10_100_only_device(tp
, ent
))
16359 tp
->phy_flags
|= TG3_PHYFLG_10_100_ONLY
;
16361 err
= tg3_phy_probe(tp
);
16363 dev_err(&tp
->pdev
->dev
, "phy probe failed, err %d\n", err
);
16364 /* ... but do not return immediately ... */
16369 tg3_read_fw_ver(tp
);
16371 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
) {
16372 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
16374 if (tg3_asic_rev(tp
) == ASIC_REV_5700
)
16375 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
16377 tp
->phy_flags
&= ~TG3_PHYFLG_USE_MI_INTERRUPT
;
16380 /* 5700 {AX,BX} chips have a broken status block link
16381 * change bit implementation, so we must use the
16382 * status register in those cases.
16384 if (tg3_asic_rev(tp
) == ASIC_REV_5700
)
16385 tg3_flag_set(tp
, USE_LINKCHG_REG
);
16387 tg3_flag_clear(tp
, USE_LINKCHG_REG
);
16389 /* The led_ctrl is set during tg3_phy_probe, here we might
16390 * have to force the link status polling mechanism based
16391 * upon subsystem IDs.
16393 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
16394 tg3_asic_rev(tp
) == ASIC_REV_5701
&&
16395 !(tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)) {
16396 tp
->phy_flags
|= TG3_PHYFLG_USE_MI_INTERRUPT
;
16397 tg3_flag_set(tp
, USE_LINKCHG_REG
);
16400 /* For all SERDES we poll the MAC status register. */
16401 if (tp
->phy_flags
& TG3_PHYFLG_PHY_SERDES
)
16402 tg3_flag_set(tp
, POLL_SERDES
);
16404 tg3_flag_clear(tp
, POLL_SERDES
);
16406 tp
->rx_offset
= NET_SKB_PAD
+ NET_IP_ALIGN
;
16407 tp
->rx_copy_thresh
= TG3_RX_COPY_THRESHOLD
;
16408 if (tg3_asic_rev(tp
) == ASIC_REV_5701
&&
16409 tg3_flag(tp
, PCIX_MODE
)) {
16410 tp
->rx_offset
= NET_SKB_PAD
;
16411 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16412 tp
->rx_copy_thresh
= ~(u16
)0;
16416 tp
->rx_std_ring_mask
= TG3_RX_STD_RING_SIZE(tp
) - 1;
16417 tp
->rx_jmb_ring_mask
= TG3_RX_JMB_RING_SIZE(tp
) - 1;
16418 tp
->rx_ret_ring_mask
= tg3_rx_ret_ring_size(tp
) - 1;
16420 tp
->rx_std_max_post
= tp
->rx_std_ring_mask
+ 1;
16422 /* Increment the rx prod index on the rx std ring by at most
16423 * 8 for these chips to workaround hw errata.
16425 if (tg3_asic_rev(tp
) == ASIC_REV_5750
||
16426 tg3_asic_rev(tp
) == ASIC_REV_5752
||
16427 tg3_asic_rev(tp
) == ASIC_REV_5755
)
16428 tp
->rx_std_max_post
= 8;
16430 if (tg3_flag(tp
, ASPM_WORKAROUND
))
16431 tp
->pwrmgmt_thresh
= tr32(PCIE_PWR_MGMT_THRESH
) &
16432 PCIE_PWR_MGMT_L1_THRESH_MSK
;
16437 #ifdef CONFIG_SPARC
16438 static int tg3_get_macaddr_sparc(struct tg3
*tp
)
16440 struct net_device
*dev
= tp
->dev
;
16441 struct pci_dev
*pdev
= tp
->pdev
;
16442 struct device_node
*dp
= pci_device_to_OF_node(pdev
);
16443 const unsigned char *addr
;
16446 addr
= of_get_property(dp
, "local-mac-address", &len
);
16447 if (addr
&& len
== 6) {
16448 memcpy(dev
->dev_addr
, addr
, 6);
16454 static int tg3_get_default_macaddr_sparc(struct tg3
*tp
)
16456 struct net_device
*dev
= tp
->dev
;
16458 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
16463 static int tg3_get_device_address(struct tg3
*tp
)
16465 struct net_device
*dev
= tp
->dev
;
16466 u32 hi
, lo
, mac_offset
;
16470 #ifdef CONFIG_SPARC
16471 if (!tg3_get_macaddr_sparc(tp
))
16475 if (tg3_flag(tp
, IS_SSB_CORE
)) {
16476 err
= ssb_gige_get_macaddr(tp
->pdev
, &dev
->dev_addr
[0]);
16477 if (!err
&& is_valid_ether_addr(&dev
->dev_addr
[0]))
16482 if (tg3_asic_rev(tp
) == ASIC_REV_5704
||
16483 tg3_flag(tp
, 5780_CLASS
)) {
16484 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
16486 if (tg3_nvram_lock(tp
))
16487 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
16489 tg3_nvram_unlock(tp
);
16490 } else if (tg3_flag(tp
, 5717_PLUS
)) {
16491 if (tp
->pci_fn
& 1)
16493 if (tp
->pci_fn
> 1)
16494 mac_offset
+= 0x18c;
16495 } else if (tg3_asic_rev(tp
) == ASIC_REV_5906
)
16498 /* First try to get it from MAC address mailbox. */
16499 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
16500 if ((hi
>> 16) == 0x484b) {
16501 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
16502 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
16504 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
16505 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
16506 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
16507 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
16508 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
16510 /* Some old bootcode may report a 0 MAC address in SRAM */
16511 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
16514 /* Next, try NVRAM. */
16515 if (!tg3_flag(tp
, NO_NVRAM
) &&
16516 !tg3_nvram_read_be32(tp
, mac_offset
+ 0, &hi
) &&
16517 !tg3_nvram_read_be32(tp
, mac_offset
+ 4, &lo
)) {
16518 memcpy(&dev
->dev_addr
[0], ((char *)&hi
) + 2, 2);
16519 memcpy(&dev
->dev_addr
[2], (char *)&lo
, sizeof(lo
));
16521 /* Finally just fetch it out of the MAC control regs. */
16523 hi
= tr32(MAC_ADDR_0_HIGH
);
16524 lo
= tr32(MAC_ADDR_0_LOW
);
16526 dev
->dev_addr
[5] = lo
& 0xff;
16527 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
16528 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
16529 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
16530 dev
->dev_addr
[1] = hi
& 0xff;
16531 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
16535 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
16536 #ifdef CONFIG_SPARC
16537 if (!tg3_get_default_macaddr_sparc(tp
))
16545 #define BOUNDARY_SINGLE_CACHELINE 1
16546 #define BOUNDARY_MULTI_CACHELINE 2
16548 static u32
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
16550 int cacheline_size
;
16554 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
16556 cacheline_size
= 1024;
16558 cacheline_size
= (int) byte
* 4;
16560 /* On 5703 and later chips, the boundary bits have no
16563 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
16564 tg3_asic_rev(tp
) != ASIC_REV_5701
&&
16565 !tg3_flag(tp
, PCI_EXPRESS
))
16568 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16569 goal
= BOUNDARY_MULTI_CACHELINE
;
16571 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16572 goal
= BOUNDARY_SINGLE_CACHELINE
;
16578 if (tg3_flag(tp
, 57765_PLUS
)) {
16579 val
= goal
? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT
;
16586 /* PCI controllers on most RISC systems tend to disconnect
16587 * when a device tries to burst across a cache-line boundary.
16588 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16590 * Unfortunately, for PCI-E there are only limited
16591 * write-side controls for this, and thus for reads
16592 * we will still get the disconnects. We'll also waste
16593 * these PCI cycles for both read and write for chips
16594 * other than 5700 and 5701 which do not implement the
16597 if (tg3_flag(tp
, PCIX_MODE
) && !tg3_flag(tp
, PCI_EXPRESS
)) {
16598 switch (cacheline_size
) {
16603 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16604 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
16605 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
16607 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
16608 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
16613 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
16614 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
16618 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
16619 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
16622 } else if (tg3_flag(tp
, PCI_EXPRESS
)) {
16623 switch (cacheline_size
) {
16627 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16628 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
16629 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
16635 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
16636 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
16640 switch (cacheline_size
) {
16642 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16643 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
16644 DMA_RWCTRL_WRITE_BNDRY_16
);
16649 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16650 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
16651 DMA_RWCTRL_WRITE_BNDRY_32
);
16656 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16657 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
16658 DMA_RWCTRL_WRITE_BNDRY_64
);
16663 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
16664 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
16665 DMA_RWCTRL_WRITE_BNDRY_128
);
16670 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
16671 DMA_RWCTRL_WRITE_BNDRY_256
);
16674 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
16675 DMA_RWCTRL_WRITE_BNDRY_512
);
16679 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
16680 DMA_RWCTRL_WRITE_BNDRY_1024
);
16689 static int tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
,
16690 int size
, bool to_device
)
16692 struct tg3_internal_buffer_desc test_desc
;
16693 u32 sram_dma_descs
;
16696 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
16698 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
16699 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
16700 tw32(RDMAC_STATUS
, 0);
16701 tw32(WDMAC_STATUS
, 0);
16703 tw32(BUFMGR_MODE
, 0);
16704 tw32(FTQ_RESET
, 0);
16706 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
16707 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
16708 test_desc
.nic_mbuf
= 0x00002100;
16709 test_desc
.len
= size
;
16712 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16713 * the *second* time the tg3 driver was getting loaded after an
16716 * Broadcom tells me:
16717 * ...the DMA engine is connected to the GRC block and a DMA
16718 * reset may affect the GRC block in some unpredictable way...
16719 * The behavior of resets to individual blocks has not been tested.
16721 * Broadcom noted the GRC reset will also reset all sub-components.
16724 test_desc
.cqid_sqid
= (13 << 8) | 2;
16726 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
16729 test_desc
.cqid_sqid
= (16 << 8) | 7;
16731 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
16734 test_desc
.flags
= 0x00000005;
16736 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
16739 val
= *(((u32
*)&test_desc
) + i
);
16740 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
16741 sram_dma_descs
+ (i
* sizeof(u32
)));
16742 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
16744 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
16747 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
16749 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
16752 for (i
= 0; i
< 40; i
++) {
16756 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
16758 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
16759 if ((val
& 0xffff) == sram_dma_descs
) {
16770 #define TEST_BUFFER_SIZE 0x2000
16772 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets
) = {
16773 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
16777 static int tg3_test_dma(struct tg3
*tp
)
16779 dma_addr_t buf_dma
;
16780 u32
*buf
, saved_dma_rwctrl
;
16783 buf
= dma_alloc_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
,
16784 &buf_dma
, GFP_KERNEL
);
16790 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
16791 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
16793 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
16795 if (tg3_flag(tp
, 57765_PLUS
))
16798 if (tg3_flag(tp
, PCI_EXPRESS
)) {
16799 /* DMA read watermark not used on PCIE */
16800 tp
->dma_rwctrl
|= 0x00180000;
16801 } else if (!tg3_flag(tp
, PCIX_MODE
)) {
16802 if (tg3_asic_rev(tp
) == ASIC_REV_5705
||
16803 tg3_asic_rev(tp
) == ASIC_REV_5750
)
16804 tp
->dma_rwctrl
|= 0x003f0000;
16806 tp
->dma_rwctrl
|= 0x003f000f;
16808 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
16809 tg3_asic_rev(tp
) == ASIC_REV_5704
) {
16810 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
16811 u32 read_water
= 0x7;
16813 /* If the 5704 is behind the EPB bridge, we can
16814 * do the less restrictive ONE_DMA workaround for
16815 * better performance.
16817 if (tg3_flag(tp
, 40BIT_DMA_BUG
) &&
16818 tg3_asic_rev(tp
) == ASIC_REV_5704
)
16819 tp
->dma_rwctrl
|= 0x8000;
16820 else if (ccval
== 0x6 || ccval
== 0x7)
16821 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
16823 if (tg3_asic_rev(tp
) == ASIC_REV_5703
)
16825 /* Set bit 23 to enable PCIX hw bug fix */
16827 (read_water
<< DMA_RWCTRL_READ_WATER_SHIFT
) |
16828 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT
) |
16830 } else if (tg3_asic_rev(tp
) == ASIC_REV_5780
) {
16831 /* 5780 always in PCIX mode */
16832 tp
->dma_rwctrl
|= 0x00144000;
16833 } else if (tg3_asic_rev(tp
) == ASIC_REV_5714
) {
16834 /* 5714 always in PCIX mode */
16835 tp
->dma_rwctrl
|= 0x00148000;
16837 tp
->dma_rwctrl
|= 0x001b000f;
16840 if (tg3_flag(tp
, ONE_DMA_AT_ONCE
))
16841 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
16843 if (tg3_asic_rev(tp
) == ASIC_REV_5703
||
16844 tg3_asic_rev(tp
) == ASIC_REV_5704
)
16845 tp
->dma_rwctrl
&= 0xfffffff0;
16847 if (tg3_asic_rev(tp
) == ASIC_REV_5700
||
16848 tg3_asic_rev(tp
) == ASIC_REV_5701
) {
16849 /* Remove this if it causes problems for some boards. */
16850 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
16852 /* On 5700/5701 chips, we need to set this bit.
16853 * Otherwise the chip will issue cacheline transactions
16854 * to streamable DMA memory with not all the byte
16855 * enables turned on. This is an error on several
16856 * RISC PCI controllers, in particular sparc64.
16858 * On 5703/5704 chips, this bit has been reassigned
16859 * a different meaning. In particular, it is used
16860 * on those chips to enable a PCI-X workaround.
16862 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
16865 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16868 /* Unneeded, already done by tg3_get_invariants. */
16869 tg3_switch_clocks(tp
);
16872 if (tg3_asic_rev(tp
) != ASIC_REV_5700
&&
16873 tg3_asic_rev(tp
) != ASIC_REV_5701
)
16876 /* It is best to perform DMA test with maximum write burst size
16877 * to expose the 5700/5701 write DMA bug.
16879 saved_dma_rwctrl
= tp
->dma_rwctrl
;
16880 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
16881 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16886 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
16889 /* Send the buffer to the chip. */
16890 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, true);
16892 dev_err(&tp
->pdev
->dev
,
16893 "%s: Buffer write failed. err = %d\n",
16899 /* validate data reached card RAM correctly. */
16900 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
16902 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
16903 if (le32_to_cpu(val
) != p
[i
]) {
16904 dev_err(&tp
->pdev
->dev
,
16905 "%s: Buffer corrupted on device! "
16906 "(%d != %d)\n", __func__
, val
, i
);
16907 /* ret = -ENODEV here? */
16912 /* Now read it back. */
16913 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, false);
16915 dev_err(&tp
->pdev
->dev
, "%s: Buffer read failed. "
16916 "err = %d\n", __func__
, ret
);
16921 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
16925 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
16926 DMA_RWCTRL_WRITE_BNDRY_16
) {
16927 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
16928 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
16929 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16932 dev_err(&tp
->pdev
->dev
,
16933 "%s: Buffer corrupted on read back! "
16934 "(%d != %d)\n", __func__
, p
[i
], i
);
16940 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
16946 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
16947 DMA_RWCTRL_WRITE_BNDRY_16
) {
16948 /* DMA test passed without adjusting DMA boundary,
16949 * now look for chipsets that are known to expose the
16950 * DMA bug without failing the test.
16952 if (pci_dev_present(tg3_dma_wait_state_chipsets
)) {
16953 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
16954 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
16956 /* Safe to use the calculated DMA boundary. */
16957 tp
->dma_rwctrl
= saved_dma_rwctrl
;
16960 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
16964 dma_free_coherent(&tp
->pdev
->dev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
16969 static void tg3_init_bufmgr_config(struct tg3
*tp
)
16971 if (tg3_flag(tp
, 57765_PLUS
)) {
16972 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
16973 DEFAULT_MB_RDMA_LOW_WATER_5705
;
16974 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16975 DEFAULT_MB_MACRX_LOW_WATER_57765
;
16976 tp
->bufmgr_config
.mbuf_high_water
=
16977 DEFAULT_MB_HIGH_WATER_57765
;
16979 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
16980 DEFAULT_MB_RDMA_LOW_WATER_5705
;
16981 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
16982 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765
;
16983 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
16984 DEFAULT_MB_HIGH_WATER_JUMBO_57765
;
16985 } else if (tg3_flag(tp
, 5705_PLUS
)) {
16986 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
16987 DEFAULT_MB_RDMA_LOW_WATER_5705
;
16988 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16989 DEFAULT_MB_MACRX_LOW_WATER_5705
;
16990 tp
->bufmgr_config
.mbuf_high_water
=
16991 DEFAULT_MB_HIGH_WATER_5705
;
16992 if (tg3_asic_rev(tp
) == ASIC_REV_5906
) {
16993 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
16994 DEFAULT_MB_MACRX_LOW_WATER_5906
;
16995 tp
->bufmgr_config
.mbuf_high_water
=
16996 DEFAULT_MB_HIGH_WATER_5906
;
16999 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
17000 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
17001 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
17002 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
17003 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
17004 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
17006 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
17007 DEFAULT_MB_RDMA_LOW_WATER
;
17008 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
17009 DEFAULT_MB_MACRX_LOW_WATER
;
17010 tp
->bufmgr_config
.mbuf_high_water
=
17011 DEFAULT_MB_HIGH_WATER
;
17013 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
17014 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
17015 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
17016 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
17017 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
17018 DEFAULT_MB_HIGH_WATER_JUMBO
;
17021 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
17022 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
17025 static char *tg3_phy_string(struct tg3
*tp
)
17027 switch (tp
->phy_id
& TG3_PHY_ID_MASK
) {
17028 case TG3_PHY_ID_BCM5400
: return "5400";
17029 case TG3_PHY_ID_BCM5401
: return "5401";
17030 case TG3_PHY_ID_BCM5411
: return "5411";
17031 case TG3_PHY_ID_BCM5701
: return "5701";
17032 case TG3_PHY_ID_BCM5703
: return "5703";
17033 case TG3_PHY_ID_BCM5704
: return "5704";
17034 case TG3_PHY_ID_BCM5705
: return "5705";
17035 case TG3_PHY_ID_BCM5750
: return "5750";
17036 case TG3_PHY_ID_BCM5752
: return "5752";
17037 case TG3_PHY_ID_BCM5714
: return "5714";
17038 case TG3_PHY_ID_BCM5780
: return "5780";
17039 case TG3_PHY_ID_BCM5755
: return "5755";
17040 case TG3_PHY_ID_BCM5787
: return "5787";
17041 case TG3_PHY_ID_BCM5784
: return "5784";
17042 case TG3_PHY_ID_BCM5756
: return "5722/5756";
17043 case TG3_PHY_ID_BCM5906
: return "5906";
17044 case TG3_PHY_ID_BCM5761
: return "5761";
17045 case TG3_PHY_ID_BCM5718C
: return "5718C";
17046 case TG3_PHY_ID_BCM5718S
: return "5718S";
17047 case TG3_PHY_ID_BCM57765
: return "57765";
17048 case TG3_PHY_ID_BCM5719C
: return "5719C";
17049 case TG3_PHY_ID_BCM5720C
: return "5720C";
17050 case TG3_PHY_ID_BCM5762
: return "5762C";
17051 case TG3_PHY_ID_BCM8002
: return "8002/serdes";
17052 case 0: return "serdes";
17053 default: return "unknown";
17057 static char *tg3_bus_string(struct tg3
*tp
, char *str
)
17059 if (tg3_flag(tp
, PCI_EXPRESS
)) {
17060 strcpy(str
, "PCI Express");
17062 } else if (tg3_flag(tp
, PCIX_MODE
)) {
17063 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
17065 strcpy(str
, "PCIX:");
17067 if ((clock_ctrl
== 7) ||
17068 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
17069 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
17070 strcat(str
, "133MHz");
17071 else if (clock_ctrl
== 0)
17072 strcat(str
, "33MHz");
17073 else if (clock_ctrl
== 2)
17074 strcat(str
, "50MHz");
17075 else if (clock_ctrl
== 4)
17076 strcat(str
, "66MHz");
17077 else if (clock_ctrl
== 6)
17078 strcat(str
, "100MHz");
17080 strcpy(str
, "PCI:");
17081 if (tg3_flag(tp
, PCI_HIGH_SPEED
))
17082 strcat(str
, "66MHz");
17084 strcat(str
, "33MHz");
17086 if (tg3_flag(tp
, PCI_32BIT
))
17087 strcat(str
, ":32-bit");
17089 strcat(str
, ":64-bit");
17093 static void tg3_init_coal(struct tg3
*tp
)
17095 struct ethtool_coalesce
*ec
= &tp
->coal
;
17097 memset(ec
, 0, sizeof(*ec
));
17098 ec
->cmd
= ETHTOOL_GCOALESCE
;
17099 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
17100 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
17101 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
17102 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
17103 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
17104 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
17105 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
17106 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
17107 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
17109 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
17110 HOSTCC_MODE_CLRTICK_TXBD
)) {
17111 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
17112 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
17113 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
17114 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
17117 if (tg3_flag(tp
, 5705_PLUS
)) {
17118 ec
->rx_coalesce_usecs_irq
= 0;
17119 ec
->tx_coalesce_usecs_irq
= 0;
17120 ec
->stats_block_coalesce_usecs
= 0;
17124 static int tg3_init_one(struct pci_dev
*pdev
,
17125 const struct pci_device_id
*ent
)
17127 struct net_device
*dev
;
17129 int i
, err
, pm_cap
;
17130 u32 sndmbx
, rcvmbx
, intmbx
;
17132 u64 dma_mask
, persist_dma_mask
;
17133 netdev_features_t features
= 0;
17135 printk_once(KERN_INFO
"%s\n", version
);
17137 err
= pci_enable_device(pdev
);
17139 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting\n");
17143 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
17145 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting\n");
17146 goto err_out_disable_pdev
;
17149 pci_set_master(pdev
);
17151 /* Find power-management capability. */
17152 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
17154 dev_err(&pdev
->dev
,
17155 "Cannot find Power Management capability, aborting\n");
17157 goto err_out_free_res
;
17160 err
= pci_set_power_state(pdev
, PCI_D0
);
17162 dev_err(&pdev
->dev
, "Transition to D0 failed, aborting\n");
17163 goto err_out_free_res
;
17166 dev
= alloc_etherdev_mq(sizeof(*tp
), TG3_IRQ_MAX_VECS
);
17169 goto err_out_power_down
;
17172 SET_NETDEV_DEV(dev
, &pdev
->dev
);
17174 tp
= netdev_priv(dev
);
17177 tp
->pm_cap
= pm_cap
;
17178 tp
->rx_mode
= TG3_DEF_RX_MODE
;
17179 tp
->tx_mode
= TG3_DEF_TX_MODE
;
17183 tp
->msg_enable
= tg3_debug
;
17185 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
17187 if (pdev_is_ssb_gige_core(pdev
)) {
17188 tg3_flag_set(tp
, IS_SSB_CORE
);
17189 if (ssb_gige_must_flush_posted_writes(pdev
))
17190 tg3_flag_set(tp
, FLUSH_POSTED_WRITES
);
17191 if (ssb_gige_one_dma_at_once(pdev
))
17192 tg3_flag_set(tp
, ONE_DMA_AT_ONCE
);
17193 if (ssb_gige_have_roboswitch(pdev
))
17194 tg3_flag_set(tp
, ROBOSWITCH
);
17195 if (ssb_gige_is_rgmii(pdev
))
17196 tg3_flag_set(tp
, RGMII_MODE
);
17199 /* The word/byte swap controls here control register access byte
17200 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17203 tp
->misc_host_ctrl
=
17204 MISC_HOST_CTRL_MASK_PCI_INT
|
17205 MISC_HOST_CTRL_WORD_SWAP
|
17206 MISC_HOST_CTRL_INDIR_ACCESS
|
17207 MISC_HOST_CTRL_PCISTATE_RW
;
17209 /* The NONFRM (non-frame) byte/word swap controls take effect
17210 * on descriptor entries, anything which isn't packet data.
17212 * The StrongARM chips on the board (one for tx, one for rx)
17213 * are running in big-endian mode.
17215 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
17216 GRC_MODE_WSWAP_NONFRM_DATA
);
17217 #ifdef __BIG_ENDIAN
17218 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
17220 spin_lock_init(&tp
->lock
);
17221 spin_lock_init(&tp
->indirect_lock
);
17222 INIT_WORK(&tp
->reset_task
, tg3_reset_task
);
17224 tp
->regs
= pci_ioremap_bar(pdev
, BAR_0
);
17226 dev_err(&pdev
->dev
, "Cannot map device registers, aborting\n");
17228 goto err_out_free_dev
;
17231 if (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761
||
17232 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5761E
||
17233 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761S
||
17234 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5761SE
||
17235 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717
||
17236 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5717_C
||
17237 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5718
||
17238 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5719
||
17239 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5720
||
17240 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5762
||
17241 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5725
||
17242 tp
->pdev
->device
== TG3PCI_DEVICE_TIGON3_5727
) {
17243 tg3_flag_set(tp
, ENABLE_APE
);
17244 tp
->aperegs
= pci_ioremap_bar(pdev
, BAR_2
);
17245 if (!tp
->aperegs
) {
17246 dev_err(&pdev
->dev
,
17247 "Cannot map APE registers, aborting\n");
17249 goto err_out_iounmap
;
17253 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
17254 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
17256 dev
->ethtool_ops
= &tg3_ethtool_ops
;
17257 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
17258 dev
->netdev_ops
= &tg3_netdev_ops
;
17259 dev
->irq
= pdev
->irq
;
17261 err
= tg3_get_invariants(tp
, ent
);
17263 dev_err(&pdev
->dev
,
17264 "Problem fetching invariants of chip, aborting\n");
17265 goto err_out_apeunmap
;
17268 /* The EPB bridge inside 5714, 5715, and 5780 and any
17269 * device behind the EPB cannot support DMA addresses > 40-bit.
17270 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17271 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17272 * do DMA address check in tg3_start_xmit().
17274 if (tg3_flag(tp
, IS_5788
))
17275 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
17276 else if (tg3_flag(tp
, 40BIT_DMA_BUG
)) {
17277 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(40);
17278 #ifdef CONFIG_HIGHMEM
17279 dma_mask
= DMA_BIT_MASK(64);
17282 persist_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
17284 /* Configure DMA attributes. */
17285 if (dma_mask
> DMA_BIT_MASK(32)) {
17286 err
= pci_set_dma_mask(pdev
, dma_mask
);
17288 features
|= NETIF_F_HIGHDMA
;
17289 err
= pci_set_consistent_dma_mask(pdev
,
17292 dev_err(&pdev
->dev
, "Unable to obtain 64 bit "
17293 "DMA for consistent allocations\n");
17294 goto err_out_apeunmap
;
17298 if (err
|| dma_mask
== DMA_BIT_MASK(32)) {
17299 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
17301 dev_err(&pdev
->dev
,
17302 "No usable DMA configuration, aborting\n");
17303 goto err_out_apeunmap
;
17307 tg3_init_bufmgr_config(tp
);
17309 features
|= NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
;
17311 /* 5700 B0 chips do not support checksumming correctly due
17312 * to hardware bugs.
17314 if (tg3_chip_rev_id(tp
) != CHIPREV_ID_5700_B0
) {
17315 features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
| NETIF_F_RXCSUM
;
17317 if (tg3_flag(tp
, 5755_PLUS
))
17318 features
|= NETIF_F_IPV6_CSUM
;
17321 /* TSO is on by default on chips that support hardware TSO.
17322 * Firmware TSO on older chips gives lower performance, so it
17323 * is off by default, but can be enabled using ethtool.
17325 if ((tg3_flag(tp
, HW_TSO_1
) ||
17326 tg3_flag(tp
, HW_TSO_2
) ||
17327 tg3_flag(tp
, HW_TSO_3
)) &&
17328 (features
& NETIF_F_IP_CSUM
))
17329 features
|= NETIF_F_TSO
;
17330 if (tg3_flag(tp
, HW_TSO_2
) || tg3_flag(tp
, HW_TSO_3
)) {
17331 if (features
& NETIF_F_IPV6_CSUM
)
17332 features
|= NETIF_F_TSO6
;
17333 if (tg3_flag(tp
, HW_TSO_3
) ||
17334 tg3_asic_rev(tp
) == ASIC_REV_5761
||
17335 (tg3_asic_rev(tp
) == ASIC_REV_5784
&&
17336 tg3_chip_rev(tp
) != CHIPREV_5784_AX
) ||
17337 tg3_asic_rev(tp
) == ASIC_REV_5785
||
17338 tg3_asic_rev(tp
) == ASIC_REV_57780
)
17339 features
|= NETIF_F_TSO_ECN
;
17342 dev
->features
|= features
;
17343 dev
->vlan_features
|= features
;
17346 * Add loopback capability only for a subset of devices that support
17347 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17348 * loopback for the remaining devices.
17350 if (tg3_asic_rev(tp
) != ASIC_REV_5780
&&
17351 !tg3_flag(tp
, CPMU_PRESENT
))
17352 /* Add the loopback capability */
17353 features
|= NETIF_F_LOOPBACK
;
17355 dev
->hw_features
|= features
;
17357 if (tg3_chip_rev_id(tp
) == CHIPREV_ID_5705_A1
&&
17358 !tg3_flag(tp
, TSO_CAPABLE
) &&
17359 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
17360 tg3_flag_set(tp
, MAX_RXPEND_64
);
17361 tp
->rx_pending
= 63;
17364 err
= tg3_get_device_address(tp
);
17366 dev_err(&pdev
->dev
,
17367 "Could not obtain valid ethernet address, aborting\n");
17368 goto err_out_apeunmap
;
17372 * Reset chip in case UNDI or EFI driver did not shutdown
17373 * DMA self test will enable WDMAC and we'll see (spurious)
17374 * pending DMA on the PCI bus at that point.
17376 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
17377 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
17378 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
17379 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
17382 err
= tg3_test_dma(tp
);
17384 dev_err(&pdev
->dev
, "DMA engine test failed, aborting\n");
17385 goto err_out_apeunmap
;
17388 intmbx
= MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
;
17389 rcvmbx
= MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
;
17390 sndmbx
= MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
;
17391 for (i
= 0; i
< tp
->irq_max
; i
++) {
17392 struct tg3_napi
*tnapi
= &tp
->napi
[i
];
17395 tnapi
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
17397 tnapi
->int_mbox
= intmbx
;
17403 tnapi
->consmbox
= rcvmbx
;
17404 tnapi
->prodmbox
= sndmbx
;
17407 tnapi
->coal_now
= HOSTCC_MODE_COAL_VEC1_NOW
<< (i
- 1);
17409 tnapi
->coal_now
= HOSTCC_MODE_NOW
;
17411 if (!tg3_flag(tp
, SUPPORT_MSIX
))
17415 * If we support MSIX, we'll be using RSS. If we're using
17416 * RSS, the first vector only handles link interrupts and the
17417 * remaining vectors handle rx and tx interrupts. Reuse the
17418 * mailbox values for the next iteration. The values we setup
17419 * above are still useful for the single vectored mode.
17434 pci_set_drvdata(pdev
, dev
);
17436 if (tg3_asic_rev(tp
) == ASIC_REV_5719
||
17437 tg3_asic_rev(tp
) == ASIC_REV_5720
||
17438 tg3_asic_rev(tp
) == ASIC_REV_5762
)
17439 tg3_flag_set(tp
, PTP_CAPABLE
);
17441 if (tg3_flag(tp
, 5717_PLUS
)) {
17442 /* Resume a low-power mode */
17443 tg3_frob_aux_power(tp
, false);
17446 tg3_timer_init(tp
);
17448 tg3_carrier_off(tp
);
17450 err
= register_netdev(dev
);
17452 dev_err(&pdev
->dev
, "Cannot register net device, aborting\n");
17453 goto err_out_apeunmap
;
17456 netdev_info(dev
, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17457 tp
->board_part_number
,
17458 tg3_chip_rev_id(tp
),
17459 tg3_bus_string(tp
, str
),
17462 if (tp
->phy_flags
& TG3_PHYFLG_IS_CONNECTED
) {
17463 struct phy_device
*phydev
;
17464 phydev
= tp
->mdio_bus
->phy_map
[TG3_PHY_MII_ADDR
];
17466 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17467 phydev
->drv
->name
, dev_name(&phydev
->dev
));
17471 if (tp
->phy_flags
& TG3_PHYFLG_10_100_ONLY
)
17472 ethtype
= "10/100Base-TX";
17473 else if (tp
->phy_flags
& TG3_PHYFLG_ANY_SERDES
)
17474 ethtype
= "1000Base-SX";
17476 ethtype
= "10/100/1000Base-T";
17478 netdev_info(dev
, "attached PHY is %s (%s Ethernet) "
17479 "(WireSpeed[%d], EEE[%d])\n",
17480 tg3_phy_string(tp
), ethtype
,
17481 (tp
->phy_flags
& TG3_PHYFLG_NO_ETH_WIRE_SPEED
) == 0,
17482 (tp
->phy_flags
& TG3_PHYFLG_EEE_CAP
) != 0);
17485 netdev_info(dev
, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17486 (dev
->features
& NETIF_F_RXCSUM
) != 0,
17487 tg3_flag(tp
, USE_LINKCHG_REG
) != 0,
17488 (tp
->phy_flags
& TG3_PHYFLG_USE_MI_INTERRUPT
) != 0,
17489 tg3_flag(tp
, ENABLE_ASF
) != 0,
17490 tg3_flag(tp
, TSO_CAPABLE
) != 0);
17491 netdev_info(dev
, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17493 pdev
->dma_mask
== DMA_BIT_MASK(32) ? 32 :
17494 ((u64
)pdev
->dma_mask
) == DMA_BIT_MASK(40) ? 40 : 64);
17496 pci_save_state(pdev
);
17502 iounmap(tp
->aperegs
);
17503 tp
->aperegs
= NULL
;
17515 err_out_power_down
:
17516 pci_set_power_state(pdev
, PCI_D3hot
);
17519 pci_release_regions(pdev
);
17521 err_out_disable_pdev
:
17522 pci_disable_device(pdev
);
17523 pci_set_drvdata(pdev
, NULL
);
17527 static void tg3_remove_one(struct pci_dev
*pdev
)
17529 struct net_device
*dev
= pci_get_drvdata(pdev
);
17532 struct tg3
*tp
= netdev_priv(dev
);
17534 release_firmware(tp
->fw
);
17536 tg3_reset_task_cancel(tp
);
17538 if (tg3_flag(tp
, USE_PHYLIB
)) {
17543 unregister_netdev(dev
);
17545 iounmap(tp
->aperegs
);
17546 tp
->aperegs
= NULL
;
17553 pci_release_regions(pdev
);
17554 pci_disable_device(pdev
);
17555 pci_set_drvdata(pdev
, NULL
);
17559 #ifdef CONFIG_PM_SLEEP
17560 static int tg3_suspend(struct device
*device
)
17562 struct pci_dev
*pdev
= to_pci_dev(device
);
17563 struct net_device
*dev
= pci_get_drvdata(pdev
);
17564 struct tg3
*tp
= netdev_priv(dev
);
17567 if (!netif_running(dev
))
17570 tg3_reset_task_cancel(tp
);
17572 tg3_netif_stop(tp
);
17574 tg3_timer_stop(tp
);
17576 tg3_full_lock(tp
, 1);
17577 tg3_disable_ints(tp
);
17578 tg3_full_unlock(tp
);
17580 netif_device_detach(dev
);
17582 tg3_full_lock(tp
, 0);
17583 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
17584 tg3_flag_clear(tp
, INIT_COMPLETE
);
17585 tg3_full_unlock(tp
);
17587 err
= tg3_power_down_prepare(tp
);
17591 tg3_full_lock(tp
, 0);
17593 tg3_flag_set(tp
, INIT_COMPLETE
);
17594 err2
= tg3_restart_hw(tp
, true);
17598 tg3_timer_start(tp
);
17600 netif_device_attach(dev
);
17601 tg3_netif_start(tp
);
17604 tg3_full_unlock(tp
);
17613 static int tg3_resume(struct device
*device
)
17615 struct pci_dev
*pdev
= to_pci_dev(device
);
17616 struct net_device
*dev
= pci_get_drvdata(pdev
);
17617 struct tg3
*tp
= netdev_priv(dev
);
17620 if (!netif_running(dev
))
17623 netif_device_attach(dev
);
17625 tg3_full_lock(tp
, 0);
17627 tg3_flag_set(tp
, INIT_COMPLETE
);
17628 err
= tg3_restart_hw(tp
,
17629 !(tp
->phy_flags
& TG3_PHYFLG_KEEP_LINK_ON_PWRDN
));
17633 tg3_timer_start(tp
);
17635 tg3_netif_start(tp
);
17638 tg3_full_unlock(tp
);
17645 #endif /* CONFIG_PM_SLEEP */
17647 static SIMPLE_DEV_PM_OPS(tg3_pm_ops
, tg3_suspend
, tg3_resume
);
17650 * tg3_io_error_detected - called when PCI error is detected
17651 * @pdev: Pointer to PCI device
17652 * @state: The current pci connection state
17654 * This function is called after a PCI bus error affecting
17655 * this device has been detected.
17657 static pci_ers_result_t
tg3_io_error_detected(struct pci_dev
*pdev
,
17658 pci_channel_state_t state
)
17660 struct net_device
*netdev
= pci_get_drvdata(pdev
);
17661 struct tg3
*tp
= netdev_priv(netdev
);
17662 pci_ers_result_t err
= PCI_ERS_RESULT_NEED_RESET
;
17664 netdev_info(netdev
, "PCI I/O error detected\n");
17668 if (!netif_running(netdev
))
17673 tg3_netif_stop(tp
);
17675 tg3_timer_stop(tp
);
17677 /* Want to make sure that the reset task doesn't run */
17678 tg3_reset_task_cancel(tp
);
17680 netif_device_detach(netdev
);
17682 /* Clean up software state, even if MMIO is blocked */
17683 tg3_full_lock(tp
, 0);
17684 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
17685 tg3_full_unlock(tp
);
17688 if (state
== pci_channel_io_perm_failure
)
17689 err
= PCI_ERS_RESULT_DISCONNECT
;
17691 pci_disable_device(pdev
);
17699 * tg3_io_slot_reset - called after the pci bus has been reset.
17700 * @pdev: Pointer to PCI device
17702 * Restart the card from scratch, as if from a cold-boot.
17703 * At this point, the card has exprienced a hard reset,
17704 * followed by fixups by BIOS, and has its config space
17705 * set up identically to what it was at cold boot.
17707 static pci_ers_result_t
tg3_io_slot_reset(struct pci_dev
*pdev
)
17709 struct net_device
*netdev
= pci_get_drvdata(pdev
);
17710 struct tg3
*tp
= netdev_priv(netdev
);
17711 pci_ers_result_t rc
= PCI_ERS_RESULT_DISCONNECT
;
17716 if (pci_enable_device(pdev
)) {
17717 netdev_err(netdev
, "Cannot re-enable PCI device after reset.\n");
17721 pci_set_master(pdev
);
17722 pci_restore_state(pdev
);
17723 pci_save_state(pdev
);
17725 if (!netif_running(netdev
)) {
17726 rc
= PCI_ERS_RESULT_RECOVERED
;
17730 err
= tg3_power_up(tp
);
17734 rc
= PCI_ERS_RESULT_RECOVERED
;
17743 * tg3_io_resume - called when traffic can start flowing again.
17744 * @pdev: Pointer to PCI device
17746 * This callback is called when the error recovery driver tells
17747 * us that its OK to resume normal operation.
17749 static void tg3_io_resume(struct pci_dev
*pdev
)
17751 struct net_device
*netdev
= pci_get_drvdata(pdev
);
17752 struct tg3
*tp
= netdev_priv(netdev
);
17757 if (!netif_running(netdev
))
17760 tg3_full_lock(tp
, 0);
17761 tg3_flag_set(tp
, INIT_COMPLETE
);
17762 err
= tg3_restart_hw(tp
, true);
17764 tg3_full_unlock(tp
);
17765 netdev_err(netdev
, "Cannot restart hardware after reset.\n");
17769 netif_device_attach(netdev
);
17771 tg3_timer_start(tp
);
17773 tg3_netif_start(tp
);
17775 tg3_full_unlock(tp
);
17783 static const struct pci_error_handlers tg3_err_handler
= {
17784 .error_detected
= tg3_io_error_detected
,
17785 .slot_reset
= tg3_io_slot_reset
,
17786 .resume
= tg3_io_resume
17789 static struct pci_driver tg3_driver
= {
17790 .name
= DRV_MODULE_NAME
,
17791 .id_table
= tg3_pci_tbl
,
17792 .probe
= tg3_init_one
,
17793 .remove
= tg3_remove_one
,
17794 .err_handler
= &tg3_err_handler
,
17795 .driver
.pm
= &tg3_pm_ops
,
17798 static int __init
tg3_init(void)
17800 return pci_register_driver(&tg3_driver
);
17803 static void __exit
tg3_cleanup(void)
17805 pci_unregister_driver(&tg3_driver
);
17808 module_init(tg3_init
);
17809 module_exit(tg3_cleanup
);