/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag) \
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag) \
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag) \
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
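
/* Illustrative expansion: tg3_flag(tp, ENABLE_APE) becomes
 * _tg3_flag(TG3_FLAG_ENABLE_APE, tp->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap; the set/clear variants
 * work the same way through set_bit()/clear_bit().
 */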

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		132
#define DRV_MODULE_VERSION \
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 21, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp) \
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
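
/* Since TG3_TX_RING_SIZE is a power of two (512), the masking in NEXT_TX()
 * is equivalent to ((N) + 1) % TG3_TX_RING_SIZE, but compiles to a single
 * AND instead of a hardware modulo, per the comment above.
 */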

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
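/* e.g. with the default TG3_DEF_TX_RING_PENDING of 511 this evaluates to
 * 127, so the stack is woken once roughly a quarter of the ring is free.
 */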
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

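/* Registers can also be reached indirectly through PCI config space:
 * the register offset goes into TG3PCI_REG_BASE_ADDR and the value is
 * moved through TG3PCI_REG_DATA; indirect_lock serializes the pair of
 * config cycles.
 */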
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

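/* NIC SRAM is reached through a similar window: TG3PCI_MEM_WIN_BASE_ADDR
 * selects the SRAM offset and TG3PCI_MEM_WIN_DATA carries the data, either
 * through PCI config space (SRAM_USE_CONFIG) or through the memory-mapped
 * mirrors of the same registers.
 */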
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
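		/* fall through: on non-5761 chips the GPIO lock uses the
		 * same per-function request bit as GRC/MEM below.
		 */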
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
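		/* fall through: non-5761 chips release the GPIO lock with
		 * the same per-function grant bit as GRC/MEM below.
		 */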
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

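/* Poll TG3_APE_EVENT_STATUS in 10 usec steps; returns nonzero if the
 * pending bit has still not cleared after timeout_us microseconds.
 */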
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
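/* 5000 polls at 10 usec apiece: allow up to ~50 ms for MI_COM_BUSY to
 * clear before giving up on an MDIO transaction (see the loops below).
 */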

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
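	/* Convert the remaining usecs into 8 usec poll iterations,
	 * matching the udelay(8) in the loop below.
	 */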
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

1942 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1943 {
1944 u8 cap = 0;
1945
1946 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1947 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1948 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1949 if (lcladv & ADVERTISE_1000XPAUSE)
1950 cap = FLOW_CTRL_RX;
1951 if (rmtadv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_TX;
1953 }
1954
1955 return cap;
1956 }
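/* For illustration, the resolution logic above reduces to the
 * following table (cf. the pause resolution rules in IEEE 802.3):
 *
 *   lcladv             rmtadv             resolved cap
 *   1000XPAUSE         1000XPAUSE         FLOW_CTRL_TX | FLOW_CTRL_RX
 *   1000XPAUSE | ASYM  ASYM               FLOW_CTRL_RX
 *   ASYM               1000XPAUSE | ASYM  FLOW_CTRL_TX
 *   ASYM               ASYM               0
 *   0                  anything           0
 */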
1957
1958 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1959 {
1960 u8 autoneg;
1961 u8 flowctrl = 0;
1962 u32 old_rx_mode = tp->rx_mode;
1963 u32 old_tx_mode = tp->tx_mode;
1964
1965 if (tg3_flag(tp, USE_PHYLIB))
1966 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1967 else
1968 autoneg = tp->link_config.autoneg;
1969
1970 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1971 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1972 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1973 else
1974 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1975 } else
1976 flowctrl = tp->link_config.flowctrl;
1977
1978 tp->link_config.active_flowctrl = flowctrl;
1979
1980 if (flowctrl & FLOW_CTRL_RX)
1981 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1982 else
1983 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1984
1985 if (old_rx_mode != tp->rx_mode)
1986 tw32_f(MAC_RX_MODE, tp->rx_mode);
1987
1988 if (flowctrl & FLOW_CTRL_TX)
1989 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1990 else
1991 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1992
1993 if (old_tx_mode != tp->tx_mode)
1994 tw32_f(MAC_TX_MODE, tp->tx_mode);
1995 }
1996
1997 static void tg3_adjust_link(struct net_device *dev)
1998 {
1999 u8 oldflowctrl, linkmesg = 0;
2000 u32 mac_mode, lcl_adv, rmt_adv;
2001 struct tg3 *tp = netdev_priv(dev);
2002 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2003
2004 spin_lock_bh(&tp->lock);
2005
2006 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2007 MAC_MODE_HALF_DUPLEX);
2008
2009 oldflowctrl = tp->link_config.active_flowctrl;
2010
2011 if (phydev->link) {
2012 lcl_adv = 0;
2013 rmt_adv = 0;
2014
2015 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2016 mac_mode |= MAC_MODE_PORT_MODE_MII;
2017 else if (phydev->speed == SPEED_1000 ||
2018 tg3_asic_rev(tp) != ASIC_REV_5785)
2019 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2020 else
2021 mac_mode |= MAC_MODE_PORT_MODE_MII;
2022
2023 if (phydev->duplex == DUPLEX_HALF)
2024 mac_mode |= MAC_MODE_HALF_DUPLEX;
2025 else {
2026 lcl_adv = mii_advertise_flowctrl(
2027 tp->link_config.flowctrl);
2028
2029 if (phydev->pause)
2030 rmt_adv = LPA_PAUSE_CAP;
2031 if (phydev->asym_pause)
2032 rmt_adv |= LPA_PAUSE_ASYM;
2033 }
2034
2035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2036 } else
2037 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2038
2039 if (mac_mode != tp->mac_mode) {
2040 tp->mac_mode = mac_mode;
2041 tw32_f(MAC_MODE, tp->mac_mode);
2042 udelay(40);
2043 }
2044
2045 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2046 if (phydev->speed == SPEED_10)
2047 tw32(MAC_MI_STAT,
2048 MAC_MI_STAT_10MBPS_MODE |
2049 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2050 else
2051 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 }
2053
2054 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2055 tw32(MAC_TX_LENGTHS,
2056 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057 (6 << TX_LENGTHS_IPG_SHIFT) |
2058 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2059 else
2060 tw32(MAC_TX_LENGTHS,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 (6 << TX_LENGTHS_IPG_SHIFT) |
2063 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064
2065 if (phydev->link != tp->old_link ||
2066 phydev->speed != tp->link_config.active_speed ||
2067 phydev->duplex != tp->link_config.active_duplex ||
2068 oldflowctrl != tp->link_config.active_flowctrl)
2069 linkmesg = 1;
2070
2071 tp->old_link = phydev->link;
2072 tp->link_config.active_speed = phydev->speed;
2073 tp->link_config.active_duplex = phydev->duplex;
2074
2075 spin_unlock_bh(&tp->lock);
2076
2077 if (linkmesg)
2078 tg3_link_report(tp);
2079 }
2080
2081 static int tg3_phy_init(struct tg3 *tp)
2082 {
2083 struct phy_device *phydev;
2084
2085 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2086 return 0;
2087
2088 /* Bring the PHY back to a known state. */
2089 tg3_bmcr_reset(tp);
2090
2091 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2092
2093 /* Attach the MAC to the PHY. */
2094 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2095 tg3_adjust_link, phydev->interface);
2096 if (IS_ERR(phydev)) {
2097 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2098 return PTR_ERR(phydev);
2099 }
2100
2101 /* Mask with MAC supported features. */
2102 switch (phydev->interface) {
2103 case PHY_INTERFACE_MODE_GMII:
2104 case PHY_INTERFACE_MODE_RGMII:
2105 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2106 phydev->supported &= (PHY_GBIT_FEATURES |
2107 SUPPORTED_Pause |
2108 SUPPORTED_Asym_Pause);
2109 break;
2110 }
2111 /* fallthru */
2112 case PHY_INTERFACE_MODE_MII:
2113 phydev->supported &= (PHY_BASIC_FEATURES |
2114 SUPPORTED_Pause |
2115 SUPPORTED_Asym_Pause);
2116 break;
2117 default:
2118 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phydev->advertising = phydev->supported;
2125
2126 return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 phydev->advertising = tp->link_config.advertising;
2144 }
2145
2146 phy_start(phydev);
2147
2148 phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154 return;
2155
2156 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164 }
2165 }
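/* A sketch of the phylib lifecycle implemented by the four helpers
 * above (call sites assumed to be the usual open/close/remove paths):
 *
 *   tg3_phy_init()  - one-time phy_connect() plus feature masking
 *   tg3_phy_start() - restore link_config if waking from low power,
 *                     then phy_start() and phy_start_aneg()
 *   tg3_phy_stop()  - phy_stop() while the interface goes down
 *   tg3_phy_fini()  - phy_disconnect() on teardown
 */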
2166
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169 int err;
2170 u32 val;
2171
2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173 return 0;
2174
2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 /* Cannot do read-modify-write on 5401 */
2177 err = tg3_phy_auxctl_write(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180 0x4c20);
2181 goto done;
2182 }
2183
2184 err = tg3_phy_auxctl_read(tp,
2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186 if (err)
2187 return err;
2188
2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 err = tg3_phy_auxctl_write(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194 return err;
2195 }
2196
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199 u32 phytest;
2200
2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202 u32 phy;
2203
2204 tg3_writephy(tp, MII_TG3_FET_TEST,
2205 phytest | MII_TG3_FET_SHADOW_EN);
2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207 if (enable)
2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209 else
2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212 }
2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214 }
2215 }
2216
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219 u32 reg;
2220
2221 if (!tg3_flag(tp, 5705_PLUS) ||
2222 (tg3_flag(tp, 5717_PLUS) &&
2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224 return;
2225
2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 tg3_phy_fet_toggle_apd(tp, enable);
2228 return;
2229 }
2230
2231 reg = MII_TG3_MISC_SHDW_WREN |
2232 MII_TG3_MISC_SHDW_SCR5_SEL |
2233 MII_TG3_MISC_SHDW_SCR5_LPED |
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2241
2242
2243 reg = MII_TG3_MISC_SHDW_WREN |
2244 MII_TG3_MISC_SHDW_APD_SEL |
2245 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2246 if (enable)
2247 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2248
2249 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2250 }
2251
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 {
2254 u32 phy;
2255
2256 if (!tg3_flag(tp, 5705_PLUS) ||
2257 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2258 return;
2259
2260 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2261 u32 ephy;
2262
2263 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2265
2266 tg3_writephy(tp, MII_TG3_FET_TEST,
2267 ephy | MII_TG3_FET_SHADOW_EN);
2268 if (!tg3_readphy(tp, reg, &phy)) {
2269 if (enable)
2270 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 else
2272 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273 tg3_writephy(tp, reg, phy);
2274 }
2275 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276 }
2277 } else {
2278 int ret;
2279
2280 ret = tg3_phy_auxctl_read(tp,
2281 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2282 if (!ret) {
2283 if (enable)
2284 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 else
2286 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287 tg3_phy_auxctl_write(tp,
2288 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289 }
2290 }
2291 }
2292
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 {
2295 int ret;
2296 u32 val;
2297
2298 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2299 return;
2300
2301 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2302 if (!ret)
2303 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2305 }
2306
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2308 {
2309 u32 otp, phy;
2310
2311 if (!tp->phy_otp)
2312 return;
2313
2314 otp = tp->phy_otp;
2315
2316 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2317 return;
2318
2319 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2322
2323 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2326
2327 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2330
2331 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2333
2334 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2336
2337 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2340
2341 tg3_phy_toggle_auxctl_smdsp(tp, false);
2342 }
2343
2344 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2345 {
2346 u32 val;
2347
2348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349 return;
2350
2351 tp->setlpicnt = 0;
2352
2353 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2354 current_link_up &&
2355 tp->link_config.active_duplex == DUPLEX_FULL &&
2356 (tp->link_config.active_speed == SPEED_100 ||
2357 tp->link_config.active_speed == SPEED_1000)) {
2358 u32 eeectl;
2359
2360 if (tp->link_config.active_speed == SPEED_1000)
2361 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2362 else
2363 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2364
2365 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2366
2367 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2368 TG3_CL45_D7_EEERES_STAT, &val);
2369
2370 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2372 tp->setlpicnt = 2;
2373 }
2374
2375 if (!tp->setlpicnt) {
2376 if (current_link_up &&
2377 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2378 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2379 tg3_phy_toggle_auxctl_smdsp(tp, false);
2380 }
2381
2382 val = tr32(TG3_CPMU_EEE_MODE);
2383 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2384 }
2385 }
2386
2387 static void tg3_phy_eee_enable(struct tg3 *tp)
2388 {
2389 u32 val;
2390
2391 if (tp->link_config.active_speed == SPEED_1000 &&
2392 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2393 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2394 tg3_flag(tp, 57765_CLASS)) &&
2395 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396 val = MII_TG3_DSP_TAP26_ALNOKO |
2397 MII_TG3_DSP_TAP26_RMRXSTO;
2398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2399 tg3_phy_toggle_auxctl_smdsp(tp, false);
2400 }
2401
2402 val = tr32(TG3_CPMU_EEE_MODE);
2403 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2404 }
2405
2406 static int tg3_wait_macro_done(struct tg3 *tp)
2407 {
2408 int limit = 100;
2409
2410 while (limit--) {
2411 u32 tmp32;
2412
2413 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2414 if ((tmp32 & 0x1000) == 0)
2415 break;
2416 }
2417 }
2418 if (limit < 0)
2419 return -EBUSY;
2420
2421 return 0;
2422 }
2423
2424 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2425 {
2426 static const u32 test_pat[4][6] = {
2427 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2431 };
2432 int chan;
2433
2434 for (chan = 0; chan < 4; chan++) {
2435 int i;
2436
2437 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2438 (chan * 0x2000) | 0x0200);
2439 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2440
2441 for (i = 0; i < 6; i++)
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2443 test_pat[chan][i]);
2444
2445 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2446 if (tg3_wait_macro_done(tp)) {
2447 *resetp = 1;
2448 return -EBUSY;
2449 }
2450
2451 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2452 (chan * 0x2000) | 0x0200);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2454 if (tg3_wait_macro_done(tp)) {
2455 *resetp = 1;
2456 return -EBUSY;
2457 }
2458
2459 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2460 if (tg3_wait_macro_done(tp)) {
2461 *resetp = 1;
2462 return -EBUSY;
2463 }
2464
2465 for (i = 0; i < 6; i += 2) {
2466 u32 low, high;
2467
2468 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2469 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2470 tg3_wait_macro_done(tp)) {
2471 *resetp = 1;
2472 return -EBUSY;
2473 }
2474 low &= 0x7fff;
2475 high &= 0x000f;
2476 if (low != test_pat[chan][i] ||
2477 high != test_pat[chan][i+1]) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2479 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2481
2482 return -EBUSY;
2483 }
2484 }
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2491 {
2492 int chan;
2493
2494 for (chan = 0; chan < 4; chan++) {
2495 int i;
2496
2497 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498 (chan * 0x2000) | 0x0200);
2499 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2500 for (i = 0; i < 6; i++)
2501 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2502 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2503 if (tg3_wait_macro_done(tp))
2504 return -EBUSY;
2505 }
2506
2507 return 0;
2508 }
2509
2510 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2511 {
2512 u32 reg32, phy9_orig;
2513 int retries, do_phy_reset, err;
2514
2515 retries = 10;
2516 do_phy_reset = 1;
2517 do {
2518 if (do_phy_reset) {
2519 err = tg3_bmcr_reset(tp);
2520 if (err)
2521 return err;
2522 do_phy_reset = 0;
2523 }
2524
2525 /* Disable transmitter and interrupt. */
2526 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2527 continue;
2528
2529 reg32 |= 0x3000;
2530 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2531
2532 /* Set full-duplex, 1000 Mbps. */
2533 tg3_writephy(tp, MII_BMCR,
2534 BMCR_FULLDPLX | BMCR_SPEED1000);
2535
2536 /* Set to master mode. */
2537 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2538 continue;
2539
2540 tg3_writephy(tp, MII_CTRL1000,
2541 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2542
2543 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2544 if (err)
2545 return err;
2546
2547 /* Block the PHY control access. */
2548 tg3_phydsp_write(tp, 0x8005, 0x0800);
2549
2550 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2551 if (!err)
2552 break;
2553 } while (--retries);
2554
2555 err = tg3_phy_reset_chanpat(tp);
2556 if (err)
2557 return err;
2558
2559 tg3_phydsp_write(tp, 0x8005, 0x0000);
2560
2561 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2562 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2563
2564 tg3_phy_toggle_auxctl_smdsp(tp, false);
2565
2566 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2567
2568 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2569 reg32 &= ~0x3000;
2570 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2571 } else if (!err)
2572 err = -EBUSY;
2573
2574 return err;
2575 }
2576
2577 static void tg3_carrier_off(struct tg3 *tp)
2578 {
2579 netif_carrier_off(tp->dev);
2580 tp->link_up = false;
2581 }
2582
2583 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2584 {
2585 if (tg3_flag(tp, ENABLE_ASF))
2586 netdev_warn(tp->dev,
2587 "Management side-band traffic will be interrupted during phy settings change\n");
2588 }
2589
2590 /* Reset the tigon3 PHY unconditionally and reapply the
2591 * chip-specific PHY workarounds after the reset completes.
2592 */
2593 static int tg3_phy_reset(struct tg3 *tp)
2594 {
2595 u32 val, cpmuctrl;
2596 int err;
2597
2598 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2599 val = tr32(GRC_MISC_CFG);
2600 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2601 udelay(40);
2602 }
2603 err = tg3_readphy(tp, MII_BMSR, &val);
2604 err |= tg3_readphy(tp, MII_BMSR, &val);
2605 if (err != 0)
2606 return -EBUSY;
2607
2608 if (netif_running(tp->dev) && tp->link_up) {
2609 netif_carrier_off(tp->dev);
2610 tg3_link_report(tp);
2611 }
2612
2613 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2614 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2615 tg3_asic_rev(tp) == ASIC_REV_5705) {
2616 err = tg3_phy_reset_5703_4_5(tp);
2617 if (err)
2618 return err;
2619 goto out;
2620 }
2621
2622 cpmuctrl = 0;
2623 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2624 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2625 cpmuctrl = tr32(TG3_CPMU_CTRL);
2626 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2627 tw32(TG3_CPMU_CTRL,
2628 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2629 }
2630
2631 err = tg3_bmcr_reset(tp);
2632 if (err)
2633 return err;
2634
2635 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2636 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2637 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2638
2639 tw32(TG3_CPMU_CTRL, cpmuctrl);
2640 }
2641
2642 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2643 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2644 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2645 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2646 CPMU_LSPD_1000MB_MACCLK_12_5) {
2647 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2648 udelay(40);
2649 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2650 }
2651 }
2652
2653 if (tg3_flag(tp, 5717_PLUS) &&
2654 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2655 return 0;
2656
2657 tg3_phy_apply_otp(tp);
2658
2659 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2660 tg3_phy_toggle_apd(tp, true);
2661 else
2662 tg3_phy_toggle_apd(tp, false);
2663
2664 out:
2665 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2666 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2667 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2668 tg3_phydsp_write(tp, 0x000a, 0x0323);
2669 tg3_phy_toggle_auxctl_smdsp(tp, false);
2670 }
2671
2672 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2673 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2674 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2675 }
2676
2677 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2678 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2679 tg3_phydsp_write(tp, 0x000a, 0x310b);
2680 tg3_phydsp_write(tp, 0x201f, 0x9506);
2681 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2682 tg3_phy_toggle_auxctl_smdsp(tp, false);
2683 }
2684 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2685 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2686 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2687 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2688 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2689 tg3_writephy(tp, MII_TG3_TEST1,
2690 MII_TG3_TEST1_TRIM_EN | 0x4);
2691 } else
2692 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2693
2694 tg3_phy_toggle_auxctl_smdsp(tp, false);
2695 }
2696 }
2697
2698 /* Set the Extended packet length bit (bit 14) on all chips
2699 * that support jumbo frames. */
2700 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2701 /* Cannot do read-modify-write on 5401 */
2702 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2703 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2704 /* Set bit 14 with read-modify-write to preserve other bits */
2705 err = tg3_phy_auxctl_read(tp,
2706 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2707 if (!err)
2708 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2709 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2710 }
2711
2712 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2713 * jumbo frame transmission.
2714 */
2715 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2716 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2717 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2718 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2719 }
2720
2721 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2722 /* adjust output voltage */
2723 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2724 }
2725
2726 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2727 tg3_phydsp_write(tp, 0xffb, 0x4000);
2728
2729 tg3_phy_toggle_automdix(tp, true);
2730 tg3_phy_set_wirespeed(tp);
2731 return 0;
2732 }
2733
2734 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2736 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2737 TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742 (TG3_GPIO_MSG_DRVR_PRES << 12))
2743
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748 (TG3_GPIO_MSG_NEED_VAUX << 12))
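/* For illustration: each PCI function owns one 4-bit nibble of the
 * message word, so function 2's bits are TG3_GPIO_MSG_MASK shifted by
 * TG3_APE_GPIO_MSG_SHIFT + 8, and tg3_set_function_status() below
 * rewrites only that nibble while preserving the other functions'.
 */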
2749
2750 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2751 {
2752 u32 status, shift;
2753
2754 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2755 tg3_asic_rev(tp) == ASIC_REV_5719)
2756 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2757 else
2758 status = tr32(TG3_CPMU_DRV_STATUS);
2759
2760 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2761 status &= ~(TG3_GPIO_MSG_MASK << shift);
2762 status |= (newstat << shift);
2763
2764 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2765 tg3_asic_rev(tp) == ASIC_REV_5719)
2766 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2767 else
2768 tw32(TG3_CPMU_DRV_STATUS, status);
2769
2770 return status >> TG3_APE_GPIO_MSG_SHIFT;
2771 }
2772
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2774 {
2775 if (!tg3_flag(tp, IS_NIC))
2776 return 0;
2777
2778 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2779 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2780 tg3_asic_rev(tp) == ASIC_REV_5720) {
2781 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2782 return -EIO;
2783
2784 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2785
2786 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2787 TG3_GRC_LCLCTL_PWRSW_DELAY);
2788
2789 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2790 } else {
2791 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2792 TG3_GRC_LCLCTL_PWRSW_DELAY);
2793 }
2794
2795 return 0;
2796 }
2797
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2799 {
2800 u32 grc_local_ctrl;
2801
2802 if (!tg3_flag(tp, IS_NIC) ||
2803 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2804 tg3_asic_rev(tp) == ASIC_REV_5701)
2805 return;
2806
2807 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2808
2809 tw32_wait_f(GRC_LOCAL_CTRL,
2810 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812
2813 tw32_wait_f(GRC_LOCAL_CTRL,
2814 grc_local_ctrl,
2815 TG3_GRC_LCLCTL_PWRSW_DELAY);
2816
2817 tw32_wait_f(GRC_LOCAL_CTRL,
2818 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 }
2821
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2823 {
2824 if (!tg3_flag(tp, IS_NIC))
2825 return;
2826
2827 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2828 tg3_asic_rev(tp) == ASIC_REV_5701) {
2829 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2830 (GRC_LCLCTRL_GPIO_OE0 |
2831 GRC_LCLCTRL_GPIO_OE1 |
2832 GRC_LCLCTRL_GPIO_OE2 |
2833 GRC_LCLCTRL_GPIO_OUTPUT0 |
2834 GRC_LCLCTRL_GPIO_OUTPUT1),
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2837 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2838 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2840 GRC_LCLCTRL_GPIO_OE1 |
2841 GRC_LCLCTRL_GPIO_OE2 |
2842 GRC_LCLCTRL_GPIO_OUTPUT0 |
2843 GRC_LCLCTRL_GPIO_OUTPUT1 |
2844 tp->grc_local_ctrl;
2845 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2849 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2853 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 } else {
2856 u32 no_gpio2;
2857 u32 grc_local_ctrl = 0;
2858
2859 /* Workaround to prevent overdrawing Amps. */
2860 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2861 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2862 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2863 grc_local_ctrl,
2864 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 }
2866
2867 /* On 5753 and variants, GPIO2 cannot be used. */
2868 no_gpio2 = tp->nic_sram_data_cfg &
2869 NIC_SRAM_DATA_CFG_NO_GPIO2;
2870
2871 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2872 GRC_LCLCTRL_GPIO_OE1 |
2873 GRC_LCLCTRL_GPIO_OE2 |
2874 GRC_LCLCTRL_GPIO_OUTPUT1 |
2875 GRC_LCLCTRL_GPIO_OUTPUT2;
2876 if (no_gpio2) {
2877 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT2);
2879 }
2880 tw32_wait_f(GRC_LOCAL_CTRL,
2881 tp->grc_local_ctrl | grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2885
2886 tw32_wait_f(GRC_LOCAL_CTRL,
2887 tp->grc_local_ctrl | grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890 if (!no_gpio2) {
2891 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2892 tw32_wait_f(GRC_LOCAL_CTRL,
2893 tp->grc_local_ctrl | grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2895 }
2896 }
2897 }
2898
2899 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2900 {
2901 u32 msg = 0;
2902
2903 /* Serialize power state transitions */
2904 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2905 return;
2906
2907 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2908 msg = TG3_GPIO_MSG_NEED_VAUX;
2909
2910 msg = tg3_set_function_status(tp, msg);
2911
2912 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2913 goto done;
2914
2915 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2916 tg3_pwrsrc_switch_to_vaux(tp);
2917 else
2918 tg3_pwrsrc_die_with_vmain(tp);
2919
2920 done:
2921 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2922 }
2923
2924 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2925 {
2926 bool need_vaux = false;
2927
2928 /* The GPIOs do something completely different on 57765. */
2929 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2930 return;
2931
2932 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2933 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2934 tg3_asic_rev(tp) == ASIC_REV_5720) {
2935 tg3_frob_aux_power_5717(tp, include_wol ?
2936 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2937 return;
2938 }
2939
2940 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2941 struct net_device *dev_peer;
2942
2943 dev_peer = pci_get_drvdata(tp->pdev_peer);
2944
2945 /* remove_one() may have been run on the peer. */
2946 if (dev_peer) {
2947 struct tg3 *tp_peer = netdev_priv(dev_peer);
2948
2949 if (tg3_flag(tp_peer, INIT_COMPLETE))
2950 return;
2951
2952 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2953 tg3_flag(tp_peer, ENABLE_ASF))
2954 need_vaux = true;
2955 }
2956 }
2957
2958 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2959 tg3_flag(tp, ENABLE_ASF))
2960 need_vaux = true;
2961
2962 if (need_vaux)
2963 tg3_pwrsrc_switch_to_vaux(tp);
2964 else
2965 tg3_pwrsrc_die_with_vmain(tp);
2966 }
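/* In short: after the dispatch above, the device stays on auxiliary
 * power (vaux) whenever this function or its peer still needs power
 * for WoL or ASF management firmware; otherwise the power source
 * reverts to vmain via tg3_pwrsrc_die_with_vmain().
 */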
2967
2968 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2969 {
2970 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2971 return 1;
2972 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2973 if (speed != SPEED_10)
2974 return 1;
2975 } else if (speed == SPEED_10)
2976 return 1;
2977
2978 return 0;
2979 }
2980
2981 static bool tg3_phy_power_bug(struct tg3 *tp)
2982 {
2983 switch (tg3_asic_rev(tp)) {
2984 case ASIC_REV_5700:
2985 case ASIC_REV_5704:
2986 return true;
2987 case ASIC_REV_5780:
2988 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2989 return true;
2990 return false;
2991 case ASIC_REV_5717:
2992 if (!tp->pci_fn)
2993 return true;
2994 return false;
2995 case ASIC_REV_5719:
2996 case ASIC_REV_5720:
2997 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2998 !tp->pci_fn)
2999 return true;
3000 return false;
3001 }
3002
3003 return false;
3004 }
3005
3006 static bool tg3_phy_led_bug(struct tg3 *tp)
3007 {
3008 switch (tg3_asic_rev(tp)) {
3009 case ASIC_REV_5719:
3010 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3011 !tp->pci_fn)
3012 return true;
3013 return false;
3014 }
3015
3016 return false;
3017 }
3018
3019 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3020 {
3021 u32 val;
3022
3023 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3024 return;
3025
3026 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3027 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3028 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3029 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3030
3031 sg_dig_ctrl |=
3032 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3033 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3034 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3035 }
3036 return;
3037 }
3038
3039 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3040 tg3_bmcr_reset(tp);
3041 val = tr32(GRC_MISC_CFG);
3042 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3043 udelay(40);
3044 return;
3045 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3046 u32 phytest;
3047 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3048 u32 phy;
3049
3050 tg3_writephy(tp, MII_ADVERTISE, 0);
3051 tg3_writephy(tp, MII_BMCR,
3052 BMCR_ANENABLE | BMCR_ANRESTART);
3053
3054 tg3_writephy(tp, MII_TG3_FET_TEST,
3055 phytest | MII_TG3_FET_SHADOW_EN);
3056 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3057 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3058 tg3_writephy(tp,
3059 MII_TG3_FET_SHDW_AUXMODE4,
3060 phy);
3061 }
3062 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3063 }
3064 return;
3065 } else if (do_low_power) {
3066 if (!tg3_phy_led_bug(tp))
3067 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3068 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3069
3070 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3071 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3072 MII_TG3_AUXCTL_PCTL_VREG_11V;
3073 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3074 }
3075
3076 /* The PHY should not be powered down on some chips because
3077 * of bugs.
3078 */
3079 if (tg3_phy_power_bug(tp))
3080 return;
3081
3082 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3083 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3084 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3085 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3086 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3088 }
3089
3090 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3091 }
3092
3093 /* tp->lock is held. */
3094 static int tg3_nvram_lock(struct tg3 *tp)
3095 {
3096 if (tg3_flag(tp, NVRAM)) {
3097 int i;
3098
3099 if (tp->nvram_lock_cnt == 0) {
3100 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3101 for (i = 0; i < 8000; i++) {
3102 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3103 break;
3104 udelay(20);
3105 }
3106 if (i == 8000) {
3107 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3108 return -ENODEV;
3109 }
3110 }
3111 tp->nvram_lock_cnt++;
3112 }
3113 return 0;
3114 }
3115
3116 /* tp->lock is held. */
3117 static void tg3_nvram_unlock(struct tg3 *tp)
3118 {
3119 if (tg3_flag(tp, NVRAM)) {
3120 if (tp->nvram_lock_cnt > 0)
3121 tp->nvram_lock_cnt--;
3122 if (tp->nvram_lock_cnt == 0)
3123 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3124 }
3125 }
3126
3127 /* tp->lock is held. */
3128 static void tg3_enable_nvram_access(struct tg3 *tp)
3129 {
3130 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3131 u32 nvaccess = tr32(NVRAM_ACCESS);
3132
3133 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3134 }
3135 }
3136
3137 /* tp->lock is held. */
3138 static void tg3_disable_nvram_access(struct tg3 *tp)
3139 {
3140 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3141 u32 nvaccess = tr32(NVRAM_ACCESS);
3142
3143 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3144 }
3145 }
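/* Typical usage of the helpers above, distilled from tg3_nvram_read()
 * below (a sketch, not a verbatim call site):
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	tg3_enable_nvram_access(tp);
 *	...issue NVRAM commands...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 *
 * The lock is reference counted, so nested lock/unlock pairs are safe.
 */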
3146
3147 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3148 u32 offset, u32 *val)
3149 {
3150 u32 tmp;
3151 int i;
3152
3153 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3154 return -EINVAL;
3155
3156 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3157 EEPROM_ADDR_DEVID_MASK |
3158 EEPROM_ADDR_READ);
3159 tw32(GRC_EEPROM_ADDR,
3160 tmp |
3161 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3162 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3163 EEPROM_ADDR_ADDR_MASK) |
3164 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3165
3166 for (i = 0; i < 1000; i++) {
3167 tmp = tr32(GRC_EEPROM_ADDR);
3168
3169 if (tmp & EEPROM_ADDR_COMPLETE)
3170 break;
3171 msleep(1);
3172 }
3173 if (!(tmp & EEPROM_ADDR_COMPLETE))
3174 return -EBUSY;
3175
3176 tmp = tr32(GRC_EEPROM_DATA);
3177
3178 /*
3179 * The data will always be opposite the native endian
3180 * format. Perform a blind byteswap to compensate.
3181 */
3182 *val = swab32(tmp);
3183
3184 return 0;
3185 }
3186
3187 #define NVRAM_CMD_TIMEOUT 10000
3188
3189 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3190 {
3191 int i;
3192
3193 tw32(NVRAM_CMD, nvram_cmd);
3194 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3195 udelay(10);
3196 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3197 udelay(10);
3198 break;
3199 }
3200 }
3201
3202 if (i == NVRAM_CMD_TIMEOUT)
3203 return -EBUSY;
3204
3205 return 0;
3206 }
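/* Worst case, the loop above busy-waits NVRAM_CMD_TIMEOUT * 10us,
 * i.e. about 100ms, before giving up with -EBUSY.
 */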
3207
3208 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3209 {
3210 if (tg3_flag(tp, NVRAM) &&
3211 tg3_flag(tp, NVRAM_BUFFERED) &&
3212 tg3_flag(tp, FLASH) &&
3213 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3214 (tp->nvram_jedecnum == JEDEC_ATMEL))
3215
3216 addr = ((addr / tp->nvram_pagesize) <<
3217 ATMEL_AT45DB0X1B_PAGE_POS) +
3218 (addr % tp->nvram_pagesize);
3219
3220 return addr;
3221 }
3222
3223 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3224 {
3225 if (tg3_flag(tp, NVRAM) &&
3226 tg3_flag(tp, NVRAM_BUFFERED) &&
3227 tg3_flag(tp, FLASH) &&
3228 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3229 (tp->nvram_jedecnum == JEDEC_ATMEL))
3230
3231 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3232 tp->nvram_pagesize) +
3233 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3234
3235 return addr;
3236 }
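/* A worked example of the two translations above, assuming the Atmel
 * AT45DB0X1B layout of 264-byte pages with a 9-bit in-page offset
 * (i.e. ATMEL_AT45DB0X1B_PAGE_POS == 9, per tg3.h):
 *
 *   tg3_nvram_phys_addr(tp, 1000):
 *     page   = 1000 / 264 = 3
 *     offset = 1000 % 264 = 208
 *     phys   = (3 << 9) + 208 = 1744
 *
 *   tg3_nvram_logical_addr(tp, 1744):
 *     (1744 >> 9) * 264 + (1744 & 0x1ff) = 3 * 264 + 208 = 1000
 */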
3237
3238 /* NOTE: Data read in from NVRAM is byteswapped according to
3239 * the byteswapping settings for all other register accesses.
3240 * tg3 devices are BE devices, so on a BE machine, the data
3241 * returned will be exactly as it is seen in NVRAM. On an LE
3242 * machine, the 32-bit value will be byteswapped.
3243 */
3244 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3245 {
3246 int ret;
3247
3248 if (!tg3_flag(tp, NVRAM))
3249 return tg3_nvram_read_using_eeprom(tp, offset, val);
3250
3251 offset = tg3_nvram_phys_addr(tp, offset);
3252
3253 if (offset > NVRAM_ADDR_MSK)
3254 return -EINVAL;
3255
3256 ret = tg3_nvram_lock(tp);
3257 if (ret)
3258 return ret;
3259
3260 tg3_enable_nvram_access(tp);
3261
3262 tw32(NVRAM_ADDR, offset);
3263 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3264 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3265
3266 if (ret == 0)
3267 *val = tr32(NVRAM_RDDATA);
3268
3269 tg3_disable_nvram_access(tp);
3270
3271 tg3_nvram_unlock(tp);
3272
3273 return ret;
3274 }
3275
3276 /* Ensures NVRAM data is in bytestream format. */
3277 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3278 {
3279 u32 v;
3280 int res = tg3_nvram_read(tp, offset, &v);
3281 if (!res)
3282 *val = cpu_to_be32(v);
3283 return res;
3284 }
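/* Concretely: an NVRAM word holding the bytes aa bb cc dd reads back
 * from tg3_nvram_read() as the numeric value 0xaabbccdd on either
 * host endianness (its in-memory layout differs between BE and LE),
 * while tg3_nvram_read_be32() applies cpu_to_be32() so the bytes land
 * in memory as aa bb cc dd on both, i.e. as a bytestream.
 */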
3285
3286 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3287 u32 offset, u32 len, u8 *buf)
3288 {
3289 int i, j, rc = 0;
3290 u32 val;
3291
3292 for (i = 0; i < len; i += 4) {
3293 u32 addr;
3294 __be32 data;
3295
3296 addr = offset + i;
3297
3298 memcpy(&data, buf + i, 4);
3299
3300 /*
3301 * The SEEPROM interface expects the data to always be opposite
3302 * the native endian format. We accomplish this by reversing
3303 * all the operations that would have been performed on the
3304 * data from a call to tg3_nvram_read_be32().
3305 */
3306 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3307
3308 val = tr32(GRC_EEPROM_ADDR);
3309 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3310
3311 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3312 EEPROM_ADDR_READ);
3313 tw32(GRC_EEPROM_ADDR, val |
3314 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3315 (addr & EEPROM_ADDR_ADDR_MASK) |
3316 EEPROM_ADDR_START |
3317 EEPROM_ADDR_WRITE);
3318
3319 for (j = 0; j < 1000; j++) {
3320 val = tr32(GRC_EEPROM_ADDR);
3321
3322 if (val & EEPROM_ADDR_COMPLETE)
3323 break;
3324 msleep(1);
3325 }
3326 if (!(val & EEPROM_ADDR_COMPLETE)) {
3327 rc = -EBUSY;
3328 break;
3329 }
3330 }
3331
3332 return rc;
3333 }
3334
3335 /* offset and length are dword aligned */
3336 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3337 u8 *buf)
3338 {
3339 int ret = 0;
3340 u32 pagesize = tp->nvram_pagesize;
3341 u32 pagemask = pagesize - 1;
3342 u32 nvram_cmd;
3343 u8 *tmp;
3344
3345 tmp = kmalloc(pagesize, GFP_KERNEL);
3346 if (tmp == NULL)
3347 return -ENOMEM;
3348
3349 while (len) {
3350 int j;
3351 u32 phy_addr, page_off, size;
3352
3353 phy_addr = offset & ~pagemask;
3354
3355 for (j = 0; j < pagesize; j += 4) {
3356 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3357 (__be32 *) (tmp + j));
3358 if (ret)
3359 break;
3360 }
3361 if (ret)
3362 break;
3363
3364 page_off = offset & pagemask;
3365 size = pagesize;
3366 if (len < size)
3367 size = len;
3368
3369 len -= size;
3370
3371 memcpy(tmp + page_off, buf, size);
3372
3373 offset = offset + (pagesize - page_off);
3374
3375 tg3_enable_nvram_access(tp);
3376
3377 /*
3378 * Before we can erase the flash page, we need
3379 * to issue a special "write enable" command.
3380 */
3381 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3382
3383 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3384 break;
3385
3386 /* Erase the target page */
3387 tw32(NVRAM_ADDR, phy_addr);
3388
3389 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3390 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3391
3392 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3393 break;
3394
3395 /* Issue another write enable to start the write. */
3396 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3397
3398 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3399 break;
3400
3401 for (j = 0; j < pagesize; j += 4) {
3402 __be32 data;
3403
3404 data = *((__be32 *) (tmp + j));
3405
3406 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3407
3408 tw32(NVRAM_ADDR, phy_addr + j);
3409
3410 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3411 NVRAM_CMD_WR;
3412
3413 if (j == 0)
3414 nvram_cmd |= NVRAM_CMD_FIRST;
3415 else if (j == (pagesize - 4))
3416 nvram_cmd |= NVRAM_CMD_LAST;
3417
3418 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3419 if (ret)
3420 break;
3421 }
3422 if (ret)
3423 break;
3424 }
3425
3426 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3427 tg3_nvram_exec_cmd(tp, nvram_cmd);
3428
3429 kfree(tmp);
3430
3431 return ret;
3432 }
3433
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3436 u8 *buf)
3437 {
3438 int i, ret = 0;
3439
3440 for (i = 0; i < len; i += 4, offset += 4) {
3441 u32 page_off, phy_addr, nvram_cmd;
3442 __be32 data;
3443
3444 memcpy(&data, buf + i, 4);
3445 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3446
3447 page_off = offset % tp->nvram_pagesize;
3448
3449 phy_addr = tg3_nvram_phys_addr(tp, offset);
3450
3451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3452
3453 if (page_off == 0 || i == 0)
3454 nvram_cmd |= NVRAM_CMD_FIRST;
3455 if (page_off == (tp->nvram_pagesize - 4))
3456 nvram_cmd |= NVRAM_CMD_LAST;
3457
3458 if (i == (len - 4))
3459 nvram_cmd |= NVRAM_CMD_LAST;
3460
3461 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3462 !tg3_flag(tp, FLASH) ||
3463 !tg3_flag(tp, 57765_PLUS))
3464 tw32(NVRAM_ADDR, phy_addr);
3465
3466 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3467 !tg3_flag(tp, 5755_PLUS) &&
3468 (tp->nvram_jedecnum == JEDEC_ST) &&
3469 (nvram_cmd & NVRAM_CMD_FIRST)) {
3470 u32 cmd;
3471
3472 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473 ret = tg3_nvram_exec_cmd(tp, cmd);
3474 if (ret)
3475 break;
3476 }
3477 if (!tg3_flag(tp, FLASH)) {
3478 /* We always do complete word writes to eeprom. */
3479 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3480 }
3481
3482 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3483 if (ret)
3484 break;
3485 }
3486 return ret;
3487 }
3488
3489 /* offset and length are dword aligned */
3490 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3491 {
3492 int ret;
3493
3494 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3495 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3496 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3497 udelay(40);
3498 }
3499
3500 if (!tg3_flag(tp, NVRAM)) {
3501 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3502 } else {
3503 u32 grc_mode;
3504
3505 ret = tg3_nvram_lock(tp);
3506 if (ret)
3507 return ret;
3508
3509 tg3_enable_nvram_access(tp);
3510 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3511 tw32(NVRAM_WRITE1, 0x406);
3512
3513 grc_mode = tr32(GRC_MODE);
3514 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3515
3516 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3517 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3518 buf);
3519 } else {
3520 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3521 buf);
3522 }
3523
3524 grc_mode = tr32(GRC_MODE);
3525 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3526
3527 tg3_disable_nvram_access(tp);
3528 tg3_nvram_unlock(tp);
3529 }
3530
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3533 udelay(40);
3534 }
3535
3536 return ret;
3537 }
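/* The write path dispatch above, summarized:
 *
 *   tg3_nvram_write_block()
 *     no NVRAM interface          -> ..._using_eeprom()
 *     NVRAM_BUFFERED or not FLASH -> ..._buffered()
 *     otherwise                   -> ..._unbuffered(), which does a
 *                                    read/erase/rewrite per flash page
 */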
3538
3539 #define RX_CPU_SCRATCH_BASE 0x30000
3540 #define RX_CPU_SCRATCH_SIZE 0x04000
3541 #define TX_CPU_SCRATCH_BASE 0x34000
3542 #define TX_CPU_SCRATCH_SIZE 0x04000
3543
3544 /* tp->lock is held. */
3545 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3546 {
3547 int i;
3548 const int iters = 10000;
3549
3550 for (i = 0; i < iters; i++) {
3551 tw32(cpu_base + CPU_STATE, 0xffffffff);
3552 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3553 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3554 break;
3555 if (pci_channel_offline(tp->pdev))
3556 return -EBUSY;
3557 }
3558
3559 return (i == iters) ? -EBUSY : 0;
3560 }
3561
3562 /* tp->lock is held. */
3563 static int tg3_rxcpu_pause(struct tg3 *tp)
3564 {
3565 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3566
3567 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3568 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3569 udelay(10);
3570
3571 return rc;
3572 }
3573
3574 /* tp->lock is held. */
3575 static int tg3_txcpu_pause(struct tg3 *tp)
3576 {
3577 return tg3_pause_cpu(tp, TX_CPU_BASE);
3578 }
3579
3580 /* tp->lock is held. */
3581 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 tw32(cpu_base + CPU_STATE, 0xffffffff);
3584 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3585 }
3586
3587 /* tp->lock is held. */
3588 static void tg3_rxcpu_resume(struct tg3 *tp)
3589 {
3590 tg3_resume_cpu(tp, RX_CPU_BASE);
3591 }
3592
3593 /* tp->lock is held. */
3594 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596 int rc;
3597
3598 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3599
3600 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3601 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3602
3603 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3604 return 0;
3605 }
3606 if (cpu_base == RX_CPU_BASE) {
3607 rc = tg3_rxcpu_pause(tp);
3608 } else {
3609 /*
3610 * There is only an Rx CPU for the 5750 derivative in the
3611 * BCM4785.
3612 */
3613 if (tg3_flag(tp, IS_SSB_CORE))
3614 return 0;
3615
3616 rc = tg3_txcpu_pause(tp);
3617 }
3618
3619 if (rc) {
3620 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3621 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3622 return -ENODEV;
3623 }
3624
3625 /* Clear firmware's nvram arbitration. */
3626 if (tg3_flag(tp, NVRAM))
3627 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3628 return 0;
3629 }
3630
3631 static int tg3_fw_data_len(struct tg3 *tp,
3632 const struct tg3_firmware_hdr *fw_hdr)
3633 {
3634 int fw_len;
3635
3636 /* Non-fragmented firmware has one firmware header followed by a
3637 * contiguous chunk of data to be written. The length field in that
3638 * header is not the length of the data to be written but the complete
3639 * length of the bss. The data length is therefore determined from
3640 * tp->fw->size minus the header.
3641 *
3642 * Fragmented firmware has a main header followed by multiple
3643 * fragments. Each fragment is identical to non-fragmented firmware:
3644 * a firmware header followed by a contiguous chunk of data. In the
3645 * main header, the length field is unused and set to 0xffffffff. In
3646 * each fragment header the length is the entire size of that
3647 * fragment, i.e. fragment data plus header length. The data length
3648 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3649 */
3650 if (tp->fw_len == 0xffffffff)
3651 fw_len = be32_to_cpu(fw_hdr->len);
3652 else
3653 fw_len = tp->fw->size;
3654
3655 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3656 }
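/* A sketch of the two layouts described above (not to scale):
 *
 *   non-fragmented:
 *     [ hdr: version | base_addr | len(bss) ][ data ... ]
 *     data words = (tp->fw->size - TG3_FW_HDR_LEN) / sizeof(u32)
 *
 *   fragmented:
 *     [ main hdr: len == 0xffffffff ]
 *     [ frag hdr: len == TG3_FW_HDR_LEN + data ][ data ... ] ...
 *     per-fragment data words = (len - TG3_FW_HDR_LEN) / sizeof(u32)
 */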
3657
3658 /* tp->lock is held. */
3659 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3660 u32 cpu_scratch_base, int cpu_scratch_size,
3661 const struct tg3_firmware_hdr *fw_hdr)
3662 {
3663 int err, i;
3664 void (*write_op)(struct tg3 *, u32, u32);
3665 int total_len = tp->fw->size;
3666
3667 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3668 netdev_err(tp->dev,
3669 "%s: Trying to load TX cpu firmware which is 5705\n",
3670 __func__);
3671 return -EINVAL;
3672 }
3673
3674 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3675 write_op = tg3_write_mem;
3676 else
3677 write_op = tg3_write_indirect_reg32;
3678
3679 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3680 /* It is possible that bootcode is still loading at this point.
3681 * Get the nvram lock first before halting the cpu.
3682 */
3683 int lock_err = tg3_nvram_lock(tp);
3684 err = tg3_halt_cpu(tp, cpu_base);
3685 if (!lock_err)
3686 tg3_nvram_unlock(tp);
3687 if (err)
3688 goto out;
3689
3690 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3691 write_op(tp, cpu_scratch_base + i, 0);
3692 tw32(cpu_base + CPU_STATE, 0xffffffff);
3693 tw32(cpu_base + CPU_MODE,
3694 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3695 } else {
3696 /* Subtract additional main header for fragmented firmware and
3697 * advance to the first fragment.
3698 */
3699 total_len -= TG3_FW_HDR_LEN;
3700 fw_hdr++;
3701 }
3702
3703 do {
3704 u32 *fw_data = (u32 *)(fw_hdr + 1);
3705 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3706 write_op(tp, cpu_scratch_base +
3707 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3708 (i * sizeof(u32)),
3709 be32_to_cpu(fw_data[i]));
3710
3711 total_len -= be32_to_cpu(fw_hdr->len);
3712
3713 /* Advance to next fragment */
3714 fw_hdr = (struct tg3_firmware_hdr *)
3715 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3716 } while (total_len > 0);
3717
3718 err = 0;
3719
3720 out:
3721 return err;
3722 }
3723
3724 /* tp->lock is held. */
3725 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3726 {
3727 int i;
3728 const int iters = 5;
3729
3730 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731 tw32_f(cpu_base + CPU_PC, pc);
3732
3733 for (i = 0; i < iters; i++) {
3734 if (tr32(cpu_base + CPU_PC) == pc)
3735 break;
3736 tw32(cpu_base + CPU_STATE, 0xffffffff);
3737 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3738 tw32_f(cpu_base + CPU_PC, pc);
3739 udelay(1000);
3740 }
3741
3742 return (i == iters) ? -EBUSY : 0;
3743 }
3744
3745 /* tp->lock is held. */
3746 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3747 {
3748 const struct tg3_firmware_hdr *fw_hdr;
3749 int err;
3750
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752
3753 /* Firmware blob starts with version numbers, followed by
3754 * start address and length. We are setting complete length:
3755 * length = end_address_of_bss - start_address_of_text.
3756 * The remainder is the blob to be loaded contiguously
3757 * from the start address. */
3758
3759 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3760 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3761 fw_hdr);
3762 if (err)
3763 return err;
3764
3765 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3766 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3767 fw_hdr);
3768 if (err)
3769 return err;
3770
3771 /* Now startup only the RX cpu. */
3772 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3773 be32_to_cpu(fw_hdr->base_addr));
3774 if (err) {
3775 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3776 "should be %08x\n", __func__,
3777 tr32(RX_CPU_BASE + CPU_PC),
3778 be32_to_cpu(fw_hdr->base_addr));
3779 return -ENODEV;
3780 }
3781
3782 tg3_rxcpu_resume(tp);
3783
3784 return 0;
3785 }
3786
3787 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3788 {
3789 const int iters = 1000;
3790 int i;
3791 u32 val;
3792
3793 /* Wait for boot code to complete initialization and enter service
3794 * loop. It is then safe to download service patches.
3795 */
3796 for (i = 0; i < iters; i++) {
3797 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3798 break;
3799
3800 udelay(10);
3801 }
3802
3803 if (i == iters) {
3804 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3805 return -EBUSY;
3806 }
3807
3808 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3809 if (val & 0xff) {
3810 netdev_warn(tp->dev,
3811 "Other patches exist. Not downloading EEE patch\n");
3812 return -EEXIST;
3813 }
3814
3815 return 0;
3816 }
3817
3818 /* tp->lock is held. */
3819 static void tg3_load_57766_firmware(struct tg3 *tp)
3820 {
3821 struct tg3_firmware_hdr *fw_hdr;
3822
3823 if (!tg3_flag(tp, NO_NVRAM))
3824 return;
3825
3826 if (tg3_validate_rxcpu_state(tp))
3827 return;
3828
3829 if (!tp->fw)
3830 return;
3831
3832 /* This firmware blob has a different format than older firmware
3833 * releases as given below. The main difference is we have fragmented
3834 * data to be written to non-contiguous locations.
3835 *
3836 * In the beginning we have a firmware header identical to other
3837 * firmware which consists of version, base addr and length. The length
3838 * here is unused and set to 0xffffffff.
3839 *
3840 * This is followed by a series of firmware fragments, each of which
3841 * is individually identical to the previous firmware format, i.e. a
3842 * firmware header followed by the data for that fragment. The version
3843 * field of each fragment header is unused.
3844 */
3845
3846 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3847 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3848 return;
3849
3850 if (tg3_rxcpu_pause(tp))
3851 return;
3852
3853 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3854 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3855
3856 tg3_rxcpu_resume(tp);
3857 }
3858
3859 /* tp->lock is held. */
3860 static int tg3_load_tso_firmware(struct tg3 *tp)
3861 {
3862 const struct tg3_firmware_hdr *fw_hdr;
3863 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3864 int err;
3865
3866 if (!tg3_flag(tp, FW_TSO))
3867 return 0;
3868
3869 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870
3871 /* Firmware blob starts with version numbers, followed by
3872 * start address and length. We are setting complete length:
3873 * length = end_address_of_bss - start_address_of_text.
3874 * The remainder is the blob to be loaded contiguously
3875 * from the start address. */
3876
3877 cpu_scratch_size = tp->fw_len;
3878
3879 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3880 cpu_base = RX_CPU_BASE;
3881 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3882 } else {
3883 cpu_base = TX_CPU_BASE;
3884 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3885 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3886 }
3887
3888 err = tg3_load_firmware_cpu(tp, cpu_base,
3889 cpu_scratch_base, cpu_scratch_size,
3890 fw_hdr);
3891 if (err)
3892 return err;
3893
3894 /* Now startup the cpu. */
3895 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3896 be32_to_cpu(fw_hdr->base_addr));
3897 if (err) {
3898 netdev_err(tp->dev,
3899 "%s fails to set CPU PC, is %08x should be %08x\n",
3900 __func__, tr32(cpu_base + CPU_PC),
3901 be32_to_cpu(fw_hdr->base_addr));
3902 return -ENODEV;
3903 }
3904
3905 tg3_resume_cpu(tp, cpu_base);
3906 return 0;
3907 }
3908
3909
3910 /* tp->lock is held. */
3911 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3912 {
3913 u32 addr_high, addr_low;
3914 int i;
3915
3916 addr_high = ((tp->dev->dev_addr[0] << 8) |
3917 tp->dev->dev_addr[1]);
3918 addr_low = ((tp->dev->dev_addr[2] << 24) |
3919 (tp->dev->dev_addr[3] << 16) |
3920 (tp->dev->dev_addr[4] << 8) |
3921 (tp->dev->dev_addr[5] << 0));
3922 for (i = 0; i < 4; i++) {
3923 if (i == 1 && skip_mac_1)
3924 continue;
3925 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3926 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3927 }
3928
3929 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3930 tg3_asic_rev(tp) == ASIC_REV_5704) {
3931 for (i = 0; i < 12; i++) {
3932 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3933 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3934 }
3935 }
3936
3937 addr_high = (tp->dev->dev_addr[0] +
3938 tp->dev->dev_addr[1] +
3939 tp->dev->dev_addr[2] +
3940 tp->dev->dev_addr[3] +
3941 tp->dev->dev_addr[4] +
3942 tp->dev->dev_addr[5]) &
3943 TX_BACKOFF_SEED_MASK;
3944 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3945 }
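/* A worked example of the packing above for the (arbitrary) MAC
 * address 00:10:18:aa:bb:cc:
 *
 *   addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *   addr_low  = (0x18 << 24) | (0xaa << 16) | (0xbb << 8) | 0xcc
 *             = 0x18aabbcc
 *
 * The same pair is written to all four MAC_ADDR_n slots, skipping
 * slot 1 when skip_mac_1 is set.
 */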
3946
3947 static void tg3_enable_register_access(struct tg3 *tp)
3948 {
3949 /*
3950 * Make sure register accesses (indirect or otherwise) will function
3951 * correctly.
3952 */
3953 pci_write_config_dword(tp->pdev,
3954 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3955 }
3956
3957 static int tg3_power_up(struct tg3 *tp)
3958 {
3959 int err;
3960
3961 tg3_enable_register_access(tp);
3962
3963 err = pci_set_power_state(tp->pdev, PCI_D0);
3964 if (!err) {
3965 /* Switch out of Vaux if it is a NIC */
3966 tg3_pwrsrc_switch_to_vmain(tp);
3967 } else {
3968 netdev_err(tp->dev, "Transition to D0 failed\n");
3969 }
3970
3971 return err;
3972 }
3973
3974 static int tg3_setup_phy(struct tg3 *, bool);
3975
3976 static int tg3_power_down_prepare(struct tg3 *tp)
3977 {
3978 u32 misc_host_ctrl;
3979 bool device_should_wake, do_low_power;
3980
3981 tg3_enable_register_access(tp);
3982
3983 /* Restore the CLKREQ setting. */
3984 if (tg3_flag(tp, CLKREQ_BUG))
3985 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3986 PCI_EXP_LNKCTL_CLKREQ_EN);
3987
3988 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3989 tw32(TG3PCI_MISC_HOST_CTRL,
3990 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3991
3992 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3993 tg3_flag(tp, WOL_ENABLE);
3994
3995 if (tg3_flag(tp, USE_PHYLIB)) {
3996 do_low_power = false;
3997 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3998 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3999 struct phy_device *phydev;
4000 u32 phyid, advertising;
4001
4002 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4003
4004 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4005
4006 tp->link_config.speed = phydev->speed;
4007 tp->link_config.duplex = phydev->duplex;
4008 tp->link_config.autoneg = phydev->autoneg;
4009 tp->link_config.advertising = phydev->advertising;
4010
4011 advertising = ADVERTISED_TP |
4012 ADVERTISED_Pause |
4013 ADVERTISED_Autoneg |
4014 ADVERTISED_10baseT_Half;
4015
4016 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4017 if (tg3_flag(tp, WOL_SPEED_100MB))
4018 advertising |=
4019 ADVERTISED_100baseT_Half |
4020 ADVERTISED_100baseT_Full |
4021 ADVERTISED_10baseT_Full;
4022 else
4023 advertising |= ADVERTISED_10baseT_Full;
4024 }
4025
4026 phydev->advertising = advertising;
4027
4028 phy_start_aneg(phydev);
4029
4030 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4031 if (phyid != PHY_ID_BCMAC131) {
4032 phyid &= PHY_BCM_OUI_MASK;
4033 if (phyid == PHY_BCM_OUI_1 ||
4034 phyid == PHY_BCM_OUI_2 ||
4035 phyid == PHY_BCM_OUI_3)
4036 do_low_power = true;
4037 }
4038 }
4039 } else {
4040 do_low_power = true;
4041
4042 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4043 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4044
4045 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4046 tg3_setup_phy(tp, false);
4047 }
4048
4049 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4050 u32 val;
4051
4052 val = tr32(GRC_VCPU_EXT_CTRL);
4053 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4054 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4055 int i;
4056 u32 val;
4057
4058 for (i = 0; i < 200; i++) {
4059 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4060 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4061 break;
4062 msleep(1);
4063 }
4064 }
4065 if (tg3_flag(tp, WOL_CAP))
4066 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4067 WOL_DRV_STATE_SHUTDOWN |
4068 WOL_DRV_WOL |
4069 WOL_SET_MAGIC_PKT);
4070
4071 if (device_should_wake) {
4072 u32 mac_mode;
4073
4074 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4075 if (do_low_power &&
4076 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4077 tg3_phy_auxctl_write(tp,
4078 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4079 MII_TG3_AUXCTL_PCTL_WOL_EN |
4080 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4081 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4082 udelay(40);
4083 }
4084
4085 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4086 mac_mode = MAC_MODE_PORT_MODE_GMII;
4087 else if (tp->phy_flags &
4088 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4089 if (tp->link_config.active_speed == SPEED_1000)
4090 mac_mode = MAC_MODE_PORT_MODE_GMII;
4091 else
4092 mac_mode = MAC_MODE_PORT_MODE_MII;
4093 } else
4094 mac_mode = MAC_MODE_PORT_MODE_MII;
4095
4096 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4097 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4098 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4099 SPEED_100 : SPEED_10;
4100 if (tg3_5700_link_polarity(tp, speed))
4101 mac_mode |= MAC_MODE_LINK_POLARITY;
4102 else
4103 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4104 }
4105 } else {
4106 mac_mode = MAC_MODE_PORT_MODE_TBI;
4107 }
4108
4109 if (!tg3_flag(tp, 5750_PLUS))
4110 tw32(MAC_LED_CTRL, tp->led_ctrl);
4111
4112 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4113 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4114 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4115 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4116
4117 if (tg3_flag(tp, ENABLE_APE))
4118 mac_mode |= MAC_MODE_APE_TX_EN |
4119 MAC_MODE_APE_RX_EN |
4120 MAC_MODE_TDE_ENABLE;
4121
4122 tw32_f(MAC_MODE, mac_mode);
4123 udelay(100);
4124
4125 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4126 udelay(10);
4127 }
4128
4129 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4130 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4131 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4132 u32 base_val;
4133
4134 base_val = tp->pci_clock_ctrl;
4135 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4136 CLOCK_CTRL_TXCLK_DISABLE);
4137
4138 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4139 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4140 } else if (tg3_flag(tp, 5780_CLASS) ||
4141 tg3_flag(tp, CPMU_PRESENT) ||
4142 tg3_asic_rev(tp) == ASIC_REV_5906) {
4143 /* do nothing */
4144 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4145 u32 newbits1, newbits2;
4146
4147 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4148 tg3_asic_rev(tp) == ASIC_REV_5701) {
4149 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4150 CLOCK_CTRL_TXCLK_DISABLE |
4151 CLOCK_CTRL_ALTCLK);
4152 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4153 } else if (tg3_flag(tp, 5705_PLUS)) {
4154 newbits1 = CLOCK_CTRL_625_CORE;
4155 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4156 } else {
4157 newbits1 = CLOCK_CTRL_ALTCLK;
4158 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4159 }
4160
4161 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4162 40);
4163
4164 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4165 40);
4166
4167 if (!tg3_flag(tp, 5705_PLUS)) {
4168 u32 newbits3;
4169
4170 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4171 tg3_asic_rev(tp) == ASIC_REV_5701) {
4172 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4173 CLOCK_CTRL_TXCLK_DISABLE |
4174 CLOCK_CTRL_44MHZ_CORE);
4175 } else {
4176 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4177 }
4178
4179 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4180 tp->pci_clock_ctrl | newbits3, 40);
4181 }
4182 }
4183
4184 if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4185 tg3_power_down_phy(tp, do_low_power);
4186
4187 tg3_frob_aux_power(tp, true);
4188
4189 /* Workaround for unstable PLL clock */
4190 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4191 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4192 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4193 u32 val = tr32(0x7d00);
4194
4195 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4196 tw32(0x7d00, val);
4197 if (!tg3_flag(tp, ENABLE_ASF)) {
4198 int err;
4199
4200 err = tg3_nvram_lock(tp);
4201 tg3_halt_cpu(tp, RX_CPU_BASE);
4202 if (!err)
4203 tg3_nvram_unlock(tp);
4204 }
4205 }
4206
4207 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4208
4209 return 0;
4210 }
4211
4212 static void tg3_power_down(struct tg3 *tp)
4213 {
4214 tg3_power_down_prepare(tp);
4215
4216 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4217 pci_set_power_state(tp->pdev, PCI_D3hot);
4218 }
4219
4220 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4221 {
4222 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4223 case MII_TG3_AUX_STAT_10HALF:
4224 *speed = SPEED_10;
4225 *duplex = DUPLEX_HALF;
4226 break;
4227
4228 case MII_TG3_AUX_STAT_10FULL:
4229 *speed = SPEED_10;
4230 *duplex = DUPLEX_FULL;
4231 break;
4232
4233 case MII_TG3_AUX_STAT_100HALF:
4234 *speed = SPEED_100;
4235 *duplex = DUPLEX_HALF;
4236 break;
4237
4238 case MII_TG3_AUX_STAT_100FULL:
4239 *speed = SPEED_100;
4240 *duplex = DUPLEX_FULL;
4241 break;
4242
4243 case MII_TG3_AUX_STAT_1000HALF:
4244 *speed = SPEED_1000;
4245 *duplex = DUPLEX_HALF;
4246 break;
4247
4248 case MII_TG3_AUX_STAT_1000FULL:
4249 *speed = SPEED_1000;
4250 *duplex = DUPLEX_FULL;
4251 break;
4252
4253 default:
4254 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4255 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4256 SPEED_10;
4257 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4258 DUPLEX_HALF;
4259 break;
4260 }
4261 *speed = SPEED_UNKNOWN;
4262 *duplex = DUPLEX_UNKNOWN;
4263 break;
4264 }
4265 }
4266
4267 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4268 {
4269 int err = 0;
4270 u32 val, new_adv;
4271
4272 new_adv = ADVERTISE_CSMA;
4273 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4274 new_adv |= mii_advertise_flowctrl(flowctrl);
4275
4276 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4277 if (err)
4278 goto done;
4279
4280 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4281 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4282
4283 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4284 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4285 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4286
4287 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4288 if (err)
4289 goto done;
4290 }
4291
4292 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4293 goto done;
4294
4295 tw32(TG3_CPMU_EEE_MODE,
4296 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4297
4298 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4299 if (!err) {
4300 u32 err2;
4301
4302 val = 0;
4303 /* Advertise 100-BaseTX EEE ability */
4304 if (advertise & ADVERTISED_100baseT_Full)
4305 val |= MDIO_AN_EEE_ADV_100TX;
4306 /* Advertise 1000-BaseT EEE ability */
4307 if (advertise & ADVERTISED_1000baseT_Full)
4308 val |= MDIO_AN_EEE_ADV_1000T;
4309 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4310 if (err)
4311 val = 0;
4312
4313 switch (tg3_asic_rev(tp)) {
4314 case ASIC_REV_5717:
4315 case ASIC_REV_57765:
4316 case ASIC_REV_57766:
4317 case ASIC_REV_5719:
4318 /* If we advertised any EEE abilities above... */
4319 if (val)
4320 val = MII_TG3_DSP_TAP26_ALNOKO |
4321 MII_TG3_DSP_TAP26_RMRXSTO |
4322 MII_TG3_DSP_TAP26_OPCSINPT;
4323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4324 /* Fall through */
4325 case ASIC_REV_5720:
4326 case ASIC_REV_5762:
4327 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4328 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4329 MII_TG3_DSP_CH34TP2_HIBW01);
4330 }
4331
4332 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4333 if (!err)
4334 err = err2;
4335 }
4336
4337 done:
4338 return err;
4339 }
4340
4341 static void tg3_phy_copper_begin(struct tg3 *tp)
4342 {
4343 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4344 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4345 u32 adv, fc;
4346
4347 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4348 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4349 adv = ADVERTISED_10baseT_Half |
4350 ADVERTISED_10baseT_Full;
4351 if (tg3_flag(tp, WOL_SPEED_100MB))
4352 adv |= ADVERTISED_100baseT_Half |
4353 ADVERTISED_100baseT_Full;
4354 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4355 adv |= ADVERTISED_1000baseT_Half |
4356 ADVERTISED_1000baseT_Full;
4357
4358 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4359 } else {
4360 adv = tp->link_config.advertising;
4361 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4362 adv &= ~(ADVERTISED_1000baseT_Half |
4363 ADVERTISED_1000baseT_Full);
4364
4365 fc = tp->link_config.flowctrl;
4366 }
4367
4368 tg3_phy_autoneg_cfg(tp, adv, fc);
4369
4370 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4371 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4372 /* Normally during power down we want to autonegotiate
4373 * the lowest possible speed for WOL. However, to avoid
4374 * link flap, we leave it untouched.
4375 */
4376 return;
4377 }
4378
4379 tg3_writephy(tp, MII_BMCR,
4380 BMCR_ANENABLE | BMCR_ANRESTART);
4381 } else {
4382 int i;
4383 u32 bmcr, orig_bmcr;
4384
4385 tp->link_config.active_speed = tp->link_config.speed;
4386 tp->link_config.active_duplex = tp->link_config.duplex;
4387
4388 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4389 /* With autoneg disabled, 5715 only links up when the
4390 * advertisement register has the configured speed
4391 * enabled.
4392 */
4393 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4394 }
4395
4396 bmcr = 0;
4397 switch (tp->link_config.speed) {
4398 default:
4399 case SPEED_10:
4400 break;
4401
4402 case SPEED_100:
4403 bmcr |= BMCR_SPEED100;
4404 break;
4405
4406 case SPEED_1000:
4407 bmcr |= BMCR_SPEED1000;
4408 break;
4409 }
4410
4411 if (tp->link_config.duplex == DUPLEX_FULL)
4412 bmcr |= BMCR_FULLDPLX;
4413
4414 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4415 (bmcr != orig_bmcr)) {
4416 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4417 for (i = 0; i < 1500; i++) {
4418 u32 tmp;
4419
4420 udelay(10);
4421 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4422 tg3_readphy(tp, MII_BMSR, &tmp))
4423 continue;
4424 if (!(tmp & BMSR_LSTATUS)) {
4425 udelay(40);
4426 break;
4427 }
4428 }
4429 tg3_writephy(tp, MII_BMCR, bmcr);
4430 udelay(40);
4431 }
4432 }
4433 }
4434
4435 static int tg3_phy_pull_config(struct tg3 *tp)
4436 {
4437 int err;
4438 u32 val;
4439
4440 err = tg3_readphy(tp, MII_BMCR, &val);
4441 if (err)
4442 goto done;
4443
4444 if (!(val & BMCR_ANENABLE)) {
4445 tp->link_config.autoneg = AUTONEG_DISABLE;
4446 tp->link_config.advertising = 0;
4447 tg3_flag_clear(tp, PAUSE_AUTONEG);
4448
4449 err = -EIO;
4450
4451 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4452 case 0:
4453 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4454 goto done;
4455
4456 tp->link_config.speed = SPEED_10;
4457 break;
4458 case BMCR_SPEED100:
4459 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4460 goto done;
4461
4462 tp->link_config.speed = SPEED_100;
4463 break;
4464 case BMCR_SPEED1000:
4465 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4466 tp->link_config.speed = SPEED_1000;
4467 break;
4468 }
4469 /* Fall through */
4470 default:
4471 goto done;
4472 }
4473
4474 if (val & BMCR_FULLDPLX)
4475 tp->link_config.duplex = DUPLEX_FULL;
4476 else
4477 tp->link_config.duplex = DUPLEX_HALF;
4478
4479 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4480
4481 err = 0;
4482 goto done;
4483 }
4484
4485 tp->link_config.autoneg = AUTONEG_ENABLE;
4486 tp->link_config.advertising = ADVERTISED_Autoneg;
4487 tg3_flag_set(tp, PAUSE_AUTONEG);
4488
4489 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4490 u32 adv;
4491
4492 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4493 if (err)
4494 goto done;
4495
4496 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4497 tp->link_config.advertising |= adv | ADVERTISED_TP;
4498
4499 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4500 } else {
4501 tp->link_config.advertising |= ADVERTISED_FIBRE;
4502 }
4503
4504 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4505 u32 adv;
4506
4507 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4508 err = tg3_readphy(tp, MII_CTRL1000, &val);
4509 if (err)
4510 goto done;
4511
4512 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4513 } else {
4514 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4515 if (err)
4516 goto done;
4517
4518 adv = tg3_decode_flowctrl_1000X(val);
4519 tp->link_config.flowctrl = adv;
4520
4521 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4522 adv = mii_adv_to_ethtool_adv_x(val);
4523 }
4524
4525 tp->link_config.advertising |= adv;
4526 }
4527
4528 done:
4529 return err;
4530 }
4531
4532 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4533 {
4534 int err;
4535
4536 /* Turn off tap power management. */
4537 /* Set Extended packet length bit */
4538 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4539
4540 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4541 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4542 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4543 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4544 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4545
4546 udelay(40);
4547
4548 return err;
4549 }
4550
4551 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4552 {
4553 u32 val;
4554 u32 tgtadv = 0;
4555 u32 advertising = tp->link_config.advertising;
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4558 return true;
4559
4560 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4561 return false;
4562
4563 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4564
4565
4566 if (advertising & ADVERTISED_100baseT_Full)
4567 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4568 if (advertising & ADVERTISED_1000baseT_Full)
4569 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4570
4571 if (val != tgtadv)
4572 return false;
4573
4574 return true;
4575 }
4576
4577 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4578 {
4579 u32 advmsk, tgtadv, advertising;
4580
4581 advertising = tp->link_config.advertising;
4582 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4583
4584 advmsk = ADVERTISE_ALL;
4585 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4586 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4587 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4588 }
4589
4590 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4591 return false;
4592
4593 if ((*lcladv & advmsk) != tgtadv)
4594 return false;
4595
4596 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4597 u32 tg3_ctrl;
4598
4599 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4600
4601 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4602 return false;
4603
4604 if (tgtadv &&
4605 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4606 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4607 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4608 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4609 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4610 } else {
4611 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4612 }
4613
4614 if (tg3_ctrl != tgtadv)
4615 return false;
4616 }
4617
4618 return true;
4619 }
4620
4621 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4622 {
4623 u32 lpeth = 0;
4624
4625 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4626 u32 val;
4627
4628 if (tg3_readphy(tp, MII_STAT1000, &val))
4629 return false;
4630
4631 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4632 }
4633
4634 if (tg3_readphy(tp, MII_LPA, rmtadv))
4635 return false;
4636
4637 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4638 tp->link_config.rmt_adv = lpeth;
4639
4640 return true;
4641 }
4642
4643 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4644 {
4645 if (curr_link_up != tp->link_up) {
4646 if (curr_link_up) {
4647 netif_carrier_on(tp->dev);
4648 } else {
4649 netif_carrier_off(tp->dev);
4650 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4651 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4652 }
4653
4654 tg3_link_report(tp);
4655 return true;
4656 }
4657
4658 return false;
4659 }
4660
4661 static void tg3_clear_mac_status(struct tg3 *tp)
4662 {
4663 tw32(MAC_EVENT, 0);
4664
4665 tw32_f(MAC_STATUS,
4666 MAC_STATUS_SYNC_CHANGED |
4667 MAC_STATUS_CFG_CHANGED |
4668 MAC_STATUS_MI_COMPLETION |
4669 MAC_STATUS_LNKSTATE_CHANGED);
4670 udelay(40);
4671 }
4672
4673 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4674 {
4675 bool current_link_up;
4676 u32 bmsr, val;
4677 u32 lcl_adv, rmt_adv;
4678 u16 current_speed;
4679 u8 current_duplex;
4680 int i, err;
4681
4682 tg3_clear_mac_status(tp);
4683
4684 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4685 tw32_f(MAC_MI_MODE,
4686 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4687 udelay(80);
4688 }
4689
4690 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4691
4692 /* Some third-party PHYs need to be reset on link going
4693 * down.
4694 */
4695 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4696 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4697 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4698 tp->link_up) {
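/* BMSR latches link-down events, so it is read twice: the first
 * read clears any latched state and the second reflects the
 * current link status.
 */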
4699 tg3_readphy(tp, MII_BMSR, &bmsr);
4700 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4701 !(bmsr & BMSR_LSTATUS))
4702 force_reset = true;
4703 }
4704 if (force_reset)
4705 tg3_phy_reset(tp);
4706
4707 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4708 tg3_readphy(tp, MII_BMSR, &bmsr);
4709 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4710 !tg3_flag(tp, INIT_COMPLETE))
4711 bmsr = 0;
4712
4713 if (!(bmsr & BMSR_LSTATUS)) {
4714 err = tg3_init_5401phy_dsp(tp);
4715 if (err)
4716 return err;
4717
4718 tg3_readphy(tp, MII_BMSR, &bmsr);
4719 for (i = 0; i < 1000; i++) {
4720 udelay(10);
4721 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4722 (bmsr & BMSR_LSTATUS)) {
4723 udelay(40);
4724 break;
4725 }
4726 }
4727
4728 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4729 TG3_PHY_REV_BCM5401_B0 &&
4730 !(bmsr & BMSR_LSTATUS) &&
4731 tp->link_config.active_speed == SPEED_1000) {
4732 err = tg3_phy_reset(tp);
4733 if (!err)
4734 err = tg3_init_5401phy_dsp(tp);
4735 if (err)
4736 return err;
4737 }
4738 }
4739 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4740 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4741 /* 5701 {A0,B0} CRC bug workaround */
4742 tg3_writephy(tp, 0x15, 0x0a75);
4743 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4744 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4745 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4746 }
4747
4748 /* Clear pending interrupts... */
4749 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4750 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4751
4752 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4753 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4754 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4755 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4756
4757 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4758 tg3_asic_rev(tp) == ASIC_REV_5701) {
4759 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4760 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4761 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4762 else
4763 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4764 }
4765
4766 current_link_up = false;
4767 current_speed = SPEED_UNKNOWN;
4768 current_duplex = DUPLEX_UNKNOWN;
4769 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4770 tp->link_config.rmt_adv = 0;
4771
4772 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4773 err = tg3_phy_auxctl_read(tp,
4774 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4775 &val);
4776 if (!err && !(val & (1 << 10))) {
4777 tg3_phy_auxctl_write(tp,
4778 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4779 val | (1 << 10));
4780 goto relink;
4781 }
4782 }
4783
4784 bmsr = 0;
4785 for (i = 0; i < 100; i++) {
4786 tg3_readphy(tp, MII_BMSR, &bmsr);
4787 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788 (bmsr & BMSR_LSTATUS))
4789 break;
4790 udelay(40);
4791 }
4792
4793 if (bmsr & BMSR_LSTATUS) {
4794 u32 aux_stat, bmcr;
4795
4796 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4797 for (i = 0; i < 2000; i++) {
4798 udelay(10);
4799 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4800 aux_stat)
4801 break;
4802 }
4803
4804 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4805 &current_speed,
4806 &current_duplex);
4807
4808 bmcr = 0;
4809 for (i = 0; i < 200; i++) {
4810 tg3_readphy(tp, MII_BMCR, &bmcr);
4811 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4812 continue;
4813 if (bmcr && bmcr != 0x7fff)
4814 break;
4815 udelay(10);
4816 }
4817
4818 lcl_adv = 0;
4819 rmt_adv = 0;
4820
4821 tp->link_config.active_speed = current_speed;
4822 tp->link_config.active_duplex = current_duplex;
4823
4824 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4825 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4826
4827 if ((bmcr & BMCR_ANENABLE) &&
4828 eee_config_ok &&
4829 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4830 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4831 current_link_up = true;
4832
4833 /* Changes to EEE settings take effect only after a PHY
4834 * reset. If we have skipped a reset due to Link Flap
4835 * Avoidance being enabled, do it now.
4836 */
4837 if (!eee_config_ok &&
4838 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4839 !force_reset)
4840 tg3_phy_reset(tp);
4841 } else {
4842 if (!(bmcr & BMCR_ANENABLE) &&
4843 tp->link_config.speed == current_speed &&
4844 tp->link_config.duplex == current_duplex) {
4845 current_link_up = true;
4846 }
4847 }
4848
4849 if (current_link_up &&
4850 tp->link_config.active_duplex == DUPLEX_FULL) {
4851 u32 reg, bit;
4852
4853 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4854 reg = MII_TG3_FET_GEN_STAT;
4855 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4856 } else {
4857 reg = MII_TG3_EXT_STAT;
4858 bit = MII_TG3_EXT_STAT_MDIX;
4859 }
4860
4861 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4862 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4863
4864 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4865 }
4866 }
4867
4868 relink:
4869 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4870 tg3_phy_copper_begin(tp);
4871
4872 if (tg3_flag(tp, ROBOSWITCH)) {
4873 current_link_up = true;
4874 /* FIXME: when a BCM5325 switch is in use, use 100 Mbit/s */
4875 current_speed = SPEED_1000;
4876 current_duplex = DUPLEX_FULL;
4877 tp->link_config.active_speed = current_speed;
4878 tp->link_config.active_duplex = current_duplex;
4879 }
4880
4881 tg3_readphy(tp, MII_BMSR, &bmsr);
4882 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4883 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4884 current_link_up = true;
4885 }
4886
4887 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4888 if (current_link_up) {
4889 if (tp->link_config.active_speed == SPEED_100 ||
4890 tp->link_config.active_speed == SPEED_10)
4891 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4892 else
4893 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4894 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4895 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4896 else
4897 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4898
4899 /* In order for the 5750 core in the BCM4785 chip to work properly
4900 * in RGMII mode, the Led Control Register must be set up.
4901 */
4902 if (tg3_flag(tp, RGMII_MODE)) {
4903 u32 led_ctrl = tr32(MAC_LED_CTRL);
4904 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4905
4906 if (tp->link_config.active_speed == SPEED_10)
4907 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4908 else if (tp->link_config.active_speed == SPEED_100)
4909 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4910 LED_CTRL_100MBPS_ON);
4911 else if (tp->link_config.active_speed == SPEED_1000)
4912 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4913 LED_CTRL_1000MBPS_ON);
4914
4915 tw32(MAC_LED_CTRL, led_ctrl);
4916 udelay(40);
4917 }
4918
4919 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4920 if (tp->link_config.active_duplex == DUPLEX_HALF)
4921 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4922
4923 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4924 if (current_link_up &&
4925 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4926 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4927 else
4928 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4929 }
4930
4931 /* ??? Without this setting Netgear GA302T PHY does not
4932 * ??? send/receive packets...
4933 */
4934 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4935 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4936 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4937 tw32_f(MAC_MI_MODE, tp->mi_mode);
4938 udelay(80);
4939 }
4940
4941 tw32_f(MAC_MODE, tp->mac_mode);
4942 udelay(40);
4943
4944 tg3_phy_eee_adjust(tp, current_link_up);
4945
4946 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4947 /* Polled via timer. */
4948 tw32_f(MAC_EVENT, 0);
4949 } else {
4950 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4951 }
4952 udelay(40);
4953
4954 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4955 current_link_up &&
4956 tp->link_config.active_speed == SPEED_1000 &&
4957 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4958 udelay(120);
4959 tw32_f(MAC_STATUS,
4960 (MAC_STATUS_SYNC_CHANGED |
4961 MAC_STATUS_CFG_CHANGED));
4962 udelay(40);
4963 tg3_write_mem(tp,
4964 NIC_SRAM_FIRMWARE_MBOX,
4965 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4966 }
4967
4968 /* Prevent send BD corruption. */
4969 if (tg3_flag(tp, CLKREQ_BUG)) {
4970 if (tp->link_config.active_speed == SPEED_100 ||
4971 tp->link_config.active_speed == SPEED_10)
4972 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4973 PCI_EXP_LNKCTL_CLKREQ_EN);
4974 else
4975 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4976 PCI_EXP_LNKCTL_CLKREQ_EN);
4977 }
4978
4979 tg3_test_and_report_link_chg(tp, current_link_up);
4980
4981 return 0;
4982 }
4983
4984 struct tg3_fiber_aneginfo {
4985 int state;
4986 #define ANEG_STATE_UNKNOWN 0
4987 #define ANEG_STATE_AN_ENABLE 1
4988 #define ANEG_STATE_RESTART_INIT 2
4989 #define ANEG_STATE_RESTART 3
4990 #define ANEG_STATE_DISABLE_LINK_OK 4
4991 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4992 #define ANEG_STATE_ABILITY_DETECT 6
4993 #define ANEG_STATE_ACK_DETECT_INIT 7
4994 #define ANEG_STATE_ACK_DETECT 8
4995 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4996 #define ANEG_STATE_COMPLETE_ACK 10
4997 #define ANEG_STATE_IDLE_DETECT_INIT 11
4998 #define ANEG_STATE_IDLE_DETECT 12
4999 #define ANEG_STATE_LINK_OK 13
5000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5001 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5002
5003 u32 flags;
5004 #define MR_AN_ENABLE 0x00000001
5005 #define MR_RESTART_AN 0x00000002
5006 #define MR_AN_COMPLETE 0x00000004
5007 #define MR_PAGE_RX 0x00000008
5008 #define MR_NP_LOADED 0x00000010
5009 #define MR_TOGGLE_TX 0x00000020
5010 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5011 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5012 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5013 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5016 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5017 #define MR_TOGGLE_RX 0x00002000
5018 #define MR_NP_RX 0x00004000
5019
5020 #define MR_LINK_OK 0x80000000
5021
5022 unsigned long link_time, cur_time;
5023
5024 u32 ability_match_cfg;
5025 int ability_match_count;
5026
5027 char ability_match, idle_match, ack_match;
5028
5029 u32 txconfig, rxconfig;
5030 #define ANEG_CFG_NP 0x00000080
5031 #define ANEG_CFG_ACK 0x00000040
5032 #define ANEG_CFG_RF2 0x00000020
5033 #define ANEG_CFG_RF1 0x00000010
5034 #define ANEG_CFG_PS2 0x00000001
5035 #define ANEG_CFG_PS1 0x00008000
5036 #define ANEG_CFG_HD 0x00004000
5037 #define ANEG_CFG_FD 0x00002000
5038 #define ANEG_CFG_INVAL 0x00001f06
5039
5040 };
5041 #define ANEG_OK 0
5042 #define ANEG_DONE 1
5043 #define ANEG_TIMER_ENAB 2
5044 #define ANEG_FAILED -1
5045
5046 #define ANEG_STATE_SETTLE_TIME 10000
5047
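/* Happy-path flow of the software 1000BASE-X autoneg state machine
 * below, summarized from the switch statement that follows:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * The caller ticks the machine repeatedly; ANEG_TIMER_ENAB requests
 * another tick, while ANEG_DONE and ANEG_FAILED are terminal.
 */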
5048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5049 struct tg3_fiber_aneginfo *ap)
5050 {
5051 u16 flowctrl;
5052 unsigned long delta;
5053 u32 rx_cfg_reg;
5054 int ret;
5055
5056 if (ap->state == ANEG_STATE_UNKNOWN) {
5057 ap->rxconfig = 0;
5058 ap->link_time = 0;
5059 ap->cur_time = 0;
5060 ap->ability_match_cfg = 0;
5061 ap->ability_match_count = 0;
5062 ap->ability_match = 0;
5063 ap->idle_match = 0;
5064 ap->ack_match = 0;
5065 }
5066 ap->cur_time++;
5067
5068 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5069 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5070
5071 if (rx_cfg_reg != ap->ability_match_cfg) {
5072 ap->ability_match_cfg = rx_cfg_reg;
5073 ap->ability_match = 0;
5074 ap->ability_match_count = 0;
5075 } else {
5076 if (++ap->ability_match_count > 1) {
5077 ap->ability_match = 1;
5078 ap->ability_match_cfg = rx_cfg_reg;
5079 }
5080 }
5081 if (rx_cfg_reg & ANEG_CFG_ACK)
5082 ap->ack_match = 1;
5083 else
5084 ap->ack_match = 0;
5085
5086 ap->idle_match = 0;
5087 } else {
5088 ap->idle_match = 1;
5089 ap->ability_match_cfg = 0;
5090 ap->ability_match_count = 0;
5091 ap->ability_match = 0;
5092 ap->ack_match = 0;
5093
5094 rx_cfg_reg = 0;
5095 }
5096
5097 ap->rxconfig = rx_cfg_reg;
5098 ret = ANEG_OK;
5099
5100 switch (ap->state) {
5101 case ANEG_STATE_UNKNOWN:
5102 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5103 ap->state = ANEG_STATE_AN_ENABLE;
5104
5105 /* fallthru */
5106 case ANEG_STATE_AN_ENABLE:
5107 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5108 if (ap->flags & MR_AN_ENABLE) {
5109 ap->link_time = 0;
5110 ap->cur_time = 0;
5111 ap->ability_match_cfg = 0;
5112 ap->ability_match_count = 0;
5113 ap->ability_match = 0;
5114 ap->idle_match = 0;
5115 ap->ack_match = 0;
5116
5117 ap->state = ANEG_STATE_RESTART_INIT;
5118 } else {
5119 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5120 }
5121 break;
5122
5123 case ANEG_STATE_RESTART_INIT:
5124 ap->link_time = ap->cur_time;
5125 ap->flags &= ~(MR_NP_LOADED);
5126 ap->txconfig = 0;
5127 tw32(MAC_TX_AUTO_NEG, 0);
5128 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5129 tw32_f(MAC_MODE, tp->mac_mode);
5130 udelay(40);
5131
5132 ret = ANEG_TIMER_ENAB;
5133 ap->state = ANEG_STATE_RESTART;
5134
5135 /* fallthru */
5136 case ANEG_STATE_RESTART:
5137 delta = ap->cur_time - ap->link_time;
5138 if (delta > ANEG_STATE_SETTLE_TIME)
5139 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5140 else
5141 ret = ANEG_TIMER_ENAB;
5142 break;
5143
5144 case ANEG_STATE_DISABLE_LINK_OK:
5145 ret = ANEG_DONE;
5146 break;
5147
5148 case ANEG_STATE_ABILITY_DETECT_INIT:
5149 ap->flags &= ~(MR_TOGGLE_TX);
5150 ap->txconfig = ANEG_CFG_FD;
5151 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5152 if (flowctrl & ADVERTISE_1000XPAUSE)
5153 ap->txconfig |= ANEG_CFG_PS1;
5154 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5155 ap->txconfig |= ANEG_CFG_PS2;
5156 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5157 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5158 tw32_f(MAC_MODE, tp->mac_mode);
5159 udelay(40);
5160
5161 ap->state = ANEG_STATE_ABILITY_DETECT;
5162 break;
5163
5164 case ANEG_STATE_ABILITY_DETECT:
5165 if (ap->ability_match != 0 && ap->rxconfig != 0)
5166 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5167 break;
5168
5169 case ANEG_STATE_ACK_DETECT_INIT:
5170 ap->txconfig |= ANEG_CFG_ACK;
5171 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5172 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5173 tw32_f(MAC_MODE, tp->mac_mode);
5174 udelay(40);
5175
5176 ap->state = ANEG_STATE_ACK_DETECT;
5177
5178 /* fallthru */
5179 case ANEG_STATE_ACK_DETECT:
5180 if (ap->ack_match != 0) {
5181 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5182 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5183 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5184 } else {
5185 ap->state = ANEG_STATE_AN_ENABLE;
5186 }
5187 } else if (ap->ability_match != 0 &&
5188 ap->rxconfig == 0) {
5189 ap->state = ANEG_STATE_AN_ENABLE;
5190 }
5191 break;
5192
5193 case ANEG_STATE_COMPLETE_ACK_INIT:
5194 if (ap->rxconfig & ANEG_CFG_INVAL) {
5195 ret = ANEG_FAILED;
5196 break;
5197 }
5198 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5199 MR_LP_ADV_HALF_DUPLEX |
5200 MR_LP_ADV_SYM_PAUSE |
5201 MR_LP_ADV_ASYM_PAUSE |
5202 MR_LP_ADV_REMOTE_FAULT1 |
5203 MR_LP_ADV_REMOTE_FAULT2 |
5204 MR_LP_ADV_NEXT_PAGE |
5205 MR_TOGGLE_RX |
5206 MR_NP_RX);
5207 if (ap->rxconfig & ANEG_CFG_FD)
5208 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5209 if (ap->rxconfig & ANEG_CFG_HD)
5210 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5211 if (ap->rxconfig & ANEG_CFG_PS1)
5212 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5213 if (ap->rxconfig & ANEG_CFG_PS2)
5214 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5215 if (ap->rxconfig & ANEG_CFG_RF1)
5216 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5217 if (ap->rxconfig & ANEG_CFG_RF2)
5218 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5219 if (ap->rxconfig & ANEG_CFG_NP)
5220 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5221
5222 ap->link_time = ap->cur_time;
5223
5224 ap->flags ^= (MR_TOGGLE_TX);
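/* 0x0008 has no ANEG_CFG_* name; it appears to be the link
 * partner's Toggle bit in the received config word.
 */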
5225 if (ap->rxconfig & 0x0008)
5226 ap->flags |= MR_TOGGLE_RX;
5227 if (ap->rxconfig & ANEG_CFG_NP)
5228 ap->flags |= MR_NP_RX;
5229 ap->flags |= MR_PAGE_RX;
5230
5231 ap->state = ANEG_STATE_COMPLETE_ACK;
5232 ret = ANEG_TIMER_ENAB;
5233 break;
5234
5235 case ANEG_STATE_COMPLETE_ACK:
5236 if (ap->ability_match != 0 &&
5237 ap->rxconfig == 0) {
5238 ap->state = ANEG_STATE_AN_ENABLE;
5239 break;
5240 }
5241 delta = ap->cur_time - ap->link_time;
5242 if (delta > ANEG_STATE_SETTLE_TIME) {
5243 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5244 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5245 } else {
5246 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5247 !(ap->flags & MR_NP_RX)) {
5248 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5249 } else {
5250 ret = ANEG_FAILED;
5251 }
5252 }
5253 }
5254 break;
5255
5256 case ANEG_STATE_IDLE_DETECT_INIT:
5257 ap->link_time = ap->cur_time;
5258 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5259 tw32_f(MAC_MODE, tp->mac_mode);
5260 udelay(40);
5261
5262 ap->state = ANEG_STATE_IDLE_DETECT;
5263 ret = ANEG_TIMER_ENAB;
5264 break;
5265
5266 case ANEG_STATE_IDLE_DETECT:
5267 if (ap->ability_match != 0 &&
5268 ap->rxconfig == 0) {
5269 ap->state = ANEG_STATE_AN_ENABLE;
5270 break;
5271 }
5272 delta = ap->cur_time - ap->link_time;
5273 if (delta > ANEG_STATE_SETTLE_TIME) {
5274 /* XXX another gem from the Broadcom driver :( */
5275 ap->state = ANEG_STATE_LINK_OK;
5276 }
5277 break;
5278
5279 case ANEG_STATE_LINK_OK:
5280 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5281 ret = ANEG_DONE;
5282 break;
5283
5284 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5285 /* ??? unimplemented */
5286 break;
5287
5288 case ANEG_STATE_NEXT_PAGE_WAIT:
5289 /* ??? unimplemented */
5290 break;
5291
5292 default:
5293 ret = ANEG_FAILED;
5294 break;
5295 }
5296
5297 return ret;
5298 }
5299
5300 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5301 {
5302 int res = 0;
5303 struct tg3_fiber_aneginfo aninfo;
5304 int status = ANEG_FAILED;
5305 unsigned int tick;
5306 u32 tmp;
5307
5308 tw32_f(MAC_TX_AUTO_NEG, 0);
5309
5310 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5311 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5312 udelay(40);
5313
5314 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5315 udelay(40);
5316
5317 memset(&aninfo, 0, sizeof(aninfo));
5318 aninfo.flags |= MR_AN_ENABLE;
5319 aninfo.state = ANEG_STATE_UNKNOWN;
5320 aninfo.cur_time = 0;
5321 tick = 0;
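/* Each pass below delays ~1 us, bounding autoneg at roughly 195 ms. */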
5322 while (++tick < 195000) {
5323 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5324 if (status == ANEG_DONE || status == ANEG_FAILED)
5325 break;
5326
5327 udelay(1);
5328 }
5329
5330 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331 tw32_f(MAC_MODE, tp->mac_mode);
5332 udelay(40);
5333
5334 *txflags = aninfo.txconfig;
5335 *rxflags = aninfo.flags;
5336
5337 if (status == ANEG_DONE &&
5338 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5339 MR_LP_ADV_FULL_DUPLEX)))
5340 res = 1;
5341
5342 return res;
5343 }
5344
5345 static void tg3_init_bcm8002(struct tg3 *tp)
5346 {
5347 u32 mac_status = tr32(MAC_STATUS);
5348 int i;
5349
5350 /* Reset when initializing for the first time, or when we have a link. */
5351 if (tg3_flag(tp, INIT_COMPLETE) &&
5352 !(mac_status & MAC_STATUS_PCS_SYNCED))
5353 return;
5354
5355 /* Set PLL lock range. */
5356 tg3_writephy(tp, 0x16, 0x8007);
5357
5358 /* SW reset */
5359 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5360
5361 /* Wait for reset to complete. */
5362 /* XXX schedule_timeout() ... */
5363 for (i = 0; i < 500; i++)
5364 udelay(10);
5365
5366 /* Config mode; select PMA/Ch 1 regs. */
5367 tg3_writephy(tp, 0x10, 0x8411);
5368
5369 /* Enable auto-lock and comdet, select txclk for tx. */
5370 tg3_writephy(tp, 0x11, 0x0a10);
5371
5372 tg3_writephy(tp, 0x18, 0x00a0);
5373 tg3_writephy(tp, 0x16, 0x41ff);
5374
5375 /* Assert and deassert POR. */
5376 tg3_writephy(tp, 0x13, 0x0400);
5377 udelay(40);
5378 tg3_writephy(tp, 0x13, 0x0000);
5379
5380 tg3_writephy(tp, 0x11, 0x0a50);
5381 udelay(40);
5382 tg3_writephy(tp, 0x11, 0x0a10);
5383
5384 /* Wait for signal to stabilize */
5385 /* XXX schedule_timeout() ... */
5386 for (i = 0; i < 15000; i++)
5387 udelay(10);
5388
5389 /* Deselect the channel register so we can read the PHYID
5390 * later.
5391 */
5392 tg3_writephy(tp, 0x10, 0x8011);
5393 }
5394
5395 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5396 {
5397 u16 flowctrl;
5398 bool current_link_up;
5399 u32 sg_dig_ctrl, sg_dig_status;
5400 u32 serdes_cfg, expected_sg_dig_ctrl;
5401 int workaround, port_a;
5402
5403 serdes_cfg = 0;
5404 expected_sg_dig_ctrl = 0;
5405 workaround = 0;
5406 port_a = 1;
5407 current_link_up = false;
5408
5409 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5410 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5411 workaround = 1;
5412 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5413 port_a = 0;
5414
5415 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5416 /* preserve bits 20-23 for voltage regulator */
5417 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5418 }
5419
5420 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5421
5422 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5423 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5424 if (workaround) {
5425 u32 val = serdes_cfg;
5426
5427 if (port_a)
5428 val |= 0xc010000;
5429 else
5430 val |= 0x4010000;
5431 tw32_f(MAC_SERDES_CFG, val);
5432 }
5433
5434 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5435 }
5436 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5437 tg3_setup_flow_control(tp, 0, 0);
5438 current_link_up = true;
5439 }
5440 goto out;
5441 }
5442
5443 /* Want auto-negotiation. */
5444 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5445
5446 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5447 if (flowctrl & ADVERTISE_1000XPAUSE)
5448 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5449 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5450 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5451
5452 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5453 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5454 tp->serdes_counter &&
5455 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5456 MAC_STATUS_RCVD_CFG)) ==
5457 MAC_STATUS_PCS_SYNCED)) {
5458 tp->serdes_counter--;
5459 current_link_up = true;
5460 goto out;
5461 }
5462 restart_autoneg:
5463 if (workaround)
5464 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5465 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5466 udelay(5);
5467 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5468
5469 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5471 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5472 MAC_STATUS_SIGNAL_DET)) {
5473 sg_dig_status = tr32(SG_DIG_STATUS);
5474 mac_status = tr32(MAC_STATUS);
5475
5476 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5477 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5478 u32 local_adv = 0, remote_adv = 0;
5479
5480 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5481 local_adv |= ADVERTISE_1000XPAUSE;
5482 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5483 local_adv |= ADVERTISE_1000XPSE_ASYM;
5484
5485 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5486 remote_adv |= LPA_1000XPAUSE;
5487 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5488 remote_adv |= LPA_1000XPAUSE_ASYM;
5489
5490 tp->link_config.rmt_adv =
5491 mii_adv_to_ethtool_adv_x(remote_adv);
5492
5493 tg3_setup_flow_control(tp, local_adv, remote_adv);
5494 current_link_up = true;
5495 tp->serdes_counter = 0;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5498 if (tp->serdes_counter)
5499 tp->serdes_counter--;
5500 else {
5501 if (workaround) {
5502 u32 val = serdes_cfg;
5503
5504 if (port_a)
5505 val |= 0xc010000;
5506 else
5507 val |= 0x4010000;
5508
5509 tw32_f(MAC_SERDES_CFG, val);
5510 }
5511
5512 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5513 udelay(40);
5514
5515 /* Link parallel detection: link is up only if we
5516 * have PCS_SYNC and are not receiving config
5517 * code words. */
5518 mac_status = tr32(MAC_STATUS);
5519 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5520 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5521 tg3_setup_flow_control(tp, 0, 0);
5522 current_link_up = true;
5523 tp->phy_flags |=
5524 TG3_PHYFLG_PARALLEL_DETECT;
5525 tp->serdes_counter =
5526 SERDES_PARALLEL_DET_TIMEOUT;
5527 } else
5528 goto restart_autoneg;
5529 }
5530 }
5531 } else {
5532 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5533 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5534 }
5535
5536 out:
5537 return current_link_up;
5538 }
5539
5540 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5541 {
5542 bool current_link_up = false;
5543
5544 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5545 goto out;
5546
5547 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5548 u32 txflags, rxflags;
5549 int i;
5550
5551 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5552 u32 local_adv = 0, remote_adv = 0;
5553
5554 if (txflags & ANEG_CFG_PS1)
5555 local_adv |= ADVERTISE_1000XPAUSE;
5556 if (txflags & ANEG_CFG_PS2)
5557 local_adv |= ADVERTISE_1000XPSE_ASYM;
5558
5559 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5560 remote_adv |= LPA_1000XPAUSE;
5561 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5562 remote_adv |= LPA_1000XPAUSE_ASYM;
5563
5564 tp->link_config.rmt_adv =
5565 mii_adv_to_ethtool_adv_x(remote_adv);
5566
5567 tg3_setup_flow_control(tp, local_adv, remote_adv);
5568
5569 current_link_up = true;
5570 }
5571 for (i = 0; i < 30; i++) {
5572 udelay(20);
5573 tw32_f(MAC_STATUS,
5574 (MAC_STATUS_SYNC_CHANGED |
5575 MAC_STATUS_CFG_CHANGED));
5576 udelay(40);
5577 if ((tr32(MAC_STATUS) &
5578 (MAC_STATUS_SYNC_CHANGED |
5579 MAC_STATUS_CFG_CHANGED)) == 0)
5580 break;
5581 }
5582
5583 mac_status = tr32(MAC_STATUS);
5584 if (!current_link_up &&
5585 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5586 !(mac_status & MAC_STATUS_RCVD_CFG))
5587 current_link_up = true;
5588 } else {
5589 tg3_setup_flow_control(tp, 0, 0);
5590
5591 /* Forcing 1000FD link up. */
5592 current_link_up = true;
5593
5594 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5595 udelay(40);
5596
5597 tw32_f(MAC_MODE, tp->mac_mode);
5598 udelay(40);
5599 }
5600
5601 out:
5602 return current_link_up;
5603 }
5604
5605 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5606 {
5607 u32 orig_pause_cfg;
5608 u16 orig_active_speed;
5609 u8 orig_active_duplex;
5610 u32 mac_status;
5611 bool current_link_up;
5612 int i;
5613
5614 orig_pause_cfg = tp->link_config.active_flowctrl;
5615 orig_active_speed = tp->link_config.active_speed;
5616 orig_active_duplex = tp->link_config.active_duplex;
5617
5618 if (!tg3_flag(tp, HW_AUTONEG) &&
5619 tp->link_up &&
5620 tg3_flag(tp, INIT_COMPLETE)) {
5621 mac_status = tr32(MAC_STATUS);
5622 mac_status &= (MAC_STATUS_PCS_SYNCED |
5623 MAC_STATUS_SIGNAL_DET |
5624 MAC_STATUS_CFG_CHANGED |
5625 MAC_STATUS_RCVD_CFG);
5626 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5627 MAC_STATUS_SIGNAL_DET)) {
5628 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5629 MAC_STATUS_CFG_CHANGED));
5630 return 0;
5631 }
5632 }
5633
5634 tw32_f(MAC_TX_AUTO_NEG, 0);
5635
5636 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5637 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5638 tw32_f(MAC_MODE, tp->mac_mode);
5639 udelay(40);
5640
5641 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5642 tg3_init_bcm8002(tp);
5643
5644 /* Enable link change event even when serdes polling. */
5645 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5646 udelay(40);
5647
5648 current_link_up = false;
5649 tp->link_config.rmt_adv = 0;
5650 mac_status = tr32(MAC_STATUS);
5651
5652 if (tg3_flag(tp, HW_AUTONEG))
5653 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5654 else
5655 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5656
5657 tp->napi[0].hw_status->status =
5658 (SD_STATUS_UPDATED |
5659 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5660
5661 for (i = 0; i < 100; i++) {
5662 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5663 MAC_STATUS_CFG_CHANGED));
5664 udelay(5);
5665 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED |
5667 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5668 break;
5669 }
5670
5671 mac_status = tr32(MAC_STATUS);
5672 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5673 current_link_up = false;
5674 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5675 tp->serdes_counter == 0) {
5676 tw32_f(MAC_MODE, (tp->mac_mode |
5677 MAC_MODE_SEND_CONFIGS));
5678 udelay(1);
5679 tw32_f(MAC_MODE, tp->mac_mode);
5680 }
5681 }
5682
5683 if (current_link_up) {
5684 tp->link_config.active_speed = SPEED_1000;
5685 tp->link_config.active_duplex = DUPLEX_FULL;
5686 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5687 LED_CTRL_LNKLED_OVERRIDE |
5688 LED_CTRL_1000MBPS_ON));
5689 } else {
5690 tp->link_config.active_speed = SPEED_UNKNOWN;
5691 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5692 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5693 LED_CTRL_LNKLED_OVERRIDE |
5694 LED_CTRL_TRAFFIC_OVERRIDE));
5695 }
5696
5697 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5698 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5699 if (orig_pause_cfg != now_pause_cfg ||
5700 orig_active_speed != tp->link_config.active_speed ||
5701 orig_active_duplex != tp->link_config.active_duplex)
5702 tg3_link_report(tp);
5703 }
5704
5705 return 0;
5706 }
5707
5708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5709 {
5710 int err = 0;
5711 u32 bmsr, bmcr;
5712 u16 current_speed = SPEED_UNKNOWN;
5713 u8 current_duplex = DUPLEX_UNKNOWN;
5714 bool current_link_up = false;
5715 u32 local_adv, remote_adv, sgsr;
5716
5717 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5718 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5719 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5720 (sgsr & SERDES_TG3_SGMII_MODE)) {
5721
5722 if (force_reset)
5723 tg3_phy_reset(tp);
5724
5725 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5726
5727 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5729 } else {
5730 current_link_up = true;
5731 if (sgsr & SERDES_TG3_SPEED_1000) {
5732 current_speed = SPEED_1000;
5733 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5734 } else if (sgsr & SERDES_TG3_SPEED_100) {
5735 current_speed = SPEED_100;
5736 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5737 } else {
5738 current_speed = SPEED_10;
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5740 }
5741
5742 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5743 current_duplex = DUPLEX_FULL;
5744 else
5745 current_duplex = DUPLEX_HALF;
5746 }
5747
5748 tw32_f(MAC_MODE, tp->mac_mode);
5749 udelay(40);
5750
5751 tg3_clear_mac_status(tp);
5752
5753 goto fiber_setup_done;
5754 }
5755
5756 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5757 tw32_f(MAC_MODE, tp->mac_mode);
5758 udelay(40);
5759
5760 tg3_clear_mac_status(tp);
5761
5762 if (force_reset)
5763 tg3_phy_reset(tp);
5764
5765 tp->link_config.rmt_adv = 0;
5766
5767 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5768 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5769 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5770 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5771 bmsr |= BMSR_LSTATUS;
5772 else
5773 bmsr &= ~BMSR_LSTATUS;
5774 }
5775
5776 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5777
5778 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5779 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5780 /* do nothing, just check for link up at the end */
5781 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5782 u32 adv, newadv;
5783
5784 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5785 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5786 ADVERTISE_1000XPAUSE |
5787 ADVERTISE_1000XPSE_ASYM |
5788 ADVERTISE_SLCT);
5789
5790 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5791 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5792
5793 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5794 tg3_writephy(tp, MII_ADVERTISE, newadv);
5795 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5796 tg3_writephy(tp, MII_BMCR, bmcr);
5797
5798 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5799 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5800 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5801
5802 return err;
5803 }
5804 } else {
5805 u32 new_bmcr;
5806
5807 bmcr &= ~BMCR_SPEED1000;
5808 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5809
5810 if (tp->link_config.duplex == DUPLEX_FULL)
5811 new_bmcr |= BMCR_FULLDPLX;
5812
5813 if (new_bmcr != bmcr) {
5814 /* BMCR_SPEED1000 is a reserved bit that needs
5815 * to be set on write.
5816 */
5817 new_bmcr |= BMCR_SPEED1000;
5818
5819 /* Force a linkdown */
5820 if (tp->link_up) {
5821 u32 adv;
5822
5823 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5824 adv &= ~(ADVERTISE_1000XFULL |
5825 ADVERTISE_1000XHALF |
5826 ADVERTISE_SLCT);
5827 tg3_writephy(tp, MII_ADVERTISE, adv);
5828 tg3_writephy(tp, MII_BMCR, bmcr |
5829 BMCR_ANRESTART |
5830 BMCR_ANENABLE);
5831 udelay(10);
5832 tg3_carrier_off(tp);
5833 }
5834 tg3_writephy(tp, MII_BMCR, new_bmcr);
5835 bmcr = new_bmcr;
5836 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5837 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5838 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5839 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5840 bmsr |= BMSR_LSTATUS;
5841 else
5842 bmsr &= ~BMSR_LSTATUS;
5843 }
5844 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5845 }
5846 }
5847
5848 if (bmsr & BMSR_LSTATUS) {
5849 current_speed = SPEED_1000;
5850 current_link_up = true;
5851 if (bmcr & BMCR_FULLDPLX)
5852 current_duplex = DUPLEX_FULL;
5853 else
5854 current_duplex = DUPLEX_HALF;
5855
5856 local_adv = 0;
5857 remote_adv = 0;
5858
5859 if (bmcr & BMCR_ANENABLE) {
5860 u32 common;
5861
5862 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5863 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5864 common = local_adv & remote_adv;
5865 if (common & (ADVERTISE_1000XHALF |
5866 ADVERTISE_1000XFULL)) {
5867 if (common & ADVERTISE_1000XFULL)
5868 current_duplex = DUPLEX_FULL;
5869 else
5870 current_duplex = DUPLEX_HALF;
5871
5872 tp->link_config.rmt_adv =
5873 mii_adv_to_ethtool_adv_x(remote_adv);
5874 } else if (!tg3_flag(tp, 5780_CLASS)) {
5875 /* Link is up via parallel detect */
5876 } else {
5877 current_link_up = false;
5878 }
5879 }
5880 }
5881
5882 fiber_setup_done:
5883 if (current_link_up && current_duplex == DUPLEX_FULL)
5884 tg3_setup_flow_control(tp, local_adv, remote_adv);
5885
5886 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5887 if (tp->link_config.active_duplex == DUPLEX_HALF)
5888 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5889
5890 tw32_f(MAC_MODE, tp->mac_mode);
5891 udelay(40);
5892
5893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894
5895 tp->link_config.active_speed = current_speed;
5896 tp->link_config.active_duplex = current_duplex;
5897
5898 tg3_test_and_report_link_chg(tp, current_link_up);
5899 return err;
5900 }
5901
5902 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5903 {
5904 if (tp->serdes_counter) {
5905 /* Give autoneg time to complete. */
5906 tp->serdes_counter--;
5907 return;
5908 }
5909
5910 if (!tp->link_up &&
5911 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5912 u32 bmcr;
5913
5914 tg3_readphy(tp, MII_BMCR, &bmcr);
5915 if (bmcr & BMCR_ANENABLE) {
5916 u32 phy1, phy2;
5917
5918 /* Select shadow register 0x1f */
5919 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5920 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5921
5922 /* Select expansion interrupt status register */
5923 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5924 MII_TG3_DSP_EXP1_INT_STAT);
5925 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5926 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5927
5928 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5929 /* We have signal detect and are not receiving
5930 * config code words, so the link is up by
5931 * parallel detection.
5932 */
5933
5934 bmcr &= ~BMCR_ANENABLE;
5935 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5936 tg3_writephy(tp, MII_BMCR, bmcr);
5937 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5938 }
5939 }
5940 } else if (tp->link_up &&
5941 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5942 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5943 u32 phy2;
5944
5945 /* Select expansion interrupt status register */
5946 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5947 MII_TG3_DSP_EXP1_INT_STAT);
5948 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5949 if (phy2 & 0x20) {
5950 u32 bmcr;
5951
5952 /* Config code words received, turn on autoneg. */
5953 tg3_readphy(tp, MII_BMCR, &bmcr);
5954 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5955
5956 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5957
5958 }
5959 }
5960 }
5961
5962 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5963 {
5964 u32 val;
5965 int err;
5966
5967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5968 err = tg3_setup_fiber_phy(tp, force_reset);
5969 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5970 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5971 else
5972 err = tg3_setup_copper_phy(tp, force_reset);
5973
5974 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5975 u32 scale;
5976
5977 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5978 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5979 scale = 65;
5980 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5981 scale = 6;
5982 else
5983 scale = 12;
5984
5985 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5986 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5987 tw32(GRC_MISC_CFG, val);
5988 }
5989
5990 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5991 (6 << TX_LENGTHS_IPG_SHIFT);
5992 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5993 tg3_asic_rev(tp) == ASIC_REV_5762)
5994 val |= tr32(MAC_TX_LENGTHS) &
5995 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5996 TX_LENGTHS_CNT_DWN_VAL_MSK);
5997
5998 if (tp->link_config.active_speed == SPEED_1000 &&
5999 tp->link_config.active_duplex == DUPLEX_HALF)
6000 tw32(MAC_TX_LENGTHS, val |
6001 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6002 else
6003 tw32(MAC_TX_LENGTHS, val |
6004 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6005
6006 if (!tg3_flag(tp, 5705_PLUS)) {
6007 if (tp->link_up) {
6008 tw32(HOSTCC_STAT_COAL_TICKS,
6009 tp->coal.stats_block_coalesce_usecs);
6010 } else {
6011 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6012 }
6013 }
6014
6015 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6016 val = tr32(PCIE_PWR_MGMT_THRESH);
6017 if (!tp->link_up)
6018 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6019 tp->pwrmgmt_thresh;
6020 else
6021 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6022 tw32(PCIE_PWR_MGMT_THRESH, val);
6023 }
6024
6025 return err;
6026 }
6027
6028 /* tp->lock must be held */
6029 static u64 tg3_refclk_read(struct tg3 *tp)
6030 {
6031 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6032 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6033 }
6034
6035 /* tp->lock must be held */
6036 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6037 {
6038 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6039 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6040 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6041 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6042 }
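/* Note on the sequence above: stopping the reference clock before the
 * two 32-bit writes keeps the counter from advancing between them,
 * which could otherwise leave a torn 64-bit value (an inference from
 * the register sequence; the hardware semantics are not documented
 * here).
 */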
6043
6044 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6045 static inline void tg3_full_unlock(struct tg3 *tp);
6046 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6047 {
6048 struct tg3 *tp = netdev_priv(dev);
6049
6050 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6051 SOF_TIMESTAMPING_RX_SOFTWARE |
6052 SOF_TIMESTAMPING_SOFTWARE;
6053
6054 if (tg3_flag(tp, PTP_CAPABLE)) {
6055 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6056 SOF_TIMESTAMPING_RX_HARDWARE |
6057 SOF_TIMESTAMPING_RAW_HARDWARE;
6058 }
6059
6060 if (tp->ptp_clock)
6061 info->phc_index = ptp_clock_index(tp->ptp_clock);
6062 else
6063 info->phc_index = -1;
6064
6065 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6066
6067 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6068 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6069 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6070 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6071 return 0;
6072 }
6073
6074 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6075 {
6076 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6077 bool neg_adj = false;
6078 u32 correction = 0;
6079
6080 if (ppb < 0) {
6081 neg_adj = true;
6082 ppb = -ppb;
6083 }
6084
6085 /* Frequency adjustment is performed using hardware with a 24-bit
6086 * accumulator and a programmable correction value. On each clock
6087 * cycle, the correction value gets added to the accumulator and
6088 * when it overflows, the time counter is incremented/decremented.
6089 *
6090 * So the conversion from ppb to correction value is
6091 * ppb * (1 << 24) / 1000000000
6092 */
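/* Illustrative example: a +1000 ppb (1 ppm) request gives
 * correction = 1000 * 16777216 / 1000000000 ~= 16, i.e. the
 * accumulator advances by an extra 16 counts on every clock.
 */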
6093 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6094 TG3_EAV_REF_CLK_CORRECT_MASK;
6095
6096 tg3_full_lock(tp, 0);
6097
6098 if (correction)
6099 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6100 TG3_EAV_REF_CLK_CORRECT_EN |
6101 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6102 else
6103 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6104
6105 tg3_full_unlock(tp);
6106
6107 return 0;
6108 }
6109
6110 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6111 {
6112 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6113
6114 tg3_full_lock(tp, 0);
6115 tp->ptp_adjust += delta;
6116 tg3_full_unlock(tp);
6117
6118 return 0;
6119 }
6120
6121 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6122 {
6123 u64 ns;
6124 u32 remainder;
6125 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6126
6127 tg3_full_lock(tp, 0);
6128 ns = tg3_refclk_read(tp);
6129 ns += tp->ptp_adjust;
6130 tg3_full_unlock(tp);
6131
6132 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6133 ts->tv_nsec = remainder;
6134
6135 return 0;
6136 }
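/* Example of the div_u64_rem() split above: ns = 1234567890123 yields
 * tv_sec = 1234 and tv_nsec = 567890123.
 */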
6137
6138 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6139 const struct timespec *ts)
6140 {
6141 u64 ns;
6142 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6143
6144 ns = timespec_to_ns(ts);
6145
6146 tg3_full_lock(tp, 0);
6147 tg3_refclk_write(tp, ns);
6148 tp->ptp_adjust = 0;
6149 tg3_full_unlock(tp);
6150
6151 return 0;
6152 }
6153
6154 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6155 struct ptp_clock_request *rq, int on)
6156 {
6157 return -EOPNOTSUPP;
6158 }
6159
6160 static const struct ptp_clock_info tg3_ptp_caps = {
6161 .owner = THIS_MODULE,
6162 .name = "tg3 clock",
6163 .max_adj = 250000000,
6164 .n_alarm = 0,
6165 .n_ext_ts = 0,
6166 .n_per_out = 0,
6167 .pps = 0,
6168 .adjfreq = tg3_ptp_adjfreq,
6169 .adjtime = tg3_ptp_adjtime,
6170 .gettime = tg3_ptp_gettime,
6171 .settime = tg3_ptp_settime,
6172 .enable = tg3_ptp_enable,
6173 };
6174
6175 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6176 struct skb_shared_hwtstamps *timestamp)
6177 {
6178 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6179 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6180 tp->ptp_adjust);
6181 }
6182
6183 /* tp->lock must be held */
6184 static void tg3_ptp_init(struct tg3 *tp)
6185 {
6186 if (!tg3_flag(tp, PTP_CAPABLE))
6187 return;
6188
6189 /* Initialize the hardware clock to the system time. */
6190 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6191 tp->ptp_adjust = 0;
6192 tp->ptp_info = tg3_ptp_caps;
6193 }
6194
6195 /* tp->lock must be held */
6196 static void tg3_ptp_resume(struct tg3 *tp)
6197 {
6198 if (!tg3_flag(tp, PTP_CAPABLE))
6199 return;
6200
6201 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6202 tp->ptp_adjust = 0;
6203 }
6204
6205 static void tg3_ptp_fini(struct tg3 *tp)
6206 {
6207 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6208 return;
6209
6210 ptp_clock_unregister(tp->ptp_clock);
6211 tp->ptp_clock = NULL;
6212 tp->ptp_adjust = 0;
6213 }
6214
6215 static inline int tg3_irq_sync(struct tg3 *tp)
6216 {
6217 return tp->irq_sync;
6218 }
6219
6220 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6221 {
6222 int i;
6223
6224 dst = (u32 *)((u8 *)dst + off);
6225 for (i = 0; i < len; i += sizeof(u32))
6226 *dst++ = tr32(off + i);
6227 }
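/* tg3_rd32_loop() first advances dst by 'off' so each register value
 * lands at its natural offset within the dump buffer. For example,
 * off = 0x400 and len = 0x10 fill dst[0x100]..dst[0x103] with
 * tr32(0x400), tr32(0x404), tr32(0x408) and tr32(0x40c).
 */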
6228
6229 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6230 {
6231 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6232 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6233 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6234 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6235 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6236 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6237 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6238 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6239 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6240 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6241 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6242 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6243 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6244 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6245 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6246 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6247 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6248 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6249 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6250
6251 if (tg3_flag(tp, SUPPORT_MSIX))
6252 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6253
6254 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6255 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6256 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6257 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6258 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6259 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6260 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6261 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6262
6263 if (!tg3_flag(tp, 5705_PLUS)) {
6264 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6265 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6266 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6267 }
6268
6269 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6270 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6271 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6272 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6273 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6274
6275 if (tg3_flag(tp, NVRAM))
6276 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6277 }
6278
6279 static void tg3_dump_state(struct tg3 *tp)
6280 {
6281 int i;
6282 u32 *regs;
6283
6284 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6285 if (!regs)
6286 return;
6287
6288 if (tg3_flag(tp, PCI_EXPRESS)) {
6289 /* Read up to but not including private PCI registers */
6290 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6291 regs[i / sizeof(u32)] = tr32(i);
6292 } else
6293 tg3_dump_legacy_regs(tp, regs);
6294
6295 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6296 if (!regs[i + 0] && !regs[i + 1] &&
6297 !regs[i + 2] && !regs[i + 3])
6298 continue;
6299
6300 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6301 i * 4,
6302 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6303 }
6304
6305 kfree(regs);
6306
6307 for (i = 0; i < tp->irq_cnt; i++) {
6308 struct tg3_napi *tnapi = &tp->napi[i];
6309
6310 /* SW status block */
6311 netdev_err(tp->dev,
6312 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6313 i,
6314 tnapi->hw_status->status,
6315 tnapi->hw_status->status_tag,
6316 tnapi->hw_status->rx_jumbo_consumer,
6317 tnapi->hw_status->rx_consumer,
6318 tnapi->hw_status->rx_mini_consumer,
6319 tnapi->hw_status->idx[0].rx_producer,
6320 tnapi->hw_status->idx[0].tx_consumer);
6321
6322 netdev_err(tp->dev,
6323 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6324 i,
6325 tnapi->last_tag, tnapi->last_irq_tag,
6326 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6327 tnapi->rx_rcb_ptr,
6328 tnapi->prodring.rx_std_prod_idx,
6329 tnapi->prodring.rx_std_cons_idx,
6330 tnapi->prodring.rx_jmb_prod_idx,
6331 tnapi->prodring.rx_jmb_cons_idx);
6332 }
6333 }
6334
6335 /* This is called whenever we suspect that the system chipset is re-
6336 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6337 * is bogus tx completions. We try to recover by setting the
6338 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6339 * in the workqueue.
6340 */
6341 static void tg3_tx_recover(struct tg3 *tp)
6342 {
6343 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6344 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6345
6346 netdev_warn(tp->dev,
6347 "The system may be re-ordering memory-mapped I/O "
6348 "cycles to the network device, attempting to recover. "
6349 "Please report the problem to the driver maintainer "
6350 "and include system chipset information.\n");
6351
6352 spin_lock(&tp->lock);
6353 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6354 spin_unlock(&tp->lock);
6355 }
6356
6357 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6358 {
6359 /* Tell compiler to fetch tx indices from memory. */
6360 barrier();
6361 return tnapi->tx_pending -
6362 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6363 }
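/* The subtraction above relies on unsigned modular arithmetic to
 * handle ring wraparound. Example with a 512-entry ring: tx_prod = 5
 * and tx_cons = 510 give (5 - 510) & 511 == 7 descriptors in flight,
 * leaving tx_pending - 7 slots available.
 */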
6364
6365 /* Tigon3 never reports partial packet sends. So we do not
6366 * need special logic to handle SKBs that have not had all
6367 * of their frags sent yet, like SunGEM does.
6368 */
6369 static void tg3_tx(struct tg3_napi *tnapi)
6370 {
6371 struct tg3 *tp = tnapi->tp;
6372 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6373 u32 sw_idx = tnapi->tx_cons;
6374 struct netdev_queue *txq;
6375 int index = tnapi - tp->napi;
6376 unsigned int pkts_compl = 0, bytes_compl = 0;
6377
6378 if (tg3_flag(tp, ENABLE_TSS))
6379 index--;
6380
6381 txq = netdev_get_tx_queue(tp->dev, index);
6382
6383 while (sw_idx != hw_idx) {
6384 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6385 struct sk_buff *skb = ri->skb;
6386 int i, tx_bug = 0;
6387
6388 if (unlikely(skb == NULL)) {
6389 tg3_tx_recover(tp);
6390 return;
6391 }
6392
6393 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6394 struct skb_shared_hwtstamps timestamp;
6395 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6396 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6397
6398 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6399
6400 skb_tstamp_tx(skb, &timestamp);
6401 }
6402
6403 pci_unmap_single(tp->pdev,
6404 dma_unmap_addr(ri, mapping),
6405 skb_headlen(skb),
6406 PCI_DMA_TODEVICE);
6407
6408 ri->skb = NULL;
6409
6410 while (ri->fragmented) {
6411 ri->fragmented = false;
6412 sw_idx = NEXT_TX(sw_idx);
6413 ri = &tnapi->tx_buffers[sw_idx];
6414 }
6415
6416 sw_idx = NEXT_TX(sw_idx);
6417
6418 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6419 ri = &tnapi->tx_buffers[sw_idx];
6420 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6421 tx_bug = 1;
6422
6423 pci_unmap_page(tp->pdev,
6424 dma_unmap_addr(ri, mapping),
6425 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6426 PCI_DMA_TODEVICE);
6427
6428 while (ri->fragmented) {
6429 ri->fragmented = false;
6430 sw_idx = NEXT_TX(sw_idx);
6431 ri = &tnapi->tx_buffers[sw_idx];
6432 }
6433
6434 sw_idx = NEXT_TX(sw_idx);
6435 }
6436
6437 pkts_compl++;
6438 bytes_compl += skb->len;
6439
6440 dev_kfree_skb(skb);
6441
6442 if (unlikely(tx_bug)) {
6443 tg3_tx_recover(tp);
6444 return;
6445 }
6446 }
6447
6448 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6449
6450 tnapi->tx_cons = sw_idx;
6451
6452 /* Need to make the tx_cons update visible to tg3_start_xmit()
6453 * before checking for netif_queue_stopped(). Without the
6454 * memory barrier, there is a small possibility that tg3_start_xmit()
6455 * will miss it and cause the queue to be stopped forever.
6456 */
6457 smp_mb();
6458
6459 if (unlikely(netif_tx_queue_stopped(txq) &&
6460 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6461 __netif_tx_lock(txq, smp_processor_id());
6462 if (netif_tx_queue_stopped(txq) &&
6463 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6464 netif_tx_wake_queue(txq);
6465 __netif_tx_unlock(txq);
6466 }
6467 }
6468
6469 static void tg3_frag_free(bool is_frag, void *data)
6470 {
6471 if (is_frag)
6472 put_page(virt_to_head_page(data));
6473 else
6474 kfree(data);
6475 }
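/* tg3_frag_free() mirrors the allocation split in tg3_alloc_rx_data()
 * below: buffers that fit within a page come from netdev_alloc_frag()
 * and are released by dropping the page reference, while larger ones
 * fall back to kmalloc()/kfree(). The is_frag flag records which path
 * allocated the buffer.
 */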
6476
6477 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6478 {
6479 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6480 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6481
6482 if (!ri->data)
6483 return;
6484
6485 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6486 map_sz, PCI_DMA_FROMDEVICE);
6487 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6488 ri->data = NULL;
6489 }
6490
6491
6492 /* Returns size of skb allocated or < 0 on error.
6493 *
6494 * We only need to fill in the address because the other members
6495 * of the RX descriptor are invariant, see tg3_init_rings.
6496 *
6497 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6498 * posting buffers we only dirty the first cache line of the RX
6499 * descriptor (containing the address), whereas for the RX status
6500 * buffers the cpu only reads the last cache line of the RX descriptor
6501 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6502 */
6503 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6504 u32 opaque_key, u32 dest_idx_unmasked,
6505 unsigned int *frag_size)
6506 {
6507 struct tg3_rx_buffer_desc *desc;
6508 struct ring_info *map;
6509 u8 *data;
6510 dma_addr_t mapping;
6511 int skb_size, data_size, dest_idx;
6512
6513 switch (opaque_key) {
6514 case RXD_OPAQUE_RING_STD:
6515 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6516 desc = &tpr->rx_std[dest_idx];
6517 map = &tpr->rx_std_buffers[dest_idx];
6518 data_size = tp->rx_pkt_map_sz;
6519 break;
6520
6521 case RXD_OPAQUE_RING_JUMBO:
6522 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6523 desc = &tpr->rx_jmb[dest_idx].std;
6524 map = &tpr->rx_jmb_buffers[dest_idx];
6525 data_size = TG3_RX_JMB_MAP_SZ;
6526 break;
6527
6528 default:
6529 return -EINVAL;
6530 }
6531
6532 /* Do not overwrite any of the map or rp information
6533 * until we are sure we can commit to a new buffer.
6534 *
6535 * Callers depend upon this behavior and assume that
6536 * we leave everything unchanged if we fail.
6537 */
6538 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6539 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6540 if (skb_size <= PAGE_SIZE) {
6541 data = netdev_alloc_frag(skb_size);
6542 *frag_size = skb_size;
6543 } else {
6544 data = kmalloc(skb_size, GFP_ATOMIC);
6545 *frag_size = 0;
6546 }
6547 if (!data)
6548 return -ENOMEM;
6549
6550 mapping = pci_map_single(tp->pdev,
6551 data + TG3_RX_OFFSET(tp),
6552 data_size,
6553 PCI_DMA_FROMDEVICE);
6554 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6555 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6556 return -EIO;
6557 }
6558
6559 map->data = data;
6560 dma_unmap_addr_set(map, mapping, mapping);
6561
6562 desc->addr_hi = ((u64)mapping >> 32);
6563 desc->addr_lo = ((u64)mapping & 0xffffffff);
6564
6565 return data_size;
6566 }
6567
6568 /* We only need to move over the address because the other
6569 * members of the RX descriptor are invariant. See notes above
6570 * tg3_alloc_rx_data for full details.
6571 */
6572 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6573 struct tg3_rx_prodring_set *dpr,
6574 u32 opaque_key, int src_idx,
6575 u32 dest_idx_unmasked)
6576 {
6577 struct tg3 *tp = tnapi->tp;
6578 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6579 struct ring_info *src_map, *dest_map;
6580 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6581 int dest_idx;
6582
6583 switch (opaque_key) {
6584 case RXD_OPAQUE_RING_STD:
6585 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6586 dest_desc = &dpr->rx_std[dest_idx];
6587 dest_map = &dpr->rx_std_buffers[dest_idx];
6588 src_desc = &spr->rx_std[src_idx];
6589 src_map = &spr->rx_std_buffers[src_idx];
6590 break;
6591
6592 case RXD_OPAQUE_RING_JUMBO:
6593 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6594 dest_desc = &dpr->rx_jmb[dest_idx].std;
6595 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6596 src_desc = &spr->rx_jmb[src_idx].std;
6597 src_map = &spr->rx_jmb_buffers[src_idx];
6598 break;
6599
6600 default:
6601 return;
6602 }
6603
6604 dest_map->data = src_map->data;
6605 dma_unmap_addr_set(dest_map, mapping,
6606 dma_unmap_addr(src_map, mapping));
6607 dest_desc->addr_hi = src_desc->addr_hi;
6608 dest_desc->addr_lo = src_desc->addr_lo;
6609
6610 /* Ensure that the update to the skb happens after the physical
6611 * addresses have been transferred to the new BD location.
6612 */
6613 smp_wmb();
6614
6615 src_map->data = NULL;
6616 }
6617
6618 /* The RX ring scheme is composed of multiple rings which post fresh
6619 * buffers to the chip, and one special ring the chip uses to report
6620 * status back to the host.
6621 *
6622 * The special ring reports the status of received packets to the
6623 * host. The chip does not write into the original descriptor the
6624 * RX buffer was obtained from. The chip simply takes the original
6625 * descriptor as provided by the host, updates the status and length
6626 * field, then writes this into the next status ring entry.
6627 *
6628 * Each ring the host uses to post buffers to the chip is described
6629 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6630 * it is first placed into the on-chip ram. When the packet's length
6631 * is known, it walks down the TG3_BDINFO entries to select the ring.
6632 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6633 * which is within the range of the new packet's length is chosen.
6634 *
6635 * The "separate ring for rx status" scheme may sound queer, but it makes
6636 * sense from a cache coherency perspective. If only the host writes
6637 * to the buffer post rings, and only the chip writes to the rx status
6638 * rings, then cache lines never move beyond shared-modified state.
6639 * If both the host and chip were to write into the same ring, cache line
6640 * eviction could occur since both entities want it in an exclusive state.
6641 */
6642 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6643 {
6644 struct tg3 *tp = tnapi->tp;
6645 u32 work_mask, rx_std_posted = 0;
6646 u32 std_prod_idx, jmb_prod_idx;
6647 u32 sw_idx = tnapi->rx_rcb_ptr;
6648 u16 hw_idx;
6649 int received;
6650 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6651
6652 hw_idx = *(tnapi->rx_rcb_prod_idx);
6653 /*
6654 * We need to order the read of hw_idx and the read of
6655 * the opaque cookie.
6656 */
6657 rmb();
6658 work_mask = 0;
6659 received = 0;
6660 std_prod_idx = tpr->rx_std_prod_idx;
6661 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6662 while (sw_idx != hw_idx && budget > 0) {
6663 struct ring_info *ri;
6664 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6665 unsigned int len;
6666 struct sk_buff *skb;
6667 dma_addr_t dma_addr;
6668 u32 opaque_key, desc_idx, *post_ptr;
6669 u8 *data;
6670 u64 tstamp = 0;
6671
6672 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6673 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6674 if (opaque_key == RXD_OPAQUE_RING_STD) {
6675 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6676 dma_addr = dma_unmap_addr(ri, mapping);
6677 data = ri->data;
6678 post_ptr = &std_prod_idx;
6679 rx_std_posted++;
6680 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6681 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6682 dma_addr = dma_unmap_addr(ri, mapping);
6683 data = ri->data;
6684 post_ptr = &jmb_prod_idx;
6685 } else
6686 goto next_pkt_nopost;
6687
6688 work_mask |= opaque_key;
6689
6690 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6691 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6692 drop_it:
6693 tg3_recycle_rx(tnapi, tpr, opaque_key,
6694 desc_idx, *post_ptr);
6695 drop_it_no_recycle:
6696 /* Other statistics are kept track of by the card. */
6697 tp->rx_dropped++;
6698 goto next_pkt;
6699 }
6700
6701 prefetch(data + TG3_RX_OFFSET(tp));
6702 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6703 ETH_FCS_LEN;
6704
6705 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6706 RXD_FLAG_PTPSTAT_PTPV1 ||
6707 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6708 RXD_FLAG_PTPSTAT_PTPV2) {
6709 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6710 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6711 }
6712
6713 if (len > TG3_RX_COPY_THRESH(tp)) {
6714 int skb_size;
6715 unsigned int frag_size;
6716
6717 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6718 *post_ptr, &frag_size);
6719 if (skb_size < 0)
6720 goto drop_it;
6721
6722 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6723 PCI_DMA_FROMDEVICE);
6724
6725 skb = build_skb(data, frag_size);
6726 if (!skb) {
6727 tg3_frag_free(frag_size != 0, data);
6728 goto drop_it_no_recycle;
6729 }
6730 skb_reserve(skb, TG3_RX_OFFSET(tp));
6731 /* Ensure that the update to the data happens
6732 * after the usage of the old DMA mapping.
6733 */
6734 smp_wmb();
6735
6736 ri->data = NULL;
6737
6738 } else {
6739 tg3_recycle_rx(tnapi, tpr, opaque_key,
6740 desc_idx, *post_ptr);
6741
6742 skb = netdev_alloc_skb(tp->dev,
6743 len + TG3_RAW_IP_ALIGN);
6744 if (skb == NULL)
6745 goto drop_it_no_recycle;
6746
6747 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6748 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6749 memcpy(skb->data,
6750 data + TG3_RX_OFFSET(tp),
6751 len);
6752 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6753 }
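/* Receive copybreak in effect: packets longer than
 * TG3_RX_COPY_THRESH(tp) keep their DMA buffer and are wrapped via
 * build_skb() once a replacement has been posted, while shorter ones
 * are copied into a fresh skb and the original buffer is recycled
 * back onto the producer ring, trading a memcpy for buffer reuse.
 */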
6754
6755 skb_put(skb, len);
6756 if (tstamp)
6757 tg3_hwclock_to_timestamp(tp, tstamp,
6758 skb_hwtstamps(skb));
6759
6760 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6761 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6762 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6763 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6764 skb->ip_summed = CHECKSUM_UNNECESSARY;
6765 else
6766 skb_checksum_none_assert(skb);
6767
6768 skb->protocol = eth_type_trans(skb, tp->dev);
6769
6770 if (len > (tp->dev->mtu + ETH_HLEN) &&
6771 skb->protocol != htons(ETH_P_8021Q)) {
6772 dev_kfree_skb(skb);
6773 goto drop_it_no_recycle;
6774 }
6775
6776 if (desc->type_flags & RXD_FLAG_VLAN &&
6777 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6778 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6779 desc->err_vlan & RXD_VLAN_MASK);
6780
6781 napi_gro_receive(&tnapi->napi, skb);
6782
6783 received++;
6784 budget--;
6785
6786 next_pkt:
6787 (*post_ptr)++;
6788
6789 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6790 tpr->rx_std_prod_idx = std_prod_idx &
6791 tp->rx_std_ring_mask;
6792 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6793 tpr->rx_std_prod_idx);
6794 work_mask &= ~RXD_OPAQUE_RING_STD;
6795 rx_std_posted = 0;
6796 }
6797 next_pkt_nopost:
6798 sw_idx++;
6799 sw_idx &= tp->rx_ret_ring_mask;
6800
6801 /* Refresh hw_idx to see if there is new work */
6802 if (sw_idx == hw_idx) {
6803 hw_idx = *(tnapi->rx_rcb_prod_idx);
6804 rmb();
6805 }
6806 }
6807
6808 /* ACK the status ring. */
6809 tnapi->rx_rcb_ptr = sw_idx;
6810 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6811
6812 /* Refill RX ring(s). */
6813 if (!tg3_flag(tp, ENABLE_RSS)) {
6814 /* Sync BD data before updating mailbox */
6815 wmb();
6816
6817 if (work_mask & RXD_OPAQUE_RING_STD) {
6818 tpr->rx_std_prod_idx = std_prod_idx &
6819 tp->rx_std_ring_mask;
6820 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6821 tpr->rx_std_prod_idx);
6822 }
6823 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6824 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6825 tp->rx_jmb_ring_mask;
6826 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6827 tpr->rx_jmb_prod_idx);
6828 }
6829 mmiowb();
6830 } else if (work_mask) {
6831 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6832 * updated before the producer indices can be updated.
6833 */
6834 smp_wmb();
6835
6836 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6837 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6838
6839 if (tnapi != &tp->napi[1]) {
6840 tp->rx_refill = true;
6841 napi_schedule(&tp->napi[1].napi);
6842 }
6843 }
6844
6845 return received;
6846 }
6847
6848 static void tg3_poll_link(struct tg3 *tp)
6849 {
6850 /* handle link change and other phy events */
6851 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6852 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6853
6854 if (sblk->status & SD_STATUS_LINK_CHG) {
6855 sblk->status = SD_STATUS_UPDATED |
6856 (sblk->status & ~SD_STATUS_LINK_CHG);
6857 spin_lock(&tp->lock);
6858 if (tg3_flag(tp, USE_PHYLIB)) {
6859 tw32_f(MAC_STATUS,
6860 (MAC_STATUS_SYNC_CHANGED |
6861 MAC_STATUS_CFG_CHANGED |
6862 MAC_STATUS_MI_COMPLETION |
6863 MAC_STATUS_LNKSTATE_CHANGED));
6864 udelay(40);
6865 } else
6866 tg3_setup_phy(tp, false);
6867 spin_unlock(&tp->lock);
6868 }
6869 }
6870 }
6871
6872 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6873 struct tg3_rx_prodring_set *dpr,
6874 struct tg3_rx_prodring_set *spr)
6875 {
6876 u32 si, di, cpycnt, src_prod_idx;
6877 int i, err = 0;
6878
6879 while (1) {
6880 src_prod_idx = spr->rx_std_prod_idx;
6881
6882 /* Make sure updates to the rx_std_buffers[] entries and the
6883 * standard producer index are seen in the correct order.
6884 */
6885 smp_rmb();
6886
6887 if (spr->rx_std_cons_idx == src_prod_idx)
6888 break;
6889
6890 if (spr->rx_std_cons_idx < src_prod_idx)
6891 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6892 else
6893 cpycnt = tp->rx_std_ring_mask + 1 -
6894 spr->rx_std_cons_idx;
6895
6896 cpycnt = min(cpycnt,
6897 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6898
6899 si = spr->rx_std_cons_idx;
6900 di = dpr->rx_std_prod_idx;
6901
6902 for (i = di; i < di + cpycnt; i++) {
6903 if (dpr->rx_std_buffers[i].data) {
6904 cpycnt = i - di;
6905 err = -ENOSPC;
6906 break;
6907 }
6908 }
6909
6910 if (!cpycnt)
6911 break;
6912
6913 /* Ensure that updates to the rx_std_buffers ring and the
6914 * shadowed hardware producer ring from tg3_recycle_skb() are
6915 * ordered correctly WRT the skb check above.
6916 */
6917 smp_rmb();
6918
6919 memcpy(&dpr->rx_std_buffers[di],
6920 &spr->rx_std_buffers[si],
6921 cpycnt * sizeof(struct ring_info));
6922
6923 for (i = 0; i < cpycnt; i++, di++, si++) {
6924 struct tg3_rx_buffer_desc *sbd, *dbd;
6925 sbd = &spr->rx_std[si];
6926 dbd = &dpr->rx_std[di];
6927 dbd->addr_hi = sbd->addr_hi;
6928 dbd->addr_lo = sbd->addr_lo;
6929 }
6930
6931 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6932 tp->rx_std_ring_mask;
6933 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6934 tp->rx_std_ring_mask;
6935 }
6936
6937 while (1) {
6938 src_prod_idx = spr->rx_jmb_prod_idx;
6939
6940 /* Make sure updates to the rx_jmb_buffers[] entries and
6941 * the jumbo producer index are seen in the correct order.
6942 */
6943 smp_rmb();
6944
6945 if (spr->rx_jmb_cons_idx == src_prod_idx)
6946 break;
6947
6948 if (spr->rx_jmb_cons_idx < src_prod_idx)
6949 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6950 else
6951 cpycnt = tp->rx_jmb_ring_mask + 1 -
6952 spr->rx_jmb_cons_idx;
6953
6954 cpycnt = min(cpycnt,
6955 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6956
6957 si = spr->rx_jmb_cons_idx;
6958 di = dpr->rx_jmb_prod_idx;
6959
6960 for (i = di; i < di + cpycnt; i++) {
6961 if (dpr->rx_jmb_buffers[i].data) {
6962 cpycnt = i - di;
6963 err = -ENOSPC;
6964 break;
6965 }
6966 }
6967
6968 if (!cpycnt)
6969 break;
6970
6971 /* Ensure that updates to the rx_jmb_buffers ring and the
6972 * shadowed hardware producer ring from tg3_recycle_skb() are
6973 * ordered correctly WRT the skb check above.
6974 */
6975 smp_rmb();
6976
6977 memcpy(&dpr->rx_jmb_buffers[di],
6978 &spr->rx_jmb_buffers[si],
6979 cpycnt * sizeof(struct ring_info));
6980
6981 for (i = 0; i < cpycnt; i++, di++, si++) {
6982 struct tg3_rx_buffer_desc *sbd, *dbd;
6983 sbd = &spr->rx_jmb[si].std;
6984 dbd = &dpr->rx_jmb[di].std;
6985 dbd->addr_hi = sbd->addr_hi;
6986 dbd->addr_lo = sbd->addr_lo;
6987 }
6988
6989 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6990 tp->rx_jmb_ring_mask;
6991 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6992 tp->rx_jmb_ring_mask;
6993 }
6994
6995 return err;
6996 }
6997
6998 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6999 {
7000 struct tg3 *tp = tnapi->tp;
7001
7002 /* run TX completion thread */
7003 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7004 tg3_tx(tnapi);
7005 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7006 return work_done;
7007 }
7008
7009 if (!tnapi->rx_rcb_prod_idx)
7010 return work_done;
7011
7012 /* run RX thread, within the bounds set by NAPI.
7013 * All RX "locking" is done by ensuring outside
7014 * code synchronizes with tg3->napi.poll()
7015 */
7016 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7017 work_done += tg3_rx(tnapi, budget - work_done);
7018
7019 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7020 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7021 int i, err = 0;
7022 u32 std_prod_idx = dpr->rx_std_prod_idx;
7023 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7024
7025 tp->rx_refill = false;
7026 for (i = 1; i <= tp->rxq_cnt; i++)
7027 err |= tg3_rx_prodring_xfer(tp, dpr,
7028 &tp->napi[i].prodring);
7029
7030 wmb();
7031
7032 if (std_prod_idx != dpr->rx_std_prod_idx)
7033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7034 dpr->rx_std_prod_idx);
7035
7036 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7037 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7038 dpr->rx_jmb_prod_idx);
7039
7040 mmiowb();
7041
7042 if (err)
7043 tw32_f(HOSTCC_MODE, tp->coal_now);
7044 }
7045
7046 return work_done;
7047 }
7048
7049 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7050 {
7051 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7052 schedule_work(&tp->reset_task);
7053 }
7054
7055 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7056 {
7057 cancel_work_sync(&tp->reset_task);
7058 tg3_flag_clear(tp, RESET_TASK_PENDING);
7059 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7060 }
7061
7062 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7063 {
7064 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7065 struct tg3 *tp = tnapi->tp;
7066 int work_done = 0;
7067 struct tg3_hw_status *sblk = tnapi->hw_status;
7068
7069 while (1) {
7070 work_done = tg3_poll_work(tnapi, work_done, budget);
7071
7072 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7073 goto tx_recovery;
7074
7075 if (unlikely(work_done >= budget))
7076 break;
7077
7078 /* tp->last_tag is used in tg3_int_reenable() below
7079 * to tell the hw how much work has been processed,
7080 * so we must read it before checking for more work.
7081 */
7082 tnapi->last_tag = sblk->status_tag;
7083 tnapi->last_irq_tag = tnapi->last_tag;
7084 rmb();
7085
7086 /* check for RX/TX work to do */
7087 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7088 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7089
7090 /* This test here is not race-free, but will reduce
7091 * the number of interrupts by looping again.
7092 */
7093 if (tnapi == &tp->napi[1] && tp->rx_refill)
7094 continue;
7095
7096 napi_complete(napi);
7097 /* Reenable interrupts. */
7098 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7099
7100 /* This test here is synchronized by napi_schedule()
7101 * and napi_complete() to close the race condition.
7102 */
7103 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7104 tw32(HOSTCC_MODE, tp->coalesce_mode |
7105 HOSTCC_MODE_ENABLE |
7106 tnapi->coal_now);
7107 }
7108 mmiowb();
7109 break;
7110 }
7111 }
7112
7113 return work_done;
7114
7115 tx_recovery:
7116 /* work_done is guaranteed to be less than budget. */
7117 napi_complete(napi);
7118 tg3_reset_task_schedule(tp);
7119 return work_done;
7120 }
7121
7122 static void tg3_process_error(struct tg3 *tp)
7123 {
7124 u32 val;
7125 bool real_error = false;
7126
7127 if (tg3_flag(tp, ERROR_PROCESSED))
7128 return;
7129
7130 /* Check Flow Attention register */
7131 val = tr32(HOSTCC_FLOW_ATTN);
7132 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7133 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7134 real_error = true;
7135 }
7136
7137 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7138 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7139 real_error = true;
7140 }
7141
7142 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7143 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7144 real_error = true;
7145 }
7146
7147 if (!real_error)
7148 return;
7149
7150 tg3_dump_state(tp);
7151
7152 tg3_flag_set(tp, ERROR_PROCESSED);
7153 tg3_reset_task_schedule(tp);
7154 }
7155
7156 static int tg3_poll(struct napi_struct *napi, int budget)
7157 {
7158 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7159 struct tg3 *tp = tnapi->tp;
7160 int work_done = 0;
7161 struct tg3_hw_status *sblk = tnapi->hw_status;
7162
7163 while (1) {
7164 if (sblk->status & SD_STATUS_ERROR)
7165 tg3_process_error(tp);
7166
7167 tg3_poll_link(tp);
7168
7169 work_done = tg3_poll_work(tnapi, work_done, budget);
7170
7171 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7172 goto tx_recovery;
7173
7174 if (unlikely(work_done >= budget))
7175 break;
7176
7177 if (tg3_flag(tp, TAGGED_STATUS)) {
7178 /* tp->last_tag is used in tg3_int_reenable() below
7179 * to tell the hw how much work has been processed,
7180 * so we must read it before checking for more work.
7181 */
7182 tnapi->last_tag = sblk->status_tag;
7183 tnapi->last_irq_tag = tnapi->last_tag;
7184 rmb();
7185 } else
7186 sblk->status &= ~SD_STATUS_UPDATED;
7187
7188 if (likely(!tg3_has_work(tnapi))) {
7189 napi_complete(napi);
7190 tg3_int_reenable(tnapi);
7191 break;
7192 }
7193 }
7194
7195 return work_done;
7196
7197 tx_recovery:
7198 /* work_done is guaranteed to be less than budget. */
7199 napi_complete(napi);
7200 tg3_reset_task_schedule(tp);
7201 return work_done;
7202 }
7203
7204 static void tg3_napi_disable(struct tg3 *tp)
7205 {
7206 int i;
7207
7208 for (i = tp->irq_cnt - 1; i >= 0; i--)
7209 napi_disable(&tp->napi[i].napi);
7210 }
7211
7212 static void tg3_napi_enable(struct tg3 *tp)
7213 {
7214 int i;
7215
7216 for (i = 0; i < tp->irq_cnt; i++)
7217 napi_enable(&tp->napi[i].napi);
7218 }
7219
7220 static void tg3_napi_init(struct tg3 *tp)
7221 {
7222 int i;
7223
7224 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7225 for (i = 1; i < tp->irq_cnt; i++)
7226 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7227 }
7228
7229 static void tg3_napi_fini(struct tg3 *tp)
7230 {
7231 int i;
7232
7233 for (i = 0; i < tp->irq_cnt; i++)
7234 netif_napi_del(&tp->napi[i].napi);
7235 }
7236
7237 static inline void tg3_netif_stop(struct tg3 *tp)
7238 {
7239 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7240 tg3_napi_disable(tp);
7241 netif_carrier_off(tp->dev);
7242 netif_tx_disable(tp->dev);
7243 }
7244
7245 /* tp->lock must be held */
7246 static inline void tg3_netif_start(struct tg3 *tp)
7247 {
7248 tg3_ptp_resume(tp);
7249
7250 /* NOTE: unconditional netif_tx_wake_all_queues is only
7251 * appropriate so long as all callers are assured to
7252 * have free tx slots (such as after tg3_init_hw)
7253 */
7254 netif_tx_wake_all_queues(tp->dev);
7255
7256 if (tp->link_up)
7257 netif_carrier_on(tp->dev);
7258
7259 tg3_napi_enable(tp);
7260 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7261 tg3_enable_ints(tp);
7262 }
7263
7264 static void tg3_irq_quiesce(struct tg3 *tp)
7265 {
7266 int i;
7267
7268 BUG_ON(tp->irq_sync);
7269
7270 tp->irq_sync = 1;
7271 smp_mb();
7272
7273 for (i = 0; i < tp->irq_cnt; i++)
7274 synchronize_irq(tp->napi[i].irq_vec);
7275 }
7276
7277 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7278 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7279 * with as well. Most of the time, this is not necessary except when
7280 * shutting down the device.
7281 */
7282 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7283 {
7284 spin_lock_bh(&tp->lock);
7285 if (irq_sync)
7286 tg3_irq_quiesce(tp);
7287 }
7288
7289 static inline void tg3_full_unlock(struct tg3 *tp)
7290 {
7291 spin_unlock_bh(&tp->lock);
7292 }
7293
7294 /* One-shot MSI handler - Chip automatically disables interrupt
7295 * after sending MSI so driver doesn't have to do it.
7296 */
7297 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7298 {
7299 struct tg3_napi *tnapi = dev_id;
7300 struct tg3 *tp = tnapi->tp;
7301
7302 prefetch(tnapi->hw_status);
7303 if (tnapi->rx_rcb)
7304 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7305
7306 if (likely(!tg3_irq_sync(tp)))
7307 napi_schedule(&tnapi->napi);
7308
7309 return IRQ_HANDLED;
7310 }
7311
7312 /* MSI ISR - No need to check for interrupt sharing and no need to
7313 * flush status block and interrupt mailbox. PCI ordering rules
7314 * guarantee that MSI will arrive after the status block.
7315 */
7316 static irqreturn_t tg3_msi(int irq, void *dev_id)
7317 {
7318 struct tg3_napi *tnapi = dev_id;
7319 struct tg3 *tp = tnapi->tp;
7320
7321 prefetch(tnapi->hw_status);
7322 if (tnapi->rx_rcb)
7323 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7324 /*
7325 * Writing any value to intr-mbox-0 clears PCI INTA# and
7326 * chip-internal interrupt pending events.
7327 * Writing non-zero to intr-mbox-0 additionally tells the
7328 * NIC to stop sending us irqs, engaging "in-intr-handler"
7329 * event coalescing.
7330 */
7331 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7332 if (likely(!tg3_irq_sync(tp)))
7333 napi_schedule(&tnapi->napi);
7334
7335 return IRQ_RETVAL(1);
7336 }
7337
7338 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7339 {
7340 struct tg3_napi *tnapi = dev_id;
7341 struct tg3 *tp = tnapi->tp;
7342 struct tg3_hw_status *sblk = tnapi->hw_status;
7343 unsigned int handled = 1;
7344
7345 /* In INTx mode, it is possible for the interrupt to arrive at
7346 * the CPU before the status block, posted prior to the interrupt,
7347 * is visible. Reading the PCI State register will confirm whether
7348 * the interrupt is ours and will flush the status block.
7349 */
7350 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7351 if (tg3_flag(tp, CHIP_RESETTING) ||
7352 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7353 handled = 0;
7354 goto out;
7355 }
7356 }
7357
7358 /*
7359 * Writing any value to intr-mbox-0 clears PCI INTA# and
7360 * chip-internal interrupt pending events.
7361 * Writing non-zero to intr-mbox-0 additionally tells the
7362 * NIC to stop sending us irqs, engaging "in-intr-handler"
7363 * event coalescing.
7364 *
7365 * Flush the mailbox to de-assert the IRQ immediately to prevent
7366 * spurious interrupts. The flush impacts performance but
7367 * excessive spurious interrupts can be worse in some cases.
7368 */
7369 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7370 if (tg3_irq_sync(tp))
7371 goto out;
7372 sblk->status &= ~SD_STATUS_UPDATED;
7373 if (likely(tg3_has_work(tnapi))) {
7374 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7375 napi_schedule(&tnapi->napi);
7376 } else {
7377 /* No work, shared interrupt perhaps? re-enable
7378 * interrupts, and flush that PCI write
7379 */
7380 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7381 0x00000000);
7382 }
7383 out:
7384 return IRQ_RETVAL(handled);
7385 }
7386
7387 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7388 {
7389 struct tg3_napi *tnapi = dev_id;
7390 struct tg3 *tp = tnapi->tp;
7391 struct tg3_hw_status *sblk = tnapi->hw_status;
7392 unsigned int handled = 1;
7393
7394 /* In INTx mode, it is possible for the interrupt to arrive at
7395 * the CPU before the status block, posted prior to the interrupt,
7396 * is visible. Reading the PCI State register will confirm whether
7397 * the interrupt is ours and will flush the status block.
7398 */
7399 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7400 if (tg3_flag(tp, CHIP_RESETTING) ||
7401 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7402 handled = 0;
7403 goto out;
7404 }
7405 }
7406
7407 /*
7408 * writing any value to intr-mbox-0 clears PCI INTA# and
7409 * chip-internal interrupt pending events.
7410 * writing non-zero to intr-mbox-0 additionally tells the
7411 * NIC to stop sending us irqs, engaging "in-intr-handler"
7412 * event coalescing.
7413 *
7414 * Flush the mailbox to de-assert the IRQ immediately to prevent
7415 * spurious interrupts. The flush impacts performance but
7416 * excessive spurious interrupts can be worse in some cases.
7417 */
7418 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7419
7420 /*
7421 * In a shared interrupt configuration, sometimes other devices'
7422 * interrupts will scream. We record the current status tag here
7423 * so that the above check can report that the screaming interrupts
7424 * are unhandled. Eventually they will be silenced.
7425 */
7426 tnapi->last_irq_tag = sblk->status_tag;
7427
7428 if (tg3_irq_sync(tp))
7429 goto out;
7430
7431 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7432
7433 napi_schedule(&tnapi->napi);
7434
7435 out:
7436 return IRQ_RETVAL(handled);
7437 }
7438
7439 /* ISR for interrupt test */
7440 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7441 {
7442 struct tg3_napi *tnapi = dev_id;
7443 struct tg3 *tp = tnapi->tp;
7444 struct tg3_hw_status *sblk = tnapi->hw_status;
7445
7446 if ((sblk->status & SD_STATUS_UPDATED) ||
7447 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7448 tg3_disable_ints(tp);
7449 return IRQ_RETVAL(1);
7450 }
7451 return IRQ_RETVAL(0);
7452 }
7453
7454 #ifdef CONFIG_NET_POLL_CONTROLLER
7455 static void tg3_poll_controller(struct net_device *dev)
7456 {
7457 int i;
7458 struct tg3 *tp = netdev_priv(dev);
7459
7460 if (tg3_irq_sync(tp))
7461 return;
7462
7463 for (i = 0; i < tp->irq_cnt; i++)
7464 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7465 }
7466 #endif
7467
7468 static void tg3_tx_timeout(struct net_device *dev)
7469 {
7470 struct tg3 *tp = netdev_priv(dev);
7471
7472 if (netif_msg_tx_err(tp)) {
7473 netdev_err(dev, "transmit timed out, resetting\n");
7474 tg3_dump_state(tp);
7475 }
7476
7477 tg3_reset_task_schedule(tp);
7478 }
7479
7480 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7481 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7482 {
7483 u32 base = (u32) mapping & 0xffffffff;
7484
7485 return (base > 0xffffdcc0) && (base + len + 8 < base);
7486 }
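/* Illustration: base = 0xfffff000 and len = 0x2000 give
 * base + len + 8 == 0x1008 after 32-bit truncation, which is < base,
 * so the buffer straddles a 4GB boundary. The base > 0xffffdcc0
 * pre-check excludes buffers starting more than ~9KB below the
 * boundary (presumably matched to the largest supported frame).
 */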
7487
7488 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7489 * of any 4GB boundaries: 4G, 8G, etc
7490 */
7491 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7492 u32 len, u32 mss)
7493 {
7494 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7495 u32 base = (u32) mapping & 0xffffffff;
7496
7497 return ((base + len + (mss & 0x3fff)) < base);
7498 }
7499 return 0;
7500 }
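/* Example for the 5762 check: base = 0xfffff000, len = 0x1000 and
 * mss = 0x5b4 give base + len + mss == 0x5b4 after truncation, which
 * is < base, so a segment's DMA region could wrap a 4GB boundary.
 */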
7501
7502 /* Test for DMA addresses > 40-bit */
7503 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7504 int len)
7505 {
7506 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7507 if (tg3_flag(tp, 40BIT_DMA_BUG))
7508 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7509 return 0;
7510 #else
7511 return 0;
7512 #endif
7513 }
7514
7515 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7516 dma_addr_t mapping, u32 len, u32 flags,
7517 u32 mss, u32 vlan)
7518 {
7519 txbd->addr_hi = ((u64) mapping >> 32);
7520 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7521 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7522 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7523 }
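/* tg3_tx_set_bd() splits the 64-bit DMA address across two 32-bit BD
 * words and packs len/flags and mss/vlan into the other two. For
 * example, mapping = 0x123456789a gives addr_hi = 0x12 and
 * addr_lo = 0x3456789a.
 */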
7524
7525 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7526 dma_addr_t map, u32 len, u32 flags,
7527 u32 mss, u32 vlan)
7528 {
7529 struct tg3 *tp = tnapi->tp;
7530 bool hwbug = false;
7531
7532 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7533 hwbug = true;
7534
7535 if (tg3_4g_overflow_test(map, len))
7536 hwbug = true;
7537
7538 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7539 hwbug = true;
7540
7541 if (tg3_40bit_overflow_test(tp, map, len))
7542 hwbug = true;
7543
7544 if (tp->dma_limit) {
7545 u32 prvidx = *entry;
7546 u32 tmp_flag = flags & ~TXD_FLAG_END;
7547 while (len > tp->dma_limit && *budget) {
7548 u32 frag_len = tp->dma_limit;
7549 len -= tp->dma_limit;
7550
7551 /* Avoid the 8-byte DMA problem */
7552 if (len <= 8) {
7553 len += tp->dma_limit / 2;
7554 frag_len = tp->dma_limit / 2;
7555 }
7556
7557 tnapi->tx_buffers[*entry].fragmented = true;
7558
7559 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7560 frag_len, tmp_flag, mss, vlan);
7561 *budget -= 1;
7562 prvidx = *entry;
7563 *entry = NEXT_TX(*entry);
7564
7565 map += frag_len;
7566 }
7567
7568 if (len) {
7569 if (*budget) {
7570 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7571 len, flags, mss, vlan);
7572 *budget -= 1;
7573 *entry = NEXT_TX(*entry);
7574 } else {
7575 hwbug = true;
7576 tnapi->tx_buffers[prvidx].fragmented = false;
7577 }
7578 }
7579 } else {
7580 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7581 len, flags, mss, vlan);
7582 *entry = NEXT_TX(*entry);
7583 }
7584
7585 return hwbug;
7586 }
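/* dma_limit splitting sketch (illustrative, with dma_limit = 4096):
 * len = 4100 would naively leave a 4-byte tail and re-trigger the
 * short-DMA bug, so the loop above instead emits a 2048-byte BD and
 * carries 2052 bytes (4 + 2048) into the final BD, ensuring no
 * descriptor ends up 8 bytes or shorter.
 */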
7587
7588 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7589 {
7590 int i;
7591 struct sk_buff *skb;
7592 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7593
7594 skb = txb->skb;
7595 txb->skb = NULL;
7596
7597 pci_unmap_single(tnapi->tp->pdev,
7598 dma_unmap_addr(txb, mapping),
7599 skb_headlen(skb),
7600 PCI_DMA_TODEVICE);
7601
7602 while (txb->fragmented) {
7603 txb->fragmented = false;
7604 entry = NEXT_TX(entry);
7605 txb = &tnapi->tx_buffers[entry];
7606 }
7607
7608 for (i = 0; i <= last; i++) {
7609 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7610
7611 entry = NEXT_TX(entry);
7612 txb = &tnapi->tx_buffers[entry];
7613
7614 pci_unmap_page(tnapi->tp->pdev,
7615 dma_unmap_addr(txb, mapping),
7616 skb_frag_size(frag), PCI_DMA_TODEVICE);
7617
7618 while (txb->fragmented) {
7619 txb->fragmented = false;
7620 entry = NEXT_TX(entry);
7621 txb = &tnapi->tx_buffers[entry];
7622 }
7623 }
7624 }
7625
7626 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7627 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7628 struct sk_buff **pskb,
7629 u32 *entry, u32 *budget,
7630 u32 base_flags, u32 mss, u32 vlan)
7631 {
7632 struct tg3 *tp = tnapi->tp;
7633 struct sk_buff *new_skb, *skb = *pskb;
7634 dma_addr_t new_addr = 0;
7635 int ret = 0;
7636
7637 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7638 new_skb = skb_copy(skb, GFP_ATOMIC);
7639 else {
7640 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7641
7642 new_skb = skb_copy_expand(skb,
7643 skb_headroom(skb) + more_headroom,
7644 skb_tailroom(skb), GFP_ATOMIC);
7645 }
7646
7647 if (!new_skb) {
7648 ret = -1;
7649 } else {
7650 /* New SKB is guaranteed to be linear. */
7651 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7652 PCI_DMA_TODEVICE);
7653 /* Make sure the mapping succeeded */
7654 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7655 dev_kfree_skb(new_skb);
7656 ret = -1;
7657 } else {
7658 u32 save_entry = *entry;
7659
7660 base_flags |= TXD_FLAG_END;
7661
7662 tnapi->tx_buffers[*entry].skb = new_skb;
7663 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7664 mapping, new_addr);
7665
7666 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7667 new_skb->len, base_flags,
7668 mss, vlan)) {
7669 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7670 dev_kfree_skb(new_skb);
7671 ret = -1;
7672 }
7673 }
7674 }
7675
7676 dev_kfree_skb(skb);
7677 *pskb = new_skb;
7678 return ret;
7679 }
7680
7681 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7682
7683 /* Use GSO to work around a rare TSO bug that may be triggered when the
7684 * TSO header is greater than 80 bytes.
7685 */
7686 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7687 {
7688 struct sk_buff *segs, *nskb;
7689 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7690
7691 /* Estimate the number of fragments in the worst case */
7692 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7693 netif_stop_queue(tp->dev);
7694
7695 /* netif_tx_stop_queue() must be done before checking
7696 * tx index in tg3_tx_avail() below, because in
7697 * tg3_tx(), we update tx index before checking for
7698 * netif_tx_queue_stopped().
7699 */
7700 smp_mb();
7701 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7702 return NETDEV_TX_BUSY;
7703
7704 netif_wake_queue(tp->dev);
7705 }
7706
7707 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7708 if (IS_ERR(segs))
7709 goto tg3_tso_bug_end;
7710
7711 do {
7712 nskb = segs;
7713 segs = segs->next;
7714 nskb->next = NULL;
7715 tg3_start_xmit(nskb, tp->dev);
7716 } while (segs);
7717
7718 tg3_tso_bug_end:
7719 dev_kfree_skb(skb);
7720
7721 return NETDEV_TX_OK;
7722 }
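/* tg3_tso_bug() is a software GSO fallback: the oversized-header skb
 * is segmented on the CPU and each segment is fed back through
 * tg3_start_xmit(). The gso_segs * 3 estimate assumes roughly three
 * descriptors per segment in the worst case (an assumption; the exact
 * bound is not spelled out here).
 */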
7723
7724 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7725 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7726 */
7727 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7728 {
7729 struct tg3 *tp = netdev_priv(dev);
7730 u32 len, entry, base_flags, mss, vlan = 0;
7731 u32 budget;
7732 int i = -1, would_hit_hwbug;
7733 dma_addr_t mapping;
7734 struct tg3_napi *tnapi;
7735 struct netdev_queue *txq;
7736 unsigned int last;
7737
7738 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7739 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7740 if (tg3_flag(tp, ENABLE_TSS))
7741 tnapi++;
7742
7743 budget = tg3_tx_avail(tnapi);
7744
7745 /* We are running in BH-disabled context with netif_tx_lock
7746 * and TX reclaim runs via tp->napi.poll inside a software
7747 * interrupt. Furthermore, IRQ processing runs lockless so we have
7748 * no IRQ context deadlocks to worry about either. Rejoice!
7749 */
7750 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7751 if (!netif_tx_queue_stopped(txq)) {
7752 netif_tx_stop_queue(txq);
7753
7754 /* This is a hard error, log it. */
7755 netdev_err(dev,
7756 "BUG! Tx Ring full when queue awake!\n");
7757 }
7758 return NETDEV_TX_BUSY;
7759 }
7760
7761 entry = tnapi->tx_prod;
7762 base_flags = 0;
7763 if (skb->ip_summed == CHECKSUM_PARTIAL)
7764 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7765
7766 mss = skb_shinfo(skb)->gso_size;
7767 if (mss) {
7768 struct iphdr *iph;
7769 u32 tcp_opt_len, hdr_len;
7770
7771 if (skb_header_cloned(skb) &&
7772 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7773 goto drop;
7774
7775 iph = ip_hdr(skb);
7776 tcp_opt_len = tcp_optlen(skb);
7777
7778 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7779
7780 if (!skb_is_gso_v6(skb)) {
7781 iph->check = 0;
7782 iph->tot_len = htons(mss + hdr_len);
7783 }
7784
7785 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7786 tg3_flag(tp, TSO_BUG))
7787 return tg3_tso_bug(tp, skb);
7788
7789 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7790 TXD_FLAG_CPU_POST_DMA);
7791
7792 if (tg3_flag(tp, HW_TSO_1) ||
7793 tg3_flag(tp, HW_TSO_2) ||
7794 tg3_flag(tp, HW_TSO_3)) {
7795 tcp_hdr(skb)->check = 0;
7796 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7797 } else
7798 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7799 iph->daddr, 0,
7800 IPPROTO_TCP,
7801 0);
7802
7803 if (tg3_flag(tp, HW_TSO_3)) {
7804 mss |= (hdr_len & 0xc) << 12;
7805 if (hdr_len & 0x10)
7806 base_flags |= 0x00000010;
7807 base_flags |= (hdr_len & 0x3e0) << 5;
7808 } else if (tg3_flag(tp, HW_TSO_2))
7809 mss |= hdr_len << 9;
7810 else if (tg3_flag(tp, HW_TSO_1) ||
7811 tg3_asic_rev(tp) == ASIC_REV_5705) {
7812 if (tcp_opt_len || iph->ihl > 5) {
7813 int tsflags;
7814
7815 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7816 mss |= (tsflags << 11);
7817 }
7818 } else {
7819 if (tcp_opt_len || iph->ihl > 5) {
7820 int tsflags;
7821
7822 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7823 base_flags |= tsflags << 12;
7824 }
7825 }
7826 }
7827
7828 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7829 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7830 base_flags |= TXD_FLAG_JMB_PKT;
7831
7832 if (vlan_tx_tag_present(skb)) {
7833 base_flags |= TXD_FLAG_VLAN;
7834 vlan = vlan_tx_tag_get(skb);
7835 }
7836
7837 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7838 tg3_flag(tp, TX_TSTAMP_EN)) {
7839 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7840 base_flags |= TXD_FLAG_HWTSTAMP;
7841 }
7842
7843 len = skb_headlen(skb);
7844
7845 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7846 if (pci_dma_mapping_error(tp->pdev, mapping))
7847 goto drop;
7848
7849
7850 tnapi->tx_buffers[entry].skb = skb;
7851 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7852
7853 would_hit_hwbug = 0;
7854
7855 if (tg3_flag(tp, 5701_DMA_BUG))
7856 would_hit_hwbug = 1;
7857
7858 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7859 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7860 mss, vlan)) {
7861 would_hit_hwbug = 1;
7862 } else if (skb_shinfo(skb)->nr_frags > 0) {
7863 u32 tmp_mss = mss;
7864
7865 if (!tg3_flag(tp, HW_TSO_1) &&
7866 !tg3_flag(tp, HW_TSO_2) &&
7867 !tg3_flag(tp, HW_TSO_3))
7868 tmp_mss = 0;
7869
7870 /* Now loop through additional data
7871 * fragments, and queue them.
7872 */
7873 last = skb_shinfo(skb)->nr_frags - 1;
7874 for (i = 0; i <= last; i++) {
7875 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7876
7877 len = skb_frag_size(frag);
7878 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7879 len, DMA_TO_DEVICE);
7880
7881 tnapi->tx_buffers[entry].skb = NULL;
7882 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7883 mapping);
7884 if (dma_mapping_error(&tp->pdev->dev, mapping))
7885 goto dma_error;
7886
7887 if (!budget ||
7888 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7889 len, base_flags |
7890 ((i == last) ? TXD_FLAG_END : 0),
7891 tmp_mss, vlan)) {
7892 would_hit_hwbug = 1;
7893 break;
7894 }
7895 }
7896 }
7897
7898 if (would_hit_hwbug) {
7899 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7900
7901 /* If the workaround fails due to memory/mapping
7902 * failure, silently drop this packet.
7903 */
7904 entry = tnapi->tx_prod;
7905 budget = tg3_tx_avail(tnapi);
7906 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7907 base_flags, mss, vlan))
7908 goto drop_nofree;
7909 }
7910
7911 skb_tx_timestamp(skb);
7912 netdev_tx_sent_queue(txq, skb->len);
7913
7914 /* Sync BD data before updating mailbox */
7915 wmb();
7916
7917 /* Packets are ready, update Tx producer idx, both locally and on the card. */
7918 tw32_tx_mbox(tnapi->prodmbox, entry);
7919
7920 tnapi->tx_prod = entry;
7921 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7922 netif_tx_stop_queue(txq);
7923
7924 /* netif_tx_stop_queue() must be done before checking
7925 * tx index in tg3_tx_avail() below, because in
7926 * tg3_tx(), we update tx index before checking for
7927 * netif_tx_queue_stopped().
7928 */
7929 smp_mb();
7930 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7931 netif_tx_wake_queue(txq);
7932 }
7933
7934 mmiowb();
7935 return NETDEV_TX_OK;
7936
7937 dma_error:
7938 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7939 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7940 drop:
7941 dev_kfree_skb(skb);
7942 drop_nofree:
7943 tp->tx_dropped++;
7944 return NETDEV_TX_OK;
7945 }
7946
7947 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7948 {
7949 if (enable) {
7950 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7951 MAC_MODE_PORT_MODE_MASK);
7952
7953 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7954
7955 if (!tg3_flag(tp, 5705_PLUS))
7956 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7957
7958 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7959 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7960 else
7961 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7962 } else {
7963 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7964
7965 if (tg3_flag(tp, 5705_PLUS) ||
7966 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7967 tg3_asic_rev(tp) == ASIC_REV_5700)
7968 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7969 }
7970
7971 tw32(MAC_MODE, tp->mac_mode);
7972 udelay(40);
7973 }
7974
7975 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7976 {
7977 u32 val, bmcr, mac_mode, ptest = 0;
7978
7979 tg3_phy_toggle_apd(tp, false);
7980 tg3_phy_toggle_automdix(tp, false);
7981
7982 if (extlpbk && tg3_phy_set_extloopbk(tp))
7983 return -EIO;
7984
7985 bmcr = BMCR_FULLDPLX;
7986 switch (speed) {
7987 case SPEED_10:
7988 break;
7989 case SPEED_100:
7990 bmcr |= BMCR_SPEED100;
7991 break;
7992 case SPEED_1000:
7993 default:
7994 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7995 speed = SPEED_100;
7996 bmcr |= BMCR_SPEED100;
7997 } else {
7998 speed = SPEED_1000;
7999 bmcr |= BMCR_SPEED1000;
8000 }
8001 }
8002
8003 if (extlpbk) {
8004 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8005 tg3_readphy(tp, MII_CTRL1000, &val);
8006 val |= CTL1000_AS_MASTER |
8007 CTL1000_ENABLE_MASTER;
8008 tg3_writephy(tp, MII_CTRL1000, val);
8009 } else {
8010 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8011 MII_TG3_FET_PTEST_TRIM_2;
8012 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8013 }
8014 } else
8015 bmcr |= BMCR_LOOPBACK;
8016
8017 tg3_writephy(tp, MII_BMCR, bmcr);
8018
8019 /* The write needs to be flushed for the FETs */
8020 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8021 tg3_readphy(tp, MII_BMCR, &bmcr);
8022
8023 udelay(40);
8024
8025 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8026 tg3_asic_rev(tp) == ASIC_REV_5785) {
8027 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8028 MII_TG3_FET_PTEST_FRC_TX_LINK |
8029 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8030
8031 /* The write needs to be flushed for the AC131 */
8032 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8033 }
8034
8035 /* Reset to prevent losing 1st rx packet intermittently */
8036 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8037 tg3_flag(tp, 5780_CLASS)) {
8038 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8039 udelay(10);
8040 tw32_f(MAC_RX_MODE, tp->rx_mode);
8041 }
8042
8043 mac_mode = tp->mac_mode &
8044 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8045 if (speed == SPEED_1000)
8046 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8047 else
8048 mac_mode |= MAC_MODE_PORT_MODE_MII;
8049
8050 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8051 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8052
8053 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8054 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8055 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8056 mac_mode |= MAC_MODE_LINK_POLARITY;
8057
8058 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8059 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8060 }
8061
8062 tw32(MAC_MODE, mac_mode);
8063 udelay(40);
8064
8065 return 0;
8066 }
8067
8068 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8069 {
8070 struct tg3 *tp = netdev_priv(dev);
8071
8072 if (features & NETIF_F_LOOPBACK) {
8073 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8074 return;
8075
8076 spin_lock_bh(&tp->lock);
8077 tg3_mac_loopback(tp, true);
8078 netif_carrier_on(tp->dev);
8079 spin_unlock_bh(&tp->lock);
8080 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8081 } else {
8082 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8083 return;
8084
8085 spin_lock_bh(&tp->lock);
8086 tg3_mac_loopback(tp, false);
8087 /* Force link status check */
8088 tg3_setup_phy(tp, true);
8089 spin_unlock_bh(&tp->lock);
8090 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8091 }
8092 }
8093
8094 static netdev_features_t tg3_fix_features(struct net_device *dev,
8095 netdev_features_t features)
8096 {
8097 struct tg3 *tp = netdev_priv(dev);
8098
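/* TSO is not supported with a jumbo MTU on the 5780 class, so mask
* off all TSO feature bits whenever the MTU exceeds ETH_DATA_LEN.
*/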
8099 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8100 features &= ~NETIF_F_ALL_TSO;
8101
8102 return features;
8103 }
8104
8105 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8106 {
8107 netdev_features_t changed = dev->features ^ features;
8108
8109 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8110 tg3_set_loopback(dev, features);
8111
8112 return 0;
8113 }
8114
8115 static void tg3_rx_prodring_free(struct tg3 *tp,
8116 struct tg3_rx_prodring_set *tpr)
8117 {
8118 int i;
8119
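/* Per-vector producer rings only own the buffers sitting between
* their consumer and producer indices, so free just that range.
* The primary (true hardware) ring below owns every slot.
*/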
8120 if (tpr != &tp->napi[0].prodring) {
8121 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8122 i = (i + 1) & tp->rx_std_ring_mask)
8123 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8124 tp->rx_pkt_map_sz);
8125
8126 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8127 for (i = tpr->rx_jmb_cons_idx;
8128 i != tpr->rx_jmb_prod_idx;
8129 i = (i + 1) & tp->rx_jmb_ring_mask) {
8130 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8131 TG3_RX_JMB_MAP_SZ);
8132 }
8133 }
8134
8135 return;
8136 }
8137
8138 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8139 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8140 tp->rx_pkt_map_sz);
8141
8142 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8143 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8144 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8145 TG3_RX_JMB_MAP_SZ);
8146 }
8147 }
8148
8149 /* Initialize rx rings for packet processing.
8150 *
8151 * The chip has been shut down and the driver detached from
8152 * the networking core, so no interrupts or new tx packets will
8153 * end up in the driver. tp->{tx,}lock are held and thus
8154 * we may not sleep.
8155 */
8156 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8157 struct tg3_rx_prodring_set *tpr)
8158 {
8159 u32 i, rx_pkt_dma_sz;
8160
8161 tpr->rx_std_cons_idx = 0;
8162 tpr->rx_std_prod_idx = 0;
8163 tpr->rx_jmb_cons_idx = 0;
8164 tpr->rx_jmb_prod_idx = 0;
8165
8166 if (tpr != &tp->napi[0].prodring) {
8167 memset(&tpr->rx_std_buffers[0], 0,
8168 TG3_RX_STD_BUFF_RING_SIZE(tp));
8169 if (tpr->rx_jmb_buffers)
8170 memset(&tpr->rx_jmb_buffers[0], 0,
8171 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8172 goto done;
8173 }
8174
8175 /* Zero out all descriptors. */
8176 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8177
8178 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8179 if (tg3_flag(tp, 5780_CLASS) &&
8180 tp->dev->mtu > ETH_DATA_LEN)
8181 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8182 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8183
8184 /* Initialize invariants of the rings, we only set this
8185 * stuff once. This works because the card does not
8186 * write into the rx buffer posting rings.
8187 */
8188 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8189 struct tg3_rx_buffer_desc *rxd;
8190
8191 rxd = &tpr->rx_std[i];
8192 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8193 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8194 rxd->opaque = (RXD_OPAQUE_RING_STD |
8195 (i << RXD_OPAQUE_INDEX_SHIFT));
8196 }
8197
8198 /* Now allocate fresh SKBs for each rx ring. */
8199 for (i = 0; i < tp->rx_pending; i++) {
8200 unsigned int frag_size;
8201
8202 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8203 &frag_size) < 0) {
8204 netdev_warn(tp->dev,
8205 "Using a smaller RX standard ring. Only "
8206 "%d out of %d buffers were allocated "
8207 "successfully\n", i, tp->rx_pending);
8208 if (i == 0)
8209 goto initfail;
8210 tp->rx_pending = i;
8211 break;
8212 }
8213 }
8214
8215 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8216 goto done;
8217
8218 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8219
8220 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8221 goto done;
8222
8223 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8224 struct tg3_rx_buffer_desc *rxd;
8225
8226 rxd = &tpr->rx_jmb[i].std;
8227 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8228 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8229 RXD_FLAG_JUMBO;
8230 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8231 (i << RXD_OPAQUE_INDEX_SHIFT));
8232 }
8233
8234 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8235 unsigned int frag_size;
8236
8237 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8238 &frag_size) < 0) {
8239 netdev_warn(tp->dev,
8240 "Using a smaller RX jumbo ring. Only %d "
8241 "out of %d buffers were allocated "
8242 "successfully\n", i, tp->rx_jumbo_pending);
8243 if (i == 0)
8244 goto initfail;
8245 tp->rx_jumbo_pending = i;
8246 break;
8247 }
8248 }
8249
8250 done:
8251 return 0;
8252
8253 initfail:
8254 tg3_rx_prodring_free(tp, tpr);
8255 return -ENOMEM;
8256 }
8257
8258 static void tg3_rx_prodring_fini(struct tg3 *tp,
8259 struct tg3_rx_prodring_set *tpr)
8260 {
8261 kfree(tpr->rx_std_buffers);
8262 tpr->rx_std_buffers = NULL;
8263 kfree(tpr->rx_jmb_buffers);
8264 tpr->rx_jmb_buffers = NULL;
8265 if (tpr->rx_std) {
8266 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8267 tpr->rx_std, tpr->rx_std_mapping);
8268 tpr->rx_std = NULL;
8269 }
8270 if (tpr->rx_jmb) {
8271 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8272 tpr->rx_jmb, tpr->rx_jmb_mapping);
8273 tpr->rx_jmb = NULL;
8274 }
8275 }
8276
8277 static int tg3_rx_prodring_init(struct tg3 *tp,
8278 struct tg3_rx_prodring_set *tpr)
8279 {
8280 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8281 GFP_KERNEL);
8282 if (!tpr->rx_std_buffers)
8283 return -ENOMEM;
8284
8285 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8286 TG3_RX_STD_RING_BYTES(tp),
8287 &tpr->rx_std_mapping,
8288 GFP_KERNEL);
8289 if (!tpr->rx_std)
8290 goto err_out;
8291
8292 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8293 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8294 GFP_KERNEL);
8295 if (!tpr->rx_jmb_buffers)
8296 goto err_out;
8297
8298 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8299 TG3_RX_JMB_RING_BYTES(tp),
8300 &tpr->rx_jmb_mapping,
8301 GFP_KERNEL);
8302 if (!tpr->rx_jmb)
8303 goto err_out;
8304 }
8305
8306 return 0;
8307
8308 err_out:
8309 tg3_rx_prodring_fini(tp, tpr);
8310 return -ENOMEM;
8311 }
8312
8313 /* Free up pending packets in all rx/tx rings.
8314 *
8315 * The chip has been shut down and the driver detached from
8316 * the networking core, so no interrupts or new tx packets will
8317 * end up in the driver. tp->{tx,}lock is not held and we are not
8318 * in an interrupt context and thus may sleep.
8319 */
8320 static void tg3_free_rings(struct tg3 *tp)
8321 {
8322 int i, j;
8323
8324 for (j = 0; j < tp->irq_cnt; j++) {
8325 struct tg3_napi *tnapi = &tp->napi[j];
8326
8327 tg3_rx_prodring_free(tp, &tnapi->prodring);
8328
8329 if (!tnapi->tx_buffers)
8330 continue;
8331
8332 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8333 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8334
8335 if (!skb)
8336 continue;
8337
8338 tg3_tx_skb_unmap(tnapi, i,
8339 skb_shinfo(skb)->nr_frags - 1);
8340
8341 dev_kfree_skb_any(skb);
8342 }
8343 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8344 }
8345 }
8346
8347 /* Initialize tx/rx rings for packet processing.
8348 *
8349 * The chip has been shut down and the driver detached from
8350 * the networking core, so no interrupts or new tx packets will
8351 * end up in the driver. tp->{tx,}lock are held and thus
8352 * we may not sleep.
8353 */
8354 static int tg3_init_rings(struct tg3 *tp)
8355 {
8356 int i;
8357
8358 /* Free up all the SKBs. */
8359 tg3_free_rings(tp);
8360
8361 for (i = 0; i < tp->irq_cnt; i++) {
8362 struct tg3_napi *tnapi = &tp->napi[i];
8363
8364 tnapi->last_tag = 0;
8365 tnapi->last_irq_tag = 0;
8366 tnapi->hw_status->status = 0;
8367 tnapi->hw_status->status_tag = 0;
8368 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8369
8370 tnapi->tx_prod = 0;
8371 tnapi->tx_cons = 0;
8372 if (tnapi->tx_ring)
8373 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8374
8375 tnapi->rx_rcb_ptr = 0;
8376 if (tnapi->rx_rcb)
8377 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8378
8379 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8380 tg3_free_rings(tp);
8381 return -ENOMEM;
8382 }
8383 }
8384
8385 return 0;
8386 }
8387
8388 static void tg3_mem_tx_release(struct tg3 *tp)
8389 {
8390 int i;
8391
8392 for (i = 0; i < tp->irq_max; i++) {
8393 struct tg3_napi *tnapi = &tp->napi[i];
8394
8395 if (tnapi->tx_ring) {
8396 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8397 tnapi->tx_ring, tnapi->tx_desc_mapping);
8398 tnapi->tx_ring = NULL;
8399 }
8400
8401 kfree(tnapi->tx_buffers);
8402 tnapi->tx_buffers = NULL;
8403 }
8404 }
8405
8406 static int tg3_mem_tx_acquire(struct tg3 *tp)
8407 {
8408 int i;
8409 struct tg3_napi *tnapi = &tp->napi[0];
8410
8411 /* If multivector TSS is enabled, vector 0 does not handle
8412 * tx interrupts. Don't allocate any resources for it.
8413 */
8414 if (tg3_flag(tp, ENABLE_TSS))
8415 tnapi++;
8416
8417 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8418 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8419 TG3_TX_RING_SIZE, GFP_KERNEL);
8420 if (!tnapi->tx_buffers)
8421 goto err_out;
8422
8423 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8424 TG3_TX_RING_BYTES,
8425 &tnapi->tx_desc_mapping,
8426 GFP_KERNEL);
8427 if (!tnapi->tx_ring)
8428 goto err_out;
8429 }
8430
8431 return 0;
8432
8433 err_out:
8434 tg3_mem_tx_release(tp);
8435 return -ENOMEM;
8436 }
8437
8438 static void tg3_mem_rx_release(struct tg3 *tp)
8439 {
8440 int i;
8441
8442 for (i = 0; i < tp->irq_max; i++) {
8443 struct tg3_napi *tnapi = &tp->napi[i];
8444
8445 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8446
8447 if (!tnapi->rx_rcb)
8448 continue;
8449
8450 dma_free_coherent(&tp->pdev->dev,
8451 TG3_RX_RCB_RING_BYTES(tp),
8452 tnapi->rx_rcb,
8453 tnapi->rx_rcb_mapping);
8454 tnapi->rx_rcb = NULL;
8455 }
8456 }
8457
8458 static int tg3_mem_rx_acquire(struct tg3 *tp)
8459 {
8460 unsigned int i, limit;
8461
8462 limit = tp->rxq_cnt;
8463
8464 /* If RSS is enabled, we need a (dummy) producer ring
8465 * set on vector zero. This is the true hw prodring.
8466 */
8467 if (tg3_flag(tp, ENABLE_RSS))
8468 limit++;
8469
8470 for (i = 0; i < limit; i++) {
8471 struct tg3_napi *tnapi = &tp->napi[i];
8472
8473 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8474 goto err_out;
8475
8476 /* If multivector RSS is enabled, vector 0
8477 * does not handle rx or tx interrupts.
8478 * Don't allocate any resources for it.
8479 */
8480 if (!i && tg3_flag(tp, ENABLE_RSS))
8481 continue;
8482
8483 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8484 TG3_RX_RCB_RING_BYTES(tp),
8485 &tnapi->rx_rcb_mapping,
8486 GFP_KERNEL | __GFP_ZERO);
8487 if (!tnapi->rx_rcb)
8488 goto err_out;
8489 }
8490
8491 return 0;
8492
8493 err_out:
8494 tg3_mem_rx_release(tp);
8495 return -ENOMEM;
8496 }
8497
8498 /*
8499 * Must not be invoked with interrupt sources disabled and
8500 * the hardware shut down.
8501 */
8502 static void tg3_free_consistent(struct tg3 *tp)
8503 {
8504 int i;
8505
8506 for (i = 0; i < tp->irq_cnt; i++) {
8507 struct tg3_napi *tnapi = &tp->napi[i];
8508
8509 if (tnapi->hw_status) {
8510 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8511 tnapi->hw_status,
8512 tnapi->status_mapping);
8513 tnapi->hw_status = NULL;
8514 }
8515 }
8516
8517 tg3_mem_rx_release(tp);
8518 tg3_mem_tx_release(tp);
8519
8520 if (tp->hw_stats) {
8521 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8522 tp->hw_stats, tp->stats_mapping);
8523 tp->hw_stats = NULL;
8524 }
8525 }
8526
8527 /*
8528 * Must not be invoked with interrupt sources disabled and
8529 * the hardware shut down. Can sleep.
8530 */
8531 static int tg3_alloc_consistent(struct tg3 *tp)
8532 {
8533 int i;
8534
8535 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8536 sizeof(struct tg3_hw_stats),
8537 &tp->stats_mapping,
8538 GFP_KERNEL | __GFP_ZERO);
8539 if (!tp->hw_stats)
8540 goto err_out;
8541
8542 for (i = 0; i < tp->irq_cnt; i++) {
8543 struct tg3_napi *tnapi = &tp->napi[i];
8544 struct tg3_hw_status *sblk;
8545
8546 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8547 TG3_HW_STATUS_SIZE,
8548 &tnapi->status_mapping,
8549 GFP_KERNEL | __GFP_ZERO);
8550 if (!tnapi->hw_status)
8551 goto err_out;
8552
8553 sblk = tnapi->hw_status;
8554
8555 if (tg3_flag(tp, ENABLE_RSS)) {
8556 u16 *prodptr = NULL;
8557
8558 /*
8559 * When RSS is enabled, the status block format changes
8560 * slightly. The "rx_jumbo_consumer", "reserved",
8561 * and "rx_mini_consumer" members get mapped to the
8562 * other three rx return ring producer indexes.
8563 */
8564 switch (i) {
8565 case 1:
8566 prodptr = &sblk->idx[0].rx_producer;
8567 break;
8568 case 2:
8569 prodptr = &sblk->rx_jumbo_consumer;
8570 break;
8571 case 3:
8572 prodptr = &sblk->reserved;
8573 break;
8574 case 4:
8575 prodptr = &sblk->rx_mini_consumer;
8576 break;
8577 }
8578 tnapi->rx_rcb_prod_idx = prodptr;
8579 } else {
8580 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8581 }
8582 }
8583
8584 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8585 goto err_out;
8586
8587 return 0;
8588
8589 err_out:
8590 tg3_free_consistent(tp);
8591 return -ENOMEM;
8592 }
8593
8594 #define MAX_WAIT_CNT 1000
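/* The stop/abort loops below poll every 100 us, so MAX_WAIT_CNT
* bounds each wait at roughly 100 ms.
*/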
8595
8596 /* To stop a block, clear the enable bit and poll till it
8597 * clears. tp->lock is held.
8598 */
8599 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8600 {
8601 unsigned int i;
8602 u32 val;
8603
8604 if (tg3_flag(tp, 5705_PLUS)) {
8605 switch (ofs) {
8606 case RCVLSC_MODE:
8607 case DMAC_MODE:
8608 case MBFREE_MODE:
8609 case BUFMGR_MODE:
8610 case MEMARB_MODE:
8611 /* We can't enable/disable these bits of the
8612 * 5705/5750, just say success.
8613 */
8614 return 0;
8615
8616 default:
8617 break;
8618 }
8619 }
8620
8621 val = tr32(ofs);
8622 val &= ~enable_bit;
8623 tw32_f(ofs, val);
8624
8625 for (i = 0; i < MAX_WAIT_CNT; i++) {
8626 if (pci_channel_offline(tp->pdev)) {
8627 dev_err(&tp->pdev->dev,
8628 "tg3_stop_block device offline, "
8629 "ofs=%lx enable_bit=%x\n",
8630 ofs, enable_bit);
8631 return -ENODEV;
8632 }
8633
8634 udelay(100);
8635 val = tr32(ofs);
8636 if ((val & enable_bit) == 0)
8637 break;
8638 }
8639
8640 if (i == MAX_WAIT_CNT && !silent) {
8641 dev_err(&tp->pdev->dev,
8642 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8643 ofs, enable_bit);
8644 return -ENODEV;
8645 }
8646
8647 return 0;
8648 }
8649
8650 /* tp->lock is held. */
8651 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8652 {
8653 int i, err;
8654
8655 tg3_disable_ints(tp);
8656
8657 if (pci_channel_offline(tp->pdev)) {
8658 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8659 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8660 err = -ENODEV;
8661 goto err_no_dev;
8662 }
8663
8664 tp->rx_mode &= ~RX_MODE_ENABLE;
8665 tw32_f(MAC_RX_MODE, tp->rx_mode);
8666 udelay(10);
8667
8668 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8669 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8670 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8671 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8672 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8673 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8674
8675 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8676 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8677 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8678 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8679 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8680 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8681 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8682
8683 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8684 tw32_f(MAC_MODE, tp->mac_mode);
8685 udelay(40);
8686
8687 tp->tx_mode &= ~TX_MODE_ENABLE;
8688 tw32_f(MAC_TX_MODE, tp->tx_mode);
8689
8690 for (i = 0; i < MAX_WAIT_CNT; i++) {
8691 udelay(100);
8692 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8693 break;
8694 }
8695 if (i >= MAX_WAIT_CNT) {
8696 dev_err(&tp->pdev->dev,
8697 "%s timed out, TX_MODE_ENABLE will not clear "
8698 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8699 err |= -ENODEV;
8700 }
8701
8702 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8703 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8704 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8705
8706 tw32(FTQ_RESET, 0xffffffff);
8707 tw32(FTQ_RESET, 0x00000000);
8708
8709 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8710 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8711
8712 err_no_dev:
8713 for (i = 0; i < tp->irq_cnt; i++) {
8714 struct tg3_napi *tnapi = &tp->napi[i];
8715 if (tnapi->hw_status)
8716 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8717 }
8718
8719 return err;
8720 }
8721
8722 /* Save PCI command register before chip reset */
8723 static void tg3_save_pci_state(struct tg3 *tp)
8724 {
8725 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8726 }
8727
8728 /* Restore PCI state after chip reset */
8729 static void tg3_restore_pci_state(struct tg3 *tp)
8730 {
8731 u32 val;
8732
8733 /* Re-enable indirect register accesses. */
8734 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8735 tp->misc_host_ctrl);
8736
8737 /* Set MAX PCI retry to zero. */
8738 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8739 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8740 tg3_flag(tp, PCIX_MODE))
8741 val |= PCISTATE_RETRY_SAME_DMA;
8742 /* Allow reads and writes to the APE register and memory space. */
8743 if (tg3_flag(tp, ENABLE_APE))
8744 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8745 PCISTATE_ALLOW_APE_SHMEM_WR |
8746 PCISTATE_ALLOW_APE_PSPACE_WR;
8747 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8748
8749 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8750
8751 if (!tg3_flag(tp, PCI_EXPRESS)) {
8752 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8753 tp->pci_cacheline_sz);
8754 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8755 tp->pci_lat_timer);
8756 }
8757
8758 /* Make sure PCI-X relaxed ordering bit is clear. */
8759 if (tg3_flag(tp, PCIX_MODE)) {
8760 u16 pcix_cmd;
8761
8762 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8763 &pcix_cmd);
8764 pcix_cmd &= ~PCI_X_CMD_ERO;
8765 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8766 pcix_cmd);
8767 }
8768
8769 if (tg3_flag(tp, 5780_CLASS)) {
8770
8771 /* Chip reset on 5780 will reset MSI enable bit,
8772 * so need to restore it.
8773 */
8774 if (tg3_flag(tp, USING_MSI)) {
8775 u16 ctrl;
8776
8777 pci_read_config_word(tp->pdev,
8778 tp->msi_cap + PCI_MSI_FLAGS,
8779 &ctrl);
8780 pci_write_config_word(tp->pdev,
8781 tp->msi_cap + PCI_MSI_FLAGS,
8782 ctrl | PCI_MSI_FLAGS_ENABLE);
8783 val = tr32(MSGINT_MODE);
8784 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8785 }
8786 }
8787 }
8788
8789 /* tp->lock is held. */
8790 static int tg3_chip_reset(struct tg3 *tp)
8791 {
8792 u32 val;
8793 void (*write_op)(struct tg3 *, u32, u32);
8794 int i, err;
8795
8796 tg3_nvram_lock(tp);
8797
8798 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8799
8800 /* No matching tg3_nvram_unlock() after this because
8801 * chip reset below will undo the nvram lock.
8802 */
8803 tp->nvram_lock_cnt = 0;
8804
8805 /* GRC_MISC_CFG core clock reset will clear the memory
8806 * enable bit in PCI register 4 and the MSI enable bit
8807 * on some chips, so we save relevant registers here.
8808 */
8809 tg3_save_pci_state(tp);
8810
8811 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8812 tg3_flag(tp, 5755_PLUS))
8813 tw32(GRC_FASTBOOT_PC, 0);
8814
8815 /*
8816 * We must avoid the readl() that normally takes place.
8817 * It locks machines, causes machine checks, and other
8818 * fun things. So, temporarily disable the 5701
8819 * hardware workaround, while we do the reset.
8820 */
8821 write_op = tp->write32;
8822 if (write_op == tg3_write_flush_reg32)
8823 tp->write32 = tg3_write32;
8824
8825 /* Prevent the irq handler from reading or writing PCI registers
8826 * during chip reset when the memory enable bit in the PCI command
8827 * register may be cleared. The chip does not generate interrupt
8828 * at this time, but the irq handler may still be called due to irq
8829 * sharing or irqpoll.
8830 */
8831 tg3_flag_set(tp, CHIP_RESETTING);
8832 for (i = 0; i < tp->irq_cnt; i++) {
8833 struct tg3_napi *tnapi = &tp->napi[i];
8834 if (tnapi->hw_status) {
8835 tnapi->hw_status->status = 0;
8836 tnapi->hw_status->status_tag = 0;
8837 }
8838 tnapi->last_tag = 0;
8839 tnapi->last_irq_tag = 0;
8840 }
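/* Make the CHIP_RESETTING flag and the zeroed status blocks
* visible to the IRQ handlers before they are synchronized below.
*/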
8841 smp_mb();
8842
8843 for (i = 0; i < tp->irq_cnt; i++)
8844 synchronize_irq(tp->napi[i].irq_vec);
8845
8846 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8847 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8848 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8849 }
8850
8851 /* do the reset */
8852 val = GRC_MISC_CFG_CORECLK_RESET;
8853
8854 if (tg3_flag(tp, PCI_EXPRESS)) {
8855 /* Force PCIe 1.0a mode */
8856 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8857 !tg3_flag(tp, 57765_PLUS) &&
8858 tr32(TG3_PCIE_PHY_TSTCTL) ==
8859 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8860 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8861
8862 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8863 tw32(GRC_MISC_CFG, (1 << 29));
8864 val |= (1 << 29);
8865 }
8866 }
8867
8868 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8869 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8870 tw32(GRC_VCPU_EXT_CTRL,
8871 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8872 }
8873
8874 /* Manage gphy power for all CPMU-absent PCIe devices. */
8875 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8876 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8877
8878 tw32(GRC_MISC_CFG, val);
8879
8880 /* restore 5701 hardware bug workaround write method */
8881 tp->write32 = write_op;
8882
8883 /* Unfortunately, we have to delay before the PCI read back.
8884 * Some 575X chips will not even respond to a PCI cfg access
8885 * when the reset command is given to the chip.
8886 *
8887 * How do these hardware designers expect things to work
8888 * properly if the PCI write is posted for a long period
8889 * of time? Some means of reading a register back is always
8890 * needed to push out the posted write that actually
8891 * performs the reset.
8892 *
8893 * For most tg3 variants the trick below was working.
8894 * Ho hum...
8895 */
8896 udelay(120);
8897
8898 /* Flush PCI posted writes. The normal MMIO registers
8899 * are inaccessible at this time so this is the only
8900 * way to do this reliably (actually, this is no longer
8901 * the case, see above). I tried to use indirect
8902 * register read/write but this upset some 5701 variants.
8903 */
8904 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8905
8906 udelay(120);
8907
8908 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8909 u16 val16;
8910
8911 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8912 int j;
8913 u32 cfg_val;
8914
8915 /* Wait 500 ms (5000 x 100 us) for link training to complete. */
8916 for (j = 0; j < 5000; j++)
8917 udelay(100);
8918
8919 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8920 pci_write_config_dword(tp->pdev, 0xc4,
8921 cfg_val | (1 << 15));
8922 }
8923
8924 /* Clear the "no snoop" and "relaxed ordering" bits. */
8925 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8926 /*
8927 * Older PCIe devices only support the 128 byte
8928 * MPS setting. Enforce the restriction.
8929 */
8930 if (!tg3_flag(tp, CPMU_PRESENT))
8931 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8932 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8933
8934 /* Clear error status */
8935 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8936 PCI_EXP_DEVSTA_CED |
8937 PCI_EXP_DEVSTA_NFED |
8938 PCI_EXP_DEVSTA_FED |
8939 PCI_EXP_DEVSTA_URD);
8940 }
8941
8942 tg3_restore_pci_state(tp);
8943
8944 tg3_flag_clear(tp, CHIP_RESETTING);
8945 tg3_flag_clear(tp, ERROR_PROCESSED);
8946
8947 val = 0;
8948 if (tg3_flag(tp, 5780_CLASS))
8949 val = tr32(MEMARB_MODE);
8950 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8951
8952 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8953 tg3_stop_fw(tp);
8954 tw32(0x5000, 0x400);
8955 }
8956
8957 if (tg3_flag(tp, IS_SSB_CORE)) {
8958 /*
8959 * BCM4785: In order to avoid repercussions from using
8960 * potentially defective internal ROM, stop the Rx RISC CPU,
8961 * which is not required for normal operation.
8962 */
8963 tg3_stop_fw(tp);
8964 tg3_halt_cpu(tp, RX_CPU_BASE);
8965 }
8966
8967 err = tg3_poll_fw(tp);
8968 if (err)
8969 return err;
8970
8971 tw32(GRC_MODE, tp->grc_mode);
8972
8973 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8974 val = tr32(0xc4);
8975
8976 tw32(0xc4, val | (1 << 15));
8977 }
8978
8979 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8980 tg3_asic_rev(tp) == ASIC_REV_5705) {
8981 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8982 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8983 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8984 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8985 }
8986
8987 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8988 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8989 val = tp->mac_mode;
8990 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8991 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8992 val = tp->mac_mode;
8993 } else
8994 val = 0;
8995
8996 tw32_f(MAC_MODE, val);
8997 udelay(40);
8998
8999 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9000
9001 tg3_mdio_start(tp);
9002
9003 if (tg3_flag(tp, PCI_EXPRESS) &&
9004 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9005 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9006 !tg3_flag(tp, 57765_PLUS)) {
9007 val = tr32(0x7c00);
9008
9009 tw32(0x7c00, val | (1 << 25));
9010 }
9011
9012 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9013 val = tr32(TG3_CPMU_CLCK_ORIDE);
9014 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9015 }
9016
9017 /* Reprobe ASF enable state. */
9018 tg3_flag_clear(tp, ENABLE_ASF);
9019 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9020 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9021
9022 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9023 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9024 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9025 u32 nic_cfg;
9026
9027 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9028 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9029 tg3_flag_set(tp, ENABLE_ASF);
9030 tp->last_event_jiffies = jiffies;
9031 if (tg3_flag(tp, 5750_PLUS))
9032 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9033
9034 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9035 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9036 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9037 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9038 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9039 }
9040 }
9041
9042 return 0;
9043 }
9044
9045 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9046 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9047
9048 /* tp->lock is held. */
9049 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9050 {
9051 int err;
9052
9053 tg3_stop_fw(tp);
9054
9055 tg3_write_sig_pre_reset(tp, kind);
9056
9057 tg3_abort_hw(tp, silent);
9058 err = tg3_chip_reset(tp);
9059
9060 __tg3_set_mac_addr(tp, false);
9061
9062 tg3_write_sig_legacy(tp, kind);
9063 tg3_write_sig_post_reset(tp, kind);
9064
9065 if (tp->hw_stats) {
9066 /* Save the stats across chip resets... */
9067 tg3_get_nstats(tp, &tp->net_stats_prev);
9068 tg3_get_estats(tp, &tp->estats_prev);
9069
9070 /* And make sure the next sample is new data */
9071 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9072 }
9073
9074 if (err)
9075 return err;
9076
9077 return 0;
9078 }
9079
9080 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9081 {
9082 struct tg3 *tp = netdev_priv(dev);
9083 struct sockaddr *addr = p;
9084 int err = 0;
9085 bool skip_mac_1 = false;
9086
9087 if (!is_valid_ether_addr(addr->sa_data))
9088 return -EADDRNOTAVAIL;
9089
9090 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9091
9092 if (!netif_running(dev))
9093 return 0;
9094
9095 if (tg3_flag(tp, ENABLE_ASF)) {
9096 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9097
9098 addr0_high = tr32(MAC_ADDR_0_HIGH);
9099 addr0_low = tr32(MAC_ADDR_0_LOW);
9100 addr1_high = tr32(MAC_ADDR_1_HIGH);
9101 addr1_low = tr32(MAC_ADDR_1_LOW);
9102
9103 /* Skip MAC addr 1 if ASF is using it. */
9104 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9105 !(addr1_high == 0 && addr1_low == 0))
9106 skip_mac_1 = true;
9107 }
9108 spin_lock_bh(&tp->lock);
9109 __tg3_set_mac_addr(tp, skip_mac_1);
9110 spin_unlock_bh(&tp->lock);
9111
9112 return err;
9113 }
9114
9115 /* tp->lock is held. */
9116 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9117 dma_addr_t mapping, u32 maxlen_flags,
9118 u32 nic_addr)
9119 {
9120 tg3_write_mem(tp,
9121 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9122 ((u64) mapping >> 32));
9123 tg3_write_mem(tp,
9124 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9125 ((u64) mapping & 0xffffffff));
9126 tg3_write_mem(tp,
9127 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9128 maxlen_flags);
9129
9130 if (!tg3_flag(tp, 5705_PLUS))
9131 tg3_write_mem(tp,
9132 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9133 nic_addr);
9134 }
9135
9136
9137 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9138 {
9139 int i = 0;
9140
9141 if (!tg3_flag(tp, ENABLE_TSS)) {
9142 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9143 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9144 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9145 } else {
9146 tw32(HOSTCC_TXCOL_TICKS, 0);
9147 tw32(HOSTCC_TXMAX_FRAMES, 0);
9148 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9149
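/* The per-vector coalescing registers are spaced 0x18 bytes apart,
* starting at the *_VEC1 addresses, so vector i + 1 is programmed
* at base + i * 0x18.
*/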
9150 for (; i < tp->txq_cnt; i++) {
9151 u32 reg;
9152
9153 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9154 tw32(reg, ec->tx_coalesce_usecs);
9155 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9156 tw32(reg, ec->tx_max_coalesced_frames);
9157 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9158 tw32(reg, ec->tx_max_coalesced_frames_irq);
9159 }
9160 }
9161
9162 for (; i < tp->irq_max - 1; i++) {
9163 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9164 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9165 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9166 }
9167 }
9168
9169 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9170 {
9171 int i = 0;
9172 u32 limit = tp->rxq_cnt;
9173
9174 if (!tg3_flag(tp, ENABLE_RSS)) {
9175 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9176 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9177 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9178 limit--;
9179 } else {
9180 tw32(HOSTCC_RXCOL_TICKS, 0);
9181 tw32(HOSTCC_RXMAX_FRAMES, 0);
9182 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9183 }
9184
9185 for (; i < limit; i++) {
9186 u32 reg;
9187
9188 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9189 tw32(reg, ec->rx_coalesce_usecs);
9190 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9191 tw32(reg, ec->rx_max_coalesced_frames);
9192 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9193 tw32(reg, ec->rx_max_coalesced_frames_irq);
9194 }
9195
9196 for (; i < tp->irq_max - 1; i++) {
9197 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9198 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9199 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9200 }
9201 }
9202
9203 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9204 {
9205 tg3_coal_tx_init(tp, ec);
9206 tg3_coal_rx_init(tp, ec);
9207
9208 if (!tg3_flag(tp, 5705_PLUS)) {
9209 u32 val = ec->stats_block_coalesce_usecs;
9210
9211 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9212 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9213
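/* A value of zero in HOSTCC_STAT_COAL_TICKS disables the periodic
* statistics block DMA, so suppress it while the link is down.
*/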
9214 if (!tp->link_up)
9215 val = 0;
9216
9217 tw32(HOSTCC_STAT_COAL_TICKS, val);
9218 }
9219 }
9220
9221 /* tp->lock is held. */
9222 static void tg3_rings_reset(struct tg3 *tp)
9223 {
9224 int i;
9225 u32 stblk, txrcb, rxrcb, limit;
9226 struct tg3_napi *tnapi = &tp->napi[0];
9227
9228 /* Disable all transmit rings but the first. */
9229 if (!tg3_flag(tp, 5705_PLUS))
9230 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9231 else if (tg3_flag(tp, 5717_PLUS))
9232 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9233 else if (tg3_flag(tp, 57765_CLASS) ||
9234 tg3_asic_rev(tp) == ASIC_REV_5762)
9235 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9236 else
9237 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9238
9239 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9240 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9241 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9242 BDINFO_FLAGS_DISABLED);
9243
9244
9245 /* Disable all receive return rings but the first. */
9246 if (tg3_flag(tp, 5717_PLUS))
9247 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9248 else if (!tg3_flag(tp, 5705_PLUS))
9249 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9250 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9251 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9252 tg3_flag(tp, 57765_CLASS))
9253 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9254 else
9255 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9256
9257 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9258 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9259 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9260 BDINFO_FLAGS_DISABLED);
9261
9262 /* Disable interrupts */
9263 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9264 tp->napi[0].chk_msi_cnt = 0;
9265 tp->napi[0].last_rx_cons = 0;
9266 tp->napi[0].last_tx_cons = 0;
9267
9268 /* Zero mailbox registers. */
9269 if (tg3_flag(tp, SUPPORT_MSIX)) {
9270 for (i = 1; i < tp->irq_max; i++) {
9271 tp->napi[i].tx_prod = 0;
9272 tp->napi[i].tx_cons = 0;
9273 if (tg3_flag(tp, ENABLE_TSS))
9274 tw32_mailbox(tp->napi[i].prodmbox, 0);
9275 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9276 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9277 tp->napi[i].chk_msi_cnt = 0;
9278 tp->napi[i].last_rx_cons = 0;
9279 tp->napi[i].last_tx_cons = 0;
9280 }
9281 if (!tg3_flag(tp, ENABLE_TSS))
9282 tw32_mailbox(tp->napi[0].prodmbox, 0);
9283 } else {
9284 tp->napi[0].tx_prod = 0;
9285 tp->napi[0].tx_cons = 0;
9286 tw32_mailbox(tp->napi[0].prodmbox, 0);
9287 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9288 }
9289
9290 /* Make sure the NIC-based send BD rings are disabled. */
9291 if (!tg3_flag(tp, 5705_PLUS)) {
9292 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9293 for (i = 0; i < 16; i++)
9294 tw32_tx_mbox(mbox + i * 8, 0);
9295 }
9296
9297 txrcb = NIC_SRAM_SEND_RCB;
9298 rxrcb = NIC_SRAM_RCV_RET_RCB;
9299
9300 /* Clear status block in ram. */
9301 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9302
9303 /* Set status block DMA address */
9304 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9305 ((u64) tnapi->status_mapping >> 32));
9306 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9307 ((u64) tnapi->status_mapping & 0xffffffff));
9308
9309 if (tnapi->tx_ring) {
9310 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9311 (TG3_TX_RING_SIZE <<
9312 BDINFO_FLAGS_MAXLEN_SHIFT),
9313 NIC_SRAM_TX_BUFFER_DESC);
9314 txrcb += TG3_BDINFO_SIZE;
9315 }
9316
9317 if (tnapi->rx_rcb) {
9318 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9319 (tp->rx_ret_ring_mask + 1) <<
9320 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9321 rxrcb += TG3_BDINFO_SIZE;
9322 }
9323
9324 stblk = HOSTCC_STATBLCK_RING1;
9325
9326 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9327 u64 mapping = (u64)tnapi->status_mapping;
9328 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9329 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9330
9331 /* Clear status block in ram. */
9332 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9333
9334 if (tnapi->tx_ring) {
9335 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9336 (TG3_TX_RING_SIZE <<
9337 BDINFO_FLAGS_MAXLEN_SHIFT),
9338 NIC_SRAM_TX_BUFFER_DESC);
9339 txrcb += TG3_BDINFO_SIZE;
9340 }
9341
9342 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9343 ((tp->rx_ret_ring_mask + 1) <<
9344 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9345
9346 stblk += 8;
9347 rxrcb += TG3_BDINFO_SIZE;
9348 }
9349 }
9350
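/* Program the RX buffer descriptor replenish thresholds. These
* appear to govern when the chip pulls fresh RX BDs from the host
* rings, bounded by the per-chip on-die BD cache size below.
*/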
9351 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9352 {
9353 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9354
9355 if (!tg3_flag(tp, 5750_PLUS) ||
9356 tg3_flag(tp, 5780_CLASS) ||
9357 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9358 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9359 tg3_flag(tp, 57765_PLUS))
9360 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9361 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9362 tg3_asic_rev(tp) == ASIC_REV_5787)
9363 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9364 else
9365 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9366
9367 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9368 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9369
9370 val = min(nic_rep_thresh, host_rep_thresh);
9371 tw32(RCVBDI_STD_THRESH, val);
9372
9373 if (tg3_flag(tp, 57765_PLUS))
9374 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9375
9376 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9377 return;
9378
9379 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9380
9381 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9382
9383 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9384 tw32(RCVBDI_JUMBO_THRESH, val);
9385
9386 if (tg3_flag(tp, 57765_PLUS))
9387 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9388 }
9389
9390 static inline u32 calc_crc(unsigned char *buf, int len)
9391 {
9392 u32 reg;
9393 u32 tmp;
9394 int j, k;
9395
9396 reg = 0xffffffff;
9397
9398 for (j = 0; j < len; j++) {
9399 reg ^= buf[j];
9400
9401 for (k = 0; k < 8; k++) {
9402 tmp = reg & 0x01;
9403
9404 reg >>= 1;
9405
9406 if (tmp)
9407 reg ^= 0xedb88320;
9408 }
9409 }
9410
9411 return ~reg;
9412 }
9413
9414 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9415 {
9416 /* accept or reject all multicast frames */
9417 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9418 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9419 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9420 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9421 }
9422
9423 static void __tg3_set_rx_mode(struct net_device *dev)
9424 {
9425 struct tg3 *tp = netdev_priv(dev);
9426 u32 rx_mode;
9427
9428 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9429 RX_MODE_KEEP_VLAN_TAG);
9430
9431 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9432 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9433 * flag clear.
9434 */
9435 if (!tg3_flag(tp, ENABLE_ASF))
9436 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9437 #endif
9438
9439 if (dev->flags & IFF_PROMISC) {
9440 /* Promiscuous mode. */
9441 rx_mode |= RX_MODE_PROMISC;
9442 } else if (dev->flags & IFF_ALLMULTI) {
9443 /* Accept all multicast. */
9444 tg3_set_multi(tp, 1);
9445 } else if (netdev_mc_empty(dev)) {
9446 /* Reject all multicast. */
9447 tg3_set_multi(tp, 0);
9448 } else {
9449 /* Accept one or more multicast(s). */
9450 struct netdev_hw_addr *ha;
9451 u32 mc_filter[4] = { 0, };
9452 u32 regidx;
9453 u32 bit;
9454 u32 crc;
9455
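/* The low 7 bits of the inverted CRC select one of 128 hash bits:
* bits 6:5 pick one of the four MAC_HASH_REG_x registers and bits
* 4:0 pick the bit within that register.
*/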
9456 netdev_for_each_mc_addr(ha, dev) {
9457 crc = calc_crc(ha->addr, ETH_ALEN);
9458 bit = ~crc & 0x7f;
9459 regidx = (bit & 0x60) >> 5;
9460 bit &= 0x1f;
9461 mc_filter[regidx] |= (1 << bit);
9462 }
9463
9464 tw32(MAC_HASH_REG_0, mc_filter[0]);
9465 tw32(MAC_HASH_REG_1, mc_filter[1]);
9466 tw32(MAC_HASH_REG_2, mc_filter[2]);
9467 tw32(MAC_HASH_REG_3, mc_filter[3]);
9468 }
9469
9470 if (rx_mode != tp->rx_mode) {
9471 tp->rx_mode = rx_mode;
9472 tw32_f(MAC_RX_MODE, rx_mode);
9473 udelay(10);
9474 }
9475 }
9476
9477 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9478 {
9479 int i;
9480
9481 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9482 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9483 }
9484
9485 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9486 {
9487 int i;
9488
9489 if (!tg3_flag(tp, SUPPORT_MSIX))
9490 return;
9491
9492 if (tp->rxq_cnt == 1) {
9493 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9494 return;
9495 }
9496
9497 /* Validate table against current IRQ count */
9498 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9499 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9500 break;
9501 }
9502
9503 if (i != TG3_RSS_INDIR_TBL_SIZE)
9504 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9505 }
9506
9507 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9508 {
9509 int i = 0;
9510 u32 reg = MAC_RSS_INDIR_TBL_0;
9511
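/* Each 32-bit indirection register packs eight 4-bit queue indices,
* first entry in the most significant nibble, so the table is
* written eight entries at a time.
*/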
9512 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9513 u32 val = tp->rss_ind_tbl[i];
9514 i++;
9515 for (; i % 8; i++) {
9516 val <<= 4;
9517 val |= tp->rss_ind_tbl[i];
9518 }
9519 tw32(reg, val);
9520 reg += 4;
9521 }
9522 }
9523
9524 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9525 {
9526 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9527 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9528 else
9529 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9530 }
9531
9532 /* tp->lock is held. */
9533 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9534 {
9535 u32 val, rdmac_mode;
9536 int i, err, limit;
9537 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9538
9539 tg3_disable_ints(tp);
9540
9541 tg3_stop_fw(tp);
9542
9543 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9544
9545 if (tg3_flag(tp, INIT_COMPLETE))
9546 tg3_abort_hw(tp, true);
9547
9548 /* Enable MAC control of LPI */
9549 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9550 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9551 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9552 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9553 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9554
9555 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9556
9557 tw32_f(TG3_CPMU_EEE_CTRL,
9558 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9559
9560 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9561 TG3_CPMU_EEEMD_LPI_IN_TX |
9562 TG3_CPMU_EEEMD_LPI_IN_RX |
9563 TG3_CPMU_EEEMD_EEE_ENABLE;
9564
9565 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9566 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9567
9568 if (tg3_flag(tp, ENABLE_APE))
9569 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9570
9571 tw32_f(TG3_CPMU_EEE_MODE, val);
9572
9573 tw32_f(TG3_CPMU_EEE_DBTMR1,
9574 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9575 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9576
9577 tw32_f(TG3_CPMU_EEE_DBTMR2,
9578 TG3_CPMU_DBTMR2_APE_TX_2047US |
9579 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9580 }
9581
9582 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9583 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9584 tg3_phy_pull_config(tp);
9585 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9586 }
9587
9588 if (reset_phy)
9589 tg3_phy_reset(tp);
9590
9591 err = tg3_chip_reset(tp);
9592 if (err)
9593 return err;
9594
9595 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9596
9597 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9598 val = tr32(TG3_CPMU_CTRL);
9599 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9600 tw32(TG3_CPMU_CTRL, val);
9601
9602 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9603 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9604 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9605 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9606
9607 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9608 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9609 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9610 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9611
9612 val = tr32(TG3_CPMU_HST_ACC);
9613 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9614 val |= CPMU_HST_ACC_MACCLK_6_25;
9615 tw32(TG3_CPMU_HST_ACC, val);
9616 }
9617
9618 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9619 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9620 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9621 PCIE_PWR_MGMT_L1_THRESH_4MS;
9622 tw32(PCIE_PWR_MGMT_THRESH, val);
9623
9624 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9625 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9626
9627 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9628
9629 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9630 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9631 }
9632
9633 if (tg3_flag(tp, L1PLLPD_EN)) {
9634 u32 grc_mode = tr32(GRC_MODE);
9635
9636 /* Access the lower 1K of PL PCIE block registers. */
9637 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9638 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9639
9640 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9641 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9642 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9643
9644 tw32(GRC_MODE, grc_mode);
9645 }
9646
9647 if (tg3_flag(tp, 57765_CLASS)) {
9648 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9649 u32 grc_mode = tr32(GRC_MODE);
9650
9651 /* Access the lower 1K of PL PCIE block registers. */
9652 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9653 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9654
9655 val = tr32(TG3_PCIE_TLDLPL_PORT +
9656 TG3_PCIE_PL_LO_PHYCTL5);
9657 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9658 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9659
9660 tw32(GRC_MODE, grc_mode);
9661 }
9662
9663 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9664 u32 grc_mode;
9665
9666 /* Fix transmit hangs */
9667 val = tr32(TG3_CPMU_PADRNG_CTL);
9668 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9669 tw32(TG3_CPMU_PADRNG_CTL, val);
9670
9671 grc_mode = tr32(GRC_MODE);
9672
9673 /* Access the lower 1K of DL PCIE block registers. */
9674 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9675 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9676
9677 val = tr32(TG3_PCIE_TLDLPL_PORT +
9678 TG3_PCIE_DL_LO_FTSMAX);
9679 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9680 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9681 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9682
9683 tw32(GRC_MODE, grc_mode);
9684 }
9685
9686 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9687 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9688 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9689 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9690 }
9691
9692 /* This works around an issue with Athlon chipsets on
9693 * B3 tigon3 silicon. This bit has no effect on any
9694 * other revision. But do not set this on PCI Express
9695 * chips and don't even touch the clocks if the CPMU is present.
9696 */
9697 if (!tg3_flag(tp, CPMU_PRESENT)) {
9698 if (!tg3_flag(tp, PCI_EXPRESS))
9699 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9700 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9701 }
9702
9703 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9704 tg3_flag(tp, PCIX_MODE)) {
9705 val = tr32(TG3PCI_PCISTATE);
9706 val |= PCISTATE_RETRY_SAME_DMA;
9707 tw32(TG3PCI_PCISTATE, val);
9708 }
9709
9710 if (tg3_flag(tp, ENABLE_APE)) {
9711 /* Allow reads and writes to the
9712 * APE register and memory space.
9713 */
9714 val = tr32(TG3PCI_PCISTATE);
9715 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9716 PCISTATE_ALLOW_APE_SHMEM_WR |
9717 PCISTATE_ALLOW_APE_PSPACE_WR;
9718 tw32(TG3PCI_PCISTATE, val);
9719 }
9720
9721 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9722 /* Enable some hw fixes. */
9723 val = tr32(TG3PCI_MSI_DATA);
9724 val |= (1 << 26) | (1 << 28) | (1 << 29);
9725 tw32(TG3PCI_MSI_DATA, val);
9726 }
9727
9728 /* Descriptor ring init may make accesses to the
9729 * NIC SRAM area to set up the TX descriptors, so we
9730 * can only do this after the hardware has been
9731 * successfully reset.
9732 */
9733 err = tg3_init_rings(tp);
9734 if (err)
9735 return err;
9736
9737 if (tg3_flag(tp, 57765_PLUS)) {
9738 val = tr32(TG3PCI_DMA_RW_CTRL) &
9739 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9740 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9741 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9742 if (!tg3_flag(tp, 57765_CLASS) &&
9743 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9744 tg3_asic_rev(tp) != ASIC_REV_5762)
9745 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9746 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9747 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9748 tg3_asic_rev(tp) != ASIC_REV_5761) {
9749 /* This value is determined during the probe time DMA
9750 * engine test, tg3_test_dma.
9751 */
9752 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9753 }
9754
9755 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9756 GRC_MODE_4X_NIC_SEND_RINGS |
9757 GRC_MODE_NO_TX_PHDR_CSUM |
9758 GRC_MODE_NO_RX_PHDR_CSUM);
9759 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9760
9761 /* Pseudo-header checksum is done by hardware logic and not
9762 * the offload processors, so make the chip do the pseudo-
9763 * header checksums on receive. For transmit it is more
9764 * convenient to do the pseudo-header checksum in software
9765 * as Linux does that on transmit for us in all cases.
9766 */
9767 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9768
9769 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9770 if (tp->rxptpctl)
9771 tw32(TG3_RX_PTP_CTL,
9772 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9773
9774 if (tg3_flag(tp, PTP_CAPABLE))
9775 val |= GRC_MODE_TIME_SYNC_ENABLE;
9776
9777 tw32(GRC_MODE, tp->grc_mode | val);
9778
9779 /* Set up the timer prescaler register. The clock is always 66 MHz; a prescaler of 65 (divide by N + 1) yields a 1 us timer tick. */
9780 val = tr32(GRC_MISC_CFG);
9781 val &= ~0xff;
9782 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9783 tw32(GRC_MISC_CFG, val);
9784
9785 /* Initialize MBUF/DESC pool. */
9786 if (tg3_flag(tp, 5750_PLUS)) {
9787 /* Do nothing. */
9788 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9789 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9790 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9791 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9792 else
9793 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9794 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9795 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9796 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9797 int fw_len;
9798
9799 fw_len = tp->fw_len;
9800 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9801 tw32(BUFMGR_MB_POOL_ADDR,
9802 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9803 tw32(BUFMGR_MB_POOL_SIZE,
9804 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9805 }
9806
9807 if (tp->dev->mtu <= ETH_DATA_LEN) {
9808 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9809 tp->bufmgr_config.mbuf_read_dma_low_water);
9810 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9811 tp->bufmgr_config.mbuf_mac_rx_low_water);
9812 tw32(BUFMGR_MB_HIGH_WATER,
9813 tp->bufmgr_config.mbuf_high_water);
9814 } else {
9815 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9816 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9817 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9818 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9819 tw32(BUFMGR_MB_HIGH_WATER,
9820 tp->bufmgr_config.mbuf_high_water_jumbo);
9821 }
9822 tw32(BUFMGR_DMA_LOW_WATER,
9823 tp->bufmgr_config.dma_low_water);
9824 tw32(BUFMGR_DMA_HIGH_WATER,
9825 tp->bufmgr_config.dma_high_water);
9826
9827 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9828 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9829 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9830 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9831 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9832 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9833 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9834 tw32(BUFMGR_MODE, val);
9835 for (i = 0; i < 2000; i++) {
9836 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9837 break;
9838 udelay(10);
9839 }
9840 if (i >= 2000) {
9841 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9842 return -ENODEV;
9843 }
9844
9845 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9846 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9847
9848 tg3_setup_rxbd_thresholds(tp);
9849
9850 /* Initialize TG3_BDINFO's at:
9851 * RCVDBDI_STD_BD: standard eth size rx ring
9852 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9853 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9854 *
9855 * like so:
9856 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9857 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9858 * ring attribute flags
9859 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9860 *
9861 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9862 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9863 *
9864 * The size of each ring is fixed in the firmware, but the location is
9865 * configurable.
9866 */
9867 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9868 ((u64) tpr->rx_std_mapping >> 32));
9869 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9870 ((u64) tpr->rx_std_mapping & 0xffffffff));
9871 if (!tg3_flag(tp, 5717_PLUS))
9872 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9873 NIC_SRAM_RX_BUFFER_DESC);
9874
9875 /* Disable the mini ring */
9876 if (!tg3_flag(tp, 5705_PLUS))
9877 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9878 BDINFO_FLAGS_DISABLED);
9879
9880 /* Program the jumbo buffer descriptor ring control
9881 * blocks on those devices that have them.
9882 */
9883 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9884 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9885
9886 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9887 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9888 ((u64) tpr->rx_jmb_mapping >> 32));
9889 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9890 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9891 val = TG3_RX_JMB_RING_SIZE(tp) <<
9892 BDINFO_FLAGS_MAXLEN_SHIFT;
9893 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9894 val | BDINFO_FLAGS_USE_EXT_RECV);
9895 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9896 tg3_flag(tp, 57765_CLASS) ||
9897 tg3_asic_rev(tp) == ASIC_REV_5762)
9898 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9899 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9900 } else {
9901 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9902 BDINFO_FLAGS_DISABLED);
9903 }
9904
9905 if (tg3_flag(tp, 57765_PLUS)) {
9906 val = TG3_RX_STD_RING_SIZE(tp);
9907 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9908 val |= (TG3_RX_STD_DMA_SZ << 2);
9909 } else
9910 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9911 } else
9912 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9913
9914 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9915
9916 tpr->rx_std_prod_idx = tp->rx_pending;
9917 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9918
9919 tpr->rx_jmb_prod_idx =
9920 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9921 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9922
9923 tg3_rings_reset(tp);
9924
9925 /* Initialize MAC address and backoff seed. */
9926 __tg3_set_mac_addr(tp, false);
9927
9928 /* MTU + ethernet header + FCS + optional VLAN tag */
9929 tw32(MAC_RX_MTU_SIZE,
9930 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9931
9932 /* The slot time is changed by tg3_setup_phy if we
9933 * run at gigabit with half duplex.
9934 */
9935 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9936 (6 << TX_LENGTHS_IPG_SHIFT) |
9937 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9938
9939 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9940 tg3_asic_rev(tp) == ASIC_REV_5762)
9941 val |= tr32(MAC_TX_LENGTHS) &
9942 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9943 TX_LENGTHS_CNT_DWN_VAL_MSK);
9944
9945 tw32(MAC_TX_LENGTHS, val);
9946
9947 /* Receive rules. */
9948 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9949 tw32(RCVLPC_CONFIG, 0x0181);
9950
9951 /* Calculate the RDMAC_MODE setting early; we need it to determine
9952 * the RCVLPC_STATE_ENABLE mask.
9953 */
9954 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9955 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9956 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9957 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9958 RDMAC_MODE_LNGREAD_ENAB);
9959
9960 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9961 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9962
9963 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9964 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9965 tg3_asic_rev(tp) == ASIC_REV_57780)
9966 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9967 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9968 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9969
9970 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9971 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9972 if (tg3_flag(tp, TSO_CAPABLE) &&
9973 tg3_asic_rev(tp) == ASIC_REV_5705) {
9974 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9975 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9976 !tg3_flag(tp, IS_5788)) {
9977 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9978 }
9979 }
9980
9981 if (tg3_flag(tp, PCI_EXPRESS))
9982 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9983
9984 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9985 tp->dma_limit = 0;
9986 if (tp->dev->mtu <= ETH_DATA_LEN) {
9987 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9988 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9989 }
9990 }
9991
9992 if (tg3_flag(tp, HW_TSO_1) ||
9993 tg3_flag(tp, HW_TSO_2) ||
9994 tg3_flag(tp, HW_TSO_3))
9995 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9996
9997 if (tg3_flag(tp, 57765_PLUS) ||
9998 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9999 tg3_asic_rev(tp) == ASIC_REV_57780)
10000 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10001
10002 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10003 tg3_asic_rev(tp) == ASIC_REV_5762)
10004 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10005
10006 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10007 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10008 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10009 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10010 tg3_flag(tp, 57765_PLUS)) {
10011 u32 tgtreg;
10012
10013 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10014 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10015 else
10016 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10017
10018 val = tr32(tgtreg);
10019 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10020 tg3_asic_rev(tp) == ASIC_REV_5762) {
10021 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10022 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10023 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10024 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10025 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10026 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10027 }
10028 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10029 }
10030
10031 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10032 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10033 tg3_asic_rev(tp) == ASIC_REV_5762) {
10034 u32 tgtreg;
10035
10036 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10037 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10038 else
10039 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10040
10041 val = tr32(tgtreg);
10042 tw32(tgtreg, val |
10043 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10044 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10045 }
10046
10047 /* Receive/send statistics. */
10048 if (tg3_flag(tp, 5750_PLUS)) {
10049 val = tr32(RCVLPC_STATS_ENABLE);
10050 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10051 tw32(RCVLPC_STATS_ENABLE, val);
10052 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10053 tg3_flag(tp, TSO_CAPABLE)) {
10054 val = tr32(RCVLPC_STATS_ENABLE);
10055 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10056 tw32(RCVLPC_STATS_ENABLE, val);
10057 } else {
10058 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10059 }
10060 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10061 tw32(SNDDATAI_STATSENAB, 0xffffff);
10062 tw32(SNDDATAI_STATSCTRL,
10063 (SNDDATAI_SCTRL_ENABLE |
10064 SNDDATAI_SCTRL_FASTUPD));
10065
10066 /* Setup host coalescing engine. */
10067 tw32(HOSTCC_MODE, 0);
10068 for (i = 0; i < 2000; i++) {
10069 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10070 break;
10071 udelay(10);
10072 }
10073
10074 __tg3_set_coalesce(tp, &tp->coal);
10075
10076 if (!tg3_flag(tp, 5705_PLUS)) {
10077 /* Status/statistics block address. See tg3_timer,
10078 * the tg3_periodic_fetch_stats call there, and
10079 * tg3_get_stats to see how this works for 5705/5750 chips.
10080 */
10081 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10082 ((u64) tp->stats_mapping >> 32));
10083 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10084 ((u64) tp->stats_mapping & 0xffffffff));
10085 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10086
10087 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10088
10089 /* Clear statistics and status block memory areas */
10090 for (i = NIC_SRAM_STATS_BLK;
10091 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10092 i += sizeof(u32)) {
10093 tg3_write_mem(tp, i, 0);
10094 udelay(40);
10095 }
10096 }
10097
10098 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10099
10100 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10101 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10102 if (!tg3_flag(tp, 5705_PLUS))
10103 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10104
10105 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10106 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10107 /* reset to prevent losing 1st rx packet intermittently */
10108 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10109 udelay(10);
10110 }
10111
10112 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10113 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10114 MAC_MODE_FHDE_ENABLE;
10115 if (tg3_flag(tp, ENABLE_APE))
10116 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10117 if (!tg3_flag(tp, 5705_PLUS) &&
10118 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10119 tg3_asic_rev(tp) != ASIC_REV_5700)
10120 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10121 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10122 udelay(40);
10123
10124 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10125 * If TG3_FLAG_IS_NIC is zero, we should read the
10126 * register to preserve the GPIO settings for LOMs. The GPIOs,
10127 * whether used as inputs or outputs, are set by boot code after
10128 * reset.
10129 */
10130 if (!tg3_flag(tp, IS_NIC)) {
10131 u32 gpio_mask;
10132
10133 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10134 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10135 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10136
10137 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10138 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10139 GRC_LCLCTRL_GPIO_OUTPUT3;
10140
10141 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10142 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10143
10144 tp->grc_local_ctrl &= ~gpio_mask;
10145 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10146
10147 /* GPIO1 must be driven high for eeprom write protect */
10148 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10149 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10150 GRC_LCLCTRL_GPIO_OUTPUT1);
10151 }
10152 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10153 udelay(100);
10154
10155 if (tg3_flag(tp, USING_MSIX)) {
10156 val = tr32(MSGINT_MODE);
10157 val |= MSGINT_MODE_ENABLE;
10158 if (tp->irq_cnt > 1)
10159 val |= MSGINT_MODE_MULTIVEC_EN;
10160 if (!tg3_flag(tp, 1SHOT_MSI))
10161 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10162 tw32(MSGINT_MODE, val);
10163 }
10164
10165 if (!tg3_flag(tp, 5705_PLUS)) {
10166 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10167 udelay(40);
10168 }
10169
10170 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10171 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10172 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10173 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10174 WDMAC_MODE_LNGREAD_ENAB);
10175
10176 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10177 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10178 if (tg3_flag(tp, TSO_CAPABLE) &&
10179 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10180 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10181 /* nothing */
10182 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10183 !tg3_flag(tp, IS_5788)) {
10184 val |= WDMAC_MODE_RX_ACCEL;
10185 }
10186 }
10187
10188 /* Enable host coalescing bug fix */
10189 if (tg3_flag(tp, 5755_PLUS))
10190 val |= WDMAC_MODE_STATUS_TAG_FIX;
10191
10192 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10193 val |= WDMAC_MODE_BURST_ALL_DATA;
10194
10195 tw32_f(WDMAC_MODE, val);
10196 udelay(40);
10197
10198 if (tg3_flag(tp, PCIX_MODE)) {
10199 u16 pcix_cmd;
10200
10201 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10202 &pcix_cmd);
10203 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10204 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10205 pcix_cmd |= PCI_X_CMD_READ_2K;
10206 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10207 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10208 pcix_cmd |= PCI_X_CMD_READ_2K;
10209 }
10210 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10211 pcix_cmd);
10212 }
10213
10214 tw32_f(RDMAC_MODE, rdmac_mode);
10215 udelay(40);
10216
10217 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10218 tg3_asic_rev(tp) == ASIC_REV_5720) {
10219 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10220 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10221 break;
10222 }
10223 if (i < TG3_NUM_RDMA_CHANNELS) {
10224 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10225 val |= tg3_lso_rd_dma_workaround_bit(tp);
10226 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10227 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10228 }
10229 }
10230
10231 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10232 if (!tg3_flag(tp, 5705_PLUS))
10233 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10234
10235 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10236 tw32(SNDDATAC_MODE,
10237 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10238 else
10239 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10240
10241 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10242 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10243 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10244 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10245 val |= RCVDBDI_MODE_LRG_RING_SZ;
10246 tw32(RCVDBDI_MODE, val);
10247 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10248 if (tg3_flag(tp, HW_TSO_1) ||
10249 tg3_flag(tp, HW_TSO_2) ||
10250 tg3_flag(tp, HW_TSO_3))
10251 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10252 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10253 if (tg3_flag(tp, ENABLE_TSS))
10254 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10255 tw32(SNDBDI_MODE, val);
10256 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10257
10258 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10259 err = tg3_load_5701_a0_firmware_fix(tp);
10260 if (err)
10261 return err;
10262 }
10263
10264 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10265 /* Ignore any errors for the firmware download. If download
10266 * fails, the device will operate with EEE disabled.
10267 */
10268 tg3_load_57766_firmware(tp);
10269 }
10270
10271 if (tg3_flag(tp, TSO_CAPABLE)) {
10272 err = tg3_load_tso_firmware(tp);
10273 if (err)
10274 return err;
10275 }
10276
10277 tp->tx_mode = TX_MODE_ENABLE;
10278
10279 if (tg3_flag(tp, 5755_PLUS) ||
10280 tg3_asic_rev(tp) == ASIC_REV_5906)
10281 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10282
10283 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10284 tg3_asic_rev(tp) == ASIC_REV_5762) {
10285 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10286 tp->tx_mode &= ~val;
10287 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10288 }
10289
10290 tw32_f(MAC_TX_MODE, tp->tx_mode);
10291 udelay(100);
10292
10293 if (tg3_flag(tp, ENABLE_RSS)) {
10294 tg3_rss_write_indir_tbl(tp);
10295
10296 /* Setup the "secret" hash key. */
10297 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10298 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10299 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10300 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10301 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10302 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10303 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10304 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10305 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10306 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10307 }
10308
10309 tp->rx_mode = RX_MODE_ENABLE;
10310 if (tg3_flag(tp, 5755_PLUS))
10311 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10312
10313 if (tg3_flag(tp, ENABLE_RSS))
10314 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10315 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10316 RX_MODE_RSS_IPV6_HASH_EN |
10317 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10318 RX_MODE_RSS_IPV4_HASH_EN |
10319 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10320
10321 tw32_f(MAC_RX_MODE, tp->rx_mode);
10322 udelay(10);
10323
10324 tw32(MAC_LED_CTRL, tp->led_ctrl);
10325
10326 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10327 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10328 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10329 udelay(10);
10330 }
10331 tw32_f(MAC_RX_MODE, tp->rx_mode);
10332 udelay(10);
10333
10334 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10335 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10336 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10337 /* Set drive transmission level to 1.2V */
10338 /* only if the signal pre-emphasis bit is not set */
10339 val = tr32(MAC_SERDES_CFG);
10340 val &= 0xfffff000;
10341 val |= 0x880;
10342 tw32(MAC_SERDES_CFG, val);
10343 }
10344 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10345 tw32(MAC_SERDES_CFG, 0x616000);
10346 }
10347
10348 /* Prevent chip from dropping frames when flow control
10349 * is enabled.
10350 */
10351 if (tg3_flag(tp, 57765_CLASS))
10352 val = 1;
10353 else
10354 val = 2;
10355 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10356
10357 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10358 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10359 /* Use hardware link auto-negotiation */
10360 tg3_flag_set(tp, HW_AUTONEG);
10361 }
10362
10363 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10364 tg3_asic_rev(tp) == ASIC_REV_5714) {
10365 u32 tmp;
10366
10367 tmp = tr32(SERDES_RX_CTRL);
10368 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10369 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10370 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10371 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10372 }
10373
10374 if (!tg3_flag(tp, USE_PHYLIB)) {
10375 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10376 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10377
10378 err = tg3_setup_phy(tp, false);
10379 if (err)
10380 return err;
10381
10382 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10383 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10384 u32 tmp;
10385
10386 /* Clear CRC stats. */
10387 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10388 tg3_writephy(tp, MII_TG3_TEST1,
10389 tmp | MII_TG3_TEST1_CRC_EN);
10390 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10391 }
10392 }
10393 }
10394
10395 __tg3_set_rx_mode(tp->dev);
10396
10397 /* Initialize receive rules. */
10398 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10399 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10400 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10401 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10402
10403 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10404 limit = 8;
10405 else
10406 limit = 16;
10407 if (tg3_flag(tp, ENABLE_ASF))
10408 limit -= 4;
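	/* Note: the cases below intentionally fall through. Each case
	 * clears the rule one below its number, so starting at "limit"
	 * every remaining rule/value pair is zeroed down to rule 4.
	 * Rules 3 and 2 are deliberately left alone (their writes stay
	 * commented out), and rules 0-1 were just programmed above.
	 */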
10409 switch (limit) {
10410 case 16:
10411 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10412 case 15:
10413 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10414 case 14:
10415 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10416 case 13:
10417 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10418 case 12:
10419 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10420 case 11:
10421 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10422 case 10:
10423 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10424 case 9:
10425 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10426 case 8:
10427 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10428 case 7:
10429 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10430 case 6:
10431 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10432 case 5:
10433 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10434 case 4:
10435 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10436 case 3:
10437 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10438 case 2:
10439 case 1:
10440
10441 default:
10442 break;
10443 }
10444
10445 if (tg3_flag(tp, ENABLE_APE))
10446 /* Write our heartbeat update interval to APE. */
10447 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10448 APE_HOST_HEARTBEAT_INT_DISABLE);
10449
10450 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10451
10452 return 0;
10453 }
10454
10455 /* Called at device open time to get the chip ready for
10456 * packet processing. Invoked with tp->lock held.
10457 */
10458 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10459 {
10460 /* Chip may have been just powered on. If so, the boot code may still
10461 * be running initialization. Wait for it to finish to avoid races in
10462 * accessing the hardware.
10463 */
10464 tg3_enable_register_access(tp);
10465 tg3_poll_fw(tp);
10466
10467 tg3_switch_clocks(tp);
10468
10469 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10470
10471 return tg3_reset_hw(tp, reset_phy);
10472 }
10473
10474 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10475 {
10476 int i;
10477
10478 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10479 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10480
10481 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10482 off += len;
10483
10484 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10485 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10486 memset(ocir, 0, TG3_OCIR_LEN);
10487 }
10488 }
10489
10490 /* sysfs attributes for hwmon */
10491 static ssize_t tg3_show_temp(struct device *dev,
10492 struct device_attribute *devattr, char *buf)
10493 {
10494 struct pci_dev *pdev = to_pci_dev(dev);
10495 struct net_device *netdev = pci_get_drvdata(pdev);
10496 struct tg3 *tp = netdev_priv(netdev);
10497 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10498 u32 temperature;
10499
10500 spin_lock_bh(&tp->lock);
10501 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10502 sizeof(temperature));
10503 spin_unlock_bh(&tp->lock);
10504 return sprintf(buf, "%u\n", temperature);
10505 }
10506
10507
10508 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10509 TG3_TEMP_SENSOR_OFFSET);
10510 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10511 TG3_TEMP_CAUTION_OFFSET);
10512 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10513 TG3_TEMP_MAX_OFFSET);
10514
10515 static struct attribute *tg3_attributes[] = {
10516 &sensor_dev_attr_temp1_input.dev_attr.attr,
10517 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10518 &sensor_dev_attr_temp1_max.dev_attr.attr,
10519 NULL
10520 };
10521
10522 static const struct attribute_group tg3_group = {
10523 .attrs = tg3_attributes,
10524 };
10525
10526 static void tg3_hwmon_close(struct tg3 *tp)
10527 {
10528 if (tp->hwmon_dev) {
10529 hwmon_device_unregister(tp->hwmon_dev);
10530 tp->hwmon_dev = NULL;
10531 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10532 }
10533 }
10534
10535 static void tg3_hwmon_open(struct tg3 *tp)
10536 {
10537 int i, err;
10538 u32 size = 0;
10539 struct pci_dev *pdev = tp->pdev;
10540 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10541
10542 tg3_sd_scan_scratchpad(tp, ocirs);
10543
10544 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10545 if (!ocirs[i].src_data_length)
10546 continue;
10547
10548 size += ocirs[i].src_hdr_length;
10549 size += ocirs[i].src_data_length;
10550 }
10551
10552 if (!size)
10553 return;
10554
10555 /* Register hwmon sysfs hooks */
10556 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10557 if (err) {
10558 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10559 return;
10560 }
10561
10562 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10563 if (IS_ERR(tp->hwmon_dev)) {
10564 tp->hwmon_dev = NULL;
10565 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10566 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10567 }
10568 }
10569
10570
10571 #define TG3_STAT_ADD32(PSTAT, REG) \
10572 do { u32 __val = tr32(REG); \
10573 (PSTAT)->low += __val; \
10574 if ((PSTAT)->low < __val) \
10575 (PSTAT)->high += 1; \
10576 } while (0)
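/* Worked example (illustrative): the macro accumulates 32-bit hardware
 * counter reads into a 64-bit software counter, detecting wraparound of
 * the low word via unsigned overflow. If (PSTAT)->low is 0xfffffff8 and
 * the register reads 0x10, then low += 0x10 wraps to 0x8; since
 * 0x8 < 0x10 the addition overflowed, so high is incremented.
 */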
10577
10578 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10579 {
10580 struct tg3_hw_stats *sp = tp->hw_stats;
10581
10582 if (!tp->link_up)
10583 return;
10584
10585 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10586 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10587 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10588 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10589 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10590 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10591 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10592 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10593 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10594 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10595 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10596 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10597 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10598 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10599 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10600 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10601 u32 val;
10602
10603 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10604 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10605 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10606 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10607 }
10608
10609 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10610 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10611 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10612 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10613 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10614 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10615 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10616 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10617 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10618 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10619 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10620 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10621 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10622 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10623
10624 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10625 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10626 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10627 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10628 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10629 } else {
10630 u32 val = tr32(HOSTCC_FLOW_ATTN);
10631 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10632 if (val) {
10633 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10634 sp->rx_discards.low += val;
10635 if (sp->rx_discards.low < val)
10636 sp->rx_discards.high += 1;
10637 }
10638 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10639 }
10640 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10641 }
10642
10643 static void tg3_chk_missed_msi(struct tg3 *tp)
10644 {
10645 u32 i;
10646
10647 for (i = 0; i < tp->irq_cnt; i++) {
10648 struct tg3_napi *tnapi = &tp->napi[i];
10649
10650 if (tg3_has_work(tnapi)) {
10651 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10652 tnapi->last_tx_cons == tnapi->tx_cons) {
10653 if (tnapi->chk_msi_cnt < 1) {
10654 tnapi->chk_msi_cnt++;
10655 return;
10656 }
10657 tg3_msi(0, tnapi);
10658 }
10659 }
10660 tnapi->chk_msi_cnt = 0;
10661 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10662 tnapi->last_tx_cons = tnapi->tx_cons;
10663 }
10664 }
10665
10666 static void tg3_timer(unsigned long __opaque)
10667 {
10668 struct tg3 *tp = (struct tg3 *) __opaque;
10669
10670 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10671 goto restart_timer;
10672
10673 spin_lock(&tp->lock);
10674
10675 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10676 tg3_flag(tp, 57765_CLASS))
10677 tg3_chk_missed_msi(tp);
10678
10679 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10680 /* BCM4785: Flush posted writes from GbE to host memory. */
10681 tr32(HOSTCC_MODE);
10682 }
10683
10684 if (!tg3_flag(tp, TAGGED_STATUS)) {
10685 /* All of this garbage is because, when using non-tagged
10686 * IRQ status, the mailbox/status_block protocol the chip
10687 * uses with the CPU is race prone.
10688 */
10689 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10690 tw32(GRC_LOCAL_CTRL,
10691 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10692 } else {
10693 tw32(HOSTCC_MODE, tp->coalesce_mode |
10694 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10695 }
10696
10697 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10698 spin_unlock(&tp->lock);
10699 tg3_reset_task_schedule(tp);
10700 goto restart_timer;
10701 }
10702 }
10703
10704 /* This part only runs once per second. */
10705 if (!--tp->timer_counter) {
10706 if (tg3_flag(tp, 5705_PLUS))
10707 tg3_periodic_fetch_stats(tp);
10708
10709 if (tp->setlpicnt && !--tp->setlpicnt)
10710 tg3_phy_eee_enable(tp);
10711
10712 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10713 u32 mac_stat;
10714 int phy_event;
10715
10716 mac_stat = tr32(MAC_STATUS);
10717
10718 phy_event = 0;
10719 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10720 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10721 phy_event = 1;
10722 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10723 phy_event = 1;
10724
10725 if (phy_event)
10726 tg3_setup_phy(tp, false);
10727 } else if (tg3_flag(tp, POLL_SERDES)) {
10728 u32 mac_stat = tr32(MAC_STATUS);
10729 int need_setup = 0;
10730
10731 if (tp->link_up &&
10732 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10733 need_setup = 1;
10734 }
10735 if (!tp->link_up &&
10736 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10737 MAC_STATUS_SIGNAL_DET))) {
10738 need_setup = 1;
10739 }
10740 if (need_setup) {
10741 if (!tp->serdes_counter) {
10742 tw32_f(MAC_MODE,
10743 (tp->mac_mode &
10744 ~MAC_MODE_PORT_MODE_MASK));
10745 udelay(40);
10746 tw32_f(MAC_MODE, tp->mac_mode);
10747 udelay(40);
10748 }
10749 tg3_setup_phy(tp, false);
10750 }
10751 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10752 tg3_flag(tp, 5780_CLASS)) {
10753 tg3_serdes_parallel_detect(tp);
10754 }
10755
10756 tp->timer_counter = tp->timer_multiplier;
10757 }
10758
10759 /* Heartbeat is only sent once every 2 seconds.
10760 *
10761 * The heartbeat is to tell the ASF firmware that the host
10762 * driver is still alive. In the event that the OS crashes,
10763 * ASF needs to reset the hardware to free up the FIFO space
10764 * that may be filled with rx packets destined for the host.
10765 * If the FIFO is full, ASF will no longer function properly.
10766 *
10767 * Unintended resets have been reported on real time kernels
10768 * where the timer doesn't run on time. Netpoll will also have
10769 * the same problem.
10770 *
10771 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10772 * to check the ring condition when the heartbeat is expiring
10773 * before doing the reset. This will prevent most unintended
10774 * resets.
10775 */
10776 if (!--tp->asf_counter) {
10777 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10778 tg3_wait_for_event_ack(tp);
10779
10780 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10781 FWCMD_NICDRV_ALIVE3);
10782 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10783 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10784 TG3_FW_UPDATE_TIMEOUT_SEC);
10785
10786 tg3_generate_fw_event(tp);
10787 }
10788 tp->asf_counter = tp->asf_multiplier;
10789 }
10790
10791 spin_unlock(&tp->lock);
10792
10793 restart_timer:
10794 tp->timer.expires = jiffies + tp->timer_offset;
10795 add_timer(&tp->timer);
10796 }
10797
10798 static void tg3_timer_init(struct tg3 *tp)
10799 {
10800 if (tg3_flag(tp, TAGGED_STATUS) &&
10801 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10802 !tg3_flag(tp, 57765_CLASS))
10803 tp->timer_offset = HZ;
10804 else
10805 tp->timer_offset = HZ / 10;
10806
10807 BUG_ON(tp->timer_offset > HZ);
10808
10809 tp->timer_multiplier = (HZ / tp->timer_offset);
10810 tp->asf_multiplier = (HZ / tp->timer_offset) *
10811 TG3_FW_UPDATE_FREQ_SEC;
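	/* Worked example (illustrative, assuming TG3_FW_UPDATE_FREQ_SEC
	 * is 2, matching the "once every 2 seconds" heartbeat comment in
	 * tg3_timer above): with timer_offset = HZ the timer fires once
	 * per second, timer_multiplier is 1 and asf_multiplier is 2.
	 * With timer_offset = HZ / 10 the timer fires 10 times per
	 * second, timer_multiplier is 10 and asf_multiplier is 20 ticks,
	 * i.e. still one heartbeat every 2 seconds.
	 */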
10812
10813 init_timer(&tp->timer);
10814 tp->timer.data = (unsigned long) tp;
10815 tp->timer.function = tg3_timer;
10816 }
10817
10818 static void tg3_timer_start(struct tg3 *tp)
10819 {
10820 tp->asf_counter = tp->asf_multiplier;
10821 tp->timer_counter = tp->timer_multiplier;
10822
10823 tp->timer.expires = jiffies + tp->timer_offset;
10824 add_timer(&tp->timer);
10825 }
10826
10827 static void tg3_timer_stop(struct tg3 *tp)
10828 {
10829 del_timer_sync(&tp->timer);
10830 }
10831
10832 /* Restart hardware after configuration changes, self-test, etc.
10833 * Invoked with tp->lock held.
10834 */
10835 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10836 __releases(tp->lock)
10837 __acquires(tp->lock)
10838 {
10839 int err;
10840
10841 err = tg3_init_hw(tp, reset_phy);
10842 if (err) {
10843 netdev_err(tp->dev,
10844 "Failed to re-initialize device, aborting\n");
10845 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10846 tg3_full_unlock(tp);
10847 tg3_timer_stop(tp);
10848 tp->irq_sync = 0;
10849 tg3_napi_enable(tp);
10850 dev_close(tp->dev);
10851 tg3_full_lock(tp, 0);
10852 }
10853 return err;
10854 }
10855
10856 static void tg3_reset_task(struct work_struct *work)
10857 {
10858 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10859 int err;
10860
10861 tg3_full_lock(tp, 0);
10862
10863 if (!netif_running(tp->dev)) {
10864 tg3_flag_clear(tp, RESET_TASK_PENDING);
10865 tg3_full_unlock(tp);
10866 return;
10867 }
10868
10869 tg3_full_unlock(tp);
10870
10871 tg3_phy_stop(tp);
10872
10873 tg3_netif_stop(tp);
10874
10875 tg3_full_lock(tp, 1);
10876
10877 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10878 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10879 tp->write32_rx_mbox = tg3_write_flush_reg32;
10880 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10881 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10882 }
10883
10884 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10885 err = tg3_init_hw(tp, true);
10886 if (err)
10887 goto out;
10888
10889 tg3_netif_start(tp);
10890
10891 out:
10892 tg3_full_unlock(tp);
10893
10894 if (!err)
10895 tg3_phy_start(tp);
10896
10897 tg3_flag_clear(tp, RESET_TASK_PENDING);
10898 }
10899
10900 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10901 {
10902 irq_handler_t fn;
10903 unsigned long flags;
10904 char *name;
10905 struct tg3_napi *tnapi = &tp->napi[irq_num];
10906
10907 if (tp->irq_cnt == 1)
10908 name = tp->dev->name;
10909 else {
10910 name = &tnapi->irq_lbl[0];
10911 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10912 name[IFNAMSIZ-1] = 0;
10913 }
10914
10915 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10916 fn = tg3_msi;
10917 if (tg3_flag(tp, 1SHOT_MSI))
10918 fn = tg3_msi_1shot;
10919 flags = 0;
10920 } else {
10921 fn = tg3_interrupt;
10922 if (tg3_flag(tp, TAGGED_STATUS))
10923 fn = tg3_interrupt_tagged;
10924 flags = IRQF_SHARED;
10925 }
10926
10927 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10928 }
10929
10930 static int tg3_test_interrupt(struct tg3 *tp)
10931 {
10932 struct tg3_napi *tnapi = &tp->napi[0];
10933 struct net_device *dev = tp->dev;
10934 int err, i, intr_ok = 0;
10935 u32 val;
10936
10937 if (!netif_running(dev))
10938 return -ENODEV;
10939
10940 tg3_disable_ints(tp);
10941
10942 free_irq(tnapi->irq_vec, tnapi);
10943
10944 /*
10945 * Turn off MSI one shot mode. Otherwise this test has no
10946 * observable way to know whether the interrupt was delivered.
10947 */
10948 if (tg3_flag(tp, 57765_PLUS)) {
10949 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10950 tw32(MSGINT_MODE, val);
10951 }
10952
10953 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10954 IRQF_SHARED, dev->name, tnapi);
10955 if (err)
10956 return err;
10957
10958 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10959 tg3_enable_ints(tp);
10960
10961 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10962 tnapi->coal_now);
10963
10964 for (i = 0; i < 5; i++) {
10965 u32 int_mbox, misc_host_ctrl;
10966
10967 int_mbox = tr32_mailbox(tnapi->int_mbox);
10968 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10969
10970 if ((int_mbox != 0) ||
10971 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10972 intr_ok = 1;
10973 break;
10974 }
10975
10976 if (tg3_flag(tp, 57765_PLUS) &&
10977 tnapi->hw_status->status_tag != tnapi->last_tag)
10978 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10979
10980 msleep(10);
10981 }
10982
10983 tg3_disable_ints(tp);
10984
10985 free_irq(tnapi->irq_vec, tnapi);
10986
10987 err = tg3_request_irq(tp, 0);
10988
10989 if (err)
10990 return err;
10991
10992 if (intr_ok) {
10993 /* Reenable MSI one shot mode. */
10994 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10995 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10996 tw32(MSGINT_MODE, val);
10997 }
10998 return 0;
10999 }
11000
11001 return -EIO;
11002 }
11003
11004 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
11005 * mode is successfully restored.
11006 */
11007 static int tg3_test_msi(struct tg3 *tp)
11008 {
11009 int err;
11010 u16 pci_cmd;
11011
11012 if (!tg3_flag(tp, USING_MSI))
11013 return 0;
11014
11015 /* Turn off SERR reporting in case MSI terminates with Master
11016 * Abort.
11017 */
11018 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11019 pci_write_config_word(tp->pdev, PCI_COMMAND,
11020 pci_cmd & ~PCI_COMMAND_SERR);
11021
11022 err = tg3_test_interrupt(tp);
11023
11024 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11025
11026 if (!err)
11027 return 0;
11028
11029 /* other failures */
11030 if (err != -EIO)
11031 return err;
11032
11033 /* MSI test failed, go back to INTx mode */
11034 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11035 "to INTx mode. Please report this failure to the PCI "
11036 "maintainer and include system chipset information\n");
11037
11038 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11039
11040 pci_disable_msi(tp->pdev);
11041
11042 tg3_flag_clear(tp, USING_MSI);
11043 tp->napi[0].irq_vec = tp->pdev->irq;
11044
11045 err = tg3_request_irq(tp, 0);
11046 if (err)
11047 return err;
11048
11049 /* Need to reset the chip because the MSI cycle may have terminated
11050 * with Master Abort.
11051 */
11052 tg3_full_lock(tp, 1);
11053
11054 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11055 err = tg3_init_hw(tp, true);
11056
11057 tg3_full_unlock(tp);
11058
11059 if (err)
11060 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11061
11062 return err;
11063 }
11064
11065 static int tg3_request_firmware(struct tg3 *tp)
11066 {
11067 const struct tg3_firmware_hdr *fw_hdr;
11068
11069 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11070 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11071 tp->fw_needed);
11072 return -ENOENT;
11073 }
11074
11075 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11076
11077 /* Firmware blob starts with version numbers, followed by
11078 * start address and _full_ length including BSS sections
11079 * (which must be longer than the actual data, of course).
11080 */
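/* For reference, a sketch of the header layout this code relies on
 * (the authoritative definition is struct tg3_firmware_hdr in tg3.h):
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;
 *		__be32 len;	(full length, including BSS)
 *	};
 *
 * TG3_FW_HDR_LEN is the size of this header, so the sanity check below
 * requires len to cover at least the blob size minus the header.
 */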
11081
11082 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11083 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11084 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11085 tp->fw_len, tp->fw_needed);
11086 release_firmware(tp->fw);
11087 tp->fw = NULL;
11088 return -EINVAL;
11089 }
11090
11091 /* We no longer need firmware; we have it. */
11092 tp->fw_needed = NULL;
11093 return 0;
11094 }
11095
11096 static u32 tg3_irq_count(struct tg3 *tp)
11097 {
11098 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11099
11100 if (irq_cnt > 1) {
11101 /* We want as many rx rings enabled as there are cpus.
11102 * In multiqueue MSI-X mode, the first MSI-X vector
11103 * only deals with link interrupts, etc, so we add
11104 * one to the number of vectors we are requesting.
11105 */
11106 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11107 }
11108
11109 return irq_cnt;
11110 }
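/* Example (illustrative): with rxq_cnt = 4, txq_cnt = 1 and irq_max = 5,
 * irq_cnt = min(4 + 1, 5) = 5: four vectors for the rx rings plus one
 * vector reserved for link and other non-ring interrupts.
 */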
11111
11112 static bool tg3_enable_msix(struct tg3 *tp)
11113 {
11114 int i, rc;
11115 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11116
11117 tp->txq_cnt = tp->txq_req;
11118 tp->rxq_cnt = tp->rxq_req;
11119 if (!tp->rxq_cnt)
11120 tp->rxq_cnt = netif_get_num_default_rss_queues();
11121 if (tp->rxq_cnt > tp->rxq_max)
11122 tp->rxq_cnt = tp->rxq_max;
11123
11124 /* Disable multiple TX rings by default. Simple round-robin hardware
11125 * scheduling of the TX rings can cause starvation of rings with
11126 * small packets when other rings have TSO or jumbo packets.
11127 */
11128 if (!tp->txq_req)
11129 tp->txq_cnt = 1;
11130
11131 tp->irq_cnt = tg3_irq_count(tp);
11132
11133 for (i = 0; i < tp->irq_max; i++) {
11134 msix_ent[i].entry = i;
11135 msix_ent[i].vector = 0;
11136 }
11137
11138 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11139 if (rc < 0) {
11140 return false;
11141 } else if (rc != 0) {
11142 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11143 return false;
11144 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11145 tp->irq_cnt, rc);
11146 tp->irq_cnt = rc;
11147 tp->rxq_cnt = max(rc - 1, 1);
11148 if (tp->txq_cnt)
11149 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11150 }
11151
11152 for (i = 0; i < tp->irq_max; i++)
11153 tp->napi[i].irq_vec = msix_ent[i].vector;
11154
11155 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11156 pci_disable_msix(tp->pdev);
11157 return false;
11158 }
11159
11160 if (tp->irq_cnt == 1)
11161 return true;
11162
11163 tg3_flag_set(tp, ENABLE_RSS);
11164
11165 if (tp->txq_cnt > 1)
11166 tg3_flag_set(tp, ENABLE_TSS);
11167
11168 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11169
11170 return true;
11171 }
11172
11173 static void tg3_ints_init(struct tg3 *tp)
11174 {
11175 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11176 !tg3_flag(tp, TAGGED_STATUS)) {
11177 /* All MSI supporting chips should support tagged
11178 * status. Assert that this is the case.
11179 */
11180 netdev_warn(tp->dev,
11181 "MSI without TAGGED_STATUS? Not using MSI\n");
11182 goto defcfg;
11183 }
11184
11185 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11186 tg3_flag_set(tp, USING_MSIX);
11187 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11188 tg3_flag_set(tp, USING_MSI);
11189
11190 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11191 u32 msi_mode = tr32(MSGINT_MODE);
11192 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11193 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11194 if (!tg3_flag(tp, 1SHOT_MSI))
11195 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11196 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11197 }
11198 defcfg:
11199 if (!tg3_flag(tp, USING_MSIX)) {
11200 tp->irq_cnt = 1;
11201 tp->napi[0].irq_vec = tp->pdev->irq;
11202 }
11203
11204 if (tp->irq_cnt == 1) {
11205 tp->txq_cnt = 1;
11206 tp->rxq_cnt = 1;
11207 netif_set_real_num_tx_queues(tp->dev, 1);
11208 netif_set_real_num_rx_queues(tp->dev, 1);
11209 }
11210 }
11211
11212 static void tg3_ints_fini(struct tg3 *tp)
11213 {
11214 if (tg3_flag(tp, USING_MSIX))
11215 pci_disable_msix(tp->pdev);
11216 else if (tg3_flag(tp, USING_MSI))
11217 pci_disable_msi(tp->pdev);
11218 tg3_flag_clear(tp, USING_MSI);
11219 tg3_flag_clear(tp, USING_MSIX);
11220 tg3_flag_clear(tp, ENABLE_RSS);
11221 tg3_flag_clear(tp, ENABLE_TSS);
11222 }
11223
11224 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11225 bool init)
11226 {
11227 struct net_device *dev = tp->dev;
11228 int i, err;
11229
11230 /*
11231 * Setup interrupts first so we know how
11232 * many NAPI resources to allocate
11233 */
11234 tg3_ints_init(tp);
11235
11236 tg3_rss_check_indir_tbl(tp);
11237
11238 /* The placement of this call is tied
11239 * to the setup and use of Host TX descriptors.
11240 */
11241 err = tg3_alloc_consistent(tp);
11242 if (err)
11243 goto err_out1;
11244
11245 tg3_napi_init(tp);
11246
11247 tg3_napi_enable(tp);
11248
11249 for (i = 0; i < tp->irq_cnt; i++) {
11250 struct tg3_napi *tnapi = &tp->napi[i];
11251 err = tg3_request_irq(tp, i);
11252 if (err) {
11253 for (i--; i >= 0; i--) {
11254 tnapi = &tp->napi[i];
11255 free_irq(tnapi->irq_vec, tnapi);
11256 }
11257 goto err_out2;
11258 }
11259 }
11260
11261 tg3_full_lock(tp, 0);
11262
11263 err = tg3_init_hw(tp, reset_phy);
11264 if (err) {
11265 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11266 tg3_free_rings(tp);
11267 }
11268
11269 tg3_full_unlock(tp);
11270
11271 if (err)
11272 goto err_out3;
11273
11274 if (test_irq && tg3_flag(tp, USING_MSI)) {
11275 err = tg3_test_msi(tp);
11276
11277 if (err) {
11278 tg3_full_lock(tp, 0);
11279 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11280 tg3_free_rings(tp);
11281 tg3_full_unlock(tp);
11282
11283 goto err_out2;
11284 }
11285
11286 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11287 u32 val = tr32(PCIE_TRANSACTION_CFG);
11288
11289 tw32(PCIE_TRANSACTION_CFG,
11290 val | PCIE_TRANS_CFG_1SHOT_MSI);
11291 }
11292 }
11293
11294 tg3_phy_start(tp);
11295
11296 tg3_hwmon_open(tp);
11297
11298 tg3_full_lock(tp, 0);
11299
11300 tg3_timer_start(tp);
11301 tg3_flag_set(tp, INIT_COMPLETE);
11302 tg3_enable_ints(tp);
11303
11304 if (init)
11305 tg3_ptp_init(tp);
11306 else
11307 tg3_ptp_resume(tp);
11308
11309
11310 tg3_full_unlock(tp);
11311
11312 netif_tx_start_all_queues(dev);
11313
11314 /*
11315 * Reset the loopback feature if it was turned on while the device
11316 * was down; make sure that it's installed properly now.
11317 */
11318 if (dev->features & NETIF_F_LOOPBACK)
11319 tg3_set_loopback(dev, dev->features);
11320
11321 return 0;
11322
11323 err_out3:
11324 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11325 struct tg3_napi *tnapi = &tp->napi[i];
11326 free_irq(tnapi->irq_vec, tnapi);
11327 }
11328
11329 err_out2:
11330 tg3_napi_disable(tp);
11331 tg3_napi_fini(tp);
11332 tg3_free_consistent(tp);
11333
11334 err_out1:
11335 tg3_ints_fini(tp);
11336
11337 return err;
11338 }
11339
11340 static void tg3_stop(struct tg3 *tp)
11341 {
11342 int i;
11343
11344 tg3_reset_task_cancel(tp);
11345 tg3_netif_stop(tp);
11346
11347 tg3_timer_stop(tp);
11348
11349 tg3_hwmon_close(tp);
11350
11351 tg3_phy_stop(tp);
11352
11353 tg3_full_lock(tp, 1);
11354
11355 tg3_disable_ints(tp);
11356
11357 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11358 tg3_free_rings(tp);
11359 tg3_flag_clear(tp, INIT_COMPLETE);
11360
11361 tg3_full_unlock(tp);
11362
11363 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11364 struct tg3_napi *tnapi = &tp->napi[i];
11365 free_irq(tnapi->irq_vec, tnapi);
11366 }
11367
11368 tg3_ints_fini(tp);
11369
11370 tg3_napi_fini(tp);
11371
11372 tg3_free_consistent(tp);
11373 }
11374
11375 static int tg3_open(struct net_device *dev)
11376 {
11377 struct tg3 *tp = netdev_priv(dev);
11378 int err;
11379
11380 if (tp->fw_needed) {
11381 err = tg3_request_firmware(tp);
11382 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11383 if (err) {
11384 netdev_warn(tp->dev, "EEE capability disabled\n");
11385 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11386 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11387 netdev_warn(tp->dev, "EEE capability restored\n");
11388 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11389 }
11390 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11391 if (err)
11392 return err;
11393 } else if (err) {
11394 netdev_warn(tp->dev, "TSO capability disabled\n");
11395 tg3_flag_clear(tp, TSO_CAPABLE);
11396 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11397 netdev_notice(tp->dev, "TSO capability restored\n");
11398 tg3_flag_set(tp, TSO_CAPABLE);
11399 }
11400 }
11401
11402 tg3_carrier_off(tp);
11403
11404 err = tg3_power_up(tp);
11405 if (err)
11406 return err;
11407
11408 tg3_full_lock(tp, 0);
11409
11410 tg3_disable_ints(tp);
11411 tg3_flag_clear(tp, INIT_COMPLETE);
11412
11413 tg3_full_unlock(tp);
11414
11415 err = tg3_start(tp,
11416 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11417 true, true);
11418 if (err) {
11419 tg3_frob_aux_power(tp, false);
11420 pci_set_power_state(tp->pdev, PCI_D3hot);
11421 }
11422
11423 if (tg3_flag(tp, PTP_CAPABLE)) {
11424 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11425 &tp->pdev->dev);
11426 if (IS_ERR(tp->ptp_clock))
11427 tp->ptp_clock = NULL;
11428 }
11429
11430 return err;
11431 }
11432
11433 static int tg3_close(struct net_device *dev)
11434 {
11435 struct tg3 *tp = netdev_priv(dev);
11436
11437 tg3_ptp_fini(tp);
11438
11439 tg3_stop(tp);
11440
11441 /* Clear stats across close / open calls */
11442 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11443 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11444
11445 tg3_power_down(tp);
11446
11447 tg3_carrier_off(tp);
11448
11449 return 0;
11450 }
11451
11452 static inline u64 get_stat64(tg3_stat64_t *val)
11453 {
11454 return ((u64)val->high << 32) | ((u64)val->low);
11455 }
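/* e.g. high = 0x1, low = 0x2 yields 0x100000002. */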
11456
11457 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11458 {
11459 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11460
11461 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11462 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11463 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11464 u32 val;
11465
11466 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11467 tg3_writephy(tp, MII_TG3_TEST1,
11468 val | MII_TG3_TEST1_CRC_EN);
11469 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11470 } else
11471 val = 0;
11472
11473 tp->phy_crc_errors += val;
11474
11475 return tp->phy_crc_errors;
11476 }
11477
11478 return get_stat64(&hw_stats->rx_fcs_errors);
11479 }
11480
11481 #define ESTAT_ADD(member) \
11482 estats->member = old_estats->member + \
11483 get_stat64(&hw_stats->member)
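/* For example, ESTAT_ADD(rx_octets) expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool statistic is the total saved across the last
 * close/reset (estats_prev) plus the current hardware count.
 */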
11484
11485 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11486 {
11487 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11488 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11489
11490 ESTAT_ADD(rx_octets);
11491 ESTAT_ADD(rx_fragments);
11492 ESTAT_ADD(rx_ucast_packets);
11493 ESTAT_ADD(rx_mcast_packets);
11494 ESTAT_ADD(rx_bcast_packets);
11495 ESTAT_ADD(rx_fcs_errors);
11496 ESTAT_ADD(rx_align_errors);
11497 ESTAT_ADD(rx_xon_pause_rcvd);
11498 ESTAT_ADD(rx_xoff_pause_rcvd);
11499 ESTAT_ADD(rx_mac_ctrl_rcvd);
11500 ESTAT_ADD(rx_xoff_entered);
11501 ESTAT_ADD(rx_frame_too_long_errors);
11502 ESTAT_ADD(rx_jabbers);
11503 ESTAT_ADD(rx_undersize_packets);
11504 ESTAT_ADD(rx_in_length_errors);
11505 ESTAT_ADD(rx_out_length_errors);
11506 ESTAT_ADD(rx_64_or_less_octet_packets);
11507 ESTAT_ADD(rx_65_to_127_octet_packets);
11508 ESTAT_ADD(rx_128_to_255_octet_packets);
11509 ESTAT_ADD(rx_256_to_511_octet_packets);
11510 ESTAT_ADD(rx_512_to_1023_octet_packets);
11511 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11512 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11513 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11514 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11515 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11516
11517 ESTAT_ADD(tx_octets);
11518 ESTAT_ADD(tx_collisions);
11519 ESTAT_ADD(tx_xon_sent);
11520 ESTAT_ADD(tx_xoff_sent);
11521 ESTAT_ADD(tx_flow_control);
11522 ESTAT_ADD(tx_mac_errors);
11523 ESTAT_ADD(tx_single_collisions);
11524 ESTAT_ADD(tx_mult_collisions);
11525 ESTAT_ADD(tx_deferred);
11526 ESTAT_ADD(tx_excessive_collisions);
11527 ESTAT_ADD(tx_late_collisions);
11528 ESTAT_ADD(tx_collide_2times);
11529 ESTAT_ADD(tx_collide_3times);
11530 ESTAT_ADD(tx_collide_4times);
11531 ESTAT_ADD(tx_collide_5times);
11532 ESTAT_ADD(tx_collide_6times);
11533 ESTAT_ADD(tx_collide_7times);
11534 ESTAT_ADD(tx_collide_8times);
11535 ESTAT_ADD(tx_collide_9times);
11536 ESTAT_ADD(tx_collide_10times);
11537 ESTAT_ADD(tx_collide_11times);
11538 ESTAT_ADD(tx_collide_12times);
11539 ESTAT_ADD(tx_collide_13times);
11540 ESTAT_ADD(tx_collide_14times);
11541 ESTAT_ADD(tx_collide_15times);
11542 ESTAT_ADD(tx_ucast_packets);
11543 ESTAT_ADD(tx_mcast_packets);
11544 ESTAT_ADD(tx_bcast_packets);
11545 ESTAT_ADD(tx_carrier_sense_errors);
11546 ESTAT_ADD(tx_discards);
11547 ESTAT_ADD(tx_errors);
11548
11549 ESTAT_ADD(dma_writeq_full);
11550 ESTAT_ADD(dma_write_prioq_full);
11551 ESTAT_ADD(rxbds_empty);
11552 ESTAT_ADD(rx_discards);
11553 ESTAT_ADD(rx_errors);
11554 ESTAT_ADD(rx_threshold_hit);
11555
11556 ESTAT_ADD(dma_readq_full);
11557 ESTAT_ADD(dma_read_prioq_full);
11558 ESTAT_ADD(tx_comp_queue_full);
11559
11560 ESTAT_ADD(ring_set_send_prod_index);
11561 ESTAT_ADD(ring_status_update);
11562 ESTAT_ADD(nic_irqs);
11563 ESTAT_ADD(nic_avoided_irqs);
11564 ESTAT_ADD(nic_tx_threshold_hit);
11565
11566 ESTAT_ADD(mbuf_lwm_thresh_hit);
11567 }
11568
11569 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11570 {
11571 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11572 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11573
11574 stats->rx_packets = old_stats->rx_packets +
11575 get_stat64(&hw_stats->rx_ucast_packets) +
11576 get_stat64(&hw_stats->rx_mcast_packets) +
11577 get_stat64(&hw_stats->rx_bcast_packets);
11578
11579 stats->tx_packets = old_stats->tx_packets +
11580 get_stat64(&hw_stats->tx_ucast_packets) +
11581 get_stat64(&hw_stats->tx_mcast_packets) +
11582 get_stat64(&hw_stats->tx_bcast_packets);
11583
11584 stats->rx_bytes = old_stats->rx_bytes +
11585 get_stat64(&hw_stats->rx_octets);
11586 stats->tx_bytes = old_stats->tx_bytes +
11587 get_stat64(&hw_stats->tx_octets);
11588
11589 stats->rx_errors = old_stats->rx_errors +
11590 get_stat64(&hw_stats->rx_errors);
11591 stats->tx_errors = old_stats->tx_errors +
11592 get_stat64(&hw_stats->tx_errors) +
11593 get_stat64(&hw_stats->tx_mac_errors) +
11594 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11595 get_stat64(&hw_stats->tx_discards);
11596
11597 stats->multicast = old_stats->multicast +
11598 get_stat64(&hw_stats->rx_mcast_packets);
11599 stats->collisions = old_stats->collisions +
11600 get_stat64(&hw_stats->tx_collisions);
11601
11602 stats->rx_length_errors = old_stats->rx_length_errors +
11603 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11604 get_stat64(&hw_stats->rx_undersize_packets);
11605
11606 stats->rx_over_errors = old_stats->rx_over_errors +
11607 get_stat64(&hw_stats->rxbds_empty);
11608 stats->rx_frame_errors = old_stats->rx_frame_errors +
11609 get_stat64(&hw_stats->rx_align_errors);
11610 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11611 get_stat64(&hw_stats->tx_discards);
11612 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11613 get_stat64(&hw_stats->tx_carrier_sense_errors);
11614
11615 stats->rx_crc_errors = old_stats->rx_crc_errors +
11616 tg3_calc_crc_errors(tp);
11617
11618 stats->rx_missed_errors = old_stats->rx_missed_errors +
11619 get_stat64(&hw_stats->rx_discards);
11620
11621 stats->rx_dropped = tp->rx_dropped;
11622 stats->tx_dropped = tp->tx_dropped;
11623 }
11624
11625 static int tg3_get_regs_len(struct net_device *dev)
11626 {
11627 return TG3_REG_BLK_SIZE;
11628 }
11629
11630 static void tg3_get_regs(struct net_device *dev,
11631 struct ethtool_regs *regs, void *_p)
11632 {
11633 struct tg3 *tp = netdev_priv(dev);
11634
11635 regs->version = 0;
11636
11637 memset(_p, 0, TG3_REG_BLK_SIZE);
11638
11639 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11640 return;
11641
11642 tg3_full_lock(tp, 0);
11643
11644 tg3_dump_legacy_regs(tp, (u32 *)_p);
11645
11646 tg3_full_unlock(tp);
11647 }
11648
11649 static int tg3_get_eeprom_len(struct net_device *dev)
11650 {
11651 struct tg3 *tp = netdev_priv(dev);
11652
11653 return tp->nvram_size;
11654 }
11655
11656 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11657 {
11658 struct tg3 *tp = netdev_priv(dev);
11659 int ret;
11660 u8 *pd;
11661 u32 i, offset, len, b_offset, b_count;
11662 __be32 val;
11663
11664 if (tg3_flag(tp, NO_NVRAM))
11665 return -EINVAL;
11666
11667 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11668 return -EAGAIN;
11669
11670 offset = eeprom->offset;
11671 len = eeprom->len;
11672 eeprom->len = 0;
11673
11674 eeprom->magic = TG3_EEPROM_MAGIC;
11675
11676 if (offset & 3) {
11677 /* adjustments to start on required 4 byte boundary */
11678 b_offset = offset & 3;
11679 b_count = 4 - b_offset;
11680 if (b_count > len) {
11681 /* i.e. offset=1 len=2 */
11682 b_count = len;
11683 }
11684 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11685 if (ret)
11686 return ret;
11687 memcpy(data, ((char *)&val) + b_offset, b_count);
11688 len -= b_count;
11689 offset += b_count;
11690 eeprom->len += b_count;
11691 }
11692
11693 /* read bytes up to the last 4 byte boundary */
11694 pd = &data[eeprom->len];
11695 for (i = 0; i < (len - (len & 3)); i += 4) {
11696 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11697 if (ret) {
11698 eeprom->len += i;
11699 return ret;
11700 }
11701 memcpy(pd + i, &val, 4);
11702 }
11703 eeprom->len += i;
11704
11705 if (len & 3) {
11706 /* read last bytes not ending on 4 byte boundary */
11707 pd = &data[eeprom->len];
11708 b_count = len & 3;
11709 b_offset = offset + len - b_count;
11710 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11711 if (ret)
11712 return ret;
11713 memcpy(pd, &val, b_count);
11714 eeprom->len += b_count;
11715 }
11716 return 0;
11717 }
11718
11719 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11720 {
11721 struct tg3 *tp = netdev_priv(dev);
11722 int ret;
11723 u32 offset, len, b_offset, odd_len;
11724 u8 *buf;
11725 __be32 start, end;
11726
11727 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11728 return -EAGAIN;
11729
11730 if (tg3_flag(tp, NO_NVRAM) ||
11731 eeprom->magic != TG3_EEPROM_MAGIC)
11732 return -EINVAL;
11733
11734 offset = eeprom->offset;
11735 len = eeprom->len;
11736
11737 if ((b_offset = (offset & 3))) {
11738 /* adjustments to start on required 4 byte boundary */
11739 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11740 if (ret)
11741 return ret;
11742 len += b_offset;
11743 offset &= ~3;
11744 if (len < 4)
11745 len = 4;
11746 }
11747
11748 odd_len = 0;
11749 if (len & 3) {
11750 /* adjustments to end on required 4 byte boundary */
11751 odd_len = 1;
11752 len = (len + 3) & ~3;
11753 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11754 if (ret)
11755 return ret;
11756 }
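	/* Worked example (illustrative): a write with offset = 5, len = 6
	 * first becomes b_offset = 1, offset = 4, len = 7, with the word
	 * at offset 4 saved in "start"; the odd length then rounds len up
	 * to 8 and saves the word at offset 4 + 8 - 4 = 8 in "end". The
	 * bounce buffer below is then built as start byte | caller data |
	 * end byte, and 8 bytes are written back at offset 4.
	 */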
11757
11758 buf = data;
11759 if (b_offset || odd_len) {
11760 buf = kmalloc(len, GFP_KERNEL);
11761 if (!buf)
11762 return -ENOMEM;
11763 if (b_offset)
11764 memcpy(buf, &start, 4);
11765 if (odd_len)
11766 memcpy(buf+len-4, &end, 4);
11767 memcpy(buf + b_offset, data, eeprom->len);
11768 }
11769
11770 ret = tg3_nvram_write_block(tp, offset, len, buf);
11771
11772 if (buf != data)
11773 kfree(buf);
11774
11775 return ret;
11776 }
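
/* A worked example of the read-modify-write above: offset=6, len=3
 * becomes an aligned 8-byte write at offset 4.  The word read at 4
 * supplies bytes 4..5, the word read at 8 supplies bytes 9..11, and
 * the caller's three bytes land at 6..8 (copied last, so they win
 * where they overlap the end word).
 */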
11777
11778 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11779 {
11780 struct tg3 *tp = netdev_priv(dev);
11781
11782 if (tg3_flag(tp, USE_PHYLIB)) {
11783 struct phy_device *phydev;
11784 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11785 return -EAGAIN;
11786 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11787 return phy_ethtool_gset(phydev, cmd);
11788 }
11789
11790 cmd->supported = (SUPPORTED_Autoneg);
11791
11792 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11793 cmd->supported |= (SUPPORTED_1000baseT_Half |
11794 SUPPORTED_1000baseT_Full);
11795
11796 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11797 cmd->supported |= (SUPPORTED_100baseT_Half |
11798 SUPPORTED_100baseT_Full |
11799 SUPPORTED_10baseT_Half |
11800 SUPPORTED_10baseT_Full |
11801 SUPPORTED_TP);
11802 cmd->port = PORT_TP;
11803 } else {
11804 cmd->supported |= SUPPORTED_FIBRE;
11805 cmd->port = PORT_FIBRE;
11806 }
11807
11808 cmd->advertising = tp->link_config.advertising;
11809 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11810 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11811 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11812 cmd->advertising |= ADVERTISED_Pause;
11813 } else {
11814 cmd->advertising |= ADVERTISED_Pause |
11815 ADVERTISED_Asym_Pause;
11816 }
11817 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11818 cmd->advertising |= ADVERTISED_Asym_Pause;
11819 }
11820 }
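	/* The mapping above follows IEEE 802.3 Annex 28B pause
	 * advertisement: RX+TX is advertised as Pause, RX-only as
	 * Pause | Asym_Pause, and TX-only as Asym_Pause alone.
	 */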
11821 if (netif_running(dev) && tp->link_up) {
11822 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11823 cmd->duplex = tp->link_config.active_duplex;
11824 cmd->lp_advertising = tp->link_config.rmt_adv;
11825 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11826 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11827 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11828 else
11829 cmd->eth_tp_mdix = ETH_TP_MDI;
11830 }
11831 } else {
11832 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11833 cmd->duplex = DUPLEX_UNKNOWN;
11834 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11835 }
11836 cmd->phy_address = tp->phy_addr;
11837 cmd->transceiver = XCVR_INTERNAL;
11838 cmd->autoneg = tp->link_config.autoneg;
11839 cmd->maxtxpkt = 0;
11840 cmd->maxrxpkt = 0;
11841 return 0;
11842 }
11843
11844 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11845 {
11846 struct tg3 *tp = netdev_priv(dev);
11847 u32 speed = ethtool_cmd_speed(cmd);
11848
11849 if (tg3_flag(tp, USE_PHYLIB)) {
11850 struct phy_device *phydev;
11851 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11852 return -EAGAIN;
11853 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11854 return phy_ethtool_sset(phydev, cmd);
11855 }
11856
11857 if (cmd->autoneg != AUTONEG_ENABLE &&
11858 cmd->autoneg != AUTONEG_DISABLE)
11859 return -EINVAL;
11860
11861 if (cmd->autoneg == AUTONEG_DISABLE &&
11862 cmd->duplex != DUPLEX_FULL &&
11863 cmd->duplex != DUPLEX_HALF)
11864 return -EINVAL;
11865
11866 if (cmd->autoneg == AUTONEG_ENABLE) {
11867 u32 mask = ADVERTISED_Autoneg |
11868 ADVERTISED_Pause |
11869 ADVERTISED_Asym_Pause;
11870
11871 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11872 mask |= ADVERTISED_1000baseT_Half |
11873 ADVERTISED_1000baseT_Full;
11874
11875 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11876 mask |= ADVERTISED_100baseT_Half |
11877 ADVERTISED_100baseT_Full |
11878 ADVERTISED_10baseT_Half |
11879 ADVERTISED_10baseT_Full |
11880 ADVERTISED_TP;
11881 else
11882 mask |= ADVERTISED_FIBRE;
11883
11884 if (cmd->advertising & ~mask)
11885 return -EINVAL;
11886
11887 mask &= (ADVERTISED_1000baseT_Half |
11888 ADVERTISED_1000baseT_Full |
11889 ADVERTISED_100baseT_Half |
11890 ADVERTISED_100baseT_Full |
11891 ADVERTISED_10baseT_Half |
11892 ADVERTISED_10baseT_Full);
11893
11894 cmd->advertising &= mask;
11895 } else {
11896 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11897 if (speed != SPEED_1000)
11898 return -EINVAL;
11899
11900 if (cmd->duplex != DUPLEX_FULL)
11901 return -EINVAL;
11902 } else {
11903 if (speed != SPEED_100 &&
11904 speed != SPEED_10)
11905 return -EINVAL;
11906 }
11907 }
11908
11909 tg3_full_lock(tp, 0);
11910
11911 tp->link_config.autoneg = cmd->autoneg;
11912 if (cmd->autoneg == AUTONEG_ENABLE) {
11913 tp->link_config.advertising = (cmd->advertising |
11914 ADVERTISED_Autoneg);
11915 tp->link_config.speed = SPEED_UNKNOWN;
11916 tp->link_config.duplex = DUPLEX_UNKNOWN;
11917 } else {
11918 tp->link_config.advertising = 0;
11919 tp->link_config.speed = speed;
11920 tp->link_config.duplex = cmd->duplex;
11921 }
11922
11923 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11924
11925 tg3_warn_mgmt_link_flap(tp);
11926
11927 if (netif_running(dev))
11928 tg3_setup_phy(tp, true);
11929
11930 tg3_full_unlock(tp);
11931
11932 return 0;
11933 }
11934
11935 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11936 {
11937 struct tg3 *tp = netdev_priv(dev);
11938
11939 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11940 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11941 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11942 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11943 }
11944
11945 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11946 {
11947 struct tg3 *tp = netdev_priv(dev);
11948
11949 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11950 wol->supported = WAKE_MAGIC;
11951 else
11952 wol->supported = 0;
11953 wol->wolopts = 0;
11954 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11955 wol->wolopts = WAKE_MAGIC;
11956 memset(&wol->sopass, 0, sizeof(wol->sopass));
11957 }
11958
11959 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11960 {
11961 struct tg3 *tp = netdev_priv(dev);
11962 struct device *dp = &tp->pdev->dev;
11963
11964 if (wol->wolopts & ~WAKE_MAGIC)
11965 return -EINVAL;
11966 if ((wol->wolopts & WAKE_MAGIC) &&
11967 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11968 return -EINVAL;
11969
11970 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11971
11972 spin_lock_bh(&tp->lock);
11973 if (device_may_wakeup(dp))
11974 tg3_flag_set(tp, WOL_ENABLE);
11975 else
11976 tg3_flag_clear(tp, WOL_ENABLE);
11977 spin_unlock_bh(&tp->lock);
11978
11979 return 0;
11980 }
11981
11982 static u32 tg3_get_msglevel(struct net_device *dev)
11983 {
11984 struct tg3 *tp = netdev_priv(dev);
11985 return tp->msg_enable;
11986 }
11987
11988 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11989 {
11990 struct tg3 *tp = netdev_priv(dev);
11991 tp->msg_enable = value;
11992 }
11993
11994 static int tg3_nway_reset(struct net_device *dev)
11995 {
11996 struct tg3 *tp = netdev_priv(dev);
11997 int r;
11998
11999 if (!netif_running(dev))
12000 return -EAGAIN;
12001
12002 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12003 return -EINVAL;
12004
12005 tg3_warn_mgmt_link_flap(tp);
12006
12007 if (tg3_flag(tp, USE_PHYLIB)) {
12008 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12009 return -EAGAIN;
12010 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12011 } else {
12012 u32 bmcr;
12013
12014 spin_lock_bh(&tp->lock);
12015 r = -EINVAL;
12016 tg3_readphy(tp, MII_BMCR, &bmcr);	/* dummy read; the value is re-read below */
12017 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12018 ((bmcr & BMCR_ANENABLE) ||
12019 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12020 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12021 BMCR_ANENABLE);
12022 r = 0;
12023 }
12024 spin_unlock_bh(&tp->lock);
12025 }
12026
12027 return r;
12028 }
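
/* Note on the restart above: BMCR_ANRESTART (bit 9 of the standard MII
 * control register) is self-clearing per IEEE 802.3 clause 22, so no
 * follow-up write is needed once renegotiation has been kicked off.
 */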
12029
12030 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12031 {
12032 struct tg3 *tp = netdev_priv(dev);
12033
12034 ering->rx_max_pending = tp->rx_std_ring_mask;
12035 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12036 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12037 else
12038 ering->rx_jumbo_max_pending = 0;
12039
12040 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12041
12042 ering->rx_pending = tp->rx_pending;
12043 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12044 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12045 else
12046 ering->rx_jumbo_pending = 0;
12047
12048 ering->tx_pending = tp->napi[0].tx_pending;
12049 }
12050
12051 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12052 {
12053 struct tg3 *tp = netdev_priv(dev);
12054 int i, irq_sync = 0, err = 0;
12055
12056 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12057 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12058 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12059 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12060 (tg3_flag(tp, TSO_BUG) &&
12061 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12062 return -EINVAL;
12063
12064 if (netif_running(dev)) {
12065 tg3_phy_stop(tp);
12066 tg3_netif_stop(tp);
12067 irq_sync = 1;
12068 }
12069
12070 tg3_full_lock(tp, irq_sync);
12071
12072 tp->rx_pending = ering->rx_pending;
12073
12074 if (tg3_flag(tp, MAX_RXPEND_64) &&
12075 tp->rx_pending > 63)
12076 tp->rx_pending = 63;
12077 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12078
12079 for (i = 0; i < tp->irq_max; i++)
12080 tp->napi[i].tx_pending = ering->tx_pending;
12081
12082 if (netif_running(dev)) {
12083 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12084 err = tg3_restart_hw(tp, false);
12085 if (!err)
12086 tg3_netif_start(tp);
12087 }
12088
12089 tg3_full_unlock(tp);
12090
12091 if (irq_sync && !err)
12092 tg3_phy_start(tp);
12093
12094 return err;
12095 }
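
/* From userspace this path is typically exercised with something like
 * "ethtool -G eth0 rx 511 tx 1024" (interface name and values
 * illustrative); requests outside the bounds checked above fail with
 * -EINVAL before the device is touched.
 */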
12096
12097 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12098 {
12099 struct tg3 *tp = netdev_priv(dev);
12100
12101 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12102
12103 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12104 epause->rx_pause = 1;
12105 else
12106 epause->rx_pause = 0;
12107
12108 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12109 epause->tx_pause = 1;
12110 else
12111 epause->tx_pause = 0;
12112 }
12113
12114 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12115 {
12116 struct tg3 *tp = netdev_priv(dev);
12117 int err = 0;
12118
12119 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12120 tg3_warn_mgmt_link_flap(tp);
12121
12122 if (tg3_flag(tp, USE_PHYLIB)) {
12123 u32 newadv;
12124 struct phy_device *phydev;
12125
12126 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12127
12128 if (!(phydev->supported & SUPPORTED_Pause) ||
12129 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12130 (epause->rx_pause != epause->tx_pause)))
12131 return -EINVAL;
12132
12133 tp->link_config.flowctrl = 0;
12134 if (epause->rx_pause) {
12135 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12136
12137 if (epause->tx_pause) {
12138 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12139 newadv = ADVERTISED_Pause;
12140 } else
12141 newadv = ADVERTISED_Pause |
12142 ADVERTISED_Asym_Pause;
12143 } else if (epause->tx_pause) {
12144 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12145 newadv = ADVERTISED_Asym_Pause;
12146 } else
12147 newadv = 0;
12148
12149 if (epause->autoneg)
12150 tg3_flag_set(tp, PAUSE_AUTONEG);
12151 else
12152 tg3_flag_clear(tp, PAUSE_AUTONEG);
12153
12154 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12155 u32 oldadv = phydev->advertising &
12156 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12157 if (oldadv != newadv) {
12158 phydev->advertising &=
12159 ~(ADVERTISED_Pause |
12160 ADVERTISED_Asym_Pause);
12161 phydev->advertising |= newadv;
12162 if (phydev->autoneg) {
12163 /*
12164 * Always renegotiate the link to
12165 * inform our link partner of our
12166 * flow control settings, even if the
12167 * flow control is forced. Let
12168 * tg3_adjust_link() do the final
12169 * flow control setup.
12170 */
12171 return phy_start_aneg(phydev);
12172 }
12173 }
12174
12175 if (!epause->autoneg)
12176 tg3_setup_flow_control(tp, 0, 0);
12177 } else {
12178 tp->link_config.advertising &=
12179 ~(ADVERTISED_Pause |
12180 ADVERTISED_Asym_Pause);
12181 tp->link_config.advertising |= newadv;
12182 }
12183 } else {
12184 int irq_sync = 0;
12185
12186 if (netif_running(dev)) {
12187 tg3_netif_stop(tp);
12188 irq_sync = 1;
12189 }
12190
12191 tg3_full_lock(tp, irq_sync);
12192
12193 if (epause->autoneg)
12194 tg3_flag_set(tp, PAUSE_AUTONEG);
12195 else
12196 tg3_flag_clear(tp, PAUSE_AUTONEG);
12197 if (epause->rx_pause)
12198 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12199 else
12200 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12201 if (epause->tx_pause)
12202 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12203 else
12204 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12205
12206 if (netif_running(dev)) {
12207 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12208 err = tg3_restart_hw(tp, false);
12209 if (!err)
12210 tg3_netif_start(tp);
12211 }
12212
12213 tg3_full_unlock(tp);
12214 }
12215
12216 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12217
12218 return err;
12219 }
12220
12221 static int tg3_get_sset_count(struct net_device *dev, int sset)
12222 {
12223 switch (sset) {
12224 case ETH_SS_TEST:
12225 return TG3_NUM_TEST;
12226 case ETH_SS_STATS:
12227 return TG3_NUM_STATS;
12228 default:
12229 return -EOPNOTSUPP;
12230 }
12231 }
12232
12233 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12234 u32 *rules __always_unused)
12235 {
12236 struct tg3 *tp = netdev_priv(dev);
12237
12238 if (!tg3_flag(tp, SUPPORT_MSIX))
12239 return -EOPNOTSUPP;
12240
12241 switch (info->cmd) {
12242 case ETHTOOL_GRXRINGS:
12243 if (netif_running(tp->dev))
12244 info->data = tp->rxq_cnt;
12245 else {
12246 info->data = num_online_cpus();
12247 if (info->data > TG3_RSS_MAX_NUM_QS)
12248 info->data = TG3_RSS_MAX_NUM_QS;
12249 }
12250
12251 /* The first interrupt vector only
12252 * handles link interrupts.
12253 */
12254 info->data -= 1;
12255 return 0;
12256
12257 default:
12258 return -EOPNOTSUPP;
12259 }
12260 }
12261
12262 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12263 {
12264 u32 size = 0;
12265 struct tg3 *tp = netdev_priv(dev);
12266
12267 if (tg3_flag(tp, SUPPORT_MSIX))
12268 size = TG3_RSS_INDIR_TBL_SIZE;
12269
12270 return size;
12271 }
12272
12273 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12274 {
12275 struct tg3 *tp = netdev_priv(dev);
12276 int i;
12277
12278 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12279 indir[i] = tp->rss_ind_tbl[i];
12280
12281 return 0;
12282 }
12283
12284 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12285 {
12286 struct tg3 *tp = netdev_priv(dev);
12287 size_t i;
12288
12289 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12290 tp->rss_ind_tbl[i] = indir[i];
12291
12292 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12293 return 0;
12294
12295 /* It is legal to write the indirection
12296 * table while the device is running.
12297 */
12298 tg3_full_lock(tp, 0);
12299 tg3_rss_write_indir_tbl(tp);
12300 tg3_full_unlock(tp);
12301
12302 return 0;
12303 }
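
/* A minimal userspace sketch for the above (interface name
 * illustrative): "ethtool -X eth0 equal 4" spreads the indirection
 * table evenly across four rings, and the new table takes effect
 * immediately when the device is running.
 */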
12304
12305 static void tg3_get_channels(struct net_device *dev,
12306 struct ethtool_channels *channel)
12307 {
12308 struct tg3 *tp = netdev_priv(dev);
12309 u32 deflt_qs = netif_get_num_default_rss_queues();
12310
12311 channel->max_rx = tp->rxq_max;
12312 channel->max_tx = tp->txq_max;
12313
12314 if (netif_running(dev)) {
12315 channel->rx_count = tp->rxq_cnt;
12316 channel->tx_count = tp->txq_cnt;
12317 } else {
12318 if (tp->rxq_req)
12319 channel->rx_count = tp->rxq_req;
12320 else
12321 channel->rx_count = min(deflt_qs, tp->rxq_max);
12322
12323 if (tp->txq_req)
12324 channel->tx_count = tp->txq_req;
12325 else
12326 channel->tx_count = min(deflt_qs, tp->txq_max);
12327 }
12328 }
12329
12330 static int tg3_set_channels(struct net_device *dev,
12331 struct ethtool_channels *channel)
12332 {
12333 struct tg3 *tp = netdev_priv(dev);
12334
12335 if (!tg3_flag(tp, SUPPORT_MSIX))
12336 return -EOPNOTSUPP;
12337
12338 if (channel->rx_count > tp->rxq_max ||
12339 channel->tx_count > tp->txq_max)
12340 return -EINVAL;
12341
12342 tp->rxq_req = channel->rx_count;
12343 tp->txq_req = channel->tx_count;
12344
12345 if (!netif_running(dev))
12346 return 0;
12347
12348 tg3_stop(tp);
12349
12350 tg3_carrier_off(tp);
12351
12352 tg3_start(tp, true, false, false);
12353
12354 return 0;
12355 }
12356
12357 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12358 {
12359 switch (stringset) {
12360 case ETH_SS_STATS:
12361 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12362 break;
12363 case ETH_SS_TEST:
12364 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12365 break;
12366 default:
12367 WARN_ON(1);	/* unknown stringset; should be a WARN() with a message */
12368 break;
12369 }
12370 }
12371
12372 static int tg3_set_phys_id(struct net_device *dev,
12373 enum ethtool_phys_id_state state)
12374 {
12375 struct tg3 *tp = netdev_priv(dev);
12376
12377 if (!netif_running(tp->dev))
12378 return -EAGAIN;
12379
12380 switch (state) {
12381 case ETHTOOL_ID_ACTIVE:
12382 return 1; /* cycle on/off once per second */
12383
12384 case ETHTOOL_ID_ON:
12385 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12386 LED_CTRL_1000MBPS_ON |
12387 LED_CTRL_100MBPS_ON |
12388 LED_CTRL_10MBPS_ON |
12389 LED_CTRL_TRAFFIC_OVERRIDE |
12390 LED_CTRL_TRAFFIC_BLINK |
12391 LED_CTRL_TRAFFIC_LED);
12392 break;
12393
12394 case ETHTOOL_ID_OFF:
12395 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12396 LED_CTRL_TRAFFIC_OVERRIDE);
12397 break;
12398
12399 case ETHTOOL_ID_INACTIVE:
12400 tw32(MAC_LED_CTRL, tp->led_ctrl);
12401 break;
12402 }
12403
12404 return 0;
12405 }
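
/* "ethtool -p eth0 10" (interface name and duration illustrative)
 * drives this callback: returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to alternate ID_ON/ID_OFF once per second, and
 * ID_INACTIVE restores the saved tp->led_ctrl value.
 */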
12406
12407 static void tg3_get_ethtool_stats(struct net_device *dev,
12408 struct ethtool_stats *estats, u64 *tmp_stats)
12409 {
12410 struct tg3 *tp = netdev_priv(dev);
12411
12412 if (tp->hw_stats)
12413 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12414 else
12415 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12416 }
12417
12418 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12419 {
12420 int i;
12421 __be32 *buf;
12422 u32 offset = 0, len = 0;
12423 u32 magic, val;
12424
12425 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12426 return NULL;
12427
12428 if (magic == TG3_EEPROM_MAGIC) {
12429 for (offset = TG3_NVM_DIR_START;
12430 offset < TG3_NVM_DIR_END;
12431 offset += TG3_NVM_DIRENT_SIZE) {
12432 if (tg3_nvram_read(tp, offset, &val))
12433 return NULL;
12434
12435 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12436 TG3_NVM_DIRTYPE_EXTVPD)
12437 break;
12438 }
12439
12440 if (offset != TG3_NVM_DIR_END) {
12441 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12442 if (tg3_nvram_read(tp, offset + 4, &offset))
12443 return NULL;
12444
12445 offset = tg3_nvram_logical_addr(tp, offset);
12446 }
12447 }
12448
12449 if (!offset || !len) {
12450 offset = TG3_NVM_VPD_OFF;
12451 len = TG3_NVM_VPD_LEN;
12452 }
12453
12454 buf = kmalloc(len, GFP_KERNEL);
12455 if (buf == NULL)
12456 return NULL;
12457
12458 if (magic == TG3_EEPROM_MAGIC) {
12459 for (i = 0; i < len; i += 4) {
12460 /* The data is in little-endian format in NVRAM.
12461 * Use the big-endian read routines to preserve
12462 * the byte order as it exists in NVRAM.
12463 */
12464 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12465 goto error;
12466 }
12467 } else {
12468 u8 *ptr;
12469 ssize_t cnt;
12470 unsigned int pos = 0;
12471
12472 ptr = (u8 *)&buf[0];
12473 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12474 cnt = pci_read_vpd(tp->pdev, pos,
12475 len - pos, ptr);
12476 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12477 cnt = 0;
12478 else if (cnt < 0)
12479 goto error;
12480 }
12481 if (pos != len)
12482 goto error;
12483 }
12484
12485 *vpdlen = len;
12486
12487 return buf;
12488
12489 error:
12490 kfree(buf);
12491 return NULL;
12492 }
12493
12494 #define NVRAM_TEST_SIZE 0x100
12495 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12496 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12497 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12498 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12499 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12500 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12501 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12502 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12503
12504 static int tg3_test_nvram(struct tg3 *tp)
12505 {
12506 u32 csum, magic, len;
12507 __be32 *buf;
12508 int i, j, k, err = 0, size;
12509
12510 if (tg3_flag(tp, NO_NVRAM))
12511 return 0;
12512
12513 if (tg3_nvram_read(tp, 0, &magic) != 0)
12514 return -EIO;
12515
12516 if (magic == TG3_EEPROM_MAGIC)
12517 size = NVRAM_TEST_SIZE;
12518 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12519 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12520 TG3_EEPROM_SB_FORMAT_1) {
12521 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12522 case TG3_EEPROM_SB_REVISION_0:
12523 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12524 break;
12525 case TG3_EEPROM_SB_REVISION_2:
12526 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12527 break;
12528 case TG3_EEPROM_SB_REVISION_3:
12529 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12530 break;
12531 case TG3_EEPROM_SB_REVISION_4:
12532 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12533 break;
12534 case TG3_EEPROM_SB_REVISION_5:
12535 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12536 break;
12537 case TG3_EEPROM_SB_REVISION_6:
12538 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12539 break;
12540 default:
12541 return -EIO;
12542 }
12543 } else
12544 return 0;
12545 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12546 size = NVRAM_SELFBOOT_HW_SIZE;
12547 else
12548 return -EIO;
12549
12550 buf = kmalloc(size, GFP_KERNEL);
12551 if (buf == NULL)
12552 return -ENOMEM;
12553
12554 err = -EIO;
12555 for (i = 0, j = 0; i < size; i += 4, j++) {
12556 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12557 if (err)
12558 break;
12559 }
12560 if (i < size)
12561 goto out;
12562
12563 /* Selfboot format */
12564 magic = be32_to_cpu(buf[0]);
12565 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12566 TG3_EEPROM_MAGIC_FW) {
12567 u8 *buf8 = (u8 *) buf, csum8 = 0;
12568
12569 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12570 TG3_EEPROM_SB_REVISION_2) {
12571 /* For rev 2, the csum doesn't include the MBA. */
12572 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12573 csum8 += buf8[i];
12574 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12575 csum8 += buf8[i];
12576 } else {
12577 for (i = 0; i < size; i++)
12578 csum8 += buf8[i];
12579 }
12580
12581 if (csum8 == 0) {
12582 err = 0;
12583 goto out;
12584 }
12585
12586 err = -EIO;
12587 goto out;
12588 }
12589
12590 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12591 TG3_EEPROM_MAGIC_HW) {
12592 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12593 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12594 u8 *buf8 = (u8 *) buf;
12595
12596 /* Separate the parity bits and the data bytes. */
12597 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12598 if ((i == 0) || (i == 8)) {
12599 int l;
12600 u8 msk;
12601
12602 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12603 parity[k++] = buf8[i] & msk;
12604 i++;
12605 } else if (i == 16) {
12606 int l;
12607 u8 msk;
12608
12609 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12610 parity[k++] = buf8[i] & msk;
12611 i++;
12612
12613 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12614 parity[k++] = buf8[i] & msk;
12615 i++;
12616 }
12617 data[j++] = buf8[i];
12618 }
12619
12620 err = -EIO;
12621 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12622 u8 hw8 = hweight8(data[i]);
12623
12624 if ((hw8 & 0x1) && parity[i])
12625 goto out;
12626 else if (!(hw8 & 0x1) && !parity[i])
12627 goto out;
12628 }
12629 err = 0;
12630 goto out;
12631 }
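	/* The check above enforces odd parity over each 9-bit group:
	 * a data byte with even weight must carry a set parity bit and
	 * one with odd weight a clear bit.  E.g. data 0x03 (two bits
	 * set) is only valid when its parity bit is set.
	 */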
12632
12633 err = -EIO;
12634
12635 /* Bootstrap checksum at offset 0x10 */
12636 csum = calc_crc((unsigned char *) buf, 0x10);
12637 if (csum != le32_to_cpu(buf[0x10/4]))
12638 goto out;
12639
12640 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12641 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12642 if (csum != le32_to_cpu(buf[0xfc/4]))
12643 goto out;
12644
12645 kfree(buf);
12646
12647 buf = tg3_vpd_readblock(tp, &len);
12648 if (!buf)
12649 return -ENOMEM;
12650
12651 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12652 if (i > 0) {
12653 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12654 if (j < 0)
12655 goto out;
12656
12657 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12658 goto out;
12659
12660 i += PCI_VPD_LRDT_TAG_SIZE;
12661 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12662 PCI_VPD_RO_KEYWORD_CHKSUM);
12663 if (j > 0) {
12664 u8 csum8 = 0;
12665
12666 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12667
12668 for (i = 0; i <= j; i++)
12669 csum8 += ((u8 *)buf)[i];
12670
12671 if (csum8)
12672 goto out;
12673 }
12674 }
12675
12676 err = 0;
12677
12678 out:
12679 kfree(buf);
12680 return err;
12681 }
12682
12683 #define TG3_SERDES_TIMEOUT_SEC 2
12684 #define TG3_COPPER_TIMEOUT_SEC 6
12685
12686 static int tg3_test_link(struct tg3 *tp)
12687 {
12688 int i, max;
12689
12690 if (!netif_running(tp->dev))
12691 return -ENODEV;
12692
12693 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12694 max = TG3_SERDES_TIMEOUT_SEC;
12695 else
12696 max = TG3_COPPER_TIMEOUT_SEC;
12697
12698 for (i = 0; i < max; i++) {
12699 if (tp->link_up)
12700 return 0;
12701
12702 if (msleep_interruptible(1000))
12703 break;
12704 }
12705
12706 return -EIO;
12707 }
12708
12709 /* Only test the commonly used registers */
12710 static int tg3_test_registers(struct tg3 *tp)
12711 {
12712 int i, is_5705, is_5750;
12713 u32 offset, read_mask, write_mask, val, save_val, read_val;
12714 static struct {
12715 u16 offset;
12716 u16 flags;
12717 #define TG3_FL_5705 0x1
12718 #define TG3_FL_NOT_5705 0x2
12719 #define TG3_FL_NOT_5788 0x4
12720 #define TG3_FL_NOT_5750 0x8
12721 u32 read_mask;
12722 u32 write_mask;
12723 } reg_tbl[] = {
12724 /* MAC Control Registers */
12725 { MAC_MODE, TG3_FL_NOT_5705,
12726 0x00000000, 0x00ef6f8c },
12727 { MAC_MODE, TG3_FL_5705,
12728 0x00000000, 0x01ef6b8c },
12729 { MAC_STATUS, TG3_FL_NOT_5705,
12730 0x03800107, 0x00000000 },
12731 { MAC_STATUS, TG3_FL_5705,
12732 0x03800100, 0x00000000 },
12733 { MAC_ADDR_0_HIGH, 0x0000,
12734 0x00000000, 0x0000ffff },
12735 { MAC_ADDR_0_LOW, 0x0000,
12736 0x00000000, 0xffffffff },
12737 { MAC_RX_MTU_SIZE, 0x0000,
12738 0x00000000, 0x0000ffff },
12739 { MAC_TX_MODE, 0x0000,
12740 0x00000000, 0x00000070 },
12741 { MAC_TX_LENGTHS, 0x0000,
12742 0x00000000, 0x00003fff },
12743 { MAC_RX_MODE, TG3_FL_NOT_5705,
12744 0x00000000, 0x000007fc },
12745 { MAC_RX_MODE, TG3_FL_5705,
12746 0x00000000, 0x000007dc },
12747 { MAC_HASH_REG_0, 0x0000,
12748 0x00000000, 0xffffffff },
12749 { MAC_HASH_REG_1, 0x0000,
12750 0x00000000, 0xffffffff },
12751 { MAC_HASH_REG_2, 0x0000,
12752 0x00000000, 0xffffffff },
12753 { MAC_HASH_REG_3, 0x0000,
12754 0x00000000, 0xffffffff },
12755
12756 /* Receive Data and Receive BD Initiator Control Registers. */
12757 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12758 0x00000000, 0xffffffff },
12759 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12760 0x00000000, 0xffffffff },
12761 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12762 0x00000000, 0x00000003 },
12763 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12764 0x00000000, 0xffffffff },
12765 { RCVDBDI_STD_BD+0, 0x0000,
12766 0x00000000, 0xffffffff },
12767 { RCVDBDI_STD_BD+4, 0x0000,
12768 0x00000000, 0xffffffff },
12769 { RCVDBDI_STD_BD+8, 0x0000,
12770 0x00000000, 0xffff0002 },
12771 { RCVDBDI_STD_BD+0xc, 0x0000,
12772 0x00000000, 0xffffffff },
12773
12774 /* Receive BD Initiator Control Registers. */
12775 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12776 0x00000000, 0xffffffff },
12777 { RCVBDI_STD_THRESH, TG3_FL_5705,
12778 0x00000000, 0x000003ff },
12779 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12780 0x00000000, 0xffffffff },
12781
12782 /* Host Coalescing Control Registers. */
12783 { HOSTCC_MODE, TG3_FL_NOT_5705,
12784 0x00000000, 0x00000004 },
12785 { HOSTCC_MODE, TG3_FL_5705,
12786 0x00000000, 0x000000f6 },
12787 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12788 0x00000000, 0xffffffff },
12789 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12790 0x00000000, 0x000003ff },
12791 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12792 0x00000000, 0xffffffff },
12793 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12794 0x00000000, 0x000003ff },
12795 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12796 0x00000000, 0xffffffff },
12797 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12798 0x00000000, 0x000000ff },
12799 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12800 0x00000000, 0xffffffff },
12801 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12802 0x00000000, 0x000000ff },
12803 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12804 0x00000000, 0xffffffff },
12805 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12806 0x00000000, 0xffffffff },
12807 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12808 0x00000000, 0xffffffff },
12809 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12810 0x00000000, 0x000000ff },
12811 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12812 0x00000000, 0xffffffff },
12813 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12814 0x00000000, 0x000000ff },
12815 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12816 0x00000000, 0xffffffff },
12817 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12818 0x00000000, 0xffffffff },
12819 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12820 0x00000000, 0xffffffff },
12821 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12822 0x00000000, 0xffffffff },
12823 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12824 0x00000000, 0xffffffff },
12825 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12826 0xffffffff, 0x00000000 },
12827 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12828 0xffffffff, 0x00000000 },
12829
12830 /* Buffer Manager Control Registers. */
12831 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12832 0x00000000, 0x007fff80 },
12833 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12834 0x00000000, 0x007fffff },
12835 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12836 0x00000000, 0x0000003f },
12837 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12838 0x00000000, 0x000001ff },
12839 { BUFMGR_MB_HIGH_WATER, 0x0000,
12840 0x00000000, 0x000001ff },
12841 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12842 0xffffffff, 0x00000000 },
12843 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12844 0xffffffff, 0x00000000 },
12845
12846 /* Mailbox Registers */
12847 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12848 0x00000000, 0x000001ff },
12849 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12850 0x00000000, 0x000001ff },
12851 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12852 0x00000000, 0x000007ff },
12853 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12854 0x00000000, 0x000001ff },
12855
12856 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12857 };
12858
12859 is_5705 = is_5750 = 0;
12860 if (tg3_flag(tp, 5705_PLUS)) {
12861 is_5705 = 1;
12862 if (tg3_flag(tp, 5750_PLUS))
12863 is_5750 = 1;
12864 }
12865
12866 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12867 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12868 continue;
12869
12870 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12871 continue;
12872
12873 if (tg3_flag(tp, IS_5788) &&
12874 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12875 continue;
12876
12877 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12878 continue;
12879
12880 offset = (u32) reg_tbl[i].offset;
12881 read_mask = reg_tbl[i].read_mask;
12882 write_mask = reg_tbl[i].write_mask;
12883
12884 /* Save the original register content */
12885 save_val = tr32(offset);
12886
12887 /* Determine the read-only value. */
12888 read_val = save_val & read_mask;
12889
12890 /* Write zero to the register, then make sure the read-only bits
12891 * are not changed and the read/write bits are all zeros.
12892 */
12893 tw32(offset, 0);
12894
12895 val = tr32(offset);
12896
12897 /* Test the read-only and read/write bits. */
12898 if (((val & read_mask) != read_val) || (val & write_mask))
12899 goto out;
12900
12901 /* Write ones to all the bits defined by RdMask and WrMask, then
12902 * make sure the read-only bits are not changed and the
12903 * read/write bits are all ones.
12904 */
12905 tw32(offset, read_mask | write_mask);
12906
12907 val = tr32(offset);
12908
12909 /* Test the read-only bits. */
12910 if ((val & read_mask) != read_val)
12911 goto out;
12912
12913 /* Test the read/write bits. */
12914 if ((val & write_mask) != write_mask)
12915 goto out;
12916
12917 tw32(offset, save_val);
12918 }
12919
12920 return 0;
12921
12922 out:
12923 if (netif_msg_hw(tp))
12924 netdev_err(tp->dev,
12925 "Register test failed at offset %x\n", offset);
12926 tw32(offset, save_val);
12927 return -EIO;
12928 }
12929
12930 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12931 {
12932 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12933 int i;
12934 u32 j;
12935
12936 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12937 for (j = 0; j < len; j += 4) {
12938 u32 val;
12939
12940 tg3_write_mem(tp, offset + j, test_pattern[i]);
12941 tg3_read_mem(tp, offset + j, &val);
12942 if (val != test_pattern[i])
12943 return -EIO;
12944 }
12945 }
12946 return 0;
12947 }
12948
12949 static int tg3_test_memory(struct tg3 *tp)
12950 {
12951 static struct mem_entry {
12952 u32 offset;
12953 u32 len;
12954 } mem_tbl_570x[] = {
12955 { 0x00000000, 0x00b50},
12956 { 0x00002000, 0x1c000},
12957 { 0xffffffff, 0x00000}
12958 }, mem_tbl_5705[] = {
12959 { 0x00000100, 0x0000c},
12960 { 0x00000200, 0x00008},
12961 { 0x00004000, 0x00800},
12962 { 0x00006000, 0x01000},
12963 { 0x00008000, 0x02000},
12964 { 0x00010000, 0x0e000},
12965 { 0xffffffff, 0x00000}
12966 }, mem_tbl_5755[] = {
12967 { 0x00000200, 0x00008},
12968 { 0x00004000, 0x00800},
12969 { 0x00006000, 0x00800},
12970 { 0x00008000, 0x02000},
12971 { 0x00010000, 0x0c000},
12972 { 0xffffffff, 0x00000}
12973 }, mem_tbl_5906[] = {
12974 { 0x00000200, 0x00008},
12975 { 0x00004000, 0x00400},
12976 { 0x00006000, 0x00400},
12977 { 0x00008000, 0x01000},
12978 { 0x00010000, 0x01000},
12979 { 0xffffffff, 0x00000}
12980 }, mem_tbl_5717[] = {
12981 { 0x00000200, 0x00008},
12982 { 0x00010000, 0x0a000},
12983 { 0x00020000, 0x13c00},
12984 { 0xffffffff, 0x00000}
12985 }, mem_tbl_57765[] = {
12986 { 0x00000200, 0x00008},
12987 { 0x00004000, 0x00800},
12988 { 0x00006000, 0x09800},
12989 { 0x00010000, 0x0a000},
12990 { 0xffffffff, 0x00000}
12991 };
12992 struct mem_entry *mem_tbl;
12993 int err = 0;
12994 int i;
12995
12996 if (tg3_flag(tp, 5717_PLUS))
12997 mem_tbl = mem_tbl_5717;
12998 else if (tg3_flag(tp, 57765_CLASS) ||
12999 tg3_asic_rev(tp) == ASIC_REV_5762)
13000 mem_tbl = mem_tbl_57765;
13001 else if (tg3_flag(tp, 5755_PLUS))
13002 mem_tbl = mem_tbl_5755;
13003 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13004 mem_tbl = mem_tbl_5906;
13005 else if (tg3_flag(tp, 5705_PLUS))
13006 mem_tbl = mem_tbl_5705;
13007 else
13008 mem_tbl = mem_tbl_570x;
13009
13010 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13011 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13012 if (err)
13013 break;
13014 }
13015
13016 return err;
13017 }
13018
13019 #define TG3_TSO_MSS 500
13020
13021 #define TG3_TSO_IP_HDR_LEN 20
13022 #define TG3_TSO_TCP_HDR_LEN 20
13023 #define TG3_TSO_TCP_OPT_LEN 12
13024
13025 static const u8 tg3_tso_header[] = {
13026 0x08, 0x00,
13027 0x45, 0x00, 0x00, 0x00,
13028 0x00, 0x00, 0x40, 0x00,
13029 0x40, 0x06, 0x00, 0x00,
13030 0x0a, 0x00, 0x00, 0x01,
13031 0x0a, 0x00, 0x00, 0x02,
13032 0x0d, 0x00, 0xe0, 0x00,
13033 0x00, 0x00, 0x01, 0x00,
13034 0x00, 0x00, 0x02, 0x00,
13035 0x80, 0x10, 0x10, 0x00,
13036 0x14, 0x09, 0x00, 0x00,
13037 0x01, 0x01, 0x08, 0x0a,
13038 0x11, 0x11, 0x11, 0x11,
13039 0x11, 0x11, 0x11, 0x11,
13040 };
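
/* Decoding the canned header above straight from its bytes: ethertype
 * 0x0800 (IPv4); a 20-byte IP header (version 4/IHL 5, TTL 64,
 * protocol 6 = TCP, 10.0.0.1 -> 10.0.0.2) with tot_len left zero to be
 * filled in per test; then a 32-byte TCP header (data offset 8) whose
 * 12 option bytes are two NOPs plus a timestamp option, matching
 * TG3_TSO_TCP_OPT_LEN.
 */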
13041
13042 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13043 {
13044 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13045 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13046 u32 budget;
13047 struct sk_buff *skb;
13048 u8 *tx_data, *rx_data;
13049 dma_addr_t map;
13050 int num_pkts, tx_len, rx_len, i, err;
13051 struct tg3_rx_buffer_desc *desc;
13052 struct tg3_napi *tnapi, *rnapi;
13053 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13054
13055 tnapi = &tp->napi[0];
13056 rnapi = &tp->napi[0];
13057 if (tp->irq_cnt > 1) {
13058 if (tg3_flag(tp, ENABLE_RSS))
13059 rnapi = &tp->napi[1];
13060 if (tg3_flag(tp, ENABLE_TSS))
13061 tnapi = &tp->napi[1];
13062 }
13063 coal_now = tnapi->coal_now | rnapi->coal_now;
13064
13065 err = -EIO;
13066
13067 tx_len = pktsz;
13068 skb = netdev_alloc_skb(tp->dev, tx_len);
13069 if (!skb)
13070 return -ENOMEM;
13071
13072 tx_data = skb_put(skb, tx_len);
13073 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13074 memset(tx_data + ETH_ALEN, 0x0, 8);
13075
13076 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13077
13078 if (tso_loopback) {
13079 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13080
13081 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13082 TG3_TSO_TCP_OPT_LEN;
13083
13084 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13085 sizeof(tg3_tso_header));
13086 mss = TG3_TSO_MSS;
13087
13088 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13089 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13090
13091 /* Set the total length field in the IP header */
13092 iph->tot_len = htons((u16)(mss + hdr_len));
13093
13094 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13095 TXD_FLAG_CPU_POST_DMA);
13096
13097 if (tg3_flag(tp, HW_TSO_1) ||
13098 tg3_flag(tp, HW_TSO_2) ||
13099 tg3_flag(tp, HW_TSO_3)) {
13100 struct tcphdr *th;
13101 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13102 th = (struct tcphdr *)&tx_data[val];
13103 th->check = 0;
13104 } else
13105 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13106
13107 if (tg3_flag(tp, HW_TSO_3)) {
13108 mss |= (hdr_len & 0xc) << 12;
13109 if (hdr_len & 0x10)
13110 base_flags |= 0x00000010;
13111 base_flags |= (hdr_len & 0x3e0) << 5;
13112 } else if (tg3_flag(tp, HW_TSO_2))
13113 mss |= hdr_len << 9;
13114 else if (tg3_flag(tp, HW_TSO_1) ||
13115 tg3_asic_rev(tp) == ASIC_REV_5705) {
13116 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13117 } else {
13118 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13119 }
13120
13121 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13122 } else {
13123 num_pkts = 1;
13124 data_off = ETH_HLEN;
13125
13126 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13127 tx_len > VLAN_ETH_FRAME_LEN)
13128 base_flags |= TXD_FLAG_JMB_PKT;
13129 }
13130
13131 for (i = data_off; i < tx_len; i++)
13132 tx_data[i] = (u8) (i & 0xff);
13133
13134 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13135 if (pci_dma_mapping_error(tp->pdev, map)) {
13136 dev_kfree_skb(skb);
13137 return -EIO;
13138 }
13139
13140 val = tnapi->tx_prod;
13141 tnapi->tx_buffers[val].skb = skb;
13142 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13143
13144 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13145 rnapi->coal_now);
13146
13147 udelay(10);
13148
13149 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13150
13151 budget = tg3_tx_avail(tnapi);
13152 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13153 base_flags | TXD_FLAG_END, mss, 0)) {
13154 tnapi->tx_buffers[val].skb = NULL;
13155 dev_kfree_skb(skb);
13156 return -EIO;
13157 }
13158
13159 tnapi->tx_prod++;
13160
13161 /* Sync BD data before updating mailbox */
13162 wmb();
13163
13164 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13165 tr32_mailbox(tnapi->prodmbox);
13166
13167 udelay(10);
13168
13169 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13170 for (i = 0; i < 35; i++) {
13171 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13172 coal_now);
13173
13174 udelay(10);
13175
13176 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13177 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13178 if ((tx_idx == tnapi->tx_prod) &&
13179 (rx_idx == (rx_start_idx + num_pkts)))
13180 break;
13181 }
13182
13183 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13184 dev_kfree_skb(skb);
13185
13186 if (tx_idx != tnapi->tx_prod)
13187 goto out;
13188
13189 if (rx_idx != rx_start_idx + num_pkts)
13190 goto out;
13191
13192 val = data_off;
13193 while (rx_idx != rx_start_idx) {
13194 desc = &rnapi->rx_rcb[rx_start_idx++];
13195 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13196 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13197
13198 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13199 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13200 goto out;
13201
13202 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13203 - ETH_FCS_LEN;
13204
13205 if (!tso_loopback) {
13206 if (rx_len != tx_len)
13207 goto out;
13208
13209 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13210 if (opaque_key != RXD_OPAQUE_RING_STD)
13211 goto out;
13212 } else {
13213 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13214 goto out;
13215 }
13216 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13217 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13218 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13219 goto out;
13220 }
13221
13222 if (opaque_key == RXD_OPAQUE_RING_STD) {
13223 rx_data = tpr->rx_std_buffers[desc_idx].data;
13224 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13225 mapping);
13226 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13227 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13228 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13229 mapping);
13230 } else
13231 goto out;
13232
13233 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13234 PCI_DMA_FROMDEVICE);
13235
13236 rx_data += TG3_RX_OFFSET(tp);
13237 for (i = data_off; i < rx_len; i++, val++) {
13238 if (*(rx_data + i) != (u8) (val & 0xff))
13239 goto out;
13240 }
13241 }
13242
13243 err = 0;
13244
13245 /* tg3_free_rings will unmap and free the rx_data */
13246 out:
13247 return err;
13248 }
13249
13250 #define TG3_STD_LOOPBACK_FAILED 1
13251 #define TG3_JMB_LOOPBACK_FAILED 2
13252 #define TG3_TSO_LOOPBACK_FAILED 4
13253 #define TG3_LOOPBACK_FAILED \
13254 (TG3_STD_LOOPBACK_FAILED | \
13255 TG3_JMB_LOOPBACK_FAILED | \
13256 TG3_TSO_LOOPBACK_FAILED)
13257
13258 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13259 {
13260 int err = -EIO;
13261 u32 eee_cap;
13262 u32 jmb_pkt_sz = 9000;
13263
13264 if (tp->dma_limit)
13265 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13266
13267 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13268 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13269
13270 if (!netif_running(tp->dev)) {
13271 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13272 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13273 if (do_extlpbk)
13274 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13275 goto done;
13276 }
13277
13278 err = tg3_reset_hw(tp, true);
13279 if (err) {
13280 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13281 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13282 if (do_extlpbk)
13283 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13284 goto done;
13285 }
13286
13287 if (tg3_flag(tp, ENABLE_RSS)) {
13288 int i;
13289
13290 /* Reroute all rx packets to the 1st queue */
13291 for (i = MAC_RSS_INDIR_TBL_0;
13292 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13293 tw32(i, 0x0);
13294 }
13295
13296 /* HW erratum - MAC loopback fails in some cases on the 5780.
13297 * Normal traffic and PHY loopback are not affected by this
13298 * erratum.  Also, the MAC loopback test is deprecated for
13299 * all newer ASIC revisions.
13300 */
13301 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13302 !tg3_flag(tp, CPMU_PRESENT)) {
13303 tg3_mac_loopback(tp, true);
13304
13305 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13306 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13307
13308 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13309 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13310 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13311
13312 tg3_mac_loopback(tp, false);
13313 }
13314
13315 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13316 !tg3_flag(tp, USE_PHYLIB)) {
13317 int i;
13318
13319 tg3_phy_lpbk_set(tp, 0, false);
13320
13321 /* Wait for link */
13322 for (i = 0; i < 100; i++) {
13323 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13324 break;
13325 mdelay(1);
13326 }
13327
13328 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13329 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13330 if (tg3_flag(tp, TSO_CAPABLE) &&
13331 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13332 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13333 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13334 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13335 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13336
13337 if (do_extlpbk) {
13338 tg3_phy_lpbk_set(tp, 0, true);
13339
13340 /* All link indications report up, but the hardware
13341 * isn't really ready for about 20 msec. Double it
13342 * to be sure.
13343 */
13344 mdelay(40);
13345
13346 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13347 data[TG3_EXT_LOOPB_TEST] |=
13348 TG3_STD_LOOPBACK_FAILED;
13349 if (tg3_flag(tp, TSO_CAPABLE) &&
13350 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13351 data[TG3_EXT_LOOPB_TEST] |=
13352 TG3_TSO_LOOPBACK_FAILED;
13353 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13354 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13355 data[TG3_EXT_LOOPB_TEST] |=
13356 TG3_JMB_LOOPBACK_FAILED;
13357 }
13358
13359 /* Re-enable gphy autopowerdown. */
13360 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13361 tg3_phy_toggle_apd(tp, true);
13362 }
13363
13364 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13365 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13366
13367 done:
13368 tp->phy_flags |= eee_cap;
13369
13370 return err;
13371 }
13372
13373 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13374 u64 *data)
13375 {
13376 struct tg3 *tp = netdev_priv(dev);
13377 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13378
13379 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13380 tg3_power_up(tp)) {
13381 etest->flags |= ETH_TEST_FL_FAILED;
13382 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13383 return;
13384 }
13385
13386 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13387
13388 if (tg3_test_nvram(tp) != 0) {
13389 etest->flags |= ETH_TEST_FL_FAILED;
13390 data[TG3_NVRAM_TEST] = 1;
13391 }
13392 if (!doextlpbk && tg3_test_link(tp)) {
13393 etest->flags |= ETH_TEST_FL_FAILED;
13394 data[TG3_LINK_TEST] = 1;
13395 }
13396 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13397 int err, err2 = 0, irq_sync = 0;
13398
13399 if (netif_running(dev)) {
13400 tg3_phy_stop(tp);
13401 tg3_netif_stop(tp);
13402 irq_sync = 1;
13403 }
13404
13405 tg3_full_lock(tp, irq_sync);
13406 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13407 err = tg3_nvram_lock(tp);
13408 tg3_halt_cpu(tp, RX_CPU_BASE);
13409 if (!tg3_flag(tp, 5705_PLUS))
13410 tg3_halt_cpu(tp, TX_CPU_BASE);
13411 if (!err)
13412 tg3_nvram_unlock(tp);
13413
13414 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13415 tg3_phy_reset(tp);
13416
13417 if (tg3_test_registers(tp) != 0) {
13418 etest->flags |= ETH_TEST_FL_FAILED;
13419 data[TG3_REGISTER_TEST] = 1;
13420 }
13421
13422 if (tg3_test_memory(tp) != 0) {
13423 etest->flags |= ETH_TEST_FL_FAILED;
13424 data[TG3_MEMORY_TEST] = 1;
13425 }
13426
13427 if (doextlpbk)
13428 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13429
13430 if (tg3_test_loopback(tp, data, doextlpbk))
13431 etest->flags |= ETH_TEST_FL_FAILED;
13432
13433 tg3_full_unlock(tp);
13434
13435 if (tg3_test_interrupt(tp) != 0) {
13436 etest->flags |= ETH_TEST_FL_FAILED;
13437 data[TG3_INTERRUPT_TEST] = 1;
13438 }
13439
13440 tg3_full_lock(tp, 0);
13441
13442 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13443 if (netif_running(dev)) {
13444 tg3_flag_set(tp, INIT_COMPLETE);
13445 err2 = tg3_restart_hw(tp, true);
13446 if (!err2)
13447 tg3_netif_start(tp);
13448 }
13449
13450 tg3_full_unlock(tp);
13451
13452 if (irq_sync && !err2)
13453 tg3_phy_start(tp);
13454 }
13455 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13456 tg3_power_down(tp);
13457
13458 }
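
/* The whole battery above maps onto "ethtool -t eth0 offline"
 * (interface name illustrative).  Without the offline flag only the
 * NVRAM and link tests run, since the register, memory, loopback and
 * interrupt tests require halting the chip.
 */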
13459
13460 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13461 struct ifreq *ifr, int cmd)
13462 {
13463 struct tg3 *tp = netdev_priv(dev);
13464 struct hwtstamp_config stmpconf;
13465
13466 if (!tg3_flag(tp, PTP_CAPABLE))
13467 return -EINVAL;
13468
13469 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13470 return -EFAULT;
13471
13472 if (stmpconf.flags)
13473 return -EINVAL;
13474
13475 switch (stmpconf.tx_type) {
13476 case HWTSTAMP_TX_ON:
13477 tg3_flag_set(tp, TX_TSTAMP_EN);
13478 break;
13479 case HWTSTAMP_TX_OFF:
13480 tg3_flag_clear(tp, TX_TSTAMP_EN);
13481 break;
13482 default:
13483 return -ERANGE;
13484 }
13485
13486 switch (stmpconf.rx_filter) {
13487 case HWTSTAMP_FILTER_NONE:
13488 tp->rxptpctl = 0;
13489 break;
13490 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13491 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13492 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13493 break;
13494 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13495 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13496 TG3_RX_PTP_CTL_SYNC_EVNT;
13497 break;
13498 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13499 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13500 TG3_RX_PTP_CTL_DELAY_REQ;
13501 break;
13502 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13503 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13504 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13505 break;
13506 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13507 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13508 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13509 break;
13510 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13511 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13512 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13513 break;
13514 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13515 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13516 TG3_RX_PTP_CTL_SYNC_EVNT;
13517 break;
13518 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13519 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13520 TG3_RX_PTP_CTL_SYNC_EVNT;
13521 break;
13522 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13523 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13524 TG3_RX_PTP_CTL_SYNC_EVNT;
13525 break;
13526 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13527 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13528 TG3_RX_PTP_CTL_DELAY_REQ;
13529 break;
13530 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13531 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13532 TG3_RX_PTP_CTL_DELAY_REQ;
13533 break;
13534 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13535 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13536 TG3_RX_PTP_CTL_DELAY_REQ;
13537 break;
13538 default:
13539 return -ERANGE;
13540 }
13541
13542 if (netif_running(dev) && tp->rxptpctl)
13543 tw32(TG3_RX_PTP_CTL,
13544 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13545
13546 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13547 -EFAULT : 0;
13548 }
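
/* A minimal userspace sketch of this ioctl; the fd and interface name
 * are illustrative, and the config layout comes from
 * <linux/net_tstamp.h>:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *
 * On success the accepted config is copied back, so userspace can see
 * what was actually enabled.
 */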
13549
13550 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13551 {
13552 struct mii_ioctl_data *data = if_mii(ifr);
13553 struct tg3 *tp = netdev_priv(dev);
13554 int err;
13555
13556 if (tg3_flag(tp, USE_PHYLIB)) {
13557 struct phy_device *phydev;
13558 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13559 return -EAGAIN;
13560 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13561 return phy_mii_ioctl(phydev, ifr, cmd);
13562 }
13563
13564 switch (cmd) {
13565 case SIOCGMIIPHY:
13566 data->phy_id = tp->phy_addr;
13567
13568 /* fallthru */
13569 case SIOCGMIIREG: {
13570 u32 mii_regval;
13571
13572 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13573 break; /* We have no PHY */
13574
13575 if (!netif_running(dev))
13576 return -EAGAIN;
13577
13578 spin_lock_bh(&tp->lock);
13579 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13580 data->reg_num & 0x1f, &mii_regval);
13581 spin_unlock_bh(&tp->lock);
13582
13583 data->val_out = mii_regval;
13584
13585 return err;
13586 }
13587
13588 case SIOCSMIIREG:
13589 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13590 break; /* We have no PHY */
13591
13592 if (!netif_running(dev))
13593 return -EAGAIN;
13594
13595 spin_lock_bh(&tp->lock);
13596 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13597 data->reg_num & 0x1f, data->val_in);
13598 spin_unlock_bh(&tp->lock);
13599
13600 return err;
13601
13602 case SIOCSHWTSTAMP:
13603 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13604
13605 default:
13606 /* do nothing */
13607 break;
13608 }
13609 return -EOPNOTSUPP;
13610 }
13611
13612 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13613 {
13614 struct tg3 *tp = netdev_priv(dev);
13615
13616 memcpy(ec, &tp->coal, sizeof(*ec));
13617 return 0;
13618 }
13619
13620 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13621 {
13622 struct tg3 *tp = netdev_priv(dev);
13623 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13624 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13625
13626 if (!tg3_flag(tp, 5705_PLUS)) {
13627 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13628 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13629 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13630 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13631 }
13632
13633 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13634 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13635 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13636 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13637 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13638 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13639 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13640 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13641 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13642 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13643 return -EINVAL;
13644
13645 /* No rx interrupts will be generated if both are zero */
13646 if ((ec->rx_coalesce_usecs == 0) &&
13647 (ec->rx_max_coalesced_frames == 0))
13648 return -EINVAL;
13649
13650 /* No tx interrupts will be generated if both are zero */
13651 if ((ec->tx_coalesce_usecs == 0) &&
13652 (ec->tx_max_coalesced_frames == 0))
13653 return -EINVAL;
13654
13655 /* Only copy relevant parameters, ignore all others. */
13656 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13657 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13658 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13659 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13660 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13661 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13662 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13663 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13664 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13665
13666 if (netif_running(dev)) {
13667 tg3_full_lock(tp, 0);
13668 __tg3_set_coalesce(tp, &tp->coal);
13669 tg3_full_unlock(tp);
13670 }
13671 return 0;
13672 }
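
/* The usual userspace knob for the above (interface name and values
 * illustrative) is "ethtool -C eth0 rx-usecs 20 rx-frames 5".  Note
 * that rx-usecs and rx-frames cannot both be zero, as validated above,
 * since that would leave the chip with no rx interrupt trigger at all.
 */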
13673
13674 static const struct ethtool_ops tg3_ethtool_ops = {
13675 .get_settings = tg3_get_settings,
13676 .set_settings = tg3_set_settings,
13677 .get_drvinfo = tg3_get_drvinfo,
13678 .get_regs_len = tg3_get_regs_len,
13679 .get_regs = tg3_get_regs,
13680 .get_wol = tg3_get_wol,
13681 .set_wol = tg3_set_wol,
13682 .get_msglevel = tg3_get_msglevel,
13683 .set_msglevel = tg3_set_msglevel,
13684 .nway_reset = tg3_nway_reset,
13685 .get_link = ethtool_op_get_link,
13686 .get_eeprom_len = tg3_get_eeprom_len,
13687 .get_eeprom = tg3_get_eeprom,
13688 .set_eeprom = tg3_set_eeprom,
13689 .get_ringparam = tg3_get_ringparam,
13690 .set_ringparam = tg3_set_ringparam,
13691 .get_pauseparam = tg3_get_pauseparam,
13692 .set_pauseparam = tg3_set_pauseparam,
13693 .self_test = tg3_self_test,
13694 .get_strings = tg3_get_strings,
13695 .set_phys_id = tg3_set_phys_id,
13696 .get_ethtool_stats = tg3_get_ethtool_stats,
13697 .get_coalesce = tg3_get_coalesce,
13698 .set_coalesce = tg3_set_coalesce,
13699 .get_sset_count = tg3_get_sset_count,
13700 .get_rxnfc = tg3_get_rxnfc,
13701 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13702 .get_rxfh_indir = tg3_get_rxfh_indir,
13703 .set_rxfh_indir = tg3_set_rxfh_indir,
13704 .get_channels = tg3_get_channels,
13705 .set_channels = tg3_set_channels,
13706 .get_ts_info = tg3_get_ts_info,
13707 };
13708
13709 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13710 struct rtnl_link_stats64 *stats)
13711 {
13712 struct tg3 *tp = netdev_priv(dev);
13713
13714 spin_lock_bh(&tp->lock);
13715 if (!tp->hw_stats) {
13716 spin_unlock_bh(&tp->lock);
13717 return &tp->net_stats_prev;
13718 }
13719
13720 tg3_get_nstats(tp, stats);
13721 spin_unlock_bh(&tp->lock);
13722
13723 return stats;
13724 }
13725
13726 static void tg3_set_rx_mode(struct net_device *dev)
13727 {
13728 struct tg3 *tp = netdev_priv(dev);
13729
13730 if (!netif_running(dev))
13731 return;
13732
13733 tg3_full_lock(tp, 0);
13734 __tg3_set_rx_mode(dev);
13735 tg3_full_unlock(tp);
13736 }
13737
13738 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13739 int new_mtu)
13740 {
13741 dev->mtu = new_mtu;
13742
13743 if (new_mtu > ETH_DATA_LEN) {
13744 if (tg3_flag(tp, 5780_CLASS)) {
13745 netdev_update_features(dev);
13746 tg3_flag_clear(tp, TSO_CAPABLE);
13747 } else {
13748 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13749 }
13750 } else {
13751 if (tg3_flag(tp, 5780_CLASS)) {
13752 tg3_flag_set(tp, TSO_CAPABLE);
13753 netdev_update_features(dev);
13754 }
13755 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13756 }
13757 }
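/* Note: MTUs above the 1500-byte ETH_DATA_LEN need the dedicated
 * jumbo RX ring, except on 5780-class chips, which keep the standard
 * ring and instead give up TSO capability while jumbo frames are on.
 */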
13758
13759 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13760 {
13761 struct tg3 *tp = netdev_priv(dev);
13762 int err;
13763 bool reset_phy = false;
13764
13765 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13766 return -EINVAL;
13767
13768 if (!netif_running(dev)) {
13769 /* We'll just catch it later when the
13770 * device is brought up.
13771 */
13772 tg3_set_mtu(dev, tp, new_mtu);
13773 return 0;
13774 }
13775
13776 tg3_phy_stop(tp);
13777
13778 tg3_netif_stop(tp);
13779
13780 tg3_full_lock(tp, 1);
13781
13782 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13783
13784 tg3_set_mtu(dev, tp, new_mtu);
13785
13786 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13787 * breaks all requests to 256 bytes.
13788 */
13789 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13790 reset_phy = true;
13791
13792 err = tg3_restart_hw(tp, reset_phy);
13793
13794 if (!err)
13795 tg3_netif_start(tp);
13796
13797 tg3_full_unlock(tp);
13798
13799 if (!err)
13800 tg3_phy_start(tp);
13801
13802 return err;
13803 }
13804
13805 static const struct net_device_ops tg3_netdev_ops = {
13806 .ndo_open = tg3_open,
13807 .ndo_stop = tg3_close,
13808 .ndo_start_xmit = tg3_start_xmit,
13809 .ndo_get_stats64 = tg3_get_stats64,
13810 .ndo_validate_addr = eth_validate_addr,
13811 .ndo_set_rx_mode = tg3_set_rx_mode,
13812 .ndo_set_mac_address = tg3_set_mac_addr,
13813 .ndo_do_ioctl = tg3_ioctl,
13814 .ndo_tx_timeout = tg3_tx_timeout,
13815 .ndo_change_mtu = tg3_change_mtu,
13816 .ndo_fix_features = tg3_fix_features,
13817 .ndo_set_features = tg3_set_features,
13818 #ifdef CONFIG_NET_POLL_CONTROLLER
13819 .ndo_poll_controller = tg3_poll_controller,
13820 #endif
13821 };
13822
13823 static void tg3_get_eeprom_size(struct tg3 *tp)
13824 {
13825 u32 cursize, val, magic;
13826
13827 tp->nvram_size = EEPROM_CHIP_SIZE;
13828
13829 if (tg3_nvram_read(tp, 0, &magic) != 0)
13830 return;
13831
13832 if ((magic != TG3_EEPROM_MAGIC) &&
13833 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13834 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13835 return;
13836
13837 /*
13838 * Size the chip by reading offsets at increasing powers of two.
13839 * When we encounter our validation signature, we know the addressing
13840 * has wrapped around, and thus have our chip size.
13841 */
13842 cursize = 0x10;
13843
13844 while (cursize < tp->nvram_size) {
13845 if (tg3_nvram_read(tp, cursize, &val) != 0)
13846 return;
13847
13848 if (val == magic)
13849 break;
13850
13851 cursize <<= 1;
13852 }
13853
13854 tp->nvram_size = cursize;
13855 }
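/* Worked example (hypothetical part): if the signature written at
 * offset 0 reads back again at offset 0x8000, the address lines have
 * wrapped and the device is 0x8000 (32KB) in size.  Doubling cursize
 * each pass keeps the probe to about log2(size) reads.
 */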
13856
13857 static void tg3_get_nvram_size(struct tg3 *tp)
13858 {
13859 u32 val;
13860
13861 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13862 return;
13863
13864 /* Selfboot format */
13865 if (val != TG3_EEPROM_MAGIC) {
13866 tg3_get_eeprom_size(tp);
13867 return;
13868 }
13869
13870 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13871 if (val != 0) {
13872 /* This is confusing. We want to operate on the
13873 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13874 * call will read from NVRAM and byteswap the data
13875 * according to the byteswapping settings for all
13876 * other register accesses. This ensures the data we
13877 * want will always reside in the lower 16-bits.
13878 * However, the data in NVRAM is in LE format, which
13879 * means the data from the NVRAM read will always be
13880 * opposite the endianness of the CPU. The 16-bit
13881 * byteswap then brings the data to CPU endianness.
13882 */
13883 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13884 return;
13885 }
13886 }
13887 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13888 }
13889
13890 static void tg3_get_nvram_info(struct tg3 *tp)
13891 {
13892 u32 nvcfg1;
13893
13894 nvcfg1 = tr32(NVRAM_CFG1);
13895 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13896 tg3_flag_set(tp, FLASH);
13897 } else {
13898 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13899 tw32(NVRAM_CFG1, nvcfg1);
13900 }
13901
13902 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13903 tg3_flag(tp, 5780_CLASS)) {
13904 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13905 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13906 tp->nvram_jedecnum = JEDEC_ATMEL;
13907 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13908 tg3_flag_set(tp, NVRAM_BUFFERED);
13909 break;
13910 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13911 tp->nvram_jedecnum = JEDEC_ATMEL;
13912 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13913 break;
13914 case FLASH_VENDOR_ATMEL_EEPROM:
13915 tp->nvram_jedecnum = JEDEC_ATMEL;
13916 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13917 tg3_flag_set(tp, NVRAM_BUFFERED);
13918 break;
13919 case FLASH_VENDOR_ST:
13920 tp->nvram_jedecnum = JEDEC_ST;
13921 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13922 tg3_flag_set(tp, NVRAM_BUFFERED);
13923 break;
13924 case FLASH_VENDOR_SAIFUN:
13925 tp->nvram_jedecnum = JEDEC_SAIFUN;
13926 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13927 break;
13928 case FLASH_VENDOR_SST_SMALL:
13929 case FLASH_VENDOR_SST_LARGE:
13930 tp->nvram_jedecnum = JEDEC_SST;
13931 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13932 break;
13933 }
13934 } else {
13935 tp->nvram_jedecnum = JEDEC_ATMEL;
13936 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13937 tg3_flag_set(tp, NVRAM_BUFFERED);
13938 }
13939 }
13940
13941 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13942 {
13943 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13944 case FLASH_5752PAGE_SIZE_256:
13945 tp->nvram_pagesize = 256;
13946 break;
13947 case FLASH_5752PAGE_SIZE_512:
13948 tp->nvram_pagesize = 512;
13949 break;
13950 case FLASH_5752PAGE_SIZE_1K:
13951 tp->nvram_pagesize = 1024;
13952 break;
13953 case FLASH_5752PAGE_SIZE_2K:
13954 tp->nvram_pagesize = 2048;
13955 break;
13956 case FLASH_5752PAGE_SIZE_4K:
13957 tp->nvram_pagesize = 4096;
13958 break;
13959 case FLASH_5752PAGE_SIZE_264:
13960 tp->nvram_pagesize = 264;
13961 break;
13962 case FLASH_5752PAGE_SIZE_528:
13963 tp->nvram_pagesize = 528;
13964 break;
13965 }
13966 }
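/* The odd 264- and 528-byte sizes are the native "power of two plus
 * eight spare bytes" page geometry of Atmel AT45DB DataFlash parts
 * (256+8 and 512+8); callers flag every other geometry with
 * NO_NVRAM_ADDR_TRANS so that linear addressing is used instead.
 */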
13967
13968 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13969 {
13970 u32 nvcfg1;
13971
13972 nvcfg1 = tr32(NVRAM_CFG1);
13973
13974 /* NVRAM protection for TPM */
13975 if (nvcfg1 & (1 << 27))
13976 tg3_flag_set(tp, PROTECTED_NVRAM);
13977
13978 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13979 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13980 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13981 tp->nvram_jedecnum = JEDEC_ATMEL;
13982 tg3_flag_set(tp, NVRAM_BUFFERED);
13983 break;
13984 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13985 tp->nvram_jedecnum = JEDEC_ATMEL;
13986 tg3_flag_set(tp, NVRAM_BUFFERED);
13987 tg3_flag_set(tp, FLASH);
13988 break;
13989 case FLASH_5752VENDOR_ST_M45PE10:
13990 case FLASH_5752VENDOR_ST_M45PE20:
13991 case FLASH_5752VENDOR_ST_M45PE40:
13992 tp->nvram_jedecnum = JEDEC_ST;
13993 tg3_flag_set(tp, NVRAM_BUFFERED);
13994 tg3_flag_set(tp, FLASH);
13995 break;
13996 }
13997
13998 if (tg3_flag(tp, FLASH)) {
13999 tg3_nvram_get_pagesize(tp, nvcfg1);
14000 } else {
14001 /* For eeprom, set pagesize to maximum eeprom size */
14002 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14003
14004 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14005 tw32(NVRAM_CFG1, nvcfg1);
14006 }
14007 }
14008
14009 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14010 {
14011 u32 nvcfg1, protect = 0;
14012
14013 nvcfg1 = tr32(NVRAM_CFG1);
14014
14015 /* NVRAM protection for TPM */
14016 if (nvcfg1 & (1 << 27)) {
14017 tg3_flag_set(tp, PROTECTED_NVRAM);
14018 protect = 1;
14019 }
14020
14021 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14022 switch (nvcfg1) {
14023 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14024 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14025 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14026 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14027 tp->nvram_jedecnum = JEDEC_ATMEL;
14028 tg3_flag_set(tp, NVRAM_BUFFERED);
14029 tg3_flag_set(tp, FLASH);
14030 tp->nvram_pagesize = 264;
14031 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14032 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14033 tp->nvram_size = (protect ? 0x3e200 :
14034 TG3_NVRAM_SIZE_512KB);
14035 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14036 tp->nvram_size = (protect ? 0x1f200 :
14037 TG3_NVRAM_SIZE_256KB);
14038 else
14039 tp->nvram_size = (protect ? 0x1f200 :
14040 TG3_NVRAM_SIZE_128KB);
14041 break;
14042 case FLASH_5752VENDOR_ST_M45PE10:
14043 case FLASH_5752VENDOR_ST_M45PE20:
14044 case FLASH_5752VENDOR_ST_M45PE40:
14045 tp->nvram_jedecnum = JEDEC_ST;
14046 tg3_flag_set(tp, NVRAM_BUFFERED);
14047 tg3_flag_set(tp, FLASH);
14048 tp->nvram_pagesize = 256;
14049 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14050 tp->nvram_size = (protect ?
14051 TG3_NVRAM_SIZE_64KB :
14052 TG3_NVRAM_SIZE_128KB);
14053 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14054 tp->nvram_size = (protect ?
14055 TG3_NVRAM_SIZE_64KB :
14056 TG3_NVRAM_SIZE_256KB);
14057 else
14058 tp->nvram_size = (protect ?
14059 TG3_NVRAM_SIZE_128KB :
14060 TG3_NVRAM_SIZE_512KB);
14061 break;
14062 }
14063 }
14064
14065 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14066 {
14067 u32 nvcfg1;
14068
14069 nvcfg1 = tr32(NVRAM_CFG1);
14070
14071 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14072 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14073 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14074 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14075 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14076 tp->nvram_jedecnum = JEDEC_ATMEL;
14077 tg3_flag_set(tp, NVRAM_BUFFERED);
14078 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14079
14080 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14081 tw32(NVRAM_CFG1, nvcfg1);
14082 break;
14083 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14084 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14085 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14086 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14087 tp->nvram_jedecnum = JEDEC_ATMEL;
14088 tg3_flag_set(tp, NVRAM_BUFFERED);
14089 tg3_flag_set(tp, FLASH);
14090 tp->nvram_pagesize = 264;
14091 break;
14092 case FLASH_5752VENDOR_ST_M45PE10:
14093 case FLASH_5752VENDOR_ST_M45PE20:
14094 case FLASH_5752VENDOR_ST_M45PE40:
14095 tp->nvram_jedecnum = JEDEC_ST;
14096 tg3_flag_set(tp, NVRAM_BUFFERED);
14097 tg3_flag_set(tp, FLASH);
14098 tp->nvram_pagesize = 256;
14099 break;
14100 }
14101 }
14102
14103 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14104 {
14105 u32 nvcfg1, protect = 0;
14106
14107 nvcfg1 = tr32(NVRAM_CFG1);
14108
14109 /* NVRAM protection for TPM */
14110 if (nvcfg1 & (1 << 27)) {
14111 tg3_flag_set(tp, PROTECTED_NVRAM);
14112 protect = 1;
14113 }
14114
14115 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14116 switch (nvcfg1) {
14117 case FLASH_5761VENDOR_ATMEL_ADB021D:
14118 case FLASH_5761VENDOR_ATMEL_ADB041D:
14119 case FLASH_5761VENDOR_ATMEL_ADB081D:
14120 case FLASH_5761VENDOR_ATMEL_ADB161D:
14121 case FLASH_5761VENDOR_ATMEL_MDB021D:
14122 case FLASH_5761VENDOR_ATMEL_MDB041D:
14123 case FLASH_5761VENDOR_ATMEL_MDB081D:
14124 case FLASH_5761VENDOR_ATMEL_MDB161D:
14125 tp->nvram_jedecnum = JEDEC_ATMEL;
14126 tg3_flag_set(tp, NVRAM_BUFFERED);
14127 tg3_flag_set(tp, FLASH);
14128 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14129 tp->nvram_pagesize = 256;
14130 break;
14131 case FLASH_5761VENDOR_ST_A_M45PE20:
14132 case FLASH_5761VENDOR_ST_A_M45PE40:
14133 case FLASH_5761VENDOR_ST_A_M45PE80:
14134 case FLASH_5761VENDOR_ST_A_M45PE16:
14135 case FLASH_5761VENDOR_ST_M_M45PE20:
14136 case FLASH_5761VENDOR_ST_M_M45PE40:
14137 case FLASH_5761VENDOR_ST_M_M45PE80:
14138 case FLASH_5761VENDOR_ST_M_M45PE16:
14139 tp->nvram_jedecnum = JEDEC_ST;
14140 tg3_flag_set(tp, NVRAM_BUFFERED);
14141 tg3_flag_set(tp, FLASH);
14142 tp->nvram_pagesize = 256;
14143 break;
14144 }
14145
14146 if (protect) {
14147 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14148 } else {
14149 switch (nvcfg1) {
14150 case FLASH_5761VENDOR_ATMEL_ADB161D:
14151 case FLASH_5761VENDOR_ATMEL_MDB161D:
14152 case FLASH_5761VENDOR_ST_A_M45PE16:
14153 case FLASH_5761VENDOR_ST_M_M45PE16:
14154 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14155 break;
14156 case FLASH_5761VENDOR_ATMEL_ADB081D:
14157 case FLASH_5761VENDOR_ATMEL_MDB081D:
14158 case FLASH_5761VENDOR_ST_A_M45PE80:
14159 case FLASH_5761VENDOR_ST_M_M45PE80:
14160 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14161 break;
14162 case FLASH_5761VENDOR_ATMEL_ADB041D:
14163 case FLASH_5761VENDOR_ATMEL_MDB041D:
14164 case FLASH_5761VENDOR_ST_A_M45PE40:
14165 case FLASH_5761VENDOR_ST_M_M45PE40:
14166 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14167 break;
14168 case FLASH_5761VENDOR_ATMEL_ADB021D:
14169 case FLASH_5761VENDOR_ATMEL_MDB021D:
14170 case FLASH_5761VENDOR_ST_A_M45PE20:
14171 case FLASH_5761VENDOR_ST_M_M45PE20:
14172 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14173 break;
14174 }
14175 }
14176 }
14177
14178 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14179 {
14180 tp->nvram_jedecnum = JEDEC_ATMEL;
14181 tg3_flag_set(tp, NVRAM_BUFFERED);
14182 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14183 }
14184
14185 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14186 {
14187 u32 nvcfg1;
14188
14189 nvcfg1 = tr32(NVRAM_CFG1);
14190
14191 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14192 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14193 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14194 tp->nvram_jedecnum = JEDEC_ATMEL;
14195 tg3_flag_set(tp, NVRAM_BUFFERED);
14196 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14197
14198 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14199 tw32(NVRAM_CFG1, nvcfg1);
14200 return;
14201 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14202 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14203 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14204 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14205 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14206 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14207 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14208 tp->nvram_jedecnum = JEDEC_ATMEL;
14209 tg3_flag_set(tp, NVRAM_BUFFERED);
14210 tg3_flag_set(tp, FLASH);
14211
14212 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14213 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14214 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14215 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14216 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14217 break;
14218 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14219 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14220 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14221 break;
14222 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14223 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14224 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14225 break;
14226 }
14227 break;
14228 case FLASH_5752VENDOR_ST_M45PE10:
14229 case FLASH_5752VENDOR_ST_M45PE20:
14230 case FLASH_5752VENDOR_ST_M45PE40:
14231 tp->nvram_jedecnum = JEDEC_ST;
14232 tg3_flag_set(tp, NVRAM_BUFFERED);
14233 tg3_flag_set(tp, FLASH);
14234
14235 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14236 case FLASH_5752VENDOR_ST_M45PE10:
14237 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14238 break;
14239 case FLASH_5752VENDOR_ST_M45PE20:
14240 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14241 break;
14242 case FLASH_5752VENDOR_ST_M45PE40:
14243 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14244 break;
14245 }
14246 break;
14247 default:
14248 tg3_flag_set(tp, NO_NVRAM);
14249 return;
14250 }
14251
14252 tg3_nvram_get_pagesize(tp, nvcfg1);
14253 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14254 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14255 }
14256
14257
14258 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14259 {
14260 u32 nvcfg1;
14261
14262 nvcfg1 = tr32(NVRAM_CFG1);
14263
14264 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14265 case FLASH_5717VENDOR_ATMEL_EEPROM:
14266 case FLASH_5717VENDOR_MICRO_EEPROM:
14267 tp->nvram_jedecnum = JEDEC_ATMEL;
14268 tg3_flag_set(tp, NVRAM_BUFFERED);
14269 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14270
14271 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14272 tw32(NVRAM_CFG1, nvcfg1);
14273 return;
14274 case FLASH_5717VENDOR_ATMEL_MDB011D:
14275 case FLASH_5717VENDOR_ATMEL_ADB011B:
14276 case FLASH_5717VENDOR_ATMEL_ADB011D:
14277 case FLASH_5717VENDOR_ATMEL_MDB021D:
14278 case FLASH_5717VENDOR_ATMEL_ADB021B:
14279 case FLASH_5717VENDOR_ATMEL_ADB021D:
14280 case FLASH_5717VENDOR_ATMEL_45USPT:
14281 tp->nvram_jedecnum = JEDEC_ATMEL;
14282 tg3_flag_set(tp, NVRAM_BUFFERED);
14283 tg3_flag_set(tp, FLASH);
14284
14285 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14286 case FLASH_5717VENDOR_ATMEL_MDB021D:
14287 /* Detect size with tg3_get_nvram_size() */
14288 break;
14289 case FLASH_5717VENDOR_ATMEL_ADB021B:
14290 case FLASH_5717VENDOR_ATMEL_ADB021D:
14291 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14292 break;
14293 default:
14294 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14295 break;
14296 }
14297 break;
14298 case FLASH_5717VENDOR_ST_M_M25PE10:
14299 case FLASH_5717VENDOR_ST_A_M25PE10:
14300 case FLASH_5717VENDOR_ST_M_M45PE10:
14301 case FLASH_5717VENDOR_ST_A_M45PE10:
14302 case FLASH_5717VENDOR_ST_M_M25PE20:
14303 case FLASH_5717VENDOR_ST_A_M25PE20:
14304 case FLASH_5717VENDOR_ST_M_M45PE20:
14305 case FLASH_5717VENDOR_ST_A_M45PE20:
14306 case FLASH_5717VENDOR_ST_25USPT:
14307 case FLASH_5717VENDOR_ST_45USPT:
14308 tp->nvram_jedecnum = JEDEC_ST;
14309 tg3_flag_set(tp, NVRAM_BUFFERED);
14310 tg3_flag_set(tp, FLASH);
14311
14312 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14313 case FLASH_5717VENDOR_ST_M_M25PE20:
14314 case FLASH_5717VENDOR_ST_M_M45PE20:
14315 /* Detect size with tg3_get_nvram_size() */
14316 break;
14317 case FLASH_5717VENDOR_ST_A_M25PE20:
14318 case FLASH_5717VENDOR_ST_A_M45PE20:
14319 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14320 break;
14321 default:
14322 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14323 break;
14324 }
14325 break;
14326 default:
14327 tg3_flag_set(tp, NO_NVRAM);
14328 return;
14329 }
14330
14331 tg3_nvram_get_pagesize(tp, nvcfg1);
14332 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14333 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14334 }
14335
14336 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14337 {
14338 u32 nvcfg1, nvmpinstrp;
14339
14340 nvcfg1 = tr32(NVRAM_CFG1);
14341 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14342
14343 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14344 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14345 tg3_flag_set(tp, NO_NVRAM);
14346 return;
14347 }
14348
14349 switch (nvmpinstrp) {
14350 case FLASH_5762_EEPROM_HD:
14351 nvmpinstrp = FLASH_5720_EEPROM_HD;
14352 break;
14353 case FLASH_5762_EEPROM_LD:
14354 nvmpinstrp = FLASH_5720_EEPROM_LD;
14355 break;
14356 case FLASH_5720VENDOR_M_ST_M45PE20:
14357 /* This pinstrap supports multiple sizes, so force it
14358 * to read the actual size from location 0xf0.
14359 */
14360 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14361 break;
14362 }
14363 }
14364
14365 switch (nvmpinstrp) {
14366 case FLASH_5720_EEPROM_HD:
14367 case FLASH_5720_EEPROM_LD:
14368 tp->nvram_jedecnum = JEDEC_ATMEL;
14369 tg3_flag_set(tp, NVRAM_BUFFERED);
14370
14371 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14372 tw32(NVRAM_CFG1, nvcfg1);
14373 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14374 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14375 else
14376 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14377 return;
14378 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14379 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14380 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14381 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14382 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14383 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14384 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14385 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14386 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14387 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14388 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14389 case FLASH_5720VENDOR_ATMEL_45USPT:
14390 tp->nvram_jedecnum = JEDEC_ATMEL;
14391 tg3_flag_set(tp, NVRAM_BUFFERED);
14392 tg3_flag_set(tp, FLASH);
14393
14394 switch (nvmpinstrp) {
14395 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14396 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14397 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14398 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14399 break;
14400 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14401 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14402 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14403 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14404 break;
14405 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14406 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14407 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14408 break;
14409 default:
14410 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14411 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14412 break;
14413 }
14414 break;
14415 case FLASH_5720VENDOR_M_ST_M25PE10:
14416 case FLASH_5720VENDOR_M_ST_M45PE10:
14417 case FLASH_5720VENDOR_A_ST_M25PE10:
14418 case FLASH_5720VENDOR_A_ST_M45PE10:
14419 case FLASH_5720VENDOR_M_ST_M25PE20:
14420 case FLASH_5720VENDOR_M_ST_M45PE20:
14421 case FLASH_5720VENDOR_A_ST_M25PE20:
14422 case FLASH_5720VENDOR_A_ST_M45PE20:
14423 case FLASH_5720VENDOR_M_ST_M25PE40:
14424 case FLASH_5720VENDOR_M_ST_M45PE40:
14425 case FLASH_5720VENDOR_A_ST_M25PE40:
14426 case FLASH_5720VENDOR_A_ST_M45PE40:
14427 case FLASH_5720VENDOR_M_ST_M25PE80:
14428 case FLASH_5720VENDOR_M_ST_M45PE80:
14429 case FLASH_5720VENDOR_A_ST_M25PE80:
14430 case FLASH_5720VENDOR_A_ST_M45PE80:
14431 case FLASH_5720VENDOR_ST_25USPT:
14432 case FLASH_5720VENDOR_ST_45USPT:
14433 tp->nvram_jedecnum = JEDEC_ST;
14434 tg3_flag_set(tp, NVRAM_BUFFERED);
14435 tg3_flag_set(tp, FLASH);
14436
14437 switch (nvmpinstrp) {
14438 case FLASH_5720VENDOR_M_ST_M25PE20:
14439 case FLASH_5720VENDOR_M_ST_M45PE20:
14440 case FLASH_5720VENDOR_A_ST_M25PE20:
14441 case FLASH_5720VENDOR_A_ST_M45PE20:
14442 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14443 break;
14444 case FLASH_5720VENDOR_M_ST_M25PE40:
14445 case FLASH_5720VENDOR_M_ST_M45PE40:
14446 case FLASH_5720VENDOR_A_ST_M25PE40:
14447 case FLASH_5720VENDOR_A_ST_M45PE40:
14448 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14449 break;
14450 case FLASH_5720VENDOR_M_ST_M25PE80:
14451 case FLASH_5720VENDOR_M_ST_M45PE80:
14452 case FLASH_5720VENDOR_A_ST_M25PE80:
14453 case FLASH_5720VENDOR_A_ST_M45PE80:
14454 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14455 break;
14456 default:
14457 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14458 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14459 break;
14460 }
14461 break;
14462 default:
14463 tg3_flag_set(tp, NO_NVRAM);
14464 return;
14465 }
14466
14467 tg3_nvram_get_pagesize(tp, nvcfg1);
14468 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14469 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14470
14471 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14472 u32 val;
14473
14474 if (tg3_nvram_read(tp, 0, &val))
14475 return;
14476
14477 if (val != TG3_EEPROM_MAGIC &&
14478 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14479 tg3_flag_set(tp, NO_NVRAM);
14480 }
14481 }
14482
14483 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14484 static void tg3_nvram_init(struct tg3 *tp)
14485 {
14486 if (tg3_flag(tp, IS_SSB_CORE)) {
14487 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14488 tg3_flag_clear(tp, NVRAM);
14489 tg3_flag_clear(tp, NVRAM_BUFFERED);
14490 tg3_flag_set(tp, NO_NVRAM);
14491 return;
14492 }
14493
14494 tw32_f(GRC_EEPROM_ADDR,
14495 (EEPROM_ADDR_FSM_RESET |
14496 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14497 EEPROM_ADDR_CLKPERD_SHIFT)));
14498
14499 msleep(1);
14500
14501 /* Enable serial EEPROM accesses. */
14502 tw32_f(GRC_LOCAL_CTRL,
14503 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14504 udelay(100);
14505
14506 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14507 tg3_asic_rev(tp) != ASIC_REV_5701) {
14508 tg3_flag_set(tp, NVRAM);
14509
14510 if (tg3_nvram_lock(tp)) {
14511 netdev_warn(tp->dev,
14512 "Cannot get nvram lock, %s failed\n",
14513 __func__);
14514 return;
14515 }
14516 tg3_enable_nvram_access(tp);
14517
14518 tp->nvram_size = 0;
14519
14520 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14521 tg3_get_5752_nvram_info(tp);
14522 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14523 tg3_get_5755_nvram_info(tp);
14524 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14525 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14526 tg3_asic_rev(tp) == ASIC_REV_5785)
14527 tg3_get_5787_nvram_info(tp);
14528 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14529 tg3_get_5761_nvram_info(tp);
14530 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14531 tg3_get_5906_nvram_info(tp);
14532 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14533 tg3_flag(tp, 57765_CLASS))
14534 tg3_get_57780_nvram_info(tp);
14535 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14536 tg3_asic_rev(tp) == ASIC_REV_5719)
14537 tg3_get_5717_nvram_info(tp);
14538 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14539 tg3_asic_rev(tp) == ASIC_REV_5762)
14540 tg3_get_5720_nvram_info(tp);
14541 else
14542 tg3_get_nvram_info(tp);
14543
14544 if (tp->nvram_size == 0)
14545 tg3_get_nvram_size(tp);
14546
14547 tg3_disable_nvram_access(tp);
14548 tg3_nvram_unlock(tp);
14549
14550 } else {
14551 tg3_flag_clear(tp, NVRAM);
14552 tg3_flag_clear(tp, NVRAM_BUFFERED);
14553
14554 tg3_get_eeprom_size(tp);
14555 }
14556 }
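/* Probe summary: each per-ASIC routine above fills in nvram_jedecnum,
 * nvram_pagesize and, where the pinstraps encode it, nvram_size; if
 * the size is still zero afterwards, tg3_get_nvram_size() falls back
 * to reading it from the image or probing for address wrap-around.
 */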
14557
14558 struct subsys_tbl_ent {
14559 u16 subsys_vendor, subsys_devid;
14560 u32 phy_id;
14561 };
14562
14563 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14564 /* Broadcom boards. */
14565 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14566 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14567 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14568 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14569 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14570 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14571 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14572 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14573 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14574 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14575 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14576 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14577 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14578 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14579 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14580 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14581 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14582 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14583 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14584 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14585 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14586 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14587
14588 /* 3com boards. */
14589 { TG3PCI_SUBVENDOR_ID_3COM,
14590 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14591 { TG3PCI_SUBVENDOR_ID_3COM,
14592 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14593 { TG3PCI_SUBVENDOR_ID_3COM,
14594 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14595 { TG3PCI_SUBVENDOR_ID_3COM,
14596 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14597 { TG3PCI_SUBVENDOR_ID_3COM,
14598 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14599
14600 /* DELL boards. */
14601 { TG3PCI_SUBVENDOR_ID_DELL,
14602 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14603 { TG3PCI_SUBVENDOR_ID_DELL,
14604 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14605 { TG3PCI_SUBVENDOR_ID_DELL,
14606 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14607 { TG3PCI_SUBVENDOR_ID_DELL,
14608 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14609
14610 /* Compaq boards. */
14611 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14612 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14613 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14614 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14615 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14616 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14617 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14618 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14619 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14620 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14621
14622 /* IBM boards. */
14623 { TG3PCI_SUBVENDOR_ID_IBM,
14624 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14625 };
14626
14627 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14628 {
14629 int i;
14630
14631 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14632 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14633 tp->pdev->subsystem_vendor) &&
14634 (subsys_id_to_phy_id[i].subsys_devid ==
14635 tp->pdev->subsystem_device))
14636 return &subsys_id_to_phy_id[i];
14637 }
14638 return NULL;
14639 }
14640
14641 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14642 {
14643 u32 val;
14644
14645 tp->phy_id = TG3_PHY_ID_INVALID;
14646 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14647
14648 /* Assume an onboard device that is WOL-capable by default. */
14649 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14650 tg3_flag_set(tp, WOL_CAP);
14651
14652 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14653 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14654 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14655 tg3_flag_set(tp, IS_NIC);
14656 }
14657 val = tr32(VCPU_CFGSHDW);
14658 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14659 tg3_flag_set(tp, ASPM_WORKAROUND);
14660 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14661 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14662 tg3_flag_set(tp, WOL_ENABLE);
14663 device_set_wakeup_enable(&tp->pdev->dev, true);
14664 }
14665 goto done;
14666 }
14667
14668 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14669 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14670 u32 nic_cfg, led_cfg;
14671 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14672 int eeprom_phy_serdes = 0;
14673
14674 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14675 tp->nic_sram_data_cfg = nic_cfg;
14676
14677 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14678 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14679 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14680 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14681 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14682 (ver > 0) && (ver < 0x100))
14683 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14684
14685 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14686 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14687
14688 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14689 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14690 eeprom_phy_serdes = 1;
14691
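/* The SRAM-resident PHY ID below is unpacked into the driver's
 * internal layout (the same one tg3_phy_probe() builds from
 * MII_PHYSID1/2): bits 31:26 take the upper OUI bits from ID2,
 * bits 25:10 the sixteen OUI bits from ID1, and bits 9:0 the
 * model and revision fields.
 */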
14692 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14693 if (nic_phy_id != 0) {
14694 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14695 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14696
14697 eeprom_phy_id = (id1 >> 16) << 10;
14698 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14699 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14700 } else
14701 eeprom_phy_id = 0;
14702
14703 tp->phy_id = eeprom_phy_id;
14704 if (eeprom_phy_serdes) {
14705 if (!tg3_flag(tp, 5705_PLUS))
14706 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14707 else
14708 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14709 }
14710
14711 if (tg3_flag(tp, 5750_PLUS))
14712 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14713 SHASTA_EXT_LED_MODE_MASK);
14714 else
14715 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14716
14717 switch (led_cfg) {
14718 default:
14719 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14720 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14721 break;
14722
14723 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14724 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14725 break;
14726
14727 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14728 tp->led_ctrl = LED_CTRL_MODE_MAC;
14729
14730 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14731 * read from some older 5700/5701 bootcode.
14732 */
14733 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14734 tg3_asic_rev(tp) == ASIC_REV_5701)
14735 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14736
14737 break;
14738
14739 case SHASTA_EXT_LED_SHARED:
14740 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14741 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14742 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14743 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14744 LED_CTRL_MODE_PHY_2);
14745 break;
14746
14747 case SHASTA_EXT_LED_MAC:
14748 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14749 break;
14750
14751 case SHASTA_EXT_LED_COMBO:
14752 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14753 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14754 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14755 LED_CTRL_MODE_PHY_2);
14756 break;
14757
14758 }
14759
14760 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14761 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14762 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14763 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14764
14765 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14766 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14767
14768 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14769 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14770 if ((tp->pdev->subsystem_vendor ==
14771 PCI_VENDOR_ID_ARIMA) &&
14772 (tp->pdev->subsystem_device == 0x205a ||
14773 tp->pdev->subsystem_device == 0x2063))
14774 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14775 } else {
14776 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14777 tg3_flag_set(tp, IS_NIC);
14778 }
14779
14780 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14781 tg3_flag_set(tp, ENABLE_ASF);
14782 if (tg3_flag(tp, 5750_PLUS))
14783 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14784 }
14785
14786 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14787 tg3_flag(tp, 5750_PLUS))
14788 tg3_flag_set(tp, ENABLE_APE);
14789
14790 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14791 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14792 tg3_flag_clear(tp, WOL_CAP);
14793
14794 if (tg3_flag(tp, WOL_CAP) &&
14795 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14796 tg3_flag_set(tp, WOL_ENABLE);
14797 device_set_wakeup_enable(&tp->pdev->dev, true);
14798 }
14799
14800 if (cfg2 & (1 << 17))
14801 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14802
14803 /* serdes signal pre-emphasis in register 0x590 is set by
14804 * the bootcode if bit 18 is set */
14805 if (cfg2 & (1 << 18))
14806 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14807
14808 if ((tg3_flag(tp, 57765_PLUS) ||
14809 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14810 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14811 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14812 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14813
14814 if (tg3_flag(tp, PCI_EXPRESS)) {
14815 u32 cfg3;
14816
14817 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14818 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14819 !tg3_flag(tp, 57765_PLUS) &&
14820 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14821 tg3_flag_set(tp, ASPM_WORKAROUND);
14822 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14823 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14824 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14825 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14826 }
14827
14828 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14829 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14830 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14831 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14832 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14833 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14834 }
14835 done:
14836 if (tg3_flag(tp, WOL_CAP))
14837 device_set_wakeup_enable(&tp->pdev->dev,
14838 tg3_flag(tp, WOL_ENABLE));
14839 else
14840 device_set_wakeup_capable(&tp->pdev->dev, false);
14841 }
14842
14843 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14844 {
14845 int i, err;
14846 u32 val2, off = offset * 8;
14847
14848 err = tg3_nvram_lock(tp);
14849 if (err)
14850 return err;
14851
14852 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14853 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14854 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14855 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14856 udelay(10);
14857
14858 for (i = 0; i < 100; i++) {
14859 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14860 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14861 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14862 break;
14863 }
14864 udelay(10);
14865 }
14866
14867 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14868
14869 tg3_nvram_unlock(tp);
14870 if (val2 & APE_OTP_STATUS_CMD_DONE)
14871 return 0;
14872
14873 return -EBUSY;
14874 }
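/* The status poll above budgets roughly 1 ms (100 iterations x 10 us)
 * for the APE OTP engine, matching the window tg3_issue_otp_command()
 * below allows for the GRC-driven OTP block.
 */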
14875
14876 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14877 {
14878 int i;
14879 u32 val;
14880
14881 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14882 tw32(OTP_CTRL, cmd);
14883
14884 /* Wait for up to 1 ms for command to execute. */
14885 for (i = 0; i < 100; i++) {
14886 val = tr32(OTP_STATUS);
14887 if (val & OTP_STATUS_CMD_DONE)
14888 break;
14889 udelay(10);
14890 }
14891
14892 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14893 }
14894
14895 /* Read the gphy configuration from the OTP region of the chip. The gphy
14896 * configuration is a 32-bit value that straddles the alignment boundary.
14897 * We do two 32-bit reads and then shift and merge the results.
14898 */
14899 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14900 {
14901 u32 bhalf_otp, thalf_otp;
14902
14903 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14904
14905 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14906 return 0;
14907
14908 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14909
14910 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14911 return 0;
14912
14913 thalf_otp = tr32(OTP_READ_DATA);
14914
14915 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14916
14917 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14918 return 0;
14919
14920 bhalf_otp = tr32(OTP_READ_DATA);
14921
14922 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14923 }
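/* Merge illustration: if the straddled value sits in two aligned OTP
 * words as 0x????TTTT (read at MAGIC1) and 0xBBBB???? (read at
 * MAGIC2), the function returns 0xTTTTBBBB -- the low half of the
 * first word becomes the high half of the result.
 */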
14924
14925 static void tg3_phy_init_link_config(struct tg3 *tp)
14926 {
14927 u32 adv = ADVERTISED_Autoneg;
14928
14929 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14930 adv |= ADVERTISED_1000baseT_Half |
14931 ADVERTISED_1000baseT_Full;
14932
14933 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14934 adv |= ADVERTISED_100baseT_Half |
14935 ADVERTISED_100baseT_Full |
14936 ADVERTISED_10baseT_Half |
14937 ADVERTISED_10baseT_Full |
14938 ADVERTISED_TP;
14939 else
14940 adv |= ADVERTISED_FIBRE;
14941
14942 tp->link_config.advertising = adv;
14943 tp->link_config.speed = SPEED_UNKNOWN;
14944 tp->link_config.duplex = DUPLEX_UNKNOWN;
14945 tp->link_config.autoneg = AUTONEG_ENABLE;
14946 tp->link_config.active_speed = SPEED_UNKNOWN;
14947 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14948
14949 tp->old_link = -1;
14950 }
14951
14952 static int tg3_phy_probe(struct tg3 *tp)
14953 {
14954 u32 hw_phy_id_1, hw_phy_id_2;
14955 u32 hw_phy_id, hw_phy_id_masked;
14956 int err;
14957
14958 /* flow control autonegotiation is default behavior */
14959 tg3_flag_set(tp, PAUSE_AUTONEG);
14960 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14961
14962 if (tg3_flag(tp, ENABLE_APE)) {
14963 switch (tp->pci_fn) {
14964 case 0:
14965 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14966 break;
14967 case 1:
14968 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14969 break;
14970 case 2:
14971 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14972 break;
14973 case 3:
14974 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14975 break;
14976 }
14977 }
14978
14979 if (!tg3_flag(tp, ENABLE_ASF) &&
14980 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14981 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14982 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14983 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14984
14985 if (tg3_flag(tp, USE_PHYLIB))
14986 return tg3_phy_init(tp);
14987
14988 /* Reading the PHY ID register can conflict with ASF
14989 * firmware access to the PHY hardware.
14990 */
14991 err = 0;
14992 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14993 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14994 } else {
14995 /* Now read the physical PHY_ID from the chip and verify
14996 * that it is sane. If it doesn't look good, we fall back
14997 * to the value already found in the eeprom area, and
14998 * failing that, to the hard-coded subsystem-ID table.
14999 */
15000 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15001 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15002
15003 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15004 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15005 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15006
15007 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15008 }
15009
15010 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15011 tp->phy_id = hw_phy_id;
15012 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15013 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15014 else
15015 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15016 } else {
15017 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15018 /* Do nothing, phy ID already set up in
15019 * tg3_get_eeprom_hw_cfg().
15020 */
15021 } else {
15022 struct subsys_tbl_ent *p;
15023
15024 /* No eeprom signature? Try the hardcoded
15025 * subsys device table.
15026 */
15027 p = tg3_lookup_by_subsys(tp);
15028 if (p) {
15029 tp->phy_id = p->phy_id;
15030 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15031 /* So far we have seen the IDs 0xbc050cd0,
15032 * 0xbc050f80 and 0xbc050c30 on devices
15033 * connected to a BCM4785, and there are
15034 * probably more. For now, just assume that
15035 * the phy is supported when it is connected
15036 * to an SSB core.
15037 */
15038 return -ENODEV;
15039 }
15040
15041 if (!tp->phy_id ||
15042 tp->phy_id == TG3_PHY_ID_BCM8002)
15043 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15044 }
15045 }
15046
15047 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15048 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15049 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15050 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15051 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15052 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15053 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15054 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15055 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
15056 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15057
15058 tg3_phy_init_link_config(tp);
15059
15060 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15061 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15062 !tg3_flag(tp, ENABLE_APE) &&
15063 !tg3_flag(tp, ENABLE_ASF)) {
15064 u32 bmsr, dummy;
15065
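/* The link-status bit in MII_BMSR is latched low, so the first
 * read can report a stale link-down event; reading it twice
 * yields the current state.
 */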
15066 tg3_readphy(tp, MII_BMSR, &bmsr);
15067 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15068 (bmsr & BMSR_LSTATUS))
15069 goto skip_phy_reset;
15070
15071 err = tg3_phy_reset(tp);
15072 if (err)
15073 return err;
15074
15075 tg3_phy_set_wirespeed(tp);
15076
15077 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15078 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15079 tp->link_config.flowctrl);
15080
15081 tg3_writephy(tp, MII_BMCR,
15082 BMCR_ANENABLE | BMCR_ANRESTART);
15083 }
15084 }
15085
15086 skip_phy_reset:
15087 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15088 err = tg3_init_5401phy_dsp(tp);
15089 if (err)
15090 return err;
15091
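/* The second DSP load appears to be deliberate; the 5401
 * coefficients evidently need to be written twice to stick.
 */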
15092 err = tg3_init_5401phy_dsp(tp);
15093 }
15094
15095 return err;
15096 }
15097
15098 static void tg3_read_vpd(struct tg3 *tp)
15099 {
15100 u8 *vpd_data;
15101 unsigned int block_end, rosize, len;
15102 u32 vpdlen;
15103 int j, i = 0;
15104
15105 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15106 if (!vpd_data)
15107 goto out_no_vpd;
15108
15109 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15110 if (i < 0)
15111 goto out_not_found;
15112
15113 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15114 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15115 i += PCI_VPD_LRDT_TAG_SIZE;
15116
15117 if (block_end > vpdlen)
15118 goto out_not_found;
15119
15120 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15121 PCI_VPD_RO_KEYWORD_MFR_ID);
15122 if (j > 0) {
15123 len = pci_vpd_info_field_size(&vpd_data[j]);
15124
15125 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15126 if (j + len > block_end || len != 4 ||
15127 memcmp(&vpd_data[j], "1028", 4))
15128 goto partno;
15129
15130 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15131 PCI_VPD_RO_KEYWORD_VENDOR0);
15132 if (j < 0)
15133 goto partno;
15134
15135 len = pci_vpd_info_field_size(&vpd_data[j]);
15136
15137 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15138 if (j + len > block_end)
15139 goto partno;
15140
15141 if (len >= sizeof(tp->fw_ver))
15142 len = sizeof(tp->fw_ver) - 1;
15143 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15144 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15145 &vpd_data[j]);
15146 }
15147
15148 partno:
15149 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15150 PCI_VPD_RO_KEYWORD_PARTNO);
15151 if (i < 0)
15152 goto out_not_found;
15153
15154 len = pci_vpd_info_field_size(&vpd_data[i]);
15155
15156 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15157 if (len > TG3_BPN_SIZE ||
15158 (len + i) > vpdlen)
15159 goto out_not_found;
15160
15161 memcpy(tp->board_part_number, &vpd_data[i], len);
15162
15163 out_not_found:
15164 kfree(vpd_data);
15165 if (tp->board_part_number[0])
15166 return;
15167
15168 out_no_vpd:
15169 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15170 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15171 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15172 strcpy(tp->board_part_number, "BCM5717");
15173 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15174 strcpy(tp->board_part_number, "BCM5718");
15175 else
15176 goto nomatch;
15177 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15178 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15179 strcpy(tp->board_part_number, "BCM57780");
15180 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15181 strcpy(tp->board_part_number, "BCM57760");
15182 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15183 strcpy(tp->board_part_number, "BCM57790");
15184 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15185 strcpy(tp->board_part_number, "BCM57788");
15186 else
15187 goto nomatch;
15188 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15189 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15190 strcpy(tp->board_part_number, "BCM57761");
15191 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15192 strcpy(tp->board_part_number, "BCM57765");
15193 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15194 strcpy(tp->board_part_number, "BCM57781");
15195 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15196 strcpy(tp->board_part_number, "BCM57785");
15197 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15198 strcpy(tp->board_part_number, "BCM57791");
15199 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15200 strcpy(tp->board_part_number, "BCM57795");
15201 else
15202 goto nomatch;
15203 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15204 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15205 strcpy(tp->board_part_number, "BCM57762");
15206 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15207 strcpy(tp->board_part_number, "BCM57766");
15208 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15209 strcpy(tp->board_part_number, "BCM57782");
15210 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15211 strcpy(tp->board_part_number, "BCM57786");
15212 else
15213 goto nomatch;
15214 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15215 strcpy(tp->board_part_number, "BCM95906");
15216 } else {
15217 nomatch:
15218 strcpy(tp->board_part_number, "none");
15219 }
15220 }
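/* The lookups above walk the standard PCI VPD read-only section:
 * "PN" (part number) identifies the board and, when the manufacturer
 * ID keyword reads "1028" (Dell's PCI vendor ID), the "V0"
 * vendor-specific keyword supplies the firmware version prefix.
 */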
15221
15222 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15223 {
15224 u32 val;
15225
15226 if (tg3_nvram_read(tp, offset, &val) ||
15227 (val & 0xfc000000) != 0x0c000000 ||
15228 tg3_nvram_read(tp, offset + 4, &val) ||
15229 val != 0)
15230 return 0;
15231
15232 return 1;
15233 }
15234
15235 static void tg3_read_bc_ver(struct tg3 *tp)
15236 {
15237 u32 val, offset, start, ver_offset;
15238 int i, dst_off;
15239 bool newver = false;
15240
15241 if (tg3_nvram_read(tp, 0xc, &offset) ||
15242 tg3_nvram_read(tp, 0x4, &start))
15243 return;
15244
15245 offset = tg3_nvram_logical_addr(tp, offset);
15246
15247 if (tg3_nvram_read(tp, offset, &val))
15248 return;
15249
15250 if ((val & 0xfc000000) == 0x0c000000) {
15251 if (tg3_nvram_read(tp, offset + 4, &val))
15252 return;
15253
15254 if (val == 0)
15255 newver = true;
15256 }
15257
15258 dst_off = strlen(tp->fw_ver);
15259
15260 if (newver) {
15261 if (TG3_VER_SIZE - dst_off < 16 ||
15262 tg3_nvram_read(tp, offset + 8, &ver_offset))
15263 return;
15264
15265 offset = offset + ver_offset - start;
15266 for (i = 0; i < 16; i += 4) {
15267 __be32 v;
15268 if (tg3_nvram_read_be32(tp, offset + i, &v))
15269 return;
15270
15271 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15272 }
15273 } else {
15274 u32 major, minor;
15275
15276 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15277 return;
15278
15279 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15280 TG3_NVM_BCVER_MAJSFT;
15281 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15282 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15283 "v%d.%02d", major, minor);
15284 }
15285 }
15286
15287 static void tg3_read_hwsb_ver(struct tg3 *tp)
15288 {
15289 u32 val, major, minor;
15290
15291 /* Use native endian representation */
15292 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15293 return;
15294
15295 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15296 TG3_NVM_HWSB_CFG1_MAJSFT;
15297 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15298 TG3_NVM_HWSB_CFG1_MINSFT;
15299
15300 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15301 }
15302
15303 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15304 {
15305 u32 offset, major, minor, build;
15306
15307 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15308
15309 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15310 return;
15311
15312 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15313 case TG3_EEPROM_SB_REVISION_0:
15314 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15315 break;
15316 case TG3_EEPROM_SB_REVISION_2:
15317 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15318 break;
15319 case TG3_EEPROM_SB_REVISION_3:
15320 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15321 break;
15322 case TG3_EEPROM_SB_REVISION_4:
15323 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15324 break;
15325 case TG3_EEPROM_SB_REVISION_5:
15326 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15327 break;
15328 case TG3_EEPROM_SB_REVISION_6:
15329 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15330 break;
15331 default:
15332 return;
15333 }
15334
15335 if (tg3_nvram_read(tp, offset, &val))
15336 return;
15337
15338 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15339 TG3_EEPROM_SB_EDH_BLD_SHFT;
15340 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15341 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15342 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15343
15344 if (minor > 99 || build > 26)
15345 return;
15346
15347 offset = strlen(tp->fw_ver);
15348 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15349 " v%d.%02d", major, minor);
15350
15351 if (build > 0) {
15352 offset = strlen(tp->fw_ver);
15353 if (offset < TG3_VER_SIZE - 1)
15354 tp->fw_ver[offset] = 'a' + build - 1;
15355 }
15356 }
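/* Builds 1 through 26 are encoded as a trailing letter 'a'-'z' on
 * the version string, which is why larger build numbers are
 * rejected above.
 */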
15357
15358 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15359 {
15360 u32 val, offset, start;
15361 int i, vlen;
15362
15363 for (offset = TG3_NVM_DIR_START;
15364 offset < TG3_NVM_DIR_END;
15365 offset += TG3_NVM_DIRENT_SIZE) {
15366 if (tg3_nvram_read(tp, offset, &val))
15367 return;
15368
15369 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15370 break;
15371 }
15372
15373 if (offset == TG3_NVM_DIR_END)
15374 return;
15375
15376 if (!tg3_flag(tp, 5705_PLUS))
15377 start = 0x08000000;
15378 else if (tg3_nvram_read(tp, offset - 4, &start))
15379 return;
15380
15381 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15382 !tg3_fw_img_is_valid(tp, offset) ||
15383 tg3_nvram_read(tp, offset + 8, &val))
15384 return;
15385
15386 offset += val - start;
15387
15388 vlen = strlen(tp->fw_ver);
15389
15390 tp->fw_ver[vlen++] = ',';
15391 tp->fw_ver[vlen++] = ' ';
15392
15393 for (i = 0; i < 4; i++) {
15394 __be32 v;
15395 if (tg3_nvram_read_be32(tp, offset, &v))
15396 return;
15397
15398 offset += sizeof(v);
15399
15400 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15401 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15402 break;
15403 }
15404
15405 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15406 vlen += sizeof(v);
15407 }
15408 }
15409
15410 static void tg3_probe_ncsi(struct tg3 *tp)
15411 {
15412 u32 apedata;
15413
15414 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15415 if (apedata != APE_SEG_SIG_MAGIC)
15416 return;
15417
15418 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15419 if (!(apedata & APE_FW_STATUS_READY))
15420 return;
15421
15422 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15423 tg3_flag_set(tp, APE_HAS_NCSI);
15424 }
15425
15426 static void tg3_read_dash_ver(struct tg3 *tp)
15427 {
15428 int vlen;
15429 u32 apedata;
15430 char *fwtype;
15431
15432 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15433
15434 if (tg3_flag(tp, APE_HAS_NCSI))
15435 fwtype = "NCSI";
15436 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15437 fwtype = "SMASH";
15438 else
15439 fwtype = "DASH";
15440
15441 vlen = strlen(tp->fw_ver);
15442
15443 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15444 fwtype,
15445 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15446 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15447 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15448 (apedata & APE_FW_VERSION_BLDMSK));
15449 }
15450
15451 static void tg3_read_otp_ver(struct tg3 *tp)
15452 {
15453 u32 val, val2;
15454
15455 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15456 return;
15457
15458 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15459 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15460 TG3_OTP_MAGIC0_VALID(val)) {
15461 u64 val64 = (u64) val << 32 | val2;
15462 u32 ver = 0;
15463 int i, vlen;
15464
15465 for (i = 0; i < 7; i++) {
15466 if ((val64 & 0xff) == 0)
15467 break;
15468 ver = val64 & 0xff;
15469 val64 >>= 8;
15470 }
15471 vlen = strlen(tp->fw_ver);
15472 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15473 }
15474 }
15475
15476 static void tg3_read_fw_ver(struct tg3 *tp)
15477 {
15478 u32 val;
15479 bool vpd_vers = false;
15480
15481 if (tp->fw_ver[0] != 0)
15482 vpd_vers = true;
15483
15484 if (tg3_flag(tp, NO_NVRAM)) {
15485 strcat(tp->fw_ver, "sb");
15486 tg3_read_otp_ver(tp);
15487 return;
15488 }
15489
15490 if (tg3_nvram_read(tp, 0, &val))
15491 return;
15492
15493 if (val == TG3_EEPROM_MAGIC)
15494 tg3_read_bc_ver(tp);
15495 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15496 tg3_read_sb_ver(tp, val);
15497 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15498 tg3_read_hwsb_ver(tp);
15499
15500 if (tg3_flag(tp, ENABLE_ASF)) {
15501 if (tg3_flag(tp, ENABLE_APE)) {
15502 tg3_probe_ncsi(tp);
15503 if (!vpd_vers)
15504 tg3_read_dash_ver(tp);
15505 } else if (!vpd_vers) {
15506 tg3_read_mgmtfw_ver(tp);
15507 }
15508 }
15509
15510 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15511 }
15512
15513 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15514 {
15515 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15516 return TG3_RX_RET_MAX_SIZE_5717;
15517 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15518 return TG3_RX_RET_MAX_SIZE_5700;
15519 else
15520 return TG3_RX_RET_MAX_SIZE_5705;
15521 }
15522
15523 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15524 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15525 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15526 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15527 { },
15528 };
15529
15530 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15531 {
15532 struct pci_dev *peer;
15533 unsigned int func, devnr = tp->pdev->devfn & ~7;
15534
15535 for (func = 0; func < 8; func++) {
15536 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15537 if (peer && peer != tp->pdev)
15538 break;
15539 pci_dev_put(peer);
15540 }
15541 /* 5704 can be configured in single-port mode; set peer to
15542 * tp->pdev in that case.
15543 */
15544 if (!peer) {
15545 peer = tp->pdev;
15546 return peer;
15547 }
15548
15549 /*
15550 * We don't need to keep the refcount elevated; there's no way
15551 * to remove one half of this device without removing the other.
15552 */
15553 pci_dev_put(peer);
15554
15555 return peer;
15556 }
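/* devfn & ~7 above clears the three PCI function-number bits, so the
 * loop probes all eight functions of the slot to find the other port
 * of a dual-port device such as the 5704.
 */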
15557
15558 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15559 {
15560 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15561 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15562 u32 reg;
15563
15564 /* All devices that use the alternate
15565 * ASIC REV location have a CPMU.
15566 */
15567 tg3_flag_set(tp, CPMU_PRESENT);
15568
15569 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15570 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15576 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15577 reg = TG3PCI_GEN2_PRODID_ASICREV;
15578 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15579 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15580 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15581 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15582 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15583 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15584 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15585 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15586 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15587 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15588 reg = TG3PCI_GEN15_PRODID_ASICREV;
15589 else
15590 reg = TG3PCI_PRODID_ASICREV;
15591
15592 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15593 }
15594
15595 /* Wrong chip ID in 5752 A0. This code can be removed later
15596 * as A0 is not in production.
15597 */
15598 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15599 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15600
15601 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15602 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15603
15604 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15605 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15606 tg3_asic_rev(tp) == ASIC_REV_5720)
15607 tg3_flag_set(tp, 5717_PLUS);
15608
15609 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15610 tg3_asic_rev(tp) == ASIC_REV_57766)
15611 tg3_flag_set(tp, 57765_CLASS);
15612
15613 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15614 tg3_asic_rev(tp) == ASIC_REV_5762)
15615 tg3_flag_set(tp, 57765_PLUS);
15616
15617 /* Intentionally exclude ASIC_REV_5906 */
15618 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15619 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15620 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15621 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15622 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15623 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15624 tg3_flag(tp, 57765_PLUS))
15625 tg3_flag_set(tp, 5755_PLUS);
15626
15627 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15628 tg3_asic_rev(tp) == ASIC_REV_5714)
15629 tg3_flag_set(tp, 5780_CLASS);
15630
15631 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15632 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15633 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15634 tg3_flag(tp, 5755_PLUS) ||
15635 tg3_flag(tp, 5780_CLASS))
15636 tg3_flag_set(tp, 5750_PLUS);
15637
15638 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15639 tg3_flag(tp, 5750_PLUS))
15640 tg3_flag_set(tp, 5705_PLUS);
15641 }
15642
15643 static bool tg3_10_100_only_device(struct tg3 *tp,
15644 const struct pci_device_id *ent)
15645 {
15646 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15647
15648 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15649 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15650 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15651 return true;
15652
15653 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15654 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15655 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15656 return true;
15657 } else {
15658 return true;
15659 }
15660 }
15661
15662 return false;
15663 }
15664
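/* Probe-time discovery of chip capabilities, hardware bugs and
 * register access quirks. This fills in most of the tg3_flag() bits
 * consulted by the rest of the driver.
 */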
15665 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15666 {
15667 u32 misc_ctrl_reg;
15668 u32 pci_state_reg, grc_misc_cfg;
15669 u32 val;
15670 u16 pci_cmd;
15671 int err;
15672
15673 /* Force memory write invalidate off. If we leave it on,
15674 * then on 5700_BX chips we have to enable a workaround.
15675 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15676 * to match the cacheline size. The Broadcom driver has this
15677 * workaround but turns MWI off all the time and so never uses
15678 * it. This seems to suggest that the workaround is insufficient.
15679 */
15680 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15681 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15682 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15683
15684 /* Important! -- Make sure register accesses are byteswapped
15685 * correctly. Also, for those chips that require it, make
15686 * sure that indirect register accesses are enabled before
15687 * the first operation.
15688 */
15689 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15690 &misc_ctrl_reg);
15691 tp->misc_host_ctrl |= (misc_ctrl_reg &
15692 MISC_HOST_CTRL_CHIPREV);
15693 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15694 tp->misc_host_ctrl);
15695
15696 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15697
15698 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15699 * we need to disable memory and use config. cycles
15700 * only to access all registers. The 5702/03 chips
15701 * can mistakenly decode the special cycles from the
15702 * ICH chipsets as memory write cycles, causing corruption
15703 * of register and memory space. Only certain ICH bridges
15704 * will drive special cycles with non-zero data during the
15705 * address phase which can fall within the 5703's address
15706 * range. This is not an ICH bug as the PCI spec allows
15707 * non-zero address during special cycles. However, only
15708 * these ICH bridges are known to drive non-zero addresses
15709 * during special cycles.
15710 *
15711 * Since special cycles do not cross PCI bridges, we only
15712 * enable this workaround if the 5703 is on the secondary
15713 * bus of these ICH bridges.
15714 */
15715 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15716 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15717 static struct tg3_dev_id {
15718 u32 vendor;
15719 u32 device;
15720 u32 rev;
15721 } ich_chipsets[] = {
15722 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15723 PCI_ANY_ID },
15724 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15725 PCI_ANY_ID },
15726 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15727 0xa },
15728 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15729 PCI_ANY_ID },
15730 { },
15731 };
15732 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15733 struct pci_dev *bridge = NULL;
15734
15735 while (pci_id->vendor != 0) {
15736 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15737 bridge);
15738 if (!bridge) {
15739 pci_id++;
15740 continue;
15741 }
15742 if (pci_id->rev != PCI_ANY_ID) {
15743 if (bridge->revision > pci_id->rev)
15744 continue;
15745 }
15746 if (bridge->subordinate &&
15747 (bridge->subordinate->number ==
15748 tp->pdev->bus->number)) {
15749 tg3_flag_set(tp, ICH_WORKAROUND);
15750 pci_dev_put(bridge);
15751 break;
15752 }
15753 }
15754 }
15755
15756 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15757 static struct tg3_dev_id {
15758 u32 vendor;
15759 u32 device;
15760 } bridge_chipsets[] = {
15761 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15762 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15763 { },
15764 };
15765 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15766 struct pci_dev *bridge = NULL;
15767
15768 while (pci_id->vendor != 0) {
15769 bridge = pci_get_device(pci_id->vendor,
15770 pci_id->device,
15771 bridge);
15772 if (!bridge) {
15773 pci_id++;
15774 continue;
15775 }
15776 if (bridge->subordinate &&
15777 (bridge->subordinate->number <=
15778 tp->pdev->bus->number) &&
15779 (bridge->subordinate->busn_res.end >=
15780 tp->pdev->bus->number)) {
15781 tg3_flag_set(tp, 5701_DMA_BUG);
15782 pci_dev_put(bridge);
15783 break;
15784 }
15785 }
15786 }
15787
15788 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15789 * DMA addresses > 40-bit. This bridge may have other additional
15790 * 57xx devices behind it in some 4-port NIC designs for example.
15791 * Any tg3 device found behind the bridge will also need the 40-bit
15792 * DMA workaround.
15793 */
15794 if (tg3_flag(tp, 5780_CLASS)) {
15795 tg3_flag_set(tp, 40BIT_DMA_BUG);
15796 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15797 } else {
15798 struct pci_dev *bridge = NULL;
15799
15800 do {
15801 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15802 PCI_DEVICE_ID_SERVERWORKS_EPB,
15803 bridge);
15804 if (bridge && bridge->subordinate &&
15805 (bridge->subordinate->number <=
15806 tp->pdev->bus->number) &&
15807 (bridge->subordinate->busn_res.end >=
15808 tp->pdev->bus->number)) {
15809 tg3_flag_set(tp, 40BIT_DMA_BUG);
15810 pci_dev_put(bridge);
15811 break;
15812 }
15813 } while (bridge);
15814 }
15815
15816 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15817 tg3_asic_rev(tp) == ASIC_REV_5714)
15818 tp->pdev_peer = tg3_find_peer(tp);
15819
15820 /* Determine TSO capabilities */
15821 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15822 ; /* Do nothing. HW bug. */
15823 else if (tg3_flag(tp, 57765_PLUS))
15824 tg3_flag_set(tp, HW_TSO_3);
15825 else if (tg3_flag(tp, 5755_PLUS) ||
15826 tg3_asic_rev(tp) == ASIC_REV_5906)
15827 tg3_flag_set(tp, HW_TSO_2);
15828 else if (tg3_flag(tp, 5750_PLUS)) {
15829 tg3_flag_set(tp, HW_TSO_1);
15830 tg3_flag_set(tp, TSO_BUG);
15831 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15832 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15833 tg3_flag_clear(tp, TSO_BUG);
15834 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15835 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15836 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15837 tg3_flag_set(tp, FW_TSO);
15838 tg3_flag_set(tp, TSO_BUG);
15839 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15840 tp->fw_needed = FIRMWARE_TG3TSO5;
15841 else
15842 tp->fw_needed = FIRMWARE_TG3TSO;
15843 }
15844
15845 /* Selectively allow TSO based on operating conditions */
15846 if (tg3_flag(tp, HW_TSO_1) ||
15847 tg3_flag(tp, HW_TSO_2) ||
15848 tg3_flag(tp, HW_TSO_3) ||
15849 tg3_flag(tp, FW_TSO)) {
15850 /* For firmware TSO, assume ASF is disabled.
15851 * We'll disable TSO later if we discover ASF
15852 * is enabled in tg3_get_eeprom_hw_cfg().
15853 */
15854 tg3_flag_set(tp, TSO_CAPABLE);
15855 } else {
15856 tg3_flag_clear(tp, TSO_CAPABLE);
15857 tg3_flag_clear(tp, TSO_BUG);
15858 tp->fw_needed = NULL;
15859 }
15860
15861 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15862 tp->fw_needed = FIRMWARE_TG3;
15863
15864 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15865 tp->fw_needed = FIRMWARE_TG357766;
15866
15867 tp->irq_max = 1;
15868
15869 if (tg3_flag(tp, 5750_PLUS)) {
15870 tg3_flag_set(tp, SUPPORT_MSI);
15871 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15872 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15873 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15874 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15875 tp->pdev_peer == tp->pdev))
15876 tg3_flag_clear(tp, SUPPORT_MSI);
15877
15878 if (tg3_flag(tp, 5755_PLUS) ||
15879 tg3_asic_rev(tp) == ASIC_REV_5906) {
15880 tg3_flag_set(tp, 1SHOT_MSI);
15881 }
15882
15883 if (tg3_flag(tp, 57765_PLUS)) {
15884 tg3_flag_set(tp, SUPPORT_MSIX);
15885 tp->irq_max = TG3_IRQ_MAX_VECS;
15886 }
15887 }
15888
15889 tp->txq_max = 1;
15890 tp->rxq_max = 1;
15891 if (tp->irq_max > 1) {
15892 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15893 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15894
15895 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15896 tg3_asic_rev(tp) == ASIC_REV_5720)
15897 tp->txq_max = tp->irq_max - 1;
15898 }
15899
15900 if (tg3_flag(tp, 5755_PLUS) ||
15901 tg3_asic_rev(tp) == ASIC_REV_5906)
15902 tg3_flag_set(tp, SHORT_DMA_BUG);
15903
15904 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15905 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15906
15907 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15908 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15909 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15910 tg3_asic_rev(tp) == ASIC_REV_5762)
15911 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15912
15913 if (tg3_flag(tp, 57765_PLUS) &&
15914 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15915 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15916
15917 if (!tg3_flag(tp, 5705_PLUS) ||
15918 tg3_flag(tp, 5780_CLASS) ||
15919 tg3_flag(tp, USE_JUMBO_BDFLAG))
15920 tg3_flag_set(tp, JUMBO_CAPABLE);
15921
15922 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15923 &pci_state_reg);
15924
15925 if (pci_is_pcie(tp->pdev)) {
15926 u16 lnkctl;
15927
15928 tg3_flag_set(tp, PCI_EXPRESS);
15929
15930 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15931 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15932 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15933 tg3_flag_clear(tp, HW_TSO_2);
15934 tg3_flag_clear(tp, TSO_CAPABLE);
15935 }
15936 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15937 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15938 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15939 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15940 tg3_flag_set(tp, CLKREQ_BUG);
15941 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15942 tg3_flag_set(tp, L1PLLPD_EN);
15943 }
15944 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15945 /* BCM5785 devices are effectively PCIe devices, and should
15946 * follow PCIe codepaths, but do not have a PCIe capabilities
15947 * section.
15948 */
15949 tg3_flag_set(tp, PCI_EXPRESS);
15950 } else if (!tg3_flag(tp, 5705_PLUS) ||
15951 tg3_flag(tp, 5780_CLASS)) {
15952 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15953 if (!tp->pcix_cap) {
15954 dev_err(&tp->pdev->dev,
15955 "Cannot find PCI-X capability, aborting\n");
15956 return -EIO;
15957 }
15958
15959 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15960 tg3_flag_set(tp, PCIX_MODE);
15961 }
15962
15963 /* If we have an AMD 762 or VIA K8T800 chipset, write
15964 * reordering to the mailbox registers done by the host
15965 * controller can cause major trouble. We read back from
15966 * every mailbox register write to force the writes to be
15967 * posted to the chip in order.
15968 */
15969 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15970 !tg3_flag(tp, PCI_EXPRESS))
15971 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15972
15973 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15974 &tp->pci_cacheline_sz);
15975 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15976 &tp->pci_lat_timer);
15977 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15978 tp->pci_lat_timer < 64) {
15979 tp->pci_lat_timer = 64;
15980 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15981 tp->pci_lat_timer);
15982 }
15983
15984 /* Important! -- It is critical that the PCI-X hw workaround
15985 * situation is decided before the first MMIO register access.
15986 */
15987 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15988 /* 5700 BX chips need to have their TX producer index
15989 * mailboxes written twice to workaround a bug.
15990 */
15991 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15992
15993 /* If we are in PCI-X mode, enable register write workaround.
15994 *
15995 * The workaround is to use indirect register accesses
15996 * for all chip writes not to mailbox registers.
15997 */
15998 if (tg3_flag(tp, PCIX_MODE)) {
15999 u32 pm_reg;
16000
16001 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16002
16003 /* The chip can have its power management PCI config
16004 * space registers clobbered due to this bug.
16005 * So explicitly force the chip into D0 here.
16006 */
16007 pci_read_config_dword(tp->pdev,
16008 tp->pm_cap + PCI_PM_CTRL,
16009 &pm_reg);
16010 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16011 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16012 pci_write_config_dword(tp->pdev,
16013 tp->pm_cap + PCI_PM_CTRL,
16014 pm_reg);
16015
16016 /* Also, force SERR#/PERR# in PCI command. */
16017 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16018 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16019 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16020 }
16021 }
16022
16023 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16024 tg3_flag_set(tp, PCI_HIGH_SPEED);
16025 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16026 tg3_flag_set(tp, PCI_32BIT);
16027
16028 /* Chip-specific fixup from Broadcom driver */
16029 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16030 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16031 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16032 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16033 }
16034
16035 /* Default fast path register access methods */
16036 tp->read32 = tg3_read32;
16037 tp->write32 = tg3_write32;
16038 tp->read32_mbox = tg3_read32;
16039 tp->write32_mbox = tg3_write32;
16040 tp->write32_tx_mbox = tg3_write32;
16041 tp->write32_rx_mbox = tg3_write32;
16042
16043 /* Various workaround register access methods */
16044 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16045 tp->write32 = tg3_write_indirect_reg32;
16046 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16047 (tg3_flag(tp, PCI_EXPRESS) &&
16048 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16049 /*
16050 * Back to back register writes can cause problems on these
16051 * chips, the workaround is to read back all reg writes
16052 * except those to mailbox regs.
16053 *
16054 * See tg3_write_indirect_reg32().
16055 */
16056 tp->write32 = tg3_write_flush_reg32;
16057 }
16058
16059 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16060 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16061 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16062 tp->write32_rx_mbox = tg3_write_flush_reg32;
16063 }
16064
16065 if (tg3_flag(tp, ICH_WORKAROUND)) {
16066 tp->read32 = tg3_read_indirect_reg32;
16067 tp->write32 = tg3_write_indirect_reg32;
16068 tp->read32_mbox = tg3_read_indirect_mbox;
16069 tp->write32_mbox = tg3_write_indirect_mbox;
16070 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16071 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16072
16073 iounmap(tp->regs);
16074 tp->regs = NULL;
16075
16076 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16077 pci_cmd &= ~PCI_COMMAND_MEMORY;
16078 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16079 }
16080 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16081 tp->read32_mbox = tg3_read32_mbox_5906;
16082 tp->write32_mbox = tg3_write32_mbox_5906;
16083 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16084 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16085 }
16086
16087 if (tp->write32 == tg3_write_indirect_reg32 ||
16088 (tg3_flag(tp, PCIX_MODE) &&
16089 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16090 tg3_asic_rev(tp) == ASIC_REV_5701)))
16091 tg3_flag_set(tp, SRAM_USE_CONFIG);
16092
16093 /* The memory arbiter has to be enabled in order for SRAM accesses
16094 * to succeed. Normally on powerup the tg3 chip firmware will make
16095 * sure it is enabled, but other entities such as system netboot
16096 * code might disable it.
16097 */
16098 val = tr32(MEMARB_MODE);
16099 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16100
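/* Default to the PCI function number encoded in devfn; the
 * multi-port and multi-function chips handled below refine this
 * from chip registers instead.
 */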
16101 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16102 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16103 tg3_flag(tp, 5780_CLASS)) {
16104 if (tg3_flag(tp, PCIX_MODE)) {
16105 pci_read_config_dword(tp->pdev,
16106 tp->pcix_cap + PCI_X_STATUS,
16107 &val);
16108 tp->pci_fn = val & 0x7;
16109 }
16110 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16111 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16112 tg3_asic_rev(tp) == ASIC_REV_5720) {
16113 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16114 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16115 val = tr32(TG3_CPMU_STATUS);
16116
16117 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16118 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16119 else
16120 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16121 TG3_CPMU_STATUS_FSHFT_5719;
16122 }
16123
16124 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16125 tp->write32_tx_mbox = tg3_write_flush_reg32;
16126 tp->write32_rx_mbox = tg3_write_flush_reg32;
16127 }
16128
16129 /* Get eeprom hw config before calling tg3_set_power_state().
16130 * In particular, the TG3_FLAG_IS_NIC flag must be
16131 * determined before calling tg3_set_power_state() so that
16132 * we know whether or not to switch out of Vaux power.
16133 * When the flag is set, it means that GPIO1 is used for eeprom
16134 * write protect and also implies that it is a LOM where GPIOs
16135 * are not used to switch power.
16136 */
16137 tg3_get_eeprom_hw_cfg(tp);
16138
16139 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16140 tg3_flag_clear(tp, TSO_CAPABLE);
16141 tg3_flag_clear(tp, TSO_BUG);
16142 tp->fw_needed = NULL;
16143 }
16144
16145 if (tg3_flag(tp, ENABLE_APE)) {
16146 /* Allow reads and writes to the
16147 * APE register and memory space.
16148 */
16149 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16150 PCISTATE_ALLOW_APE_SHMEM_WR |
16151 PCISTATE_ALLOW_APE_PSPACE_WR;
16152 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16153 pci_state_reg);
16154
16155 tg3_ape_lock_init(tp);
16156 }
16157
16158 /* Set up tp->grc_local_ctrl before calling
16159 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16160 * will bring 5700's external PHY out of reset.
16161 * It is also used as eeprom write protect on LOMs.
16162 */
16163 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16164 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16165 tg3_flag(tp, EEPROM_WRITE_PROT))
16166 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16167 GRC_LCLCTRL_GPIO_OUTPUT1);
16168 /* Unused GPIO3 must be driven as output on 5752 because there
16169 * are no pull-up resistors on unused GPIO pins.
16170 */
16171 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16172 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16173
16174 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16175 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16176 tg3_flag(tp, 57765_CLASS))
16177 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16178
16179 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16180 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16181 /* Turn off the debug UART. */
16182 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16183 if (tg3_flag(tp, IS_NIC))
16184 /* Keep VMain power. */
16185 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16186 GRC_LCLCTRL_GPIO_OUTPUT0;
16187 }
16188
16189 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16190 tp->grc_local_ctrl |=
16191 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16192
16193 /* Switch out of Vaux if it is a NIC */
16194 tg3_pwrsrc_switch_to_vmain(tp);
16195
16196 /* Derive initial jumbo mode from MTU assigned in
16197 * ether_setup() via the alloc_etherdev() call
16198 */
16199 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16200 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16201
16202 /* Determine WakeOnLan speed to use. */
16203 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16204 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16205 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16206 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16207 tg3_flag_clear(tp, WOL_SPEED_100MB);
16208 } else {
16209 tg3_flag_set(tp, WOL_SPEED_100MB);
16210 }
16211
16212 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16213 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16214
16215 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16216 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16217 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16218 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16219 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16220 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16221 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16222 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16223
16224 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16225 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16226 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16227 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16228 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16229
16230 if (tg3_flag(tp, 5705_PLUS) &&
16231 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16232 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16233 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16234 !tg3_flag(tp, 57765_PLUS)) {
16235 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16236 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16237 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16238 tg3_asic_rev(tp) == ASIC_REV_5761) {
16239 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16240 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16241 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16242 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16243 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16244 } else
16245 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16246 }
16247
16248 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16249 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16250 tp->phy_otp = tg3_read_otp_phycfg(tp);
16251 if (tp->phy_otp == 0)
16252 tp->phy_otp = TG3_OTP_DEFAULT;
16253 }
16254
16255 if (tg3_flag(tp, CPMU_PRESENT))
16256 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16257 else
16258 tp->mi_mode = MAC_MI_MODE_BASE;
16259
16260 tp->coalesce_mode = 0;
16261 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16262 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16263 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16264
16265 /* Set these bits to enable statistics workaround. */
16266 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16267 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16268 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16269 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16270 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16271 }
16272
16273 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16274 tg3_asic_rev(tp) == ASIC_REV_57780)
16275 tg3_flag_set(tp, USE_PHYLIB);
16276
16277 err = tg3_mdio_init(tp);
16278 if (err)
16279 return err;
16280
16281 /* Initialize data/descriptor byte/word swapping. */
16282 val = tr32(GRC_MODE);
16283 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16284 tg3_asic_rev(tp) == ASIC_REV_5762)
16285 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16286 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16287 GRC_MODE_B2HRX_ENABLE |
16288 GRC_MODE_HTX2B_ENABLE |
16289 GRC_MODE_HOST_STACKUP);
16290 else
16291 val &= GRC_MODE_HOST_STACKUP;
16292
16293 tw32(GRC_MODE, val | tp->grc_mode);
16294
16295 tg3_switch_clocks(tp);
16296
16297 /* Clear this out for sanity. */
16298 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16299
16300 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16301 &pci_state_reg);
16302 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16303 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16304 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16305 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16306 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16307 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16308 void __iomem *sram_base;
16309
16310 /* Write some dummy words into the SRAM status block
16311 * area and see if they read back correctly. If the read-back
16312 * value is bad, force-enable the PCIX workaround.
16313 */
16314 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16315
16316 writel(0x00000000, sram_base);
16317 writel(0x00000000, sram_base + 4);
16318 writel(0xffffffff, sram_base + 4);
16319 if (readl(sram_base) != 0x00000000)
16320 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16321 }
16322 }
16323
16324 udelay(50);
16325 tg3_nvram_init(tp);
16326
16327 /* If the device has an NVRAM, no need to load patch firmware */
16328 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16329 !tg3_flag(tp, NO_NVRAM))
16330 tp->fw_needed = NULL;
16331
16332 grc_misc_cfg = tr32(GRC_MISC_CFG);
16333 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16334
16335 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16336 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16337 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16338 tg3_flag_set(tp, IS_5788);
16339
16340 if (!tg3_flag(tp, IS_5788) &&
16341 tg3_asic_rev(tp) != ASIC_REV_5700)
16342 tg3_flag_set(tp, TAGGED_STATUS);
16343 if (tg3_flag(tp, TAGGED_STATUS)) {
16344 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16345 HOSTCC_MODE_CLRTICK_TXBD);
16346
16347 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16348 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16349 tp->misc_host_ctrl);
16350 }
16351
16352 /* Preserve the APE MAC_MODE bits */
16353 if (tg3_flag(tp, ENABLE_APE))
16354 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16355 else
16356 tp->mac_mode = 0;
16357
16358 if (tg3_10_100_only_device(tp, ent))
16359 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16360
16361 err = tg3_phy_probe(tp);
16362 if (err) {
16363 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16364 /* ... but do not return immediately ... */
16365 tg3_mdio_fini(tp);
16366 }
16367
16368 tg3_read_vpd(tp);
16369 tg3_read_fw_ver(tp);
16370
16371 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16372 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16373 } else {
16374 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16375 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16376 else
16377 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16378 }
16379
16380 /* 5700 {AX,BX} chips have a broken status block link
16381 * change bit implementation, so we must use the
16382 * status register in those cases.
16383 */
16384 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16385 tg3_flag_set(tp, USE_LINKCHG_REG);
16386 else
16387 tg3_flag_clear(tp, USE_LINKCHG_REG);
16388
16389 /* The led_ctrl is set during tg3_phy_probe; here we might
16390 * have to force the link status polling mechanism based
16391 * upon subsystem IDs.
16392 */
16393 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16394 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16395 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16396 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16397 tg3_flag_set(tp, USE_LINKCHG_REG);
16398 }
16399
16400 /* For all SERDES we poll the MAC status register. */
16401 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16402 tg3_flag_set(tp, POLL_SERDES);
16403 else
16404 tg3_flag_clear(tp, POLL_SERDES);
16405
16406 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16407 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16408 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16409 tg3_flag(tp, PCIX_MODE)) {
16410 tp->rx_offset = NET_SKB_PAD;
16411 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16412 tp->rx_copy_thresh = ~(u16)0;
16413 #endif
16414 }
16415
16416 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16417 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16418 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16419
16420 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16421
16422 /* Increment the rx prod index on the rx std ring by at most
16423 * 8 for these chips to work around hw errata.
16424 */
16425 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16426 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16427 tg3_asic_rev(tp) == ASIC_REV_5755)
16428 tp->rx_std_max_post = 8;
16429
16430 if (tg3_flag(tp, ASPM_WORKAROUND))
16431 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16432 PCIE_PWR_MGMT_L1_THRESH_MSK;
16433
16434 return err;
16435 }
16436
16437 #ifdef CONFIG_SPARC
16438 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16439 {
16440 struct net_device *dev = tp->dev;
16441 struct pci_dev *pdev = tp->pdev;
16442 struct device_node *dp = pci_device_to_OF_node(pdev);
16443 const unsigned char *addr;
16444 int len;
16445
16446 addr = of_get_property(dp, "local-mac-address", &len);
16447 if (addr && len == 6) {
16448 memcpy(dev->dev_addr, addr, 6);
16449 return 0;
16450 }
16451 return -ENODEV;
16452 }
16453
16454 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16455 {
16456 struct net_device *dev = tp->dev;
16457
16458 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16459 return 0;
16460 }
16461 #endif
16462
16463 static int tg3_get_device_address(struct tg3 *tp)
16464 {
16465 struct net_device *dev = tp->dev;
16466 u32 hi, lo, mac_offset;
16467 int addr_ok = 0;
16468 int err;
16469
16470 #ifdef CONFIG_SPARC
16471 if (!tg3_get_macaddr_sparc(tp))
16472 return 0;
16473 #endif
16474
16475 if (tg3_flag(tp, IS_SSB_CORE)) {
16476 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16477 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16478 return 0;
16479 }
16480
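/* NVRAM offset of the MAC address. 0x7c is the usual location;
 * dual-MAC and multi-function parts keep additional addresses at
 * the alternate offsets selected below.
 */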
16481 mac_offset = 0x7c;
16482 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16483 tg3_flag(tp, 5780_CLASS)) {
16484 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16485 mac_offset = 0xcc;
16486 if (tg3_nvram_lock(tp))
16487 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16488 else
16489 tg3_nvram_unlock(tp);
16490 } else if (tg3_flag(tp, 5717_PLUS)) {
16491 if (tp->pci_fn & 1)
16492 mac_offset = 0xcc;
16493 if (tp->pci_fn > 1)
16494 mac_offset += 0x18c;
16495 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16496 mac_offset = 0x10;
16497
16498 /* First try to get it from the MAC address mailbox. */
16499 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
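/* 0x484b is ASCII "HK", apparently the signature the bootcode
 * stores in front of a valid MAC address.
 */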
16500 if ((hi >> 16) == 0x484b) {
16501 dev->dev_addr[0] = (hi >> 8) & 0xff;
16502 dev->dev_addr[1] = (hi >> 0) & 0xff;
16503
16504 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16505 dev->dev_addr[2] = (lo >> 24) & 0xff;
16506 dev->dev_addr[3] = (lo >> 16) & 0xff;
16507 dev->dev_addr[4] = (lo >> 8) & 0xff;
16508 dev->dev_addr[5] = (lo >> 0) & 0xff;
16509
16510 /* Some old bootcode may report a 0 MAC address in SRAM */
16511 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16512 }
16513 if (!addr_ok) {
16514 /* Next, try NVRAM. */
16515 if (!tg3_flag(tp, NO_NVRAM) &&
16516 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16517 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16518 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16519 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16520 }
16521 /* Finally just fetch it out of the MAC control regs. */
16522 else {
16523 hi = tr32(MAC_ADDR_0_HIGH);
16524 lo = tr32(MAC_ADDR_0_LOW);
16525
16526 dev->dev_addr[5] = lo & 0xff;
16527 dev->dev_addr[4] = (lo >> 8) & 0xff;
16528 dev->dev_addr[3] = (lo >> 16) & 0xff;
16529 dev->dev_addr[2] = (lo >> 24) & 0xff;
16530 dev->dev_addr[1] = hi & 0xff;
16531 dev->dev_addr[0] = (hi >> 8) & 0xff;
16532 }
16533 }
16534
16535 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16536 #ifdef CONFIG_SPARC
16537 if (!tg3_get_default_macaddr_sparc(tp))
16538 return 0;
16539 #endif
16540 return -EINVAL;
16541 }
16542 return 0;
16543 }
16544
16545 #define BOUNDARY_SINGLE_CACHELINE 1
16546 #define BOUNDARY_MULTI_CACHELINE 2
16547
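/* Derive the DMA read/write boundary bits for TG3PCI_DMA_RW_CTRL
 * from the host cache line size and the type of bus the chip is on.
 */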
16548 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16549 {
16550 int cacheline_size;
16551 u8 byte;
16552 int goal;
16553
16554 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16555 if (byte == 0)
16556 cacheline_size = 1024;
16557 else
16558 cacheline_size = (int) byte * 4;
16559
16560 /* On 5703 and later chips, the boundary bits have no
16561 * effect.
16562 */
16563 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16564 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16565 !tg3_flag(tp, PCI_EXPRESS))
16566 goto out;
16567
16568 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16569 goal = BOUNDARY_MULTI_CACHELINE;
16570 #else
16571 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16572 goal = BOUNDARY_SINGLE_CACHELINE;
16573 #else
16574 goal = 0;
16575 #endif
16576 #endif
16577
16578 if (tg3_flag(tp, 57765_PLUS)) {
16579 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16580 goto out;
16581 }
16582
16583 if (!goal)
16584 goto out;
16585
16586 /* PCI controllers on most RISC systems tend to disconnect
16587 * when a device tries to burst across a cache-line boundary.
16588 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16589 *
16590 * Unfortunately, for PCI-E there are only limited
16591 * write-side controls for this, and thus for reads
16592 * we will still get the disconnects. We'll also waste
16593 * these PCI cycles for both read and write for chips
16594 * other than 5700 and 5701 which do not implement the
16595 * boundary bits.
16596 */
16597 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16598 switch (cacheline_size) {
16599 case 16:
16600 case 32:
16601 case 64:
16602 case 128:
16603 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16604 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16605 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16606 } else {
16607 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16608 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16609 }
16610 break;
16611
16612 case 256:
16613 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16614 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16615 break;
16616
16617 default:
16618 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16619 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16620 break;
16621 }
16622 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16623 switch (cacheline_size) {
16624 case 16:
16625 case 32:
16626 case 64:
16627 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16628 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16629 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16630 break;
16631 }
16632 /* fallthrough */
16633 case 128:
16634 default:
16635 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16636 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16637 break;
16638 }
16639 } else {
16640 switch (cacheline_size) {
16641 case 16:
16642 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16643 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16644 DMA_RWCTRL_WRITE_BNDRY_16);
16645 break;
16646 }
16647 /* fallthrough */
16648 case 32:
16649 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16650 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16651 DMA_RWCTRL_WRITE_BNDRY_32);
16652 break;
16653 }
16654 /* fallthrough */
16655 case 64:
16656 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16657 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16658 DMA_RWCTRL_WRITE_BNDRY_64);
16659 break;
16660 }
16661 /* fallthrough */
16662 case 128:
16663 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16664 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16665 DMA_RWCTRL_WRITE_BNDRY_128);
16666 break;
16667 }
16668 /* fallthrough */
16669 case 256:
16670 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16671 DMA_RWCTRL_WRITE_BNDRY_256);
16672 break;
16673 case 512:
16674 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16675 DMA_RWCTRL_WRITE_BNDRY_512);
16676 break;
16677 case 1024:
16678 default:
16679 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16680 DMA_RWCTRL_WRITE_BNDRY_1024);
16681 break;
16682 }
16683 }
16684
16685 out:
16686 return val;
16687 }
16688
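/* Push a single test DMA transaction of 'size' bytes through the
 * chip's read (host-to-device) or write (device-to-host) DMA engine
 * and poll the matching completion FIFO. Returns 0 on completion or
 * -ENODEV if the transaction never completes.
 */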
16689 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16690 int size, bool to_device)
16691 {
16692 struct tg3_internal_buffer_desc test_desc;
16693 u32 sram_dma_descs;
16694 int i, ret;
16695
16696 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16697
16698 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16699 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16700 tw32(RDMAC_STATUS, 0);
16701 tw32(WDMAC_STATUS, 0);
16702
16703 tw32(BUFMGR_MODE, 0);
16704 tw32(FTQ_RESET, 0);
16705
16706 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16707 test_desc.addr_lo = buf_dma & 0xffffffff;
16708 test_desc.nic_mbuf = 0x00002100;
16709 test_desc.len = size;
16710
16711 /*
16712 * The HP ZX1 chipset was seeing test failures for 5701 cards running
16713 * at 33MHz the *second* time the tg3 driver was loaded after an
16714 * initial scan.
16715 *
16716 * Broadcom tells me:
16717 * ...the DMA engine is connected to the GRC block and a DMA
16718 * reset may affect the GRC block in some unpredictable way...
16719 * The behavior of resets to individual blocks has not been tested.
16720 *
16721 * Broadcom noted the GRC reset will also reset all sub-components.
16722 */
16723 if (to_device) {
16724 test_desc.cqid_sqid = (13 << 8) | 2;
16725
16726 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16727 udelay(40);
16728 } else {
16729 test_desc.cqid_sqid = (16 << 8) | 7;
16730
16731 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16732 udelay(40);
16733 }
16734 test_desc.flags = 0x00000005;
16735
16736 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16737 u32 val;
16738
16739 val = *(((u32 *)&test_desc) + i);
16740 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16741 sram_dma_descs + (i * sizeof(u32)));
16742 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16743 }
16744 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16745
16746 if (to_device)
16747 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16748 else
16749 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16750
16751 ret = -ENODEV;
16752 for (i = 0; i < 40; i++) {
16753 u32 val;
16754
16755 if (to_device)
16756 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16757 else
16758 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16759 if ((val & 0xffff) == sram_dma_descs) {
16760 ret = 0;
16761 break;
16762 }
16763
16764 udelay(100);
16765 }
16766
16767 return ret;
16768 }
16769
16770 #define TEST_BUFFER_SIZE 0x2000
16771
16772 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16773 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16774 { },
16775 };
16776
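/* Choose a working TG3PCI_DMA_RW_CTRL value for this bus and, on
 * 5700/5701 parts, run a write/read-back pattern test to decide
 * whether the write boundary workaround must stay enabled.
 */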
16777 static int tg3_test_dma(struct tg3 *tp)
16778 {
16779 dma_addr_t buf_dma;
16780 u32 *buf, saved_dma_rwctrl;
16781 int ret = 0;
16782
16783 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16784 &buf_dma, GFP_KERNEL);
16785 if (!buf) {
16786 ret = -ENOMEM;
16787 goto out_nofree;
16788 }
16789
16790 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16791 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16792
16793 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16794
16795 if (tg3_flag(tp, 57765_PLUS))
16796 goto out;
16797
16798 if (tg3_flag(tp, PCI_EXPRESS)) {
16799 /* DMA read watermark not used on PCIE */
16800 tp->dma_rwctrl |= 0x00180000;
16801 } else if (!tg3_flag(tp, PCIX_MODE)) {
16802 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16803 tg3_asic_rev(tp) == ASIC_REV_5750)
16804 tp->dma_rwctrl |= 0x003f0000;
16805 else
16806 tp->dma_rwctrl |= 0x003f000f;
16807 } else {
16808 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16809 tg3_asic_rev(tp) == ASIC_REV_5704) {
16810 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16811 u32 read_water = 0x7;
16812
16813 /* If the 5704 is behind the EPB bridge, we can
16814 * do the less restrictive ONE_DMA workaround for
16815 * better performance.
16816 */
16817 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16818 tg3_asic_rev(tp) == ASIC_REV_5704)
16819 tp->dma_rwctrl |= 0x8000;
16820 else if (ccval == 0x6 || ccval == 0x7)
16821 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16822
16823 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16824 read_water = 4;
16825 /* Set bit 23 to enable PCIX hw bug fix */
16826 tp->dma_rwctrl |=
16827 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16828 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16829 (1 << 23);
16830 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16831 /* 5780 always in PCIX mode */
16832 tp->dma_rwctrl |= 0x00144000;
16833 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16834 /* 5714 always in PCIX mode */
16835 tp->dma_rwctrl |= 0x00148000;
16836 } else {
16837 tp->dma_rwctrl |= 0x001b000f;
16838 }
16839 }
16840 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16841 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16842
16843 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16844 tg3_asic_rev(tp) == ASIC_REV_5704)
16845 tp->dma_rwctrl &= 0xfffffff0;
16846
16847 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16848 tg3_asic_rev(tp) == ASIC_REV_5701) {
16849 /* Remove this if it causes problems for some boards. */
16850 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16851
16852 /* On 5700/5701 chips, we need to set this bit.
16853 * Otherwise the chip will issue cacheline transactions
16854 * to streamable DMA memory without all of the byte
16855 * enables turned on. This is an error on several
16856 * RISC PCI controllers, in particular sparc64.
16857 *
16858 * On 5703/5704 chips, this bit has been reassigned
16859 * a different meaning. In particular, it is used
16860 * on those chips to enable a PCI-X workaround.
16861 */
16862 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16863 }
16864
16865 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16866
16867 #if 0
16868 /* Unneeded, already done by tg3_get_invariants. */
16869 tg3_switch_clocks(tp);
16870 #endif
16871
16872 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16873 tg3_asic_rev(tp) != ASIC_REV_5701)
16874 goto out;
16875
16876 /* It is best to perform the DMA test with the maximum write burst
16877 * size to expose the 5700/5701 write DMA bug.
16878 */
16879 saved_dma_rwctrl = tp->dma_rwctrl;
16880 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16881 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16882
16883 while (1) {
16884 u32 *p = buf, i;
16885
16886 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16887 p[i] = i;
16888
16889 /* Send the buffer to the chip. */
16890 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16891 if (ret) {
16892 dev_err(&tp->pdev->dev,
16893 "%s: Buffer write failed. err = %d\n",
16894 __func__, ret);
16895 break;
16896 }
16897
16898 #if 0
16899 /* validate data reached card RAM correctly. */
16900 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16901 u32 val;
16902 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16903 if (le32_to_cpu(val) != p[i]) {
16904 dev_err(&tp->pdev->dev,
16905 "%s: Buffer corrupted on device! "
16906 "(%d != %d)\n", __func__, val, i);
16907 /* ret = -ENODEV here? */
16908 }
16909 p[i] = 0;
16910 }
16911 #endif
16912 /* Now read it back. */
16913 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16914 if (ret) {
16915 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16916 "err = %d\n", __func__, ret);
16917 break;
16918 }
16919
16920 /* Verify it. */
16921 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16922 if (p[i] == i)
16923 continue;
16924
16925 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16926 DMA_RWCTRL_WRITE_BNDRY_16) {
16927 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16928 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16929 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16930 break;
16931 } else {
16932 dev_err(&tp->pdev->dev,
16933 "%s: Buffer corrupted on read back! "
16934 "(%d != %d)\n", __func__, p[i], i);
16935 ret = -ENODEV;
16936 goto out;
16937 }
16938 }
16939
16940 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16941 /* Success. */
16942 ret = 0;
16943 break;
16944 }
16945 }
16946 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16947 DMA_RWCTRL_WRITE_BNDRY_16) {
16948 /* The DMA test passed without adjusting the DMA boundary;
16949 * now look for chipsets that are known to expose the
16950 * DMA bug without failing the test.
16951 */
16952 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16953 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16954 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16955 } else {
16956 /* Safe to use the calculated DMA boundary. */
16957 tp->dma_rwctrl = saved_dma_rwctrl;
16958 }
16959
16960 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16961 }
16962
16963 out:
16964 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16965 out_nofree:
16966 return ret;
16967 }
16968
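/* Select buffer manager watermark defaults for the chip family. */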
16969 static void tg3_init_bufmgr_config(struct tg3 *tp)
16970 {
16971 if (tg3_flag(tp, 57765_PLUS)) {
16972 tp->bufmgr_config.mbuf_read_dma_low_water =
16973 DEFAULT_MB_RDMA_LOW_WATER_5705;
16974 tp->bufmgr_config.mbuf_mac_rx_low_water =
16975 DEFAULT_MB_MACRX_LOW_WATER_57765;
16976 tp->bufmgr_config.mbuf_high_water =
16977 DEFAULT_MB_HIGH_WATER_57765;
16978
16979 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16980 DEFAULT_MB_RDMA_LOW_WATER_5705;
16981 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16982 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16983 tp->bufmgr_config.mbuf_high_water_jumbo =
16984 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16985 } else if (tg3_flag(tp, 5705_PLUS)) {
16986 tp->bufmgr_config.mbuf_read_dma_low_water =
16987 DEFAULT_MB_RDMA_LOW_WATER_5705;
16988 tp->bufmgr_config.mbuf_mac_rx_low_water =
16989 DEFAULT_MB_MACRX_LOW_WATER_5705;
16990 tp->bufmgr_config.mbuf_high_water =
16991 DEFAULT_MB_HIGH_WATER_5705;
16992 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16993 tp->bufmgr_config.mbuf_mac_rx_low_water =
16994 DEFAULT_MB_MACRX_LOW_WATER_5906;
16995 tp->bufmgr_config.mbuf_high_water =
16996 DEFAULT_MB_HIGH_WATER_5906;
16997 }
16998
16999 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17000 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17001 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17002 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17003 tp->bufmgr_config.mbuf_high_water_jumbo =
17004 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17005 } else {
17006 tp->bufmgr_config.mbuf_read_dma_low_water =
17007 DEFAULT_MB_RDMA_LOW_WATER;
17008 tp->bufmgr_config.mbuf_mac_rx_low_water =
17009 DEFAULT_MB_MACRX_LOW_WATER;
17010 tp->bufmgr_config.mbuf_high_water =
17011 DEFAULT_MB_HIGH_WATER;
17012
17013 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17014 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17015 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17016 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17017 tp->bufmgr_config.mbuf_high_water_jumbo =
17018 DEFAULT_MB_HIGH_WATER_JUMBO;
17019 }
17020
17021 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17022 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17023 }
17024
17025 static char *tg3_phy_string(struct tg3 *tp)
17026 {
17027 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17028 case TG3_PHY_ID_BCM5400: return "5400";
17029 case TG3_PHY_ID_BCM5401: return "5401";
17030 case TG3_PHY_ID_BCM5411: return "5411";
17031 case TG3_PHY_ID_BCM5701: return "5701";
17032 case TG3_PHY_ID_BCM5703: return "5703";
17033 case TG3_PHY_ID_BCM5704: return "5704";
17034 case TG3_PHY_ID_BCM5705: return "5705";
17035 case TG3_PHY_ID_BCM5750: return "5750";
17036 case TG3_PHY_ID_BCM5752: return "5752";
17037 case TG3_PHY_ID_BCM5714: return "5714";
17038 case TG3_PHY_ID_BCM5780: return "5780";
17039 case TG3_PHY_ID_BCM5755: return "5755";
17040 case TG3_PHY_ID_BCM5787: return "5787";
17041 case TG3_PHY_ID_BCM5784: return "5784";
17042 case TG3_PHY_ID_BCM5756: return "5722/5756";
17043 case TG3_PHY_ID_BCM5906: return "5906";
17044 case TG3_PHY_ID_BCM5761: return "5761";
17045 case TG3_PHY_ID_BCM5718C: return "5718C";
17046 case TG3_PHY_ID_BCM5718S: return "5718S";
17047 case TG3_PHY_ID_BCM57765: return "57765";
17048 case TG3_PHY_ID_BCM5719C: return "5719C";
17049 case TG3_PHY_ID_BCM5720C: return "5720C";
17050 case TG3_PHY_ID_BCM5762: return "5762C";
17051 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17052 case 0: return "serdes";
17053 default: return "unknown";
17054 }
17055 }
17056
17057 static char *tg3_bus_string(struct tg3 *tp, char *str)
17058 {
17059 if (tg3_flag(tp, PCI_EXPRESS)) {
17060 strcpy(str, "PCI Express");
17061 return str;
17062 } else if (tg3_flag(tp, PCIX_MODE)) {
17063 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17064
17065 strcpy(str, "PCIX:");
17066
17067 if ((clock_ctrl == 7) ||
17068 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17069 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17070 strcat(str, "133MHz");
17071 else if (clock_ctrl == 0)
17072 strcat(str, "33MHz");
17073 else if (clock_ctrl == 2)
17074 strcat(str, "50MHz");
17075 else if (clock_ctrl == 4)
17076 strcat(str, "66MHz");
17077 else if (clock_ctrl == 6)
17078 strcat(str, "100MHz");
17079 } else {
17080 strcpy(str, "PCI:");
17081 if (tg3_flag(tp, PCI_HIGH_SPEED))
17082 strcat(str, "66MHz");
17083 else
17084 strcat(str, "33MHz");
17085 }
17086 if (tg3_flag(tp, PCI_32BIT))
17087 strcat(str, ":32-bit");
17088 else
17089 strcat(str, ":64-bit");
17090 return str;
17091 }
17092
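/* Install the default interrupt coalescing parameters. 5705 and
 * newer chips lack the per-interrupt coalescing and statistics
 * block controls, so those fields are zeroed for them.
 */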
17093 static void tg3_init_coal(struct tg3 *tp)
17094 {
17095 struct ethtool_coalesce *ec = &tp->coal;
17096
17097 memset(ec, 0, sizeof(*ec));
17098 ec->cmd = ETHTOOL_GCOALESCE;
17099 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17100 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17101 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17102 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17103 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17104 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17105 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17106 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17107 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17108
17109 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17110 HOSTCC_MODE_CLRTICK_TXBD)) {
17111 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17112 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17113 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17114 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17115 }
17116
17117 if (tg3_flag(tp, 5705_PLUS)) {
17118 ec->rx_coalesce_usecs_irq = 0;
17119 ec->tx_coalesce_usecs_irq = 0;
17120 ec->stats_block_coalesce_usecs = 0;
17121 }
17122 }
17123
17124 static int tg3_init_one(struct pci_dev *pdev,
17125 const struct pci_device_id *ent)
17126 {
17127 struct net_device *dev;
17128 struct tg3 *tp;
17129 int i, err, pm_cap;
17130 u32 sndmbx, rcvmbx, intmbx;
17131 char str[40];
17132 u64 dma_mask, persist_dma_mask;
17133 netdev_features_t features = 0;
17134
17135 printk_once(KERN_INFO "%s\n", version);
17136
17137 err = pci_enable_device(pdev);
17138 if (err) {
17139 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17140 return err;
17141 }
17142
17143 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17144 if (err) {
17145 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17146 goto err_out_disable_pdev;
17147 }
17148
17149 pci_set_master(pdev);
17150
17151 /* Find power-management capability. */
17152 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17153 if (pm_cap == 0) {
17154 dev_err(&pdev->dev,
17155 "Cannot find Power Management capability, aborting\n");
17156 err = -EIO;
17157 goto err_out_free_res;
17158 }
17159
17160 err = pci_set_power_state(pdev, PCI_D0);
17161 if (err) {
17162 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17163 goto err_out_free_res;
17164 }
17165
17166 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17167 if (!dev) {
17168 err = -ENOMEM;
17169 goto err_out_power_down;
17170 }
17171
17172 SET_NETDEV_DEV(dev, &pdev->dev);
17173
17174 tp = netdev_priv(dev);
17175 tp->pdev = pdev;
17176 tp->dev = dev;
17177 tp->pm_cap = pm_cap;
17178 tp->rx_mode = TG3_DEF_RX_MODE;
17179 tp->tx_mode = TG3_DEF_TX_MODE;
17180 tp->irq_sync = 1;
17181
17182 if (tg3_debug > 0)
17183 tp->msg_enable = tg3_debug;
17184 else
17185 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17186
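/* Pick up bus quirks when the MAC is a GigE core attached over SSB
 * (as found on Broadcom SoCs) rather than an ordinary PCI NIC.
 */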
17187 if (pdev_is_ssb_gige_core(pdev)) {
17188 tg3_flag_set(tp, IS_SSB_CORE);
17189 if (ssb_gige_must_flush_posted_writes(pdev))
17190 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17191 if (ssb_gige_one_dma_at_once(pdev))
17192 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17193 if (ssb_gige_have_roboswitch(pdev))
17194 tg3_flag_set(tp, ROBOSWITCH);
17195 if (ssb_gige_is_rgmii(pdev))
17196 tg3_flag_set(tp, RGMII_MODE);
17197 }
17198
17199 /* The word/byte swap controls here govern register access byte
17200 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17201 * setting below.
17202 */
17203 tp->misc_host_ctrl =
17204 MISC_HOST_CTRL_MASK_PCI_INT |
17205 MISC_HOST_CTRL_WORD_SWAP |
17206 MISC_HOST_CTRL_INDIR_ACCESS |
17207 MISC_HOST_CTRL_PCISTATE_RW;
17208
17209 /* The NONFRM (non-frame) byte/word swap controls take effect
17210 * on descriptor entries, i.e. anything which isn't packet data.
17211 *
17212 * The StrongARM chips on the board (one for tx, one for rx)
17213 * are running in big-endian mode.
17214 */
17215 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17216 GRC_MODE_WSWAP_NONFRM_DATA);
17217 #ifdef __BIG_ENDIAN
17218 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17219 #endif
17220 spin_lock_init(&tp->lock);
17221 spin_lock_init(&tp->indirect_lock);
17222 INIT_WORK(&tp->reset_task, tg3_reset_task);
17223
17224 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17225 if (!tp->regs) {
17226 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17227 err = -ENOMEM;
17228 goto err_out_free_dev;
17229 }
17230
17231 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17232 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17233 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17234 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17235 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17236 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17237 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17238 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17239 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17240 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17241 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17242 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17243 tg3_flag_set(tp, ENABLE_APE);
17244 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17245 if (!tp->aperegs) {
17246 dev_err(&pdev->dev,
17247 "Cannot map APE registers, aborting\n");
17248 err = -ENOMEM;
17249 goto err_out_iounmap;
17250 }
17251 }
17252
17253 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17254 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17255
17256 dev->ethtool_ops = &tg3_ethtool_ops;
17257 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17258 dev->netdev_ops = &tg3_netdev_ops;
17259 dev->irq = pdev->irq;
17260
17261 err = tg3_get_invariants(tp, ent);
17262 if (err) {
17263 dev_err(&pdev->dev,
17264 "Problem fetching invariants of chip, aborting\n");
17265 goto err_out_apeunmap;
17266 }
17267
17268 /* The EPB bridge inside 5714, 5715, and 5780 and any
17269 * device behind the EPB cannot support DMA addresses > 40-bit.
17270 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17271 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17272 * do DMA address check in tg3_start_xmit().
17273 */
17274 if (tg3_flag(tp, IS_5788))
17275 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17276 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17277 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17278 #ifdef CONFIG_HIGHMEM
17279 dma_mask = DMA_BIT_MASK(64);
17280 #endif
17281 } else
17282 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
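
	/* Fall back to 32-bit DMA if the wider mask was rejected, or if
	 * none was attempted because dma_mask was already 32-bit.
	 */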
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case the UNDI or EFI driver did not shut it down.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
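
	/* Per-vector mailbox assignment: each tg3_napi instance gets an
	 * interrupt mailbox (int_mbox), a receive-return consumer index
	 * mailbox (consmbox) and a send-host producer index mailbox
	 * (prodmbox), each starting from the low 32-bit half of the
	 * corresponding 64-bit mailbox register.
	 */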
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

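		/* Interrupt mailboxes for the first five vectors advance
		 * 8 bytes apiece (full 64-bit mailbox registers); vectors
		 * past that advance only 4 bytes, presumably 32-bit
		 * registers on the larger multi-vector chips.
		 */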
		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS. If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts. Reuse the
		 * mailbox values for the next iteration. The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

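		/* The send producer mailboxes zig-zag between the two
		 * 4-byte halves of consecutive 64-bit mailbox registers:
		 * subtracting 0x4 when bit 2 is set and adding 0xc when it
		 * is clear toggles the half each iteration and advances to
		 * the next 8-byte register every other iteration.
		 */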
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);
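
	/* PTP_CAPABLE marks ASICs with hardware timestamping support
	 * (IEEE 1588); the PTP clock registration itself is handled
	 * elsewhere in the driver.
	 */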

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

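	/* Error unwind: each label below cleans up one probe step and
	 * falls through to the labels for the steps performed before it.
	 */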
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
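		/* Power-down preparation failed: restart the hardware and
		 * reattach the netdev so the interface is left usable
		 * rather than half suspended.
		 */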
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
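	/* The second argument of tg3_restart_hw() requests a PHY reset;
	 * skip it when the link was deliberately kept up across the
	 * power-down (TG3_PHYFLG_KEEP_LINK_ON_PWRDN).
	 */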
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
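
/* SIMPLE_DEV_PM_OPS above defines tg3_pm_ops with the system-sleep
 * callbacks (suspend/resume, freeze/thaw, poweroff/restore) routed to
 * tg3_suspend and tg3_resume when CONFIG_PM_SLEEP is enabled; the ops
 * are left empty otherwise.
 */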

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
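	/* A permanently failed device will be removed, so report
	 * DISCONNECT; otherwise disable it and return the default
	 * NEED_RESET so the core proceeds to tg3_io_slot_reset().
	 */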
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

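	/* Re-enable bus mastering, restore the config space captured by
	 * pci_save_state() at probe time, then save it again so a later
	 * error can be recovered from the same way.
	 */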
	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
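
/* PCI error-recovery flow for reference: the core invokes
 * .error_detected first, resets the slot/link, then calls
 * .slot_reset, and finally .resume once traffic may restart.
 */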

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);