/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		132
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"May 21, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

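/* Indirect register accessors: these go through PCI config space
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA) instead of memory-mapped I/O,
 * serialized by tp->indirect_lock.
 */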
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

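/* Read/write words in NIC SRAM through the memory window. The window base
 * is restored to zero afterwards so other code always finds it in a known
 * state.
 */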
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

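/* Acquire one of the hardware semaphores shared with the APE management
 * processor. Returns 0 on success or -EBUSY if the grant is not observed
 * within roughly 1 millisecond.
 */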
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

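/* Read NCSI scratchpad data through the APE shared memory message buffer,
 * one buffer-sized chunk at a time.
 */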
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

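/* Returns nonzero if the status block indicates pending link, TX completion
 * or RX work for this NAPI context.
 */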
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

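/* Issue an MII management frame through MAC_MI_COM and poll for completion.
 * Autopolling is temporarily disabled around the transaction.
 */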
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

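/* Reset the PHY via BMCR_RESET and wait for the bit to self-clear. */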
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

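/* Set up the MDIO bus. On 5717-class parts the PHY address depends on the
 * PCI function and on whether the port is strapped for serdes. The phylib
 * bus is only registered when USE_PHYLIB is set.
 */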
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

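/* Wait for the bootcode to finish initializing. A timeout is not treated
 * as fatal because some parts (e.g. Sun onboard variants) ship without
 * firmware.
 */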
1804 static int tg3_poll_fw(struct tg3 *tp)
1805 {
1806 int i;
1807 u32 val;
1808
1809 if (tg3_flag(tp, NO_FWARE_REPORTED))
1810 return 0;
1811
1812 if (tg3_flag(tp, IS_SSB_CORE)) {
1813 /* We don't use firmware. */
1814 return 0;
1815 }
1816
1817 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1818 /* Wait up to 20ms for init done. */
1819 for (i = 0; i < 200; i++) {
1820 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1821 return 0;
1822 if (pci_channel_offline(tp->pdev))
1823 return -ENODEV;
1824
1825 udelay(100);
1826 }
1827 return -ENODEV;
1828 }
1829
1830 /* Wait for firmware initialization to complete. */
1831 for (i = 0; i < 100000; i++) {
1832 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1833 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1834 break;
1835 if (pci_channel_offline(tp->pdev)) {
1836 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838 netdev_info(tp->dev, "No firmware running\n");
1839 }
1840
1841 break;
1842 }
1843
1844 udelay(10);
1845 }
1846
1847 /* Chip might not be fitted with firmware. Some Sun onboard
1848 * parts are configured like that. So don't signal the timeout
1849 * of the above loop as an error, but do report the lack of
1850 * running firmware once.
1851 */
1852 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1853 tg3_flag_set(tp, NO_FWARE_REPORTED);
1854
1855 netdev_info(tp->dev, "No firmware running\n");
1856 }
1857
1858 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1859 /* The 57765 A0 needs a little more
1860 * time to do some important work.
1861 */
1862 mdelay(10);
1863 }
1864
1865 return 0;
1866 }
1867
1868 static void tg3_link_report(struct tg3 *tp)
1869 {
1870 if (!netif_carrier_ok(tp->dev)) {
1871 netif_info(tp, link, tp->dev, "Link is down\n");
1872 tg3_ump_link_report(tp);
1873 } else if (netif_msg_link(tp)) {
1874 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1875 (tp->link_config.active_speed == SPEED_1000 ?
1876 1000 :
1877 (tp->link_config.active_speed == SPEED_100 ?
1878 100 : 10)),
1879 (tp->link_config.active_duplex == DUPLEX_FULL ?
1880 "full" : "half"));
1881
1882 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1883 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1884 "on" : "off",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1886 "on" : "off");
1887
1888 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1889 netdev_info(tp->dev, "EEE is %s\n",
1890 tp->setlpicnt ? "enabled" : "disabled");
1891
1892 tg3_ump_link_report(tp);
1893 }
1894
1895 tp->link_up = netif_carrier_ok(tp->dev);
1896 }
1897
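/* Decode a copper (1000BASE-T) pause advertisement into FLOW_CTRL_*
 * capabilities, following the usual 802.3 conventions: PAUSE alone
 * means symmetric flow control, PAUSE plus ASYM means receive-only,
 * and ASYM alone means transmit-only.
 */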
1898 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1899 {
1900 u32 flowctrl = 0;
1901
1902 if (adv & ADVERTISE_PAUSE_CAP) {
1903 flowctrl |= FLOW_CTRL_RX;
1904 if (!(adv & ADVERTISE_PAUSE_ASYM))
1905 flowctrl |= FLOW_CTRL_TX;
1906 } else if (adv & ADVERTISE_PAUSE_ASYM)
1907 flowctrl |= FLOW_CTRL_TX;
1908
1909 return flowctrl;
1910 }
1911
1912 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1913 {
1914 u16 miireg;
1915
1916 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1917 miireg = ADVERTISE_1000XPAUSE;
1918 else if (flow_ctrl & FLOW_CTRL_TX)
1919 miireg = ADVERTISE_1000XPSE_ASYM;
1920 else if (flow_ctrl & FLOW_CTRL_RX)
1921 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1922 else
1923 miireg = 0;
1924
1925 return miireg;
1926 }
1927
1928 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1929 {
1930 u32 flowctrl = 0;
1931
1932 if (adv & ADVERTISE_1000XPAUSE) {
1933 flowctrl |= FLOW_CTRL_RX;
1934 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1935 flowctrl |= FLOW_CTRL_TX;
1936 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1937 flowctrl |= FLOW_CTRL_TX;
1938
1939 return flowctrl;
1940 }
1941
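/* Resolve local and remote 1000BASE-X pause advertisements into the
 * negotiated capability: both sides advertising PAUSE yields symmetric
 * flow control; when both advertise ASYM, the side that also set PAUSE
 * determines the single enabled direction.
 */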
1942 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1943 {
1944 u8 cap = 0;
1945
1946 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1947 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1948 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1949 if (lcladv & ADVERTISE_1000XPAUSE)
1950 cap = FLOW_CTRL_RX;
1951 if (rmtadv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_TX;
1953 }
1954
1955 return cap;
1956 }
1957
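/* Push the resolved pause configuration into the MAC. The RX/TX mode
 * registers are only rewritten when the flow-control enable bits
 * actually change.
 */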
1958 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1959 {
1960 u8 autoneg;
1961 u8 flowctrl = 0;
1962 u32 old_rx_mode = tp->rx_mode;
1963 u32 old_tx_mode = tp->tx_mode;
1964
1965 if (tg3_flag(tp, USE_PHYLIB))
1966 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1967 else
1968 autoneg = tp->link_config.autoneg;
1969
1970 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1971 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1972 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1973 else
1974 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1975 } else
1976 flowctrl = tp->link_config.flowctrl;
1977
1978 tp->link_config.active_flowctrl = flowctrl;
1979
1980 if (flowctrl & FLOW_CTRL_RX)
1981 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1982 else
1983 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1984
1985 if (old_rx_mode != tp->rx_mode)
1986 tw32_f(MAC_RX_MODE, tp->rx_mode);
1987
1988 if (flowctrl & FLOW_CTRL_TX)
1989 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1990 else
1991 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1992
1993 if (old_tx_mode != tp->tx_mode)
1994 tw32_f(MAC_TX_MODE, tp->tx_mode);
1995 }
1996
1997 static void tg3_adjust_link(struct net_device *dev)
1998 {
1999 u8 oldflowctrl, linkmesg = 0;
2000 u32 mac_mode, lcl_adv, rmt_adv;
2001 struct tg3 *tp = netdev_priv(dev);
2002 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2003
2004 spin_lock_bh(&tp->lock);
2005
2006 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2007 MAC_MODE_HALF_DUPLEX);
2008
2009 oldflowctrl = tp->link_config.active_flowctrl;
2010
2011 if (phydev->link) {
2012 lcl_adv = 0;
2013 rmt_adv = 0;
2014
2015 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2016 mac_mode |= MAC_MODE_PORT_MODE_MII;
2017 else if (phydev->speed == SPEED_1000 ||
2018 tg3_asic_rev(tp) != ASIC_REV_5785)
2019 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2020 else
2021 mac_mode |= MAC_MODE_PORT_MODE_MII;
2022
2023 if (phydev->duplex == DUPLEX_HALF)
2024 mac_mode |= MAC_MODE_HALF_DUPLEX;
2025 else {
2026 lcl_adv = mii_advertise_flowctrl(
2027 tp->link_config.flowctrl);
2028
2029 if (phydev->pause)
2030 rmt_adv = LPA_PAUSE_CAP;
2031 if (phydev->asym_pause)
2032 rmt_adv |= LPA_PAUSE_ASYM;
2033 }
2034
2035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2036 } else
2037 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2038
2039 if (mac_mode != tp->mac_mode) {
2040 tp->mac_mode = mac_mode;
2041 tw32_f(MAC_MODE, tp->mac_mode);
2042 udelay(40);
2043 }
2044
2045 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2046 if (phydev->speed == SPEED_10)
2047 tw32(MAC_MI_STAT,
2048 MAC_MI_STAT_10MBPS_MODE |
2049 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2050 else
2051 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 }
2053
2054 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2055 tw32(MAC_TX_LENGTHS,
2056 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057 (6 << TX_LENGTHS_IPG_SHIFT) |
2058 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2059 else
2060 tw32(MAC_TX_LENGTHS,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 (6 << TX_LENGTHS_IPG_SHIFT) |
2063 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064
2065 if (phydev->link != tp->old_link ||
2066 phydev->speed != tp->link_config.active_speed ||
2067 phydev->duplex != tp->link_config.active_duplex ||
2068 oldflowctrl != tp->link_config.active_flowctrl)
2069 linkmesg = 1;
2070
2071 tp->old_link = phydev->link;
2072 tp->link_config.active_speed = phydev->speed;
2073 tp->link_config.active_duplex = phydev->duplex;
2074
2075 spin_unlock_bh(&tp->lock);
2076
2077 if (linkmesg)
2078 tg3_link_report(tp);
2079 }
2080
2081 static int tg3_phy_init(struct tg3 *tp)
2082 {
2083 struct phy_device *phydev;
2084
2085 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2086 return 0;
2087
2088 /* Bring the PHY back to a known state. */
2089 tg3_bmcr_reset(tp);
2090
2091 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2092
2093 /* Attach the MAC to the PHY. */
2094 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2095 tg3_adjust_link, phydev->interface);
2096 if (IS_ERR(phydev)) {
2097 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2098 return PTR_ERR(phydev);
2099 }
2100
2101 /* Mask with MAC supported features. */
2102 switch (phydev->interface) {
2103 case PHY_INTERFACE_MODE_GMII:
2104 case PHY_INTERFACE_MODE_RGMII:
2105 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2106 phydev->supported &= (PHY_GBIT_FEATURES |
2107 SUPPORTED_Pause |
2108 SUPPORTED_Asym_Pause);
2109 break;
2110 }
2111 /* fallthru */
2112 case PHY_INTERFACE_MODE_MII:
2113 phydev->supported &= (PHY_BASIC_FEATURES |
2114 SUPPORTED_Pause |
2115 SUPPORTED_Asym_Pause);
2116 break;
2117 default:
2118 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phydev->advertising = phydev->supported;
2125
2126 return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 phydev->advertising = tp->link_config.advertising;
2144 }
2145
2146 phy_start(phydev);
2147
2148 phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154 return;
2155
2156 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164 }
2165 }
2166
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169 int err;
2170 u32 val;
2171
2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173 return 0;
2174
2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 /* Cannot do read-modify-write on 5401 */
2177 err = tg3_phy_auxctl_write(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180 0x4c20);
2181 goto done;
2182 }
2183
2184 err = tg3_phy_auxctl_read(tp,
2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186 if (err)
2187 return err;
2188
2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 err = tg3_phy_auxctl_write(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194 return err;
2195 }
2196
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199 u32 phytest;
2200
2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202 u32 phy;
2203
2204 tg3_writephy(tp, MII_TG3_FET_TEST,
2205 phytest | MII_TG3_FET_SHADOW_EN);
2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207 if (enable)
2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209 else
2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212 }
2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214 }
2215 }
2216
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219 u32 reg;
2220
2221 if (!tg3_flag(tp, 5705_PLUS) ||
2222 (tg3_flag(tp, 5717_PLUS) &&
2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224 return;
2225
2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 tg3_phy_fet_toggle_apd(tp, enable);
2228 return;
2229 }
2230
2231 reg = MII_TG3_MISC_SHDW_WREN |
2232 MII_TG3_MISC_SHDW_SCR5_SEL |
2233 MII_TG3_MISC_SHDW_SCR5_LPED |
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2241
2243 reg = MII_TG3_MISC_SHDW_WREN |
2244 MII_TG3_MISC_SHDW_APD_SEL |
2245 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2246 if (enable)
2247 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2248
2249 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2250 }
2251
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 {
2254 u32 phy;
2255
2256 if (!tg3_flag(tp, 5705_PLUS) ||
2257 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2258 return;
2259
2260 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2261 u32 ephy;
2262
2263 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2265
2266 tg3_writephy(tp, MII_TG3_FET_TEST,
2267 ephy | MII_TG3_FET_SHADOW_EN);
2268 if (!tg3_readphy(tp, reg, &phy)) {
2269 if (enable)
2270 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 else
2272 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273 tg3_writephy(tp, reg, phy);
2274 }
2275 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276 }
2277 } else {
2278 int ret;
2279
2280 ret = tg3_phy_auxctl_read(tp,
2281 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2282 if (!ret) {
2283 if (enable)
2284 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 else
2286 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287 tg3_phy_auxctl_write(tp,
2288 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289 }
2290 }
2291 }
2292
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 {
2295 int ret;
2296 u32 val;
2297
2298 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2299 return;
2300
2301 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2302 if (!ret)
2303 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2305 }
2306
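/* Apply per-part analog tuning values from the one-time-programmable
 * (OTP) word. Each field is extracted with its mask/shift pair and
 * written to the corresponding PHY DSP tap register.
 */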
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2308 {
2309 u32 otp, phy;
2310
2311 if (!tp->phy_otp)
2312 return;
2313
2314 otp = tp->phy_otp;
2315
2316 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2317 return;
2318
2319 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2322
2323 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2326
2327 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2330
2331 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2333
2334 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2336
2337 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2340
2341 tg3_phy_toggle_auxctl_smdsp(tp, false);
2342 }
2343
2344 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2345 {
2346 u32 val;
2347
2348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349 return;
2350
2351 tp->setlpicnt = 0;
2352
2353 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2354 current_link_up &&
2355 tp->link_config.active_duplex == DUPLEX_FULL &&
2356 (tp->link_config.active_speed == SPEED_100 ||
2357 tp->link_config.active_speed == SPEED_1000)) {
2358 u32 eeectl;
2359
2360 if (tp->link_config.active_speed == SPEED_1000)
2361 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2362 else
2363 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2364
2365 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2366
2367 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2368 TG3_CL45_D7_EEERES_STAT, &val);
2369
2370 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2372 tp->setlpicnt = 2;
2373 }
2374
2375 if (!tp->setlpicnt) {
2376 if (current_link_up &&
2377 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2378 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2379 tg3_phy_toggle_auxctl_smdsp(tp, false);
2380 }
2381
2382 val = tr32(TG3_CPMU_EEE_MODE);
2383 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2384 }
2385 }
2386
2387 static void tg3_phy_eee_enable(struct tg3 *tp)
2388 {
2389 u32 val;
2390
2391 if (tp->link_config.active_speed == SPEED_1000 &&
2392 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2393 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2394 tg3_flag(tp, 57765_CLASS)) &&
2395 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396 val = MII_TG3_DSP_TAP26_ALNOKO |
2397 MII_TG3_DSP_TAP26_RMRXSTO;
2398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2399 tg3_phy_toggle_auxctl_smdsp(tp, false);
2400 }
2401
2402 val = tr32(TG3_CPMU_EEE_MODE);
2403 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2404 }
2405
2406 static int tg3_wait_macro_done(struct tg3 *tp)
2407 {
2408 int limit = 100;
2409
2410 while (limit--) {
2411 u32 tmp32;
2412
2413 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2414 if ((tmp32 & 0x1000) == 0)
2415 break;
2416 }
2417 }
2418 if (limit < 0)
2419 return -EBUSY;
2420
2421 return 0;
2422 }
2423
2424 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2425 {
2426 static const u32 test_pat[4][6] = {
2427 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2431 };
2432 int chan;
2433
2434 for (chan = 0; chan < 4; chan++) {
2435 int i;
2436
2437 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2438 (chan * 0x2000) | 0x0200);
2439 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2440
2441 for (i = 0; i < 6; i++)
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2443 test_pat[chan][i]);
2444
2445 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2446 if (tg3_wait_macro_done(tp)) {
2447 *resetp = 1;
2448 return -EBUSY;
2449 }
2450
2451 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2452 (chan * 0x2000) | 0x0200);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2454 if (tg3_wait_macro_done(tp)) {
2455 *resetp = 1;
2456 return -EBUSY;
2457 }
2458
2459 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2460 if (tg3_wait_macro_done(tp)) {
2461 *resetp = 1;
2462 return -EBUSY;
2463 }
2464
2465 for (i = 0; i < 6; i += 2) {
2466 u32 low, high;
2467
2468 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2469 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2470 tg3_wait_macro_done(tp)) {
2471 *resetp = 1;
2472 return -EBUSY;
2473 }
2474 low &= 0x7fff;
2475 high &= 0x000f;
2476 if (low != test_pat[chan][i] ||
2477 high != test_pat[chan][i+1]) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2479 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2481
2482 return -EBUSY;
2483 }
2484 }
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2491 {
2492 int chan;
2493
2494 for (chan = 0; chan < 4; chan++) {
2495 int i;
2496
2497 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498 (chan * 0x2000) | 0x0200);
2499 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2500 for (i = 0; i < 6; i++)
2501 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2502 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2503 if (tg3_wait_macro_done(tp))
2504 return -EBUSY;
2505 }
2506
2507 return 0;
2508 }
2509
2510 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2511 {
2512 u32 reg32, phy9_orig;
2513 int retries, do_phy_reset, err;
2514
2515 retries = 10;
2516 do_phy_reset = 1;
2517 do {
2518 if (do_phy_reset) {
2519 err = tg3_bmcr_reset(tp);
2520 if (err)
2521 return err;
2522 do_phy_reset = 0;
2523 }
2524
2525 /* Disable transmitter and interrupt. */
2526 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2527 continue;
2528
2529 reg32 |= 0x3000;
2530 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2531
2532 /* Set full-duplex, 1000 Mbps. */
2533 tg3_writephy(tp, MII_BMCR,
2534 BMCR_FULLDPLX | BMCR_SPEED1000);
2535
2536 /* Set to master mode. */
2537 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2538 continue;
2539
2540 tg3_writephy(tp, MII_CTRL1000,
2541 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2542
2543 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2544 if (err)
2545 return err;
2546
2547 /* Block the PHY control access. */
2548 tg3_phydsp_write(tp, 0x8005, 0x0800);
2549
2550 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2551 if (!err)
2552 break;
2553 } while (--retries);
2554
2555 err = tg3_phy_reset_chanpat(tp);
2556 if (err)
2557 return err;
2558
2559 tg3_phydsp_write(tp, 0x8005, 0x0000);
2560
2561 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2562 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2563
2564 tg3_phy_toggle_auxctl_smdsp(tp, false);
2565
2566 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2567
2568 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2569 reg32 &= ~0x3000;
2570 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2571 } else if (!err)
2572 err = -EBUSY;
2573
2574 return err;
2575 }
2576
2577 static void tg3_carrier_off(struct tg3 *tp)
2578 {
2579 netif_carrier_off(tp->dev);
2580 tp->link_up = false;
2581 }
2582
2583 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2584 {
2585 if (tg3_flag(tp, ENABLE_ASF))
2586 netdev_warn(tp->dev,
2587 "Management side-band traffic will be interrupted during phy settings change\n");
2588 }
2589
2590 /* This resets the tigon3 PHY and reapplies the chip-specific
2591 * workarounds that must follow a PHY reset.
2592 */
2593 static int tg3_phy_reset(struct tg3 *tp)
2594 {
2595 u32 val, cpmuctrl;
2596 int err;
2597
2598 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2599 val = tr32(GRC_MISC_CFG);
2600 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2601 udelay(40);
2602 }
2603 err = tg3_readphy(tp, MII_BMSR, &val);
2604 err |= tg3_readphy(tp, MII_BMSR, &val);
2605 if (err != 0)
2606 return -EBUSY;
2607
2608 if (netif_running(tp->dev) && tp->link_up) {
2609 netif_carrier_off(tp->dev);
2610 tg3_link_report(tp);
2611 }
2612
2613 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2614 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2615 tg3_asic_rev(tp) == ASIC_REV_5705) {
2616 err = tg3_phy_reset_5703_4_5(tp);
2617 if (err)
2618 return err;
2619 goto out;
2620 }
2621
2622 cpmuctrl = 0;
2623 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2624 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2625 cpmuctrl = tr32(TG3_CPMU_CTRL);
2626 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2627 tw32(TG3_CPMU_CTRL,
2628 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2629 }
2630
2631 err = tg3_bmcr_reset(tp);
2632 if (err)
2633 return err;
2634
2635 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2636 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2637 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2638
2639 tw32(TG3_CPMU_CTRL, cpmuctrl);
2640 }
2641
2642 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2643 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2644 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2645 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2646 CPMU_LSPD_1000MB_MACCLK_12_5) {
2647 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2648 udelay(40);
2649 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2650 }
2651 }
2652
2653 if (tg3_flag(tp, 5717_PLUS) &&
2654 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2655 return 0;
2656
2657 tg3_phy_apply_otp(tp);
2658
2659 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2660 tg3_phy_toggle_apd(tp, true);
2661 else
2662 tg3_phy_toggle_apd(tp, false);
2663
2664 out:
2665 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2666 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2667 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2668 tg3_phydsp_write(tp, 0x000a, 0x0323);
2669 tg3_phy_toggle_auxctl_smdsp(tp, false);
2670 }
2671
2672 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2673 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2674 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2675 }
2676
2677 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2678 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2679 tg3_phydsp_write(tp, 0x000a, 0x310b);
2680 tg3_phydsp_write(tp, 0x201f, 0x9506);
2681 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2682 tg3_phy_toggle_auxctl_smdsp(tp, false);
2683 }
2684 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2685 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2686 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2687 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2688 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2689 tg3_writephy(tp, MII_TG3_TEST1,
2690 MII_TG3_TEST1_TRIM_EN | 0x4);
2691 } else
2692 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2693
2694 tg3_phy_toggle_auxctl_smdsp(tp, false);
2695 }
2696 }
2697
2698 /* Set the extended packet length bit (bit 14) on all chips
2699 * that support jumbo frames. */
2700 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2701 /* Cannot do read-modify-write on 5401 */
2702 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2703 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2704 /* Set bit 14 with read-modify-write to preserve other bits */
2705 err = tg3_phy_auxctl_read(tp,
2706 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2707 if (!err)
2708 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2709 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2710 }
2711
2712 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2713 * jumbo frame transmission.
2714 */
2715 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2716 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2717 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2718 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2719 }
2720
2721 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2722 /* adjust output voltage */
2723 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2724 }
2725
2726 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2727 tg3_phydsp_write(tp, 0xffb, 0x4000);
2728
2729 tg3_phy_toggle_automdix(tp, true);
2730 tg3_phy_set_wirespeed(tp);
2731 return 0;
2732 }
2733
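/* Power-source handshake flags shared between the PCI functions of a
 * device. Each function owns a 4-bit nibble of the status word (hence
 * the replication at shifts 0, 4, 8 and 12 in the ALL_* masks below),
 * updated via tg3_set_function_status().
 */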
2734 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2736 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2737 TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742 (TG3_GPIO_MSG_DRVR_PRES << 12))
2743
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748 (TG3_GPIO_MSG_NEED_VAUX << 12))
2749
2750 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2751 {
2752 u32 status, shift;
2753
2754 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2755 tg3_asic_rev(tp) == ASIC_REV_5719)
2756 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2757 else
2758 status = tr32(TG3_CPMU_DRV_STATUS);
2759
2760 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2761 status &= ~(TG3_GPIO_MSG_MASK << shift);
2762 status |= (newstat << shift);
2763
2764 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2765 tg3_asic_rev(tp) == ASIC_REV_5719)
2766 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2767 else
2768 tw32(TG3_CPMU_DRV_STATUS, status);
2769
2770 return status >> TG3_APE_GPIO_MSG_SHIFT;
2771 }
2772
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2774 {
2775 if (!tg3_flag(tp, IS_NIC))
2776 return 0;
2777
2778 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2779 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2780 tg3_asic_rev(tp) == ASIC_REV_5720) {
2781 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2782 return -EIO;
2783
2784 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2785
2786 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2787 TG3_GRC_LCLCTL_PWRSW_DELAY);
2788
2789 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2790 } else {
2791 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2792 TG3_GRC_LCLCTL_PWRSW_DELAY);
2793 }
2794
2795 return 0;
2796 }
2797
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2799 {
2800 u32 grc_local_ctrl;
2801
2802 if (!tg3_flag(tp, IS_NIC) ||
2803 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2804 tg3_asic_rev(tp) == ASIC_REV_5701)
2805 return;
2806
2807 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2808
2809 tw32_wait_f(GRC_LOCAL_CTRL,
2810 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812
2813 tw32_wait_f(GRC_LOCAL_CTRL,
2814 grc_local_ctrl,
2815 TG3_GRC_LCLCTL_PWRSW_DELAY);
2816
2817 tw32_wait_f(GRC_LOCAL_CTRL,
2818 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 }
2821
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2823 {
2824 if (!tg3_flag(tp, IS_NIC))
2825 return;
2826
2827 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2828 tg3_asic_rev(tp) == ASIC_REV_5701) {
2829 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2830 (GRC_LCLCTRL_GPIO_OE0 |
2831 GRC_LCLCTRL_GPIO_OE1 |
2832 GRC_LCLCTRL_GPIO_OE2 |
2833 GRC_LCLCTRL_GPIO_OUTPUT0 |
2834 GRC_LCLCTRL_GPIO_OUTPUT1),
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2837 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2838 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2840 GRC_LCLCTRL_GPIO_OE1 |
2841 GRC_LCLCTRL_GPIO_OE2 |
2842 GRC_LCLCTRL_GPIO_OUTPUT0 |
2843 GRC_LCLCTRL_GPIO_OUTPUT1 |
2844 tp->grc_local_ctrl;
2845 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2849 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2853 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 } else {
2856 u32 no_gpio2;
2857 u32 grc_local_ctrl = 0;
2858
2859 /* Workaround to prevent overdrawing Amps. */
2860 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2861 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2862 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2863 grc_local_ctrl,
2864 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 }
2866
2867 /* On 5753 and variants, GPIO2 cannot be used. */
2868 no_gpio2 = tp->nic_sram_data_cfg &
2869 NIC_SRAM_DATA_CFG_NO_GPIO2;
2870
2871 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2872 GRC_LCLCTRL_GPIO_OE1 |
2873 GRC_LCLCTRL_GPIO_OE2 |
2874 GRC_LCLCTRL_GPIO_OUTPUT1 |
2875 GRC_LCLCTRL_GPIO_OUTPUT2;
2876 if (no_gpio2) {
2877 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT2);
2879 }
2880 tw32_wait_f(GRC_LOCAL_CTRL,
2881 tp->grc_local_ctrl | grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2885
2886 tw32_wait_f(GRC_LOCAL_CTRL,
2887 tp->grc_local_ctrl | grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890 if (!no_gpio2) {
2891 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2892 tw32_wait_f(GRC_LOCAL_CTRL,
2893 tp->grc_local_ctrl | grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2895 }
2896 }
2897 }
2898
2899 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2900 {
2901 u32 msg = 0;
2902
2903 /* Serialize power state transitions */
2904 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2905 return;
2906
2907 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2908 msg = TG3_GPIO_MSG_NEED_VAUX;
2909
2910 msg = tg3_set_function_status(tp, msg);
2911
2912 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2913 goto done;
2914
2915 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2916 tg3_pwrsrc_switch_to_vaux(tp);
2917 else
2918 tg3_pwrsrc_die_with_vmain(tp);
2919
2920 done:
2921 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2922 }
2923
2924 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2925 {
2926 bool need_vaux = false;
2927
2928 /* The GPIOs do something completely different on 57765. */
2929 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2930 return;
2931
2932 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2933 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2934 tg3_asic_rev(tp) == ASIC_REV_5720) {
2935 tg3_frob_aux_power_5717(tp, include_wol ?
2936 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2937 return;
2938 }
2939
2940 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2941 struct net_device *dev_peer;
2942
2943 dev_peer = pci_get_drvdata(tp->pdev_peer);
2944
2945 /* remove_one() may have been run on the peer. */
2946 if (dev_peer) {
2947 struct tg3 *tp_peer = netdev_priv(dev_peer);
2948
2949 if (tg3_flag(tp_peer, INIT_COMPLETE))
2950 return;
2951
2952 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2953 tg3_flag(tp_peer, ENABLE_ASF))
2954 need_vaux = true;
2955 }
2956 }
2957
2958 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2959 tg3_flag(tp, ENABLE_ASF))
2960 need_vaux = true;
2961
2962 if (need_vaux)
2963 tg3_pwrsrc_switch_to_vaux(tp);
2964 else
2965 tg3_pwrsrc_die_with_vmain(tp);
2966 }
2967
2968 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2969 {
2970 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2971 return 1;
2972 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2973 if (speed != SPEED_10)
2974 return 1;
2975 } else if (speed == SPEED_10)
2976 return 1;
2977
2978 return 0;
2979 }
2980
2981 static bool tg3_phy_power_bug(struct tg3 *tp)
2982 {
2983 switch (tg3_asic_rev(tp)) {
2984 case ASIC_REV_5700:
2985 case ASIC_REV_5704:
2986 return true;
2987 case ASIC_REV_5780:
2988 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2989 return true;
2990 return false;
2991 case ASIC_REV_5717:
2992 if (!tp->pci_fn)
2993 return true;
2994 return false;
2995 case ASIC_REV_5719:
2996 case ASIC_REV_5720:
2997 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2998 !tp->pci_fn)
2999 return true;
3000 return false;
3001 }
3002
3003 return false;
3004 }
3005
3006 static bool tg3_phy_led_bug(struct tg3 *tp)
3007 {
3008 switch (tg3_asic_rev(tp)) {
3009 case ASIC_REV_5719:
3010 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3011 !tp->pci_fn)
3012 return true;
3013 return false;
3014 }
3015
3016 return false;
3017 }
3018
3019 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3020 {
3021 u32 val;
3022
3023 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3024 return;
3025
3026 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3027 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3028 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3029 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3030
3031 sg_dig_ctrl |=
3032 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3033 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3034 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3035 }
3036 return;
3037 }
3038
3039 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3040 tg3_bmcr_reset(tp);
3041 val = tr32(GRC_MISC_CFG);
3042 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3043 udelay(40);
3044 return;
3045 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3046 u32 phytest;
3047 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3048 u32 phy;
3049
3050 tg3_writephy(tp, MII_ADVERTISE, 0);
3051 tg3_writephy(tp, MII_BMCR,
3052 BMCR_ANENABLE | BMCR_ANRESTART);
3053
3054 tg3_writephy(tp, MII_TG3_FET_TEST,
3055 phytest | MII_TG3_FET_SHADOW_EN);
3056 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3057 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3058 tg3_writephy(tp,
3059 MII_TG3_FET_SHDW_AUXMODE4,
3060 phy);
3061 }
3062 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3063 }
3064 return;
3065 } else if (do_low_power) {
3066 if (!tg3_phy_led_bug(tp))
3067 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3068 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3069
3070 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3071 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3072 MII_TG3_AUXCTL_PCTL_VREG_11V;
3073 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3074 }
3075
3076 /* The PHY should not be powered down on some chips because
3077 * of bugs.
3078 */
3079 if (tg3_phy_power_bug(tp))
3080 return;
3081
3082 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3083 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3084 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3085 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3086 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3088 }
3089
3090 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3091 }
3092
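/* Acquire the NVRAM software arbitration grant for this function. The
 * polling loop below bounds the wait at 8000 * 20 us, i.e. roughly
 * 160 ms, before backing out the request and returning -ENODEV.
 */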
3093 /* tp->lock is held. */
3094 static int tg3_nvram_lock(struct tg3 *tp)
3095 {
3096 if (tg3_flag(tp, NVRAM)) {
3097 int i;
3098
3099 if (tp->nvram_lock_cnt == 0) {
3100 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3101 for (i = 0; i < 8000; i++) {
3102 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3103 break;
3104 udelay(20);
3105 }
3106 if (i == 8000) {
3107 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3108 return -ENODEV;
3109 }
3110 }
3111 tp->nvram_lock_cnt++;
3112 }
3113 return 0;
3114 }
3115
3116 /* tp->lock is held. */
3117 static void tg3_nvram_unlock(struct tg3 *tp)
3118 {
3119 if (tg3_flag(tp, NVRAM)) {
3120 if (tp->nvram_lock_cnt > 0)
3121 tp->nvram_lock_cnt--;
3122 if (tp->nvram_lock_cnt == 0)
3123 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3124 }
3125 }
3126
3127 /* tp->lock is held. */
3128 static void tg3_enable_nvram_access(struct tg3 *tp)
3129 {
3130 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3131 u32 nvaccess = tr32(NVRAM_ACCESS);
3132
3133 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3134 }
3135 }
3136
3137 /* tp->lock is held. */
3138 static void tg3_disable_nvram_access(struct tg3 *tp)
3139 {
3140 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3141 u32 nvaccess = tr32(NVRAM_ACCESS);
3142
3143 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3144 }
3145 }
3146
3147 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3148 u32 offset, u32 *val)
3149 {
3150 u32 tmp;
3151 int i;
3152
3153 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3154 return -EINVAL;
3155
3156 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3157 EEPROM_ADDR_DEVID_MASK |
3158 EEPROM_ADDR_READ);
3159 tw32(GRC_EEPROM_ADDR,
3160 tmp |
3161 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3162 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3163 EEPROM_ADDR_ADDR_MASK) |
3164 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3165
3166 for (i = 0; i < 1000; i++) {
3167 tmp = tr32(GRC_EEPROM_ADDR);
3168
3169 if (tmp & EEPROM_ADDR_COMPLETE)
3170 break;
3171 msleep(1);
3172 }
3173 if (!(tmp & EEPROM_ADDR_COMPLETE))
3174 return -EBUSY;
3175
3176 tmp = tr32(GRC_EEPROM_DATA);
3177
3178 /*
3179 * The data will always be opposite the native endian
3180 * format. Perform a blind byteswap to compensate.
3181 */
3182 *val = swab32(tmp);
3183
3184 return 0;
3185 }
3186
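/* The command polling loop in tg3_nvram_exec_cmd() delays 10 us per
 * iteration, so this bounds a single NVRAM command at about 100 ms.
 */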
3187 #define NVRAM_CMD_TIMEOUT 10000
3188
3189 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3190 {
3191 int i;
3192
3193 tw32(NVRAM_CMD, nvram_cmd);
3194 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3195 udelay(10);
3196 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3197 udelay(10);
3198 break;
3199 }
3200 }
3201
3202 if (i == NVRAM_CMD_TIMEOUT)
3203 return -EBUSY;
3204
3205 return 0;
3206 }
3207
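/* Buffered Atmel AT45DB0X1B-style flashes use an odd (e.g. 264-byte)
 * page size, so a flat logical offset must be translated into the
 * chip's page/offset form and back. As a worked example, assuming a
 * 264-byte page, logical offset 600 maps to physical
 * (2 << ATMEL_AT45DB0X1B_PAGE_POS) + 72, since 600 = 2 * 264 + 72.
 */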
3208 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3209 {
3210 if (tg3_flag(tp, NVRAM) &&
3211 tg3_flag(tp, NVRAM_BUFFERED) &&
3212 tg3_flag(tp, FLASH) &&
3213 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3214 (tp->nvram_jedecnum == JEDEC_ATMEL))
3215
3216 addr = ((addr / tp->nvram_pagesize) <<
3217 ATMEL_AT45DB0X1B_PAGE_POS) +
3218 (addr % tp->nvram_pagesize);
3219
3220 return addr;
3221 }
3222
3223 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3224 {
3225 if (tg3_flag(tp, NVRAM) &&
3226 tg3_flag(tp, NVRAM_BUFFERED) &&
3227 tg3_flag(tp, FLASH) &&
3228 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3229 (tp->nvram_jedecnum == JEDEC_ATMEL))
3230
3231 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3232 tp->nvram_pagesize) +
3233 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3234
3235 return addr;
3236 }
3237
3238 /* NOTE: Data read in from NVRAM is byteswapped according to
3239 * the byteswapping settings for all other register accesses.
3240 * tg3 devices are BE devices, so on a BE machine, the data
3241 * returned will be exactly as it is seen in NVRAM. On a LE
3242 * machine, the 32-bit value will be byteswapped.
3243 */
3244 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3245 {
3246 int ret;
3247
3248 if (!tg3_flag(tp, NVRAM))
3249 return tg3_nvram_read_using_eeprom(tp, offset, val);
3250
3251 offset = tg3_nvram_phys_addr(tp, offset);
3252
3253 if (offset > NVRAM_ADDR_MSK)
3254 return -EINVAL;
3255
3256 ret = tg3_nvram_lock(tp);
3257 if (ret)
3258 return ret;
3259
3260 tg3_enable_nvram_access(tp);
3261
3262 tw32(NVRAM_ADDR, offset);
3263 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3264 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3265
3266 if (ret == 0)
3267 *val = tr32(NVRAM_RDDATA);
3268
3269 tg3_disable_nvram_access(tp);
3270
3271 tg3_nvram_unlock(tp);
3272
3273 return ret;
3274 }
3275
3276 /* Ensures NVRAM data is in bytestream format. */
3277 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3278 {
3279 u32 v;
3280 int res = tg3_nvram_read(tp, offset, &v);
3281 if (!res)
3282 *val = cpu_to_be32(v);
3283 return res;
3284 }
3285
3286 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3287 u32 offset, u32 len, u8 *buf)
3288 {
3289 int i, j, rc = 0;
3290 u32 val;
3291
3292 for (i = 0; i < len; i += 4) {
3293 u32 addr;
3294 __be32 data;
3295
3296 addr = offset + i;
3297
3298 memcpy(&data, buf + i, 4);
3299
3300 /*
3301 * The SEEPROM interface expects the data to always be opposite
3302 * the native endian format. We accomplish this by reversing
3303 * all the operations that would have been performed on the
3304 * data from a call to tg3_nvram_read_be32().
3305 */
3306 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3307
3308 val = tr32(GRC_EEPROM_ADDR);
3309 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3310
3311 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3312 EEPROM_ADDR_READ);
3313 tw32(GRC_EEPROM_ADDR, val |
3314 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3315 (addr & EEPROM_ADDR_ADDR_MASK) |
3316 EEPROM_ADDR_START |
3317 EEPROM_ADDR_WRITE);
3318
3319 for (j = 0; j < 1000; j++) {
3320 val = tr32(GRC_EEPROM_ADDR);
3321
3322 if (val & EEPROM_ADDR_COMPLETE)
3323 break;
3324 msleep(1);
3325 }
3326 if (!(val & EEPROM_ADDR_COMPLETE)) {
3327 rc = -EBUSY;
3328 break;
3329 }
3330 }
3331
3332 return rc;
3333 }
3334
3335 /* offset and length are dword aligned */
3336 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3337 u8 *buf)
3338 {
3339 int ret = 0;
3340 u32 pagesize = tp->nvram_pagesize;
3341 u32 pagemask = pagesize - 1;
3342 u32 nvram_cmd;
3343 u8 *tmp;
3344
3345 tmp = kmalloc(pagesize, GFP_KERNEL);
3346 if (tmp == NULL)
3347 return -ENOMEM;
3348
3349 while (len) {
3350 int j;
3351 u32 phy_addr, page_off, size;
3352
3353 phy_addr = offset & ~pagemask;
3354
3355 for (j = 0; j < pagesize; j += 4) {
3356 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3357 (__be32 *) (tmp + j));
3358 if (ret)
3359 break;
3360 }
3361 if (ret)
3362 break;
3363
3364 page_off = offset & pagemask;
3365 size = pagesize;
3366 if (len < size)
3367 size = len;
3368
3369 len -= size;
3370
3371 memcpy(tmp + page_off, buf, size);
3372 buf += size;	/* advance the source past the chunk just staged */
3373 offset = offset + (pagesize - page_off);
3374
3375 tg3_enable_nvram_access(tp);
3376
3377 /*
3378 * Before we can erase the flash page, we need
3379 * to issue a special "write enable" command.
3380 */
3381 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3382
3383 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3384 break;
3385
3386 /* Erase the target page */
3387 tw32(NVRAM_ADDR, phy_addr);
3388
3389 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3390 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3391
3392 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3393 break;
3394
3395 /* Issue another write enable to start the write. */
3396 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3397
3398 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3399 break;
3400
3401 for (j = 0; j < pagesize; j += 4) {
3402 __be32 data;
3403
3404 data = *((__be32 *) (tmp + j));
3405
3406 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3407
3408 tw32(NVRAM_ADDR, phy_addr + j);
3409
3410 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3411 NVRAM_CMD_WR;
3412
3413 if (j == 0)
3414 nvram_cmd |= NVRAM_CMD_FIRST;
3415 else if (j == (pagesize - 4))
3416 nvram_cmd |= NVRAM_CMD_LAST;
3417
3418 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3419 if (ret)
3420 break;
3421 }
3422 if (ret)
3423 break;
3424 }
3425
3426 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3427 tg3_nvram_exec_cmd(tp, nvram_cmd);
3428
3429 kfree(tmp);
3430
3431 return ret;
3432 }
3433
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3436 u8 *buf)
3437 {
3438 int i, ret = 0;
3439
3440 for (i = 0; i < len; i += 4, offset += 4) {
3441 u32 page_off, phy_addr, nvram_cmd;
3442 __be32 data;
3443
3444 memcpy(&data, buf + i, 4);
3445 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3446
3447 page_off = offset % tp->nvram_pagesize;
3448
3449 phy_addr = tg3_nvram_phys_addr(tp, offset);
3450
3451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3452
3453 if (page_off == 0 || i == 0)
3454 nvram_cmd |= NVRAM_CMD_FIRST;
3455 if (page_off == (tp->nvram_pagesize - 4))
3456 nvram_cmd |= NVRAM_CMD_LAST;
3457
3458 if (i == (len - 4))
3459 nvram_cmd |= NVRAM_CMD_LAST;
3460
3461 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3462 !tg3_flag(tp, FLASH) ||
3463 !tg3_flag(tp, 57765_PLUS))
3464 tw32(NVRAM_ADDR, phy_addr);
3465
3466 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3467 !tg3_flag(tp, 5755_PLUS) &&
3468 (tp->nvram_jedecnum == JEDEC_ST) &&
3469 (nvram_cmd & NVRAM_CMD_FIRST)) {
3470 u32 cmd;
3471
3472 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473 ret = tg3_nvram_exec_cmd(tp, cmd);
3474 if (ret)
3475 break;
3476 }
3477 if (!tg3_flag(tp, FLASH)) {
3478 /* We always do complete word writes to eeprom. */
3479 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3480 }
3481
3482 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3483 if (ret)
3484 break;
3485 }
3486 return ret;
3487 }
3488
3489 /* offset and length are dword aligned */
3490 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3491 {
3492 int ret;
3493
3494 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3495 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3496 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3497 udelay(40);
3498 }
3499
3500 if (!tg3_flag(tp, NVRAM)) {
3501 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3502 } else {
3503 u32 grc_mode;
3504
3505 ret = tg3_nvram_lock(tp);
3506 if (ret)
3507 return ret;
3508
3509 tg3_enable_nvram_access(tp);
3510 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3511 tw32(NVRAM_WRITE1, 0x406);
3512
3513 grc_mode = tr32(GRC_MODE);
3514 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3515
3516 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3517 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3518 buf);
3519 } else {
3520 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3521 buf);
3522 }
3523
3524 grc_mode = tr32(GRC_MODE);
3525 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3526
3527 tg3_disable_nvram_access(tp);
3528 tg3_nvram_unlock(tp);
3529 }
3530
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3533 udelay(40);
3534 }
3535
3536 return ret;
3537 }
3538
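/* On-chip scratch memory used to stage firmware for the RX and TX
 * RISC processors: two 16 KB windows at 0x30000 and 0x34000.
 */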
3539 #define RX_CPU_SCRATCH_BASE 0x30000
3540 #define RX_CPU_SCRATCH_SIZE 0x04000
3541 #define TX_CPU_SCRATCH_BASE 0x34000
3542 #define TX_CPU_SCRATCH_SIZE 0x04000
3543
3544 /* tp->lock is held. */
3545 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3546 {
3547 int i;
3548 const int iters = 10000;
3549
3550 for (i = 0; i < iters; i++) {
3551 tw32(cpu_base + CPU_STATE, 0xffffffff);
3552 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3553 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3554 break;
3555 if (pci_channel_offline(tp->pdev))
3556 return -EBUSY;
3557 }
3558
3559 return (i == iters) ? -EBUSY : 0;
3560 }
3561
3562 /* tp->lock is held. */
3563 static int tg3_rxcpu_pause(struct tg3 *tp)
3564 {
3565 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3566
3567 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3568 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3569 udelay(10);
3570
3571 return rc;
3572 }
3573
3574 /* tp->lock is held. */
3575 static int tg3_txcpu_pause(struct tg3 *tp)
3576 {
3577 return tg3_pause_cpu(tp, TX_CPU_BASE);
3578 }
3579
3580 /* tp->lock is held. */
3581 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 tw32(cpu_base + CPU_STATE, 0xffffffff);
3584 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3585 }
3586
3587 /* tp->lock is held. */
3588 static void tg3_rxcpu_resume(struct tg3 *tp)
3589 {
3590 tg3_resume_cpu(tp, RX_CPU_BASE);
3591 }
3592
3593 /* tp->lock is held. */
3594 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596 int rc;
3597
3598 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3599
3600 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3601 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3602
3603 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3604 return 0;
3605 }
3606 if (cpu_base == RX_CPU_BASE) {
3607 rc = tg3_rxcpu_pause(tp);
3608 } else {
3609 /*
3610 * There is only an Rx CPU for the 5750 derivative in the
3611 * BCM4785.
3612 */
3613 if (tg3_flag(tp, IS_SSB_CORE))
3614 return 0;
3615
3616 rc = tg3_txcpu_pause(tp);
3617 }
3618
3619 if (rc) {
3620 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3621 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3622 return -ENODEV;
3623 }
3624
3625 /* Clear firmware's nvram arbitration. */
3626 if (tg3_flag(tp, NVRAM))
3627 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3628 return 0;
3629 }
3630
3631 static int tg3_fw_data_len(struct tg3 *tp,
3632 const struct tg3_firmware_hdr *fw_hdr)
3633 {
3634 int fw_len;
3635
3636 /* Non-fragmented firmware images have one firmware header followed
3637 * by a contiguous chunk of data to be written. The length field in
3638 * that header is not the length of the data to be written but the
3639 * complete length of the bss. The data length is determined from
3640 * tp->fw->size minus the headers.
3641 *
3642 * Fragmented firmware images have a main header followed by multiple
3643 * fragments. Each fragment is identical to a non-fragmented image:
3644 * a firmware header followed by a contiguous chunk of data. In the
3645 * main header, the length field is unused and set to 0xffffffff.
3646 * In each fragment header the length is the entire size of that
3647 * fragment, i.e. fragment data plus header length. The data length
3648 * is therefore the header's length field minus TG3_FW_HDR_LEN.
3649 */
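/* Illustrative example (hypothetical sizes): a non-fragmented image
 * with tp->fw->size of 0x1000 bytes yields (0x1000 - TG3_FW_HDR_LEN) /
 * sizeof(u32) data words, while a fragment whose header reports a
 * length of 0x200 yields (0x200 - TG3_FW_HDR_LEN) / sizeof(u32) words.
 */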
3650 if (tp->fw_len == 0xffffffff)
3651 fw_len = be32_to_cpu(fw_hdr->len);
3652 else
3653 fw_len = tp->fw->size;
3654
3655 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3656 }
3657
3658 /* tp->lock is held. */
3659 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3660 u32 cpu_scratch_base, int cpu_scratch_size,
3661 const struct tg3_firmware_hdr *fw_hdr)
3662 {
3663 int err, i;
3664 void (*write_op)(struct tg3 *, u32, u32);
3665 int total_len = tp->fw->size;
3666
3667 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3668 netdev_err(tp->dev,
3669 "%s: Trying to load TX cpu firmware which is 5705\n",
3670 __func__);
3671 return -EINVAL;
3672 }
3673
3674 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3675 write_op = tg3_write_mem;
3676 else
3677 write_op = tg3_write_indirect_reg32;
3678
3679 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3680 /* It is possible that bootcode is still loading at this point.
3681 * Get the nvram lock first before halting the cpu.
3682 */
3683 int lock_err = tg3_nvram_lock(tp);
3684 err = tg3_halt_cpu(tp, cpu_base);
3685 if (!lock_err)
3686 tg3_nvram_unlock(tp);
3687 if (err)
3688 goto out;
3689
3690 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3691 write_op(tp, cpu_scratch_base + i, 0);
3692 tw32(cpu_base + CPU_STATE, 0xffffffff);
3693 tw32(cpu_base + CPU_MODE,
3694 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3695 } else {
3696 /* Subtract additional main header for fragmented firmware and
3697 * advance to the first fragment
3698 */
3699 total_len -= TG3_FW_HDR_LEN;
3700 fw_hdr++;
3701 }
3702
3703 do {
3704 u32 *fw_data = (u32 *)(fw_hdr + 1);
3705 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3706 write_op(tp, cpu_scratch_base +
3707 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3708 (i * sizeof(u32)),
3709 be32_to_cpu(fw_data[i]));
3710
3711 total_len -= be32_to_cpu(fw_hdr->len);
3712
3713 /* Advance to next fragment */
3714 fw_hdr = (struct tg3_firmware_hdr *)
3715 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3716 } while (total_len > 0);
3717
3718 err = 0;
3719
3720 out:
3721 return err;
3722 }
3723
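/* Halt a RISC processor and point its program counter at the freshly
 * loaded image. The PC write is retried a few times since the CPU may
 * still be running its old code when the first write lands.
 */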
3724 /* tp->lock is held. */
3725 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3726 {
3727 int i;
3728 const int iters = 5;
3729
3730 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731 tw32_f(cpu_base + CPU_PC, pc);
3732
3733 for (i = 0; i < iters; i++) {
3734 if (tr32(cpu_base + CPU_PC) == pc)
3735 break;
3736 tw32(cpu_base + CPU_STATE, 0xffffffff);
3737 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3738 tw32_f(cpu_base + CPU_PC, pc);
3739 udelay(1000);
3740 }
3741
3742 return (i == iters) ? -EBUSY : 0;
3743 }
3744
3745 /* tp->lock is held. */
3746 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3747 {
3748 const struct tg3_firmware_hdr *fw_hdr;
3749 int err;
3750
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752
3753 /* Firmware blob starts with version numbers, followed by
3754 * start address and length. We are setting the complete length:
3755 * length = end_address_of_bss - start_address_of_text.
3756 * The remainder is the blob to be loaded contiguously
3757 * from the start address. */
3758
3759 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3760 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3761 fw_hdr);
3762 if (err)
3763 return err;
3764
3765 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3766 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3767 fw_hdr);
3768 if (err)
3769 return err;
3770
3771 /* Now startup only the RX cpu. */
3772 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3773 be32_to_cpu(fw_hdr->base_addr));
3774 if (err) {
3775 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3776 "should be %08x\n", __func__,
3777 tr32(RX_CPU_BASE + CPU_PC),
3778 be32_to_cpu(fw_hdr->base_addr));
3779 return -ENODEV;
3780 }
3781
3782 tg3_rxcpu_resume(tp);
3783
3784 return 0;
3785 }
3786
3787 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3788 {
3789 const int iters = 1000;
3790 int i;
3791 u32 val;
3792
3793 /* Wait for boot code to complete initialization and enter service
3794 * loop. It is then safe to download service patches.
3795 */
3796 for (i = 0; i < iters; i++) {
3797 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3798 break;
3799
3800 udelay(10);
3801 }
3802
3803 if (i == iters) {
3804 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3805 return -EBUSY;
3806 }
3807
3808 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3809 if (val & 0xff) {
3810 netdev_warn(tp->dev,
3811 "Other patches exist. Not downloading EEE patch\n");
3812 return -EEXIST;
3813 }
3814
3815 return 0;
3816 }
3817
3818 /* tp->lock is held. */
3819 static void tg3_load_57766_firmware(struct tg3 *tp)
3820 {
3821 struct tg3_firmware_hdr *fw_hdr;
3822
3823 if (!tg3_flag(tp, NO_NVRAM))
3824 return;
3825
3826 if (tg3_validate_rxcpu_state(tp))
3827 return;
3828
3829 if (!tp->fw)
3830 return;
3831
3832 /* This firmware blob has a different format than older firmware
3833 * releases as given below. The main difference is we have fragmented
3834 * data to be written to non-contiguous locations.
3835 *
3836 * In the beginning we have a firmware header identical to other
3837 * firmware which consists of version, base addr and length. The length
3838 * here is unused and set to 0xffffffff.
3839 *
3840 * This is followed by a series of firmware fragments which are
3841 * individually identical to older firmware images, i.e. each has a
3842 * firmware header followed by the data for that fragment. The version
3843 * field of the individual fragment header is unused.
3844 */
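/* A sketch of the resulting blob layout:
 *
 *	+-------------------------+  main header (len == 0xffffffff)
 *	+-------------------------+  fragment 1 header (len == hdr + data)
 *	|     fragment 1 data     |
 *	+-------------------------+  fragment 2 header
 *	|     fragment 2 data     |
 *	+-------------------------+  ...
 */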
3845
3846 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3847 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3848 return;
3849
3850 if (tg3_rxcpu_pause(tp))
3851 return;
3852
3853 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3854 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3855
3856 tg3_rxcpu_resume(tp);
3857 }
3858
3859 /* tp->lock is held. */
3860 static int tg3_load_tso_firmware(struct tg3 *tp)
3861 {
3862 const struct tg3_firmware_hdr *fw_hdr;
3863 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3864 int err;
3865
3866 if (!tg3_flag(tp, FW_TSO))
3867 return 0;
3868
3869 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870
3871 /* Firmware blob starts with version numbers, followed by
3872 * start address and length. We are setting the complete length:
3873 * length = end_address_of_bss - start_address_of_text.
3874 * The remainder is the blob to be loaded contiguously
3875 * from the start address. */
3876
3877 cpu_scratch_size = tp->fw_len;
3878
3879 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3880 cpu_base = RX_CPU_BASE;
3881 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3882 } else {
3883 cpu_base = TX_CPU_BASE;
3884 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3885 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3886 }
3887
3888 err = tg3_load_firmware_cpu(tp, cpu_base,
3889 cpu_scratch_base, cpu_scratch_size,
3890 fw_hdr);
3891 if (err)
3892 return err;
3893
3894 /* Now startup the cpu. */
3895 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3896 be32_to_cpu(fw_hdr->base_addr));
3897 if (err) {
3898 netdev_err(tp->dev,
3899 "%s fails to set CPU PC, is %08x should be %08x\n",
3900 __func__, tr32(cpu_base + CPU_PC),
3901 be32_to_cpu(fw_hdr->base_addr));
3902 return -ENODEV;
3903 }
3904
3905 tg3_resume_cpu(tp, cpu_base);
3906 return 0;
3907 }
3908
3909
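/* Program the station address into the MAC. The hardware splits the
 * address across two registers; for example 00:11:22:33:44:55 is
 * written as addr_high = 0x0011 and addr_low = 0x22334455. The value
 * is mirrored into all four MAC_ADDR slots (and, on 5703/5704, the
 * twelve extended slots) and also seeds the TX backoff algorithm.
 */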
3910 /* tp->lock is held. */
3911 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3912 {
3913 u32 addr_high, addr_low;
3914 int i;
3915
3916 addr_high = ((tp->dev->dev_addr[0] << 8) |
3917 tp->dev->dev_addr[1]);
3918 addr_low = ((tp->dev->dev_addr[2] << 24) |
3919 (tp->dev->dev_addr[3] << 16) |
3920 (tp->dev->dev_addr[4] << 8) |
3921 (tp->dev->dev_addr[5] << 0));
3922 for (i = 0; i < 4; i++) {
3923 if (i == 1 && skip_mac_1)
3924 continue;
3925 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3926 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3927 }
3928
3929 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3930 tg3_asic_rev(tp) == ASIC_REV_5704) {
3931 for (i = 0; i < 12; i++) {
3932 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3933 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3934 }
3935 }
3936
3937 addr_high = (tp->dev->dev_addr[0] +
3938 tp->dev->dev_addr[1] +
3939 tp->dev->dev_addr[2] +
3940 tp->dev->dev_addr[3] +
3941 tp->dev->dev_addr[4] +
3942 tp->dev->dev_addr[5]) &
3943 TX_BACKOFF_SEED_MASK;
3944 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3945 }
3946
3947 static void tg3_enable_register_access(struct tg3 *tp)
3948 {
3949 /*
3950 * Make sure register accesses (indirect or otherwise) will function
3951 * correctly.
3952 */
3953 pci_write_config_dword(tp->pdev,
3954 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3955 }
3956
3957 static int tg3_power_up(struct tg3 *tp)
3958 {
3959 int err;
3960
3961 tg3_enable_register_access(tp);
3962
3963 err = pci_set_power_state(tp->pdev, PCI_D0);
3964 if (!err) {
3965 /* Switch out of Vaux if it is a NIC */
3966 tg3_pwrsrc_switch_to_vmain(tp);
3967 } else {
3968 netdev_err(tp->dev, "Transition to D0 failed\n");
3969 }
3970
3971 return err;
3972 }
3973
3974 static int tg3_setup_phy(struct tg3 *, bool);
3975
3976 static int tg3_power_down_prepare(struct tg3 *tp)
3977 {
3978 u32 misc_host_ctrl;
3979 bool device_should_wake, do_low_power;
3980
3981 tg3_enable_register_access(tp);
3982
3983 /* Restore the CLKREQ setting. */
3984 if (tg3_flag(tp, CLKREQ_BUG))
3985 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3986 PCI_EXP_LNKCTL_CLKREQ_EN);
3987
3988 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3989 tw32(TG3PCI_MISC_HOST_CTRL,
3990 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3991
3992 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3993 tg3_flag(tp, WOL_ENABLE);
3994
3995 if (tg3_flag(tp, USE_PHYLIB)) {
3996 do_low_power = false;
3997 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3998 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3999 struct phy_device *phydev;
4000 u32 phyid, advertising;
4001
4002 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4003
4004 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4005
4006 tp->link_config.speed = phydev->speed;
4007 tp->link_config.duplex = phydev->duplex;
4008 tp->link_config.autoneg = phydev->autoneg;
4009 tp->link_config.advertising = phydev->advertising;
4010
4011 advertising = ADVERTISED_TP |
4012 ADVERTISED_Pause |
4013 ADVERTISED_Autoneg |
4014 ADVERTISED_10baseT_Half;
4015
4016 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4017 if (tg3_flag(tp, WOL_SPEED_100MB))
4018 advertising |=
4019 ADVERTISED_100baseT_Half |
4020 ADVERTISED_100baseT_Full |
4021 ADVERTISED_10baseT_Full;
4022 else
4023 advertising |= ADVERTISED_10baseT_Full;
4024 }
4025
4026 phydev->advertising = advertising;
4027
4028 phy_start_aneg(phydev);
4029
4030 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4031 if (phyid != PHY_ID_BCMAC131) {
4032 phyid &= PHY_BCM_OUI_MASK;
4033 if (phyid == PHY_BCM_OUI_1 ||
4034 phyid == PHY_BCM_OUI_2 ||
4035 phyid == PHY_BCM_OUI_3)
4036 do_low_power = true;
4037 }
4038 }
4039 } else {
4040 do_low_power = true;
4041
4042 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4043 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4044
4045 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4046 tg3_setup_phy(tp, false);
4047 }
4048
4049 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4050 u32 val;
4051
4052 val = tr32(GRC_VCPU_EXT_CTRL);
4053 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4054 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4055 int i;
4056 u32 val;
4057
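/* No ASF firmware: wait up to ~200 ms for the firmware status
 * mailbox to read back ~MAGIC1 before continuing the power-down.
 */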
4058 for (i = 0; i < 200; i++) {
4059 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4060 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4061 break;
4062 msleep(1);
4063 }
4064 }
4065 if (tg3_flag(tp, WOL_CAP))
4066 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4067 WOL_DRV_STATE_SHUTDOWN |
4068 WOL_DRV_WOL |
4069 WOL_SET_MAGIC_PKT);
4070
4071 if (device_should_wake) {
4072 u32 mac_mode;
4073
4074 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4075 if (do_low_power &&
4076 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4077 tg3_phy_auxctl_write(tp,
4078 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4079 MII_TG3_AUXCTL_PCTL_WOL_EN |
4080 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4081 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4082 udelay(40);
4083 }
4084
4085 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4086 mac_mode = MAC_MODE_PORT_MODE_GMII;
4087 else if (tp->phy_flags &
4088 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4089 if (tp->link_config.active_speed == SPEED_1000)
4090 mac_mode = MAC_MODE_PORT_MODE_GMII;
4091 else
4092 mac_mode = MAC_MODE_PORT_MODE_MII;
4093 } else
4094 mac_mode = MAC_MODE_PORT_MODE_MII;
4095
4096 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4097 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4098 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4099 SPEED_100 : SPEED_10;
4100 if (tg3_5700_link_polarity(tp, speed))
4101 mac_mode |= MAC_MODE_LINK_POLARITY;
4102 else
4103 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4104 }
4105 } else {
4106 mac_mode = MAC_MODE_PORT_MODE_TBI;
4107 }
4108
4109 if (!tg3_flag(tp, 5750_PLUS))
4110 tw32(MAC_LED_CTRL, tp->led_ctrl);
4111
4112 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4113 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4114 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4115 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4116
4117 if (tg3_flag(tp, ENABLE_APE))
4118 mac_mode |= MAC_MODE_APE_TX_EN |
4119 MAC_MODE_APE_RX_EN |
4120 MAC_MODE_TDE_ENABLE;
4121
4122 tw32_f(MAC_MODE, mac_mode);
4123 udelay(100);
4124
4125 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4126 udelay(10);
4127 }
4128
4129 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4130 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4131 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4132 u32 base_val;
4133
4134 base_val = tp->pci_clock_ctrl;
4135 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4136 CLOCK_CTRL_TXCLK_DISABLE);
4137
4138 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4139 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4140 } else if (tg3_flag(tp, 5780_CLASS) ||
4141 tg3_flag(tp, CPMU_PRESENT) ||
4142 tg3_asic_rev(tp) == ASIC_REV_5906) {
4143 /* do nothing */
4144 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4145 u32 newbits1, newbits2;
4146
4147 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4148 tg3_asic_rev(tp) == ASIC_REV_5701) {
4149 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4150 CLOCK_CTRL_TXCLK_DISABLE |
4151 CLOCK_CTRL_ALTCLK);
4152 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4153 } else if (tg3_flag(tp, 5705_PLUS)) {
4154 newbits1 = CLOCK_CTRL_625_CORE;
4155 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4156 } else {
4157 newbits1 = CLOCK_CTRL_ALTCLK;
4158 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4159 }
4160
4161 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4162 40);
4163
4164 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4165 40);
4166
4167 if (!tg3_flag(tp, 5705_PLUS)) {
4168 u32 newbits3;
4169
4170 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4171 tg3_asic_rev(tp) == ASIC_REV_5701) {
4172 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4173 CLOCK_CTRL_TXCLK_DISABLE |
4174 CLOCK_CTRL_44MHZ_CORE);
4175 } else {
4176 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4177 }
4178
4179 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4180 tp->pci_clock_ctrl | newbits3, 40);
4181 }
4182 }
4183
4184 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4185 tg3_power_down_phy(tp, do_low_power);
4186
4187 tg3_frob_aux_power(tp, true);
4188
4189 /* Workaround for unstable PLL clock */
4190 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4191 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4192 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4193 u32 val = tr32(0x7d00);
4194
4195 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4196 tw32(0x7d00, val);
4197 if (!tg3_flag(tp, ENABLE_ASF)) {
4198 int err;
4199
4200 err = tg3_nvram_lock(tp);
4201 tg3_halt_cpu(tp, RX_CPU_BASE);
4202 if (!err)
4203 tg3_nvram_unlock(tp);
4204 }
4205 }
4206
4207 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4208
4209 return 0;
4210 }
4211
4212 static void tg3_power_down(struct tg3 *tp)
4213 {
4214 tg3_power_down_prepare(tp);
4215
4216 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4217 pci_set_power_state(tp->pdev, PCI_D3hot);
4218 }
4219
4220 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4221 {
4222 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4223 case MII_TG3_AUX_STAT_10HALF:
4224 *speed = SPEED_10;
4225 *duplex = DUPLEX_HALF;
4226 break;
4227
4228 case MII_TG3_AUX_STAT_10FULL:
4229 *speed = SPEED_10;
4230 *duplex = DUPLEX_FULL;
4231 break;
4232
4233 case MII_TG3_AUX_STAT_100HALF:
4234 *speed = SPEED_100;
4235 *duplex = DUPLEX_HALF;
4236 break;
4237
4238 case MII_TG3_AUX_STAT_100FULL:
4239 *speed = SPEED_100;
4240 *duplex = DUPLEX_FULL;
4241 break;
4242
4243 case MII_TG3_AUX_STAT_1000HALF:
4244 *speed = SPEED_1000;
4245 *duplex = DUPLEX_HALF;
4246 break;
4247
4248 case MII_TG3_AUX_STAT_1000FULL:
4249 *speed = SPEED_1000;
4250 *duplex = DUPLEX_FULL;
4251 break;
4252
4253 default:
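/* FET (10/100) PHYs report speed and duplex via dedicated
 * status bits rather than the SPDMASK encoding above.
 */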
4254 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4255 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4256 SPEED_10;
4257 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4258 DUPLEX_HALF;
4259 break;
4260 }
4261 *speed = SPEED_UNKNOWN;
4262 *duplex = DUPLEX_UNKNOWN;
4263 break;
4264 }
4265 }
4266
4267 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4268 {
4269 int err = 0;
4270 u32 val, new_adv;
4271
4272 new_adv = ADVERTISE_CSMA;
4273 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4274 new_adv |= mii_advertise_flowctrl(flowctrl);
4275
4276 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4277 if (err)
4278 goto done;
4279
4280 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4281 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4282
4283 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4284 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4285 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4286
4287 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4288 if (err)
4289 goto done;
4290 }
4291
4292 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4293 goto done;
4294
4295 tw32(TG3_CPMU_EEE_MODE,
4296 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4297
4298 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4299 if (!err) {
4300 int err2;
4301
4302 val = 0;
4303 /* Advertise 100-BaseTX EEE ability */
4304 if (advertise & ADVERTISED_100baseT_Full)
4305 val |= MDIO_AN_EEE_ADV_100TX;
4306 /* Advertise 1000-BaseT EEE ability */
4307 if (advertise & ADVERTISED_1000baseT_Full)
4308 val |= MDIO_AN_EEE_ADV_1000T;
4309 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4310 if (err)
4311 val = 0;
4312
4313 switch (tg3_asic_rev(tp)) {
4314 case ASIC_REV_5717:
4315 case ASIC_REV_57765:
4316 case ASIC_REV_57766:
4317 case ASIC_REV_5719:
4318 /* If we advertised any EEE abilities above... */
4319 if (val)
4320 val = MII_TG3_DSP_TAP26_ALNOKO |
4321 MII_TG3_DSP_TAP26_RMRXSTO |
4322 MII_TG3_DSP_TAP26_OPCSINPT;
4323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4324 /* Fall through */
4325 case ASIC_REV_5720:
4326 case ASIC_REV_5762:
4327 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4328 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4329 MII_TG3_DSP_CH34TP2_HIBW01);
4330 }
4331
4332 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4333 if (!err)
4334 err = err2;
4335 }
4336
4337 done:
4338 return err;
4339 }
4340
4341 static void tg3_phy_copper_begin(struct tg3 *tp)
4342 {
4343 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4344 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4345 u32 adv, fc;
4346
4347 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4348 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4349 adv = ADVERTISED_10baseT_Half |
4350 ADVERTISED_10baseT_Full;
4351 if (tg3_flag(tp, WOL_SPEED_100MB))
4352 adv |= ADVERTISED_100baseT_Half |
4353 ADVERTISED_100baseT_Full;
4354 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4355 adv |= ADVERTISED_1000baseT_Half |
4356 ADVERTISED_1000baseT_Full;
4357
4358 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4359 } else {
4360 adv = tp->link_config.advertising;
4361 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4362 adv &= ~(ADVERTISED_1000baseT_Half |
4363 ADVERTISED_1000baseT_Full);
4364
4365 fc = tp->link_config.flowctrl;
4366 }
4367
4368 tg3_phy_autoneg_cfg(tp, adv, fc);
4369
4370 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4371 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4372 /* Normally during power down we want to autonegotiate
4373 * the lowest possible speed for WOL. However, to avoid
4374 * link flap, we leave it untouched.
4375 */
4376 return;
4377 }
4378
4379 tg3_writephy(tp, MII_BMCR,
4380 BMCR_ANENABLE | BMCR_ANRESTART);
4381 } else {
4382 int i;
4383 u32 bmcr, orig_bmcr;
4384
4385 tp->link_config.active_speed = tp->link_config.speed;
4386 tp->link_config.active_duplex = tp->link_config.duplex;
4387
4388 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4389 /* With autoneg disabled, the 5715 (which reports ASIC rev
4390 * 5714) only links up when the advertisement register has
4391 * the configured speed enabled.
4392 */
4393 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4394 }
4395
4396 bmcr = 0;
4397 switch (tp->link_config.speed) {
4398 default:
4399 case SPEED_10:
4400 break;
4401
4402 case SPEED_100:
4403 bmcr |= BMCR_SPEED100;
4404 break;
4405
4406 case SPEED_1000:
4407 bmcr |= BMCR_SPEED1000;
4408 break;
4409 }
4410
4411 if (tp->link_config.duplex == DUPLEX_FULL)
4412 bmcr |= BMCR_FULLDPLX;
4413
4414 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4415 (bmcr != orig_bmcr)) {
4416 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4417 for (i = 0; i < 1500; i++) {
4418 u32 tmp;
4419
4420 udelay(10);
4421 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4422 tg3_readphy(tp, MII_BMSR, &tmp))
4423 continue;
4424 if (!(tmp & BMSR_LSTATUS)) {
4425 udelay(40);
4426 break;
4427 }
4428 }
4429 tg3_writephy(tp, MII_BMCR, bmcr);
4430 udelay(40);
4431 }
4432 }
4433 }
4434
4435 static int tg3_phy_pull_config(struct tg3 *tp)
4436 {
4437 int err;
4438 u32 val;
4439
4440 err = tg3_readphy(tp, MII_BMCR, &val);
4441 if (err)
4442 goto done;
4443
4444 if (!(val & BMCR_ANENABLE)) {
4445 tp->link_config.autoneg = AUTONEG_DISABLE;
4446 tp->link_config.advertising = 0;
4447 tg3_flag_clear(tp, PAUSE_AUTONEG);
4448
4449 err = -EIO;
4450
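/* Decode the forced speed from the BMCR speed-select bits:
 * neither bit set = 10 Mb/s, SPEED100 = 100 Mb/s,
 * SPEED1000 = 1000 Mb/s.
 */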
4451 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4452 case 0:
4453 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4454 goto done;
4455
4456 tp->link_config.speed = SPEED_10;
4457 break;
4458 case BMCR_SPEED100:
4459 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4460 goto done;
4461
4462 tp->link_config.speed = SPEED_100;
4463 break;
4464 case BMCR_SPEED1000:
4465 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4466 tp->link_config.speed = SPEED_1000;
4467 break;
4468 }
4469 /* Fall through */
4470 default:
4471 goto done;
4472 }
4473
4474 if (val & BMCR_FULLDPLX)
4475 tp->link_config.duplex = DUPLEX_FULL;
4476 else
4477 tp->link_config.duplex = DUPLEX_HALF;
4478
4479 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4480
4481 err = 0;
4482 goto done;
4483 }
4484
4485 tp->link_config.autoneg = AUTONEG_ENABLE;
4486 tp->link_config.advertising = ADVERTISED_Autoneg;
4487 tg3_flag_set(tp, PAUSE_AUTONEG);
4488
4489 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4490 u32 adv;
4491
4492 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4493 if (err)
4494 goto done;
4495
4496 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4497 tp->link_config.advertising |= adv | ADVERTISED_TP;
4498
4499 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4500 } else {
4501 tp->link_config.advertising |= ADVERTISED_FIBRE;
4502 }
4503
4504 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4505 u32 adv;
4506
4507 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4508 err = tg3_readphy(tp, MII_CTRL1000, &val);
4509 if (err)
4510 goto done;
4511
4512 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4513 } else {
4514 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4515 if (err)
4516 goto done;
4517
4518 adv = tg3_decode_flowctrl_1000X(val);
4519 tp->link_config.flowctrl = adv;
4520
4521 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4522 adv = mii_adv_to_ethtool_adv_x(val);
4523 }
4524
4525 tp->link_config.advertising |= adv;
4526 }
4527
4528 done:
4529 return err;
4530 }
4531
4532 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4533 {
4534 int err;
4535
4536 /* Turn off tap power management and set the extended
4537 * packet length bit. */
4538 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4539
4540 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4541 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4542 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4543 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4544 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4545
4546 udelay(40);
4547
4548 return err;
4549 }
4550
4551 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4552 {
4553 u32 val;
4554 u32 tgtadv = 0;
4555 u32 advertising = tp->link_config.advertising;
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4558 return true;
4559
4560 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4561 return false;
4562
4563 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4564
4566 if (advertising & ADVERTISED_100baseT_Full)
4567 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4568 if (advertising & ADVERTISED_1000baseT_Full)
4569 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4570
4571 if (val != tgtadv)
4572 return false;
4573
4574 return true;
4575 }
4576
4577 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4578 {
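/* Verify that the PHY's advertisement registers still match the
 * requested link configuration; a mismatch makes the caller
 * restart autonegotiation.
 */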
4579 u32 advmsk, tgtadv, advertising;
4580
4581 advertising = tp->link_config.advertising;
4582 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4583
4584 advmsk = ADVERTISE_ALL;
4585 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4586 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4587 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4588 }
4589
4590 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4591 return false;
4592
4593 if ((*lcladv & advmsk) != tgtadv)
4594 return false;
4595
4596 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4597 u32 tg3_ctrl;
4598
4599 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4600
4601 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4602 return false;
4603
4604 if (tgtadv &&
4605 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4606 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4607 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4608 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4609 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4610 } else {
4611 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4612 }
4613
4614 if (tg3_ctrl != tgtadv)
4615 return false;
4616 }
4617
4618 return true;
4619 }
4620
4621 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4622 {
4623 u32 lpeth = 0;
4624
4625 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4626 u32 val;
4627
4628 if (tg3_readphy(tp, MII_STAT1000, &val))
4629 return false;
4630
4631 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4632 }
4633
4634 if (tg3_readphy(tp, MII_LPA, rmtadv))
4635 return false;
4636
4637 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4638 tp->link_config.rmt_adv = lpeth;
4639
4640 return true;
4641 }
4642
4643 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4644 {
4645 if (curr_link_up != tp->link_up) {
4646 if (curr_link_up) {
4647 netif_carrier_on(tp->dev);
4648 } else {
4649 netif_carrier_off(tp->dev);
4650 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4651 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4652 }
4653
4654 tg3_link_report(tp);
4655 return true;
4656 }
4657
4658 return false;
4659 }
4660
4661 static void tg3_clear_mac_status(struct tg3 *tp)
4662 {
4663 tw32(MAC_EVENT, 0);
4664
4665 tw32_f(MAC_STATUS,
4666 MAC_STATUS_SYNC_CHANGED |
4667 MAC_STATUS_CFG_CHANGED |
4668 MAC_STATUS_MI_COMPLETION |
4669 MAC_STATUS_LNKSTATE_CHANGED);
4670 udelay(40);
4671 }
4672
4673 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4674 {
4675 bool current_link_up;
4676 u32 bmsr, val;
4677 u32 lcl_adv, rmt_adv;
4678 u16 current_speed;
4679 u8 current_duplex;
4680 int i, err;
4681
4682 tg3_clear_mac_status(tp);
4683
4684 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4685 tw32_f(MAC_MI_MODE,
4686 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4687 udelay(80);
4688 }
4689
4690 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4691
4692 /* Some third-party PHYs need to be reset on link going
4693 * down.
4694 */
4695 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4696 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4697 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4698 tp->link_up) {
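/* BMSR latches link-down events, so read it twice; the second
 * read reflects the current link state.
 */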
4699 tg3_readphy(tp, MII_BMSR, &bmsr);
4700 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4701 !(bmsr & BMSR_LSTATUS))
4702 force_reset = true;
4703 }
4704 if (force_reset)
4705 tg3_phy_reset(tp);
4706
4707 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4708 tg3_readphy(tp, MII_BMSR, &bmsr);
4709 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4710 !tg3_flag(tp, INIT_COMPLETE))
4711 bmsr = 0;
4712
4713 if (!(bmsr & BMSR_LSTATUS)) {
4714 err = tg3_init_5401phy_dsp(tp);
4715 if (err)
4716 return err;
4717
4718 tg3_readphy(tp, MII_BMSR, &bmsr);
4719 for (i = 0; i < 1000; i++) {
4720 udelay(10);
4721 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4722 (bmsr & BMSR_LSTATUS)) {
4723 udelay(40);
4724 break;
4725 }
4726 }
4727
4728 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4729 TG3_PHY_REV_BCM5401_B0 &&
4730 !(bmsr & BMSR_LSTATUS) &&
4731 tp->link_config.active_speed == SPEED_1000) {
4732 err = tg3_phy_reset(tp);
4733 if (!err)
4734 err = tg3_init_5401phy_dsp(tp);
4735 if (err)
4736 return err;
4737 }
4738 }
4739 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4740 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4741 /* 5701 {A0,B0} CRC bug workaround */
4742 tg3_writephy(tp, 0x15, 0x0a75);
4743 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4744 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4745 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4746 }
4747
4748 /* Clear pending interrupts... */
4749 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4750 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4751
4752 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4753 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4754 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4755 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4756
4757 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4758 tg3_asic_rev(tp) == ASIC_REV_5701) {
4759 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4760 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4761 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4762 else
4763 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4764 }
4765
4766 current_link_up = false;
4767 current_speed = SPEED_UNKNOWN;
4768 current_duplex = DUPLEX_UNKNOWN;
4769 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4770 tp->link_config.rmt_adv = 0;
4771
4772 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4773 err = tg3_phy_auxctl_read(tp,
4774 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4775 &val);
4776 if (!err && !(val & (1 << 10))) {
4777 tg3_phy_auxctl_write(tp,
4778 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4779 val | (1 << 10));
4780 goto relink;
4781 }
4782 }
4783
4784 bmsr = 0;
4785 for (i = 0; i < 100; i++) {
4786 tg3_readphy(tp, MII_BMSR, &bmsr);
4787 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788 (bmsr & BMSR_LSTATUS))
4789 break;
4790 udelay(40);
4791 }
4792
4793 if (bmsr & BMSR_LSTATUS) {
4794 u32 aux_stat, bmcr;
4795
4796 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4797 for (i = 0; i < 2000; i++) {
4798 udelay(10);
4799 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4800 aux_stat)
4801 break;
4802 }
4803
4804 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4805 &current_speed,
4806 &current_duplex);
4807
4808 bmcr = 0;
4809 for (i = 0; i < 200; i++) {
4810 tg3_readphy(tp, MII_BMCR, &bmcr);
4811 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4812 continue;
4813 if (bmcr && bmcr != 0x7fff)
4814 break;
4815 udelay(10);
4816 }
4817
4818 lcl_adv = 0;
4819 rmt_adv = 0;
4820
4821 tp->link_config.active_speed = current_speed;
4822 tp->link_config.active_duplex = current_duplex;
4823
4824 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4825 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4826
4827 if ((bmcr & BMCR_ANENABLE) &&
4828 eee_config_ok &&
4829 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4830 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4831 current_link_up = true;
4832
4833 /* EEE settings changes take effect only after a phy
4834 * reset. If we have skipped a reset due to Link Flap
4835 * Avoidance being enabled, do it now.
4836 */
4837 if (!eee_config_ok &&
4838 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4839 !force_reset)
4840 tg3_phy_reset(tp);
4841 } else {
4842 if (!(bmcr & BMCR_ANENABLE) &&
4843 tp->link_config.speed == current_speed &&
4844 tp->link_config.duplex == current_duplex) {
4845 current_link_up = true;
4846 }
4847 }
4848
4849 if (current_link_up &&
4850 tp->link_config.active_duplex == DUPLEX_FULL) {
4851 u32 reg, bit;
4852
4853 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4854 reg = MII_TG3_FET_GEN_STAT;
4855 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4856 } else {
4857 reg = MII_TG3_EXT_STAT;
4858 bit = MII_TG3_EXT_STAT_MDIX;
4859 }
4860
4861 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4862 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4863
4864 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4865 }
4866 }
4867
4868 relink:
4869 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4870 tg3_phy_copper_begin(tp);
4871
4872 if (tg3_flag(tp, ROBOSWITCH)) {
4873 current_link_up = true;
4874 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4875 current_speed = SPEED_1000;
4876 current_duplex = DUPLEX_FULL;
4877 tp->link_config.active_speed = current_speed;
4878 tp->link_config.active_duplex = current_duplex;
4879 }
4880
4881 tg3_readphy(tp, MII_BMSR, &bmsr);
4882 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4883 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4884 current_link_up = true;
4885 }
4886
4887 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4888 if (current_link_up) {
4889 if (tp->link_config.active_speed == SPEED_100 ||
4890 tp->link_config.active_speed == SPEED_10)
4891 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4892 else
4893 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4894 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4895 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4896 else
4897 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4898
4899 /* For the 5750 core in the BCM4785 chip to work properly in
4900 * RGMII mode, the LED Control Register must be set up.
4901 */
4902 if (tg3_flag(tp, RGMII_MODE)) {
4903 u32 led_ctrl = tr32(MAC_LED_CTRL);
4904 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4905
4906 if (tp->link_config.active_speed == SPEED_10)
4907 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4908 else if (tp->link_config.active_speed == SPEED_100)
4909 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4910 LED_CTRL_100MBPS_ON);
4911 else if (tp->link_config.active_speed == SPEED_1000)
4912 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4913 LED_CTRL_1000MBPS_ON);
4914
4915 tw32(MAC_LED_CTRL, led_ctrl);
4916 udelay(40);
4917 }
4918
4919 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4920 if (tp->link_config.active_duplex == DUPLEX_HALF)
4921 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4922
4923 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4924 if (current_link_up &&
4925 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4926 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4927 else
4928 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4929 }
4930
4931 /* ??? Without this setting Netgear GA302T PHY does not
4932 * ??? send/receive packets...
4933 */
4934 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4935 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4936 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4937 tw32_f(MAC_MI_MODE, tp->mi_mode);
4938 udelay(80);
4939 }
4940
4941 tw32_f(MAC_MODE, tp->mac_mode);
4942 udelay(40);
4943
4944 tg3_phy_eee_adjust(tp, current_link_up);
4945
4946 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4947 /* Polled via timer. */
4948 tw32_f(MAC_EVENT, 0);
4949 } else {
4950 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4951 }
4952 udelay(40);
4953
4954 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4955 current_link_up &&
4956 tp->link_config.active_speed == SPEED_1000 &&
4957 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4958 udelay(120);
4959 tw32_f(MAC_STATUS,
4960 (MAC_STATUS_SYNC_CHANGED |
4961 MAC_STATUS_CFG_CHANGED));
4962 udelay(40);
4963 tg3_write_mem(tp,
4964 NIC_SRAM_FIRMWARE_MBOX,
4965 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4966 }
4967
4968 /* Prevent send BD corruption. */
4969 if (tg3_flag(tp, CLKREQ_BUG)) {
4970 if (tp->link_config.active_speed == SPEED_100 ||
4971 tp->link_config.active_speed == SPEED_10)
4972 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4973 PCI_EXP_LNKCTL_CLKREQ_EN);
4974 else
4975 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4976 PCI_EXP_LNKCTL_CLKREQ_EN);
4977 }
4978
4979 tg3_test_and_report_link_chg(tp, current_link_up);
4980
4981 return 0;
4982 }
4983
4984 struct tg3_fiber_aneginfo {
4985 int state;
4986 #define ANEG_STATE_UNKNOWN 0
4987 #define ANEG_STATE_AN_ENABLE 1
4988 #define ANEG_STATE_RESTART_INIT 2
4989 #define ANEG_STATE_RESTART 3
4990 #define ANEG_STATE_DISABLE_LINK_OK 4
4991 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4992 #define ANEG_STATE_ABILITY_DETECT 6
4993 #define ANEG_STATE_ACK_DETECT_INIT 7
4994 #define ANEG_STATE_ACK_DETECT 8
4995 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4996 #define ANEG_STATE_COMPLETE_ACK 10
4997 #define ANEG_STATE_IDLE_DETECT_INIT 11
4998 #define ANEG_STATE_IDLE_DETECT 12
4999 #define ANEG_STATE_LINK_OK 13
5000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5001 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5002
5003 u32 flags;
5004 #define MR_AN_ENABLE 0x00000001
5005 #define MR_RESTART_AN 0x00000002
5006 #define MR_AN_COMPLETE 0x00000004
5007 #define MR_PAGE_RX 0x00000008
5008 #define MR_NP_LOADED 0x00000010
5009 #define MR_TOGGLE_TX 0x00000020
5010 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5011 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5012 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5013 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5016 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5017 #define MR_TOGGLE_RX 0x00002000
5018 #define MR_NP_RX 0x00004000
5019
5020 #define MR_LINK_OK 0x80000000
5021
5022 unsigned long link_time, cur_time;
5023
5024 u32 ability_match_cfg;
5025 int ability_match_count;
5026
5027 char ability_match, idle_match, ack_match;
5028
5029 u32 txconfig, rxconfig;
5030 #define ANEG_CFG_NP 0x00000080
5031 #define ANEG_CFG_ACK 0x00000040
5032 #define ANEG_CFG_RF2 0x00000020
5033 #define ANEG_CFG_RF1 0x00000010
5034 #define ANEG_CFG_PS2 0x00000001
5035 #define ANEG_CFG_PS1 0x00008000
5036 #define ANEG_CFG_HD 0x00004000
5037 #define ANEG_CFG_FD 0x00002000
5038 #define ANEG_CFG_INVAL 0x00001f06
5039
5040 };
5041 #define ANEG_OK 0
5042 #define ANEG_DONE 1
5043 #define ANEG_TIMER_ENAB 2
5044 #define ANEG_FAILED -1
5045
5046 #define ANEG_STATE_SETTLE_TIME 10000
5047
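/* Software 802.3z autoneg state machine for fiber links.  It is
 * clocked roughly once per microsecond by fiber_autoneg(), so
 * ANEG_STATE_SETTLE_TIME (10000 ticks) corresponds to about 10 ms.
 */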
5048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5049 struct tg3_fiber_aneginfo *ap)
5050 {
5051 u16 flowctrl;
5052 unsigned long delta;
5053 u32 rx_cfg_reg;
5054 int ret;
5055
5056 if (ap->state == ANEG_STATE_UNKNOWN) {
5057 ap->rxconfig = 0;
5058 ap->link_time = 0;
5059 ap->cur_time = 0;
5060 ap->ability_match_cfg = 0;
5061 ap->ability_match_count = 0;
5062 ap->ability_match = 0;
5063 ap->idle_match = 0;
5064 ap->ack_match = 0;
5065 }
5066 ap->cur_time++;
5067
5068 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5069 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5070
5071 if (rx_cfg_reg != ap->ability_match_cfg) {
5072 ap->ability_match_cfg = rx_cfg_reg;
5073 ap->ability_match = 0;
5074 ap->ability_match_count = 0;
5075 } else {
5076 if (++ap->ability_match_count > 1) {
5077 ap->ability_match = 1;
5078 ap->ability_match_cfg = rx_cfg_reg;
5079 }
5080 }
5081 if (rx_cfg_reg & ANEG_CFG_ACK)
5082 ap->ack_match = 1;
5083 else
5084 ap->ack_match = 0;
5085
5086 ap->idle_match = 0;
5087 } else {
5088 ap->idle_match = 1;
5089 ap->ability_match_cfg = 0;
5090 ap->ability_match_count = 0;
5091 ap->ability_match = 0;
5092 ap->ack_match = 0;
5093
5094 rx_cfg_reg = 0;
5095 }
5096
5097 ap->rxconfig = rx_cfg_reg;
5098 ret = ANEG_OK;
5099
5100 switch (ap->state) {
5101 case ANEG_STATE_UNKNOWN:
5102 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5103 ap->state = ANEG_STATE_AN_ENABLE;
5104
5105 /* fallthru */
5106 case ANEG_STATE_AN_ENABLE:
5107 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5108 if (ap->flags & MR_AN_ENABLE) {
5109 ap->link_time = 0;
5110 ap->cur_time = 0;
5111 ap->ability_match_cfg = 0;
5112 ap->ability_match_count = 0;
5113 ap->ability_match = 0;
5114 ap->idle_match = 0;
5115 ap->ack_match = 0;
5116
5117 ap->state = ANEG_STATE_RESTART_INIT;
5118 } else {
5119 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5120 }
5121 break;
5122
5123 case ANEG_STATE_RESTART_INIT:
5124 ap->link_time = ap->cur_time;
5125 ap->flags &= ~(MR_NP_LOADED);
5126 ap->txconfig = 0;
5127 tw32(MAC_TX_AUTO_NEG, 0);
5128 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5129 tw32_f(MAC_MODE, tp->mac_mode);
5130 udelay(40);
5131
5132 ret = ANEG_TIMER_ENAB;
5133 ap->state = ANEG_STATE_RESTART;
5134
5135 /* fallthru */
5136 case ANEG_STATE_RESTART:
5137 delta = ap->cur_time - ap->link_time;
5138 if (delta > ANEG_STATE_SETTLE_TIME)
5139 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5140 else
5141 ret = ANEG_TIMER_ENAB;
5142 break;
5143
5144 case ANEG_STATE_DISABLE_LINK_OK:
5145 ret = ANEG_DONE;
5146 break;
5147
5148 case ANEG_STATE_ABILITY_DETECT_INIT:
5149 ap->flags &= ~(MR_TOGGLE_TX);
5150 ap->txconfig = ANEG_CFG_FD;
5151 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5152 if (flowctrl & ADVERTISE_1000XPAUSE)
5153 ap->txconfig |= ANEG_CFG_PS1;
5154 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5155 ap->txconfig |= ANEG_CFG_PS2;
5156 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5157 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5158 tw32_f(MAC_MODE, tp->mac_mode);
5159 udelay(40);
5160
5161 ap->state = ANEG_STATE_ABILITY_DETECT;
5162 break;
5163
5164 case ANEG_STATE_ABILITY_DETECT:
5165 if (ap->ability_match != 0 && ap->rxconfig != 0)
5166 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5167 break;
5168
5169 case ANEG_STATE_ACK_DETECT_INIT:
5170 ap->txconfig |= ANEG_CFG_ACK;
5171 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5172 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5173 tw32_f(MAC_MODE, tp->mac_mode);
5174 udelay(40);
5175
5176 ap->state = ANEG_STATE_ACK_DETECT;
5177
5178 /* fallthru */
5179 case ANEG_STATE_ACK_DETECT:
5180 if (ap->ack_match != 0) {
5181 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5182 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5183 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5184 } else {
5185 ap->state = ANEG_STATE_AN_ENABLE;
5186 }
5187 } else if (ap->ability_match != 0 &&
5188 ap->rxconfig == 0) {
5189 ap->state = ANEG_STATE_AN_ENABLE;
5190 }
5191 break;
5192
5193 case ANEG_STATE_COMPLETE_ACK_INIT:
5194 if (ap->rxconfig & ANEG_CFG_INVAL) {
5195 ret = ANEG_FAILED;
5196 break;
5197 }
5198 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5199 MR_LP_ADV_HALF_DUPLEX |
5200 MR_LP_ADV_SYM_PAUSE |
5201 MR_LP_ADV_ASYM_PAUSE |
5202 MR_LP_ADV_REMOTE_FAULT1 |
5203 MR_LP_ADV_REMOTE_FAULT2 |
5204 MR_LP_ADV_NEXT_PAGE |
5205 MR_TOGGLE_RX |
5206 MR_NP_RX);
5207 if (ap->rxconfig & ANEG_CFG_FD)
5208 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5209 if (ap->rxconfig & ANEG_CFG_HD)
5210 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5211 if (ap->rxconfig & ANEG_CFG_PS1)
5212 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5213 if (ap->rxconfig & ANEG_CFG_PS2)
5214 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5215 if (ap->rxconfig & ANEG_CFG_RF1)
5216 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5217 if (ap->rxconfig & ANEG_CFG_RF2)
5218 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5219 if (ap->rxconfig & ANEG_CFG_NP)
5220 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5221
5222 ap->link_time = ap->cur_time;
5223
5224 ap->flags ^= (MR_TOGGLE_TX);
5225 if (ap->rxconfig & 0x0008)
5226 ap->flags |= MR_TOGGLE_RX;
5227 if (ap->rxconfig & ANEG_CFG_NP)
5228 ap->flags |= MR_NP_RX;
5229 ap->flags |= MR_PAGE_RX;
5230
5231 ap->state = ANEG_STATE_COMPLETE_ACK;
5232 ret = ANEG_TIMER_ENAB;
5233 break;
5234
5235 case ANEG_STATE_COMPLETE_ACK:
5236 if (ap->ability_match != 0 &&
5237 ap->rxconfig == 0) {
5238 ap->state = ANEG_STATE_AN_ENABLE;
5239 break;
5240 }
5241 delta = ap->cur_time - ap->link_time;
5242 if (delta > ANEG_STATE_SETTLE_TIME) {
5243 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5244 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5245 } else {
5246 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5247 !(ap->flags & MR_NP_RX)) {
5248 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5249 } else {
5250 ret = ANEG_FAILED;
5251 }
5252 }
5253 }
5254 break;
5255
5256 case ANEG_STATE_IDLE_DETECT_INIT:
5257 ap->link_time = ap->cur_time;
5258 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5259 tw32_f(MAC_MODE, tp->mac_mode);
5260 udelay(40);
5261
5262 ap->state = ANEG_STATE_IDLE_DETECT;
5263 ret = ANEG_TIMER_ENAB;
5264 break;
5265
5266 case ANEG_STATE_IDLE_DETECT:
5267 if (ap->ability_match != 0 &&
5268 ap->rxconfig == 0) {
5269 ap->state = ANEG_STATE_AN_ENABLE;
5270 break;
5271 }
5272 delta = ap->cur_time - ap->link_time;
5273 if (delta > ANEG_STATE_SETTLE_TIME) {
5274 /* XXX another gem from the Broadcom driver :( */
5275 ap->state = ANEG_STATE_LINK_OK;
5276 }
5277 break;
5278
5279 case ANEG_STATE_LINK_OK:
5280 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5281 ret = ANEG_DONE;
5282 break;
5283
5284 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5285 /* ??? unimplemented */
5286 break;
5287
5288 case ANEG_STATE_NEXT_PAGE_WAIT:
5289 /* ??? unimplemented */
5290 break;
5291
5292 default:
5293 ret = ANEG_FAILED;
5294 break;
5295 }
5296
5297 return ret;
5298 }
5299
5300 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5301 {
5302 int res = 0;
5303 struct tg3_fiber_aneginfo aninfo;
5304 int status = ANEG_FAILED;
5305 unsigned int tick;
5306 u32 tmp;
5307
5308 tw32_f(MAC_TX_AUTO_NEG, 0);
5309
5310 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5311 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5312 udelay(40);
5313
5314 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5315 udelay(40);
5316
5317 memset(&aninfo, 0, sizeof(aninfo));
5318 aninfo.flags |= MR_AN_ENABLE;
5319 aninfo.state = ANEG_STATE_UNKNOWN;
5320 aninfo.cur_time = 0;
5321 tick = 0;
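/* Step the state machine once per microsecond, for at most ~195 ms,
 * until it reports done or failed.
 */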
5322 while (++tick < 195000) {
5323 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5324 if (status == ANEG_DONE || status == ANEG_FAILED)
5325 break;
5326
5327 udelay(1);
5328 }
5329
5330 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331 tw32_f(MAC_MODE, tp->mac_mode);
5332 udelay(40);
5333
5334 *txflags = aninfo.txconfig;
5335 *rxflags = aninfo.flags;
5336
5337 if (status == ANEG_DONE &&
5338 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5339 MR_LP_ADV_FULL_DUPLEX)))
5340 res = 1;
5341
5342 return res;
5343 }
5344
5345 static void tg3_init_bcm8002(struct tg3 *tp)
5346 {
5347 u32 mac_status = tr32(MAC_STATUS);
5348 int i;
5349
5350 /* Reset when initializing for the first time or when we have a link. */
5351 if (tg3_flag(tp, INIT_COMPLETE) &&
5352 !(mac_status & MAC_STATUS_PCS_SYNCED))
5353 return;
5354
5355 /* Set PLL lock range. */
5356 tg3_writephy(tp, 0x16, 0x8007);
5357
5358 /* SW reset */
5359 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5360
5361 /* Wait for reset to complete. */
5362 /* XXX schedule_timeout() ... */
5363 for (i = 0; i < 500; i++)
5364 udelay(10);
5365
5366 /* Config mode; select PMA/Ch 1 regs. */
5367 tg3_writephy(tp, 0x10, 0x8411);
5368
5369 /* Enable auto-lock and comdet, select txclk for tx. */
5370 tg3_writephy(tp, 0x11, 0x0a10);
5371
5372 tg3_writephy(tp, 0x18, 0x00a0);
5373 tg3_writephy(tp, 0x16, 0x41ff);
5374
5375 /* Assert and deassert POR. */
5376 tg3_writephy(tp, 0x13, 0x0400);
5377 udelay(40);
5378 tg3_writephy(tp, 0x13, 0x0000);
5379
5380 tg3_writephy(tp, 0x11, 0x0a50);
5381 udelay(40);
5382 tg3_writephy(tp, 0x11, 0x0a10);
5383
5384 /* Wait for signal to stabilize */
5385 /* XXX schedule_timeout() ... */
5386 for (i = 0; i < 15000; i++)
5387 udelay(10);
5388
5389 /* Deselect the channel register so we can read the PHYID
5390 * later.
5391 */
5392 tg3_writephy(tp, 0x10, 0x8011);
5393 }
5394
5395 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5396 {
5397 u16 flowctrl;
5398 bool current_link_up;
5399 u32 sg_dig_ctrl, sg_dig_status;
5400 u32 serdes_cfg, expected_sg_dig_ctrl;
5401 int workaround, port_a;
5402
5403 serdes_cfg = 0;
5404 expected_sg_dig_ctrl = 0;
5405 workaround = 0;
5406 port_a = 1;
5407 current_link_up = false;
5408
5409 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5410 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5411 workaround = 1;
5412 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5413 port_a = 0;
5414
5415 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5416 /* preserve bits 20-23 for voltage regulator */
5417 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5418 }
5419
5420 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5421
5422 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5423 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5424 if (workaround) {
5425 u32 val = serdes_cfg;
5426
5427 if (port_a)
5428 val |= 0xc010000;
5429 else
5430 val |= 0x4010000;
5431 tw32_f(MAC_SERDES_CFG, val);
5432 }
5433
5434 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5435 }
5436 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5437 tg3_setup_flow_control(tp, 0, 0);
5438 current_link_up = true;
5439 }
5440 goto out;
5441 }
5442
5443 /* Want auto-negotiation. */
5444 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5445
5446 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5447 if (flowctrl & ADVERTISE_1000XPAUSE)
5448 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5449 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5450 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5451
5452 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5453 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5454 tp->serdes_counter &&
5455 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5456 MAC_STATUS_RCVD_CFG)) ==
5457 MAC_STATUS_PCS_SYNCED)) {
5458 tp->serdes_counter--;
5459 current_link_up = true;
5460 goto out;
5461 }
5462 restart_autoneg:
5463 if (workaround)
5464 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5465 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5466 udelay(5);
5467 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5468
5469 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5471 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5472 MAC_STATUS_SIGNAL_DET)) {
5473 sg_dig_status = tr32(SG_DIG_STATUS);
5474 mac_status = tr32(MAC_STATUS);
5475
5476 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5477 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5478 u32 local_adv = 0, remote_adv = 0;
5479
5480 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5481 local_adv |= ADVERTISE_1000XPAUSE;
5482 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5483 local_adv |= ADVERTISE_1000XPSE_ASYM;
5484
5485 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5486 remote_adv |= LPA_1000XPAUSE;
5487 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5488 remote_adv |= LPA_1000XPAUSE_ASYM;
5489
5490 tp->link_config.rmt_adv =
5491 mii_adv_to_ethtool_adv_x(remote_adv);
5492
5493 tg3_setup_flow_control(tp, local_adv, remote_adv);
5494 current_link_up = true;
5495 tp->serdes_counter = 0;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5498 if (tp->serdes_counter)
5499 tp->serdes_counter--;
5500 else {
5501 if (workaround) {
5502 u32 val = serdes_cfg;
5503
5504 if (port_a)
5505 val |= 0xc010000;
5506 else
5507 val |= 0x4010000;
5508
5509 tw32_f(MAC_SERDES_CFG, val);
5510 }
5511
5512 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5513 udelay(40);
5514
5515 /* Parallel detection: the link is up only if we have
5516 * PCS_SYNC and are not receiving config code words.
5517 */
5518 mac_status = tr32(MAC_STATUS);
5519 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5520 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5521 tg3_setup_flow_control(tp, 0, 0);
5522 current_link_up = true;
5523 tp->phy_flags |=
5524 TG3_PHYFLG_PARALLEL_DETECT;
5525 tp->serdes_counter =
5526 SERDES_PARALLEL_DET_TIMEOUT;
5527 } else
5528 goto restart_autoneg;
5529 }
5530 }
5531 } else {
5532 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5533 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5534 }
5535
5536 out:
5537 return current_link_up;
5538 }
5539
5540 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5541 {
5542 bool current_link_up = false;
5543
5544 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5545 goto out;
5546
5547 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5548 u32 txflags, rxflags;
5549 int i;
5550
5551 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5552 u32 local_adv = 0, remote_adv = 0;
5553
5554 if (txflags & ANEG_CFG_PS1)
5555 local_adv |= ADVERTISE_1000XPAUSE;
5556 if (txflags & ANEG_CFG_PS2)
5557 local_adv |= ADVERTISE_1000XPSE_ASYM;
5558
5559 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5560 remote_adv |= LPA_1000XPAUSE;
5561 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5562 remote_adv |= LPA_1000XPAUSE_ASYM;
5563
5564 tp->link_config.rmt_adv =
5565 mii_adv_to_ethtool_adv_x(remote_adv);
5566
5567 tg3_setup_flow_control(tp, local_adv, remote_adv);
5568
5569 current_link_up = true;
5570 }
5571 for (i = 0; i < 30; i++) {
5572 udelay(20);
5573 tw32_f(MAC_STATUS,
5574 (MAC_STATUS_SYNC_CHANGED |
5575 MAC_STATUS_CFG_CHANGED));
5576 udelay(40);
5577 if ((tr32(MAC_STATUS) &
5578 (MAC_STATUS_SYNC_CHANGED |
5579 MAC_STATUS_CFG_CHANGED)) == 0)
5580 break;
5581 }
5582
5583 mac_status = tr32(MAC_STATUS);
5584 if (!current_link_up &&
5585 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5586 !(mac_status & MAC_STATUS_RCVD_CFG))
5587 current_link_up = true;
5588 } else {
5589 tg3_setup_flow_control(tp, 0, 0);
5590
5591 /* Forcing 1000FD link up. */
5592 current_link_up = true;
5593
5594 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5595 udelay(40);
5596
5597 tw32_f(MAC_MODE, tp->mac_mode);
5598 udelay(40);
5599 }
5600
5601 out:
5602 return current_link_up;
5603 }
5604
5605 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5606 {
5607 u32 orig_pause_cfg;
5608 u16 orig_active_speed;
5609 u8 orig_active_duplex;
5610 u32 mac_status;
5611 bool current_link_up;
5612 int i;
5613
5614 orig_pause_cfg = tp->link_config.active_flowctrl;
5615 orig_active_speed = tp->link_config.active_speed;
5616 orig_active_duplex = tp->link_config.active_duplex;
5617
5618 if (!tg3_flag(tp, HW_AUTONEG) &&
5619 tp->link_up &&
5620 tg3_flag(tp, INIT_COMPLETE)) {
5621 mac_status = tr32(MAC_STATUS);
5622 mac_status &= (MAC_STATUS_PCS_SYNCED |
5623 MAC_STATUS_SIGNAL_DET |
5624 MAC_STATUS_CFG_CHANGED |
5625 MAC_STATUS_RCVD_CFG);
5626 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5627 MAC_STATUS_SIGNAL_DET)) {
5628 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5629 MAC_STATUS_CFG_CHANGED));
5630 return 0;
5631 }
5632 }
5633
5634 tw32_f(MAC_TX_AUTO_NEG, 0);
5635
5636 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5637 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5638 tw32_f(MAC_MODE, tp->mac_mode);
5639 udelay(40);
5640
5641 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5642 tg3_init_bcm8002(tp);
5643
5644 /* Enable link change event even when serdes polling. */
5645 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5646 udelay(40);
5647
5648 current_link_up = false;
5649 tp->link_config.rmt_adv = 0;
5650 mac_status = tr32(MAC_STATUS);
5651
5652 if (tg3_flag(tp, HW_AUTONEG))
5653 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5654 else
5655 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5656
5657 tp->napi[0].hw_status->status =
5658 (SD_STATUS_UPDATED |
5659 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5660
5661 for (i = 0; i < 100; i++) {
5662 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5663 MAC_STATUS_CFG_CHANGED));
5664 udelay(5);
5665 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED |
5667 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5668 break;
5669 }
5670
5671 mac_status = tr32(MAC_STATUS);
5672 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5673 current_link_up = false;
5674 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5675 tp->serdes_counter == 0) {
5676 tw32_f(MAC_MODE, (tp->mac_mode |
5677 MAC_MODE_SEND_CONFIGS));
5678 udelay(1);
5679 tw32_f(MAC_MODE, tp->mac_mode);
5680 }
5681 }
5682
5683 if (current_link_up) {
5684 tp->link_config.active_speed = SPEED_1000;
5685 tp->link_config.active_duplex = DUPLEX_FULL;
5686 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5687 LED_CTRL_LNKLED_OVERRIDE |
5688 LED_CTRL_1000MBPS_ON));
5689 } else {
5690 tp->link_config.active_speed = SPEED_UNKNOWN;
5691 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5692 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5693 LED_CTRL_LNKLED_OVERRIDE |
5694 LED_CTRL_TRAFFIC_OVERRIDE));
5695 }
5696
5697 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5698 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5699 if (orig_pause_cfg != now_pause_cfg ||
5700 orig_active_speed != tp->link_config.active_speed ||
5701 orig_active_duplex != tp->link_config.active_duplex)
5702 tg3_link_report(tp);
5703 }
5704
5705 return 0;
5706 }
5707
5708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5709 {
5710 int err = 0;
5711 u32 bmsr, bmcr;
5712 u16 current_speed = SPEED_UNKNOWN;
5713 u8 current_duplex = DUPLEX_UNKNOWN;
5714 bool current_link_up = false;
/* Initialize local_adv/remote_adv: the SGMII path below can reach
 * fiber_setup_done without negotiating them.
 */
5715 u32 local_adv = 0, remote_adv = 0, sgsr;
5716
5717 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5718 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5719 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5720 (sgsr & SERDES_TG3_SGMII_MODE)) {
5721
5722 if (force_reset)
5723 tg3_phy_reset(tp);
5724
5725 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5726
5727 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5729 } else {
5730 current_link_up = true;
5731 if (sgsr & SERDES_TG3_SPEED_1000) {
5732 current_speed = SPEED_1000;
5733 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5734 } else if (sgsr & SERDES_TG3_SPEED_100) {
5735 current_speed = SPEED_100;
5736 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5737 } else {
5738 current_speed = SPEED_10;
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5740 }
5741
5742 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5743 current_duplex = DUPLEX_FULL;
5744 else
5745 current_duplex = DUPLEX_HALF;
5746 }
5747
5748 tw32_f(MAC_MODE, tp->mac_mode);
5749 udelay(40);
5750
5751 tg3_clear_mac_status(tp);
5752
5753 goto fiber_setup_done;
5754 }
5755
5756 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5757 tw32_f(MAC_MODE, tp->mac_mode);
5758 udelay(40);
5759
5760 tg3_clear_mac_status(tp);
5761
5762 if (force_reset)
5763 tg3_phy_reset(tp);
5764
5765 tp->link_config.rmt_adv = 0;
5766
5767 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5768 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5769 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5770 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5771 bmsr |= BMSR_LSTATUS;
5772 else
5773 bmsr &= ~BMSR_LSTATUS;
5774 }
5775
5776 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5777
5778 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5779 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5780 /* do nothing, just check for link up at the end */
5781 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5782 u32 adv, newadv;
5783
5784 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5785 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5786 ADVERTISE_1000XPAUSE |
5787 ADVERTISE_1000XPSE_ASYM |
5788 ADVERTISE_SLCT);
5789
5790 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5791 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5792
5793 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5794 tg3_writephy(tp, MII_ADVERTISE, newadv);
5795 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5796 tg3_writephy(tp, MII_BMCR, bmcr);
5797
5798 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5799 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5800 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5801
5802 return err;
5803 }
5804 } else {
5805 u32 new_bmcr;
5806
5807 bmcr &= ~BMCR_SPEED1000;
5808 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5809
5810 if (tp->link_config.duplex == DUPLEX_FULL)
5811 new_bmcr |= BMCR_FULLDPLX;
5812
5813 if (new_bmcr != bmcr) {
5814 /* BMCR_SPEED1000 is a reserved bit that needs
5815 * to be set on write.
5816 */
5817 new_bmcr |= BMCR_SPEED1000;
5818
5819 /* Force a linkdown */
5820 if (tp->link_up) {
5821 u32 adv;
5822
5823 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5824 adv &= ~(ADVERTISE_1000XFULL |
5825 ADVERTISE_1000XHALF |
5826 ADVERTISE_SLCT);
5827 tg3_writephy(tp, MII_ADVERTISE, adv);
5828 tg3_writephy(tp, MII_BMCR, bmcr |
5829 BMCR_ANRESTART |
5830 BMCR_ANENABLE);
5831 udelay(10);
5832 tg3_carrier_off(tp);
5833 }
5834 tg3_writephy(tp, MII_BMCR, new_bmcr);
5835 bmcr = new_bmcr;
5836 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5837 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5838 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5839 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5840 bmsr |= BMSR_LSTATUS;
5841 else
5842 bmsr &= ~BMSR_LSTATUS;
5843 }
5844 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5845 }
5846 }
5847
5848 if (bmsr & BMSR_LSTATUS) {
5849 current_speed = SPEED_1000;
5850 current_link_up = true;
5851 if (bmcr & BMCR_FULLDPLX)
5852 current_duplex = DUPLEX_FULL;
5853 else
5854 current_duplex = DUPLEX_HALF;
5855
5856 local_adv = 0;
5857 remote_adv = 0;
5858
5859 if (bmcr & BMCR_ANENABLE) {
5860 u32 common;
5861
5862 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5863 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5864 common = local_adv & remote_adv;
5865 if (common & (ADVERTISE_1000XHALF |
5866 ADVERTISE_1000XFULL)) {
5867 if (common & ADVERTISE_1000XFULL)
5868 current_duplex = DUPLEX_FULL;
5869 else
5870 current_duplex = DUPLEX_HALF;
5871
5872 tp->link_config.rmt_adv =
5873 mii_adv_to_ethtool_adv_x(remote_adv);
5874 } else if (!tg3_flag(tp, 5780_CLASS)) {
5875 /* Link is up via parallel detect */
5876 } else {
5877 current_link_up = false;
5878 }
5879 }
5880 }
5881
5882 fiber_setup_done:
5883 if (current_link_up && current_duplex == DUPLEX_FULL)
5884 tg3_setup_flow_control(tp, local_adv, remote_adv);
5885
5886 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5887 if (tp->link_config.active_duplex == DUPLEX_HALF)
5888 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5889
5890 tw32_f(MAC_MODE, tp->mac_mode);
5891 udelay(40);
5892
5893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894
5895 tp->link_config.active_speed = current_speed;
5896 tp->link_config.active_duplex = current_duplex;
5897
5898 tg3_test_and_report_link_chg(tp, current_link_up);
5899 return err;
5900 }
5901
5902 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5903 {
5904 if (tp->serdes_counter) {
5905 /* Give autoneg time to complete. */
5906 tp->serdes_counter--;
5907 return;
5908 }
5909
5910 if (!tp->link_up &&
5911 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5912 u32 bmcr;
5913
5914 tg3_readphy(tp, MII_BMCR, &bmcr);
5915 if (bmcr & BMCR_ANENABLE) {
5916 u32 phy1, phy2;
5917
5918 /* Select shadow register 0x1f */
5919 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5920 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5921
5922 /* Select expansion interrupt status register */
5923 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5924 MII_TG3_DSP_EXP1_INT_STAT);
5925 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5926 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5927
5928 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5929 /* We have signal detect and not receiving
5930 * config code words, link is up by parallel
5931 * detection.
5932 */
5933
5934 bmcr &= ~BMCR_ANENABLE;
5935 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5936 tg3_writephy(tp, MII_BMCR, bmcr);
5937 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5938 }
5939 }
5940 } else if (tp->link_up &&
5941 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5942 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5943 u32 phy2;
5944
5945 /* Select expansion interrupt status register */
5946 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5947 MII_TG3_DSP_EXP1_INT_STAT);
5948 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5949 if (phy2 & 0x20) {
5950 u32 bmcr;
5951
5952 /* Config code words received, turn on autoneg. */
5953 tg3_readphy(tp, MII_BMCR, &bmcr);
5954 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5955
5956 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5958 }
5959 }
5960 }
5961
5962 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5963 {
5964 u32 val;
5965 int err;
5966
5967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5968 err = tg3_setup_fiber_phy(tp, force_reset);
5969 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5970 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5971 else
5972 err = tg3_setup_copper_phy(tp, force_reset);
5973
5974 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5975 u32 scale;
5976
5977 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5978 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5979 scale = 65;
5980 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5981 scale = 6;
5982 else
5983 scale = 12;
5984
5985 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5986 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5987 tw32(GRC_MISC_CFG, val);
5988 }
5989
5990 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5991 (6 << TX_LENGTHS_IPG_SHIFT);
5992 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5993 tg3_asic_rev(tp) == ASIC_REV_5762)
5994 val |= tr32(MAC_TX_LENGTHS) &
5995 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5996 TX_LENGTHS_CNT_DWN_VAL_MSK);
5997
5998 if (tp->link_config.active_speed == SPEED_1000 &&
5999 tp->link_config.active_duplex == DUPLEX_HALF)
6000 tw32(MAC_TX_LENGTHS, val |
6001 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6002 else
6003 tw32(MAC_TX_LENGTHS, val |
6004 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6005
6006 if (!tg3_flag(tp, 5705_PLUS)) {
6007 if (tp->link_up) {
6008 tw32(HOSTCC_STAT_COAL_TICKS,
6009 tp->coal.stats_block_coalesce_usecs);
6010 } else {
6011 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6012 }
6013 }
6014
6015 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6016 val = tr32(PCIE_PWR_MGMT_THRESH);
6017 if (!tp->link_up)
6018 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6019 tp->pwrmgmt_thresh;
6020 else
6021 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6022 tw32(PCIE_PWR_MGMT_THRESH, val);
6023 }
6024
6025 return err;
6026 }
6027
6028 /* tp->lock must be held */
6029 static u64 tg3_refclk_read(struct tg3 *tp)
6030 {
6031 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6032 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6033 }
6034
6035 /* tp->lock must be held */
6036 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6037 {
6038 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
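/* Annotation (not in the original source): halting the clock first
 * means the counter cannot tick between the two 32-bit halves being
 * written below, so the 64-bit value is loaded atomically from the
 * hardware's point of view.
 */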
6039 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6040 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6041 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6042 }
6043
6044 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6045 static inline void tg3_full_unlock(struct tg3 *tp);
6046 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6047 {
6048 struct tg3 *tp = netdev_priv(dev);
6049
6050 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6051 SOF_TIMESTAMPING_RX_SOFTWARE |
6052 SOF_TIMESTAMPING_SOFTWARE;
6053
6054 if (tg3_flag(tp, PTP_CAPABLE)) {
6055 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6056 SOF_TIMESTAMPING_RX_HARDWARE |
6057 SOF_TIMESTAMPING_RAW_HARDWARE;
6058 }
6059
6060 if (tp->ptp_clock)
6061 info->phc_index = ptp_clock_index(tp->ptp_clock);
6062 else
6063 info->phc_index = -1;
6064
6065 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6066
6067 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6068 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6069 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6070 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6071 return 0;
6072 }
6073
6074 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6075 {
6076 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6077 bool neg_adj = false;
6078 u32 correction = 0;
6079
6080 if (ppb < 0) {
6081 neg_adj = true;
6082 ppb = -ppb;
6083 }
6084
6085 /* Frequency adjustment is performed using hardware with a 24-bit
6086 * accumulator and a programmable correction value. On each clk, the
6087 * correction value gets added to the accumulator and when it
6088 * overflows, the time counter is incremented/decremented.
6089 *
6090 * So conversion from ppb to correction value is
6091 * ppb * (1 << 24) / 1000000000
6092 */
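/* Worked example (illustration only): ppb = 100000, i.e. 100 ppm,
 * gives correction = 100000 * (1 << 24) / 1000000000 = 1677.
 */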
6093 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6094 TG3_EAV_REF_CLK_CORRECT_MASK;
6095
6096 tg3_full_lock(tp, 0);
6097
6098 if (correction)
6099 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6100 TG3_EAV_REF_CLK_CORRECT_EN |
6101 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6102 else
6103 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6104
6105 tg3_full_unlock(tp);
6106
6107 return 0;
6108 }
6109
6110 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6111 {
6112 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6113
6114 tg3_full_lock(tp, 0);
6115 tp->ptp_adjust += delta;
6116 tg3_full_unlock(tp);
6117
6118 return 0;
6119 }
6120
6121 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6122 {
6123 u64 ns;
6124 u32 remainder;
6125 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6126
6127 tg3_full_lock(tp, 0);
6128 ns = tg3_refclk_read(tp);
6129 ns += tp->ptp_adjust;
6130 tg3_full_unlock(tp);
6131
6132 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6133 ts->tv_nsec = remainder;
6134
6135 return 0;
6136 }
6137
6138 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6139 const struct timespec *ts)
6140 {
6141 u64 ns;
6142 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6143
6144 ns = timespec_to_ns(ts);
6145
6146 tg3_full_lock(tp, 0);
6147 tg3_refclk_write(tp, ns);
6148 tp->ptp_adjust = 0;
6149 tg3_full_unlock(tp);
6150
6151 return 0;
6152 }
6153
6154 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6155 struct ptp_clock_request *rq, int on)
6156 {
6157 return -EOPNOTSUPP;
6158 }
6159
6160 static const struct ptp_clock_info tg3_ptp_caps = {
6161 .owner = THIS_MODULE,
6162 .name = "tg3 clock",
6163 .max_adj = 250000000,
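/* max_adj is given in parts per billion, so 250000000 allows the
 * clock rate to be adjusted by up to 25%.
 */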
6164 .n_alarm = 0,
6165 .n_ext_ts = 0,
6166 .n_per_out = 0,
6167 .pps = 0,
6168 .adjfreq = tg3_ptp_adjfreq,
6169 .adjtime = tg3_ptp_adjtime,
6170 .gettime = tg3_ptp_gettime,
6171 .settime = tg3_ptp_settime,
6172 .enable = tg3_ptp_enable,
6173 };
6174
6175 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6176 struct skb_shared_hwtstamps *timestamp)
6177 {
6178 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6179 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6180 tp->ptp_adjust);
6181 }
6182
6183 /* tp->lock must be held */
6184 static void tg3_ptp_init(struct tg3 *tp)
6185 {
6186 if (!tg3_flag(tp, PTP_CAPABLE))
6187 return;
6188
6189 /* Initialize the hardware clock to the system time. */
6190 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6191 tp->ptp_adjust = 0;
6192 tp->ptp_info = tg3_ptp_caps;
6193 }
6194
6195 /* tp->lock must be held */
6196 static void tg3_ptp_resume(struct tg3 *tp)
6197 {
6198 if (!tg3_flag(tp, PTP_CAPABLE))
6199 return;
6200
6201 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6202 tp->ptp_adjust = 0;
6203 }
6204
6205 static void tg3_ptp_fini(struct tg3 *tp)
6206 {
6207 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6208 return;
6209
6210 ptp_clock_unregister(tp->ptp_clock);
6211 tp->ptp_clock = NULL;
6212 tp->ptp_adjust = 0;
6213 }
6214
6215 static inline int tg3_irq_sync(struct tg3 *tp)
6216 {
6217 return tp->irq_sync;
6218 }
6219
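/* Copy a block of registers into a dump buffer. Note that dst is
 * first advanced by off bytes, so each register lands at the same
 * offset in the buffer as it occupies in the register space.
 */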
6220 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6221 {
6222 int i;
6223
6224 dst = (u32 *)((u8 *)dst + off);
6225 for (i = 0; i < len; i += sizeof(u32))
6226 *dst++ = tr32(off + i);
6227 }
6228
6229 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6230 {
6231 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6232 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6233 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6234 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6235 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6236 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6237 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6238 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6239 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6240 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6241 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6242 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6243 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6244 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6245 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6246 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6247 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6248 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6249 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6250
6251 if (tg3_flag(tp, SUPPORT_MSIX))
6252 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6253
6254 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6255 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6256 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6257 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6258 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6259 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6260 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6261 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6262
6263 if (!tg3_flag(tp, 5705_PLUS)) {
6264 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6265 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6266 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6267 }
6268
6269 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6270 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6271 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6272 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6273 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6274
6275 if (tg3_flag(tp, NVRAM))
6276 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6277 }
6278
6279 static void tg3_dump_state(struct tg3 *tp)
6280 {
6281 int i;
6282 u32 *regs;
6283
6284 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6285 if (!regs)
6286 return;
6287
6288 if (tg3_flag(tp, PCI_EXPRESS)) {
6289 /* Read up to but not including private PCI registers */
6290 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6291 regs[i / sizeof(u32)] = tr32(i);
6292 } else
6293 tg3_dump_legacy_regs(tp, regs);
6294
6295 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6296 if (!regs[i + 0] && !regs[i + 1] &&
6297 !regs[i + 2] && !regs[i + 3])
6298 continue;
6299
6300 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6301 i * 4,
6302 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6303 }
6304
6305 kfree(regs);
6306
6307 for (i = 0; i < tp->irq_cnt; i++) {
6308 struct tg3_napi *tnapi = &tp->napi[i];
6309
6310 /* SW status block */
6311 netdev_err(tp->dev,
6312 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6313 i,
6314 tnapi->hw_status->status,
6315 tnapi->hw_status->status_tag,
6316 tnapi->hw_status->rx_jumbo_consumer,
6317 tnapi->hw_status->rx_consumer,
6318 tnapi->hw_status->rx_mini_consumer,
6319 tnapi->hw_status->idx[0].rx_producer,
6320 tnapi->hw_status->idx[0].tx_consumer);
6321
6322 netdev_err(tp->dev,
6323 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6324 i,
6325 tnapi->last_tag, tnapi->last_irq_tag,
6326 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6327 tnapi->rx_rcb_ptr,
6328 tnapi->prodring.rx_std_prod_idx,
6329 tnapi->prodring.rx_std_cons_idx,
6330 tnapi->prodring.rx_jmb_prod_idx,
6331 tnapi->prodring.rx_jmb_cons_idx);
6332 }
6333 }
6334
6335 /* This is called whenever we suspect that the system chipset is re-
6336 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6337 * is bogus tx completions. We try to recover by setting the
6338 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6339 * in the workqueue.
6340 */
6341 static void tg3_tx_recover(struct tg3 *tp)
6342 {
6343 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6344 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6345
6346 netdev_warn(tp->dev,
6347 "The system may be re-ordering memory-mapped I/O "
6348 "cycles to the network device, attempting to recover. "
6349 "Please report the problem to the driver maintainer "
6350 "and include system chipset information.\n");
6351
6352 spin_lock(&tp->lock);
6353 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6354 spin_unlock(&tp->lock);
6355 }
6356
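/* Number of free tx descriptors. The subtraction relies on unsigned
 * wraparound; e.g. with a 512-entry ring, tx_prod = 5 and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors in flight.
 */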
6357 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6358 {
6359 /* Tell compiler to fetch tx indices from memory. */
6360 barrier();
6361 return tnapi->tx_pending -
6362 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6363 }
6364
6365 /* Tigon3 never reports partial packet sends. So we do not
6366 * need special logic to handle SKBs that have not had all
6367 * of their frags sent yet, like SunGEM does.
6368 */
6369 static void tg3_tx(struct tg3_napi *tnapi)
6370 {
6371 struct tg3 *tp = tnapi->tp;
6372 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6373 u32 sw_idx = tnapi->tx_cons;
6374 struct netdev_queue *txq;
6375 int index = tnapi - tp->napi;
6376 unsigned int pkts_compl = 0, bytes_compl = 0;
6377
6378 if (tg3_flag(tp, ENABLE_TSS))
6379 index--;
6380
6381 txq = netdev_get_tx_queue(tp->dev, index);
6382
6383 while (sw_idx != hw_idx) {
6384 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6385 struct sk_buff *skb = ri->skb;
6386 int i, tx_bug = 0;
6387
6388 if (unlikely(skb == NULL)) {
6389 tg3_tx_recover(tp);
6390 return;
6391 }
6392
6393 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6394 struct skb_shared_hwtstamps timestamp;
6395 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6396 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6397
6398 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6399
6400 skb_tstamp_tx(skb, &timestamp);
6401 }
6402
6403 pci_unmap_single(tp->pdev,
6404 dma_unmap_addr(ri, mapping),
6405 skb_headlen(skb),
6406 PCI_DMA_TODEVICE);
6407
6408 ri->skb = NULL;
6409
6410 while (ri->fragmented) {
6411 ri->fragmented = false;
6412 sw_idx = NEXT_TX(sw_idx);
6413 ri = &tnapi->tx_buffers[sw_idx];
6414 }
6415
6416 sw_idx = NEXT_TX(sw_idx);
6417
6418 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6419 ri = &tnapi->tx_buffers[sw_idx];
6420 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6421 tx_bug = 1;
6422
6423 pci_unmap_page(tp->pdev,
6424 dma_unmap_addr(ri, mapping),
6425 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6426 PCI_DMA_TODEVICE);
6427
6428 while (ri->fragmented) {
6429 ri->fragmented = false;
6430 sw_idx = NEXT_TX(sw_idx);
6431 ri = &tnapi->tx_buffers[sw_idx];
6432 }
6433
6434 sw_idx = NEXT_TX(sw_idx);
6435 }
6436
6437 pkts_compl++;
6438 bytes_compl += skb->len;
6439
6440 dev_kfree_skb(skb);
6441
6442 if (unlikely(tx_bug)) {
6443 tg3_tx_recover(tp);
6444 return;
6445 }
6446 }
6447
6448 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6449
6450 tnapi->tx_cons = sw_idx;
6451
6452 /* Need to make the tx_cons update visible to tg3_start_xmit()
6453 * before checking for netif_queue_stopped(). Without the
6454 * memory barrier, there is a small possibility that tg3_start_xmit()
6455 * will miss it and cause the queue to be stopped forever.
6456 */
6457 smp_mb();
6458
6459 if (unlikely(netif_tx_queue_stopped(txq) &&
6460 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6461 __netif_tx_lock(txq, smp_processor_id());
6462 if (netif_tx_queue_stopped(txq) &&
6463 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6464 netif_tx_wake_queue(txq);
6465 __netif_tx_unlock(txq);
6466 }
6467 }
6468
6469 static void tg3_frag_free(bool is_frag, void *data)
6470 {
6471 if (is_frag)
6472 put_page(virt_to_head_page(data));
6473 else
6474 kfree(data);
6475 }
6476
6477 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6478 {
6479 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6480 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6481
6482 if (!ri->data)
6483 return;
6484
6485 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6486 map_sz, PCI_DMA_FROMDEVICE);
6487 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6488 ri->data = NULL;
6489 }
6490
6491
6492 /* Returns size of skb allocated or < 0 on error.
6493 *
6494 * We only need to fill in the address because the other members
6495 * of the RX descriptor are invariant, see tg3_init_rings.
6496 *
6497 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6498 * posting buffers we only dirty the first cache line of the RX
6499 * descriptor (containing the address). Whereas for the RX status
6500 * buffers the cpu only reads the last cacheline of the RX descriptor
6501 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6502 */
6503 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6504 u32 opaque_key, u32 dest_idx_unmasked,
6505 unsigned int *frag_size)
6506 {
6507 struct tg3_rx_buffer_desc *desc;
6508 struct ring_info *map;
6509 u8 *data;
6510 dma_addr_t mapping;
6511 int skb_size, data_size, dest_idx;
6512
6513 switch (opaque_key) {
6514 case RXD_OPAQUE_RING_STD:
6515 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6516 desc = &tpr->rx_std[dest_idx];
6517 map = &tpr->rx_std_buffers[dest_idx];
6518 data_size = tp->rx_pkt_map_sz;
6519 break;
6520
6521 case RXD_OPAQUE_RING_JUMBO:
6522 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6523 desc = &tpr->rx_jmb[dest_idx].std;
6524 map = &tpr->rx_jmb_buffers[dest_idx];
6525 data_size = TG3_RX_JMB_MAP_SZ;
6526 break;
6527
6528 default:
6529 return -EINVAL;
6530 }
6531
6532 /* Do not overwrite any of the map or rp information
6533 * until we are sure we can commit to a new buffer.
6534 *
6535 * Callers depend upon this behavior and assume that
6536 * we leave everything unchanged if we fail.
6537 */
6538 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6539 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6540 if (skb_size <= PAGE_SIZE) {
6541 data = netdev_alloc_frag(skb_size);
6542 *frag_size = skb_size;
6543 } else {
6544 data = kmalloc(skb_size, GFP_ATOMIC);
6545 *frag_size = 0;
6546 }
6547 if (!data)
6548 return -ENOMEM;
6549
6550 mapping = pci_map_single(tp->pdev,
6551 data + TG3_RX_OFFSET(tp),
6552 data_size,
6553 PCI_DMA_FROMDEVICE);
6554 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6555 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6556 return -EIO;
6557 }
6558
6559 map->data = data;
6560 dma_unmap_addr_set(map, mapping, mapping);
6561
6562 desc->addr_hi = ((u64)mapping >> 32);
6563 desc->addr_lo = ((u64)mapping & 0xffffffff);
6564
6565 return data_size;
6566 }
6567
6568 /* We only need to move the address over because the other
6569 * members of the RX descriptor are invariant. See notes above
6570 * tg3_alloc_rx_data for full details.
6571 */
6572 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6573 struct tg3_rx_prodring_set *dpr,
6574 u32 opaque_key, int src_idx,
6575 u32 dest_idx_unmasked)
6576 {
6577 struct tg3 *tp = tnapi->tp;
6578 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6579 struct ring_info *src_map, *dest_map;
6580 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6581 int dest_idx;
6582
6583 switch (opaque_key) {
6584 case RXD_OPAQUE_RING_STD:
6585 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6586 dest_desc = &dpr->rx_std[dest_idx];
6587 dest_map = &dpr->rx_std_buffers[dest_idx];
6588 src_desc = &spr->rx_std[src_idx];
6589 src_map = &spr->rx_std_buffers[src_idx];
6590 break;
6591
6592 case RXD_OPAQUE_RING_JUMBO:
6593 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6594 dest_desc = &dpr->rx_jmb[dest_idx].std;
6595 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6596 src_desc = &spr->rx_jmb[src_idx].std;
6597 src_map = &spr->rx_jmb_buffers[src_idx];
6598 break;
6599
6600 default:
6601 return;
6602 }
6603
6604 dest_map->data = src_map->data;
6605 dma_unmap_addr_set(dest_map, mapping,
6606 dma_unmap_addr(src_map, mapping));
6607 dest_desc->addr_hi = src_desc->addr_hi;
6608 dest_desc->addr_lo = src_desc->addr_lo;
6609
6610 /* Ensure that the update to the skb happens after the physical
6611 * addresses have been transferred to the new BD location.
6612 */
6613 smp_wmb();
6614
6615 src_map->data = NULL;
6616 }
6617
6618 /* The RX ring scheme is composed of multiple rings which post fresh
6619 * buffers to the chip, and one special ring the chip uses to report
6620 * status back to the host.
6621 *
6622 * The special ring reports the status of received packets to the
6623 * host. The chip does not write into the original descriptor the
6624 * RX buffer was obtained from. The chip simply takes the original
6625 * descriptor as provided by the host, updates the status and length
6626 * field, then writes this into the next status ring entry.
6627 *
6628 * Each ring the host uses to post buffers to the chip is described
6629 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6630 * it is first placed into the on-chip ram. When the packet's length
6631 * is known, it walks down the TG3_BDINFO entries to select the ring.
6632 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6633 * which is within the range of the new packet's length is chosen.
6634 *
6635 * The "separate ring for rx status" scheme may sound queer, but it makes
6636 * sense from a cache coherency perspective. If only the host writes
6637 * to the buffer post rings, and only the chip writes to the rx status
6638 * rings, then cache lines never move beyond shared-modified state.
6639 * If both the host and chip were to write into the same ring, cache line
6640 * eviction could occur since both entities want it in an exclusive state.
6641 */
6642 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6643 {
6644 struct tg3 *tp = tnapi->tp;
6645 u32 work_mask, rx_std_posted = 0;
6646 u32 std_prod_idx, jmb_prod_idx;
6647 u32 sw_idx = tnapi->rx_rcb_ptr;
6648 u16 hw_idx;
6649 int received;
6650 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6651
6652 hw_idx = *(tnapi->rx_rcb_prod_idx);
6653 /*
6654 * We need to order the read of hw_idx and the read of
6655 * the opaque cookie.
6656 */
6657 rmb();
6658 work_mask = 0;
6659 received = 0;
6660 std_prod_idx = tpr->rx_std_prod_idx;
6661 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6662 while (sw_idx != hw_idx && budget > 0) {
6663 struct ring_info *ri;
6664 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6665 unsigned int len;
6666 struct sk_buff *skb;
6667 dma_addr_t dma_addr;
6668 u32 opaque_key, desc_idx, *post_ptr;
6669 u8 *data;
6670 u64 tstamp = 0;
6671
6672 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6673 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6674 if (opaque_key == RXD_OPAQUE_RING_STD) {
6675 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6676 dma_addr = dma_unmap_addr(ri, mapping);
6677 data = ri->data;
6678 post_ptr = &std_prod_idx;
6679 rx_std_posted++;
6680 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6681 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6682 dma_addr = dma_unmap_addr(ri, mapping);
6683 data = ri->data;
6684 post_ptr = &jmb_prod_idx;
6685 } else
6686 goto next_pkt_nopost;
6687
6688 work_mask |= opaque_key;
6689
6690 if (desc->err_vlan & RXD_ERR_MASK) {
6691 drop_it:
6692 tg3_recycle_rx(tnapi, tpr, opaque_key,
6693 desc_idx, *post_ptr);
6694 drop_it_no_recycle:
6695 /* Other statistics are tracked by the card. */
6696 tp->rx_dropped++;
6697 goto next_pkt;
6698 }
6699
6700 prefetch(data + TG3_RX_OFFSET(tp));
6701 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6702 ETH_FCS_LEN;
6703
6704 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6705 RXD_FLAG_PTPSTAT_PTPV1 ||
6706 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6707 RXD_FLAG_PTPSTAT_PTPV2) {
6708 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6709 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6710 }
6711
6712 if (len > TG3_RX_COPY_THRESH(tp)) {
6713 int skb_size;
6714 unsigned int frag_size;
6715
6716 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6717 *post_ptr, &frag_size);
6718 if (skb_size < 0)
6719 goto drop_it;
6720
6721 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6722 PCI_DMA_FROMDEVICE);
6723
6724 /* Ensure that the update to the data happens
6725 * after the usage of the old DMA mapping.
6726 */
6727 smp_wmb();
6728
6729 ri->data = NULL;
6730
6731 skb = build_skb(data, frag_size);
6732 if (!skb) {
6733 tg3_frag_free(frag_size != 0, data);
6734 goto drop_it_no_recycle;
6735 }
6736 skb_reserve(skb, TG3_RX_OFFSET(tp));
6737 } else {
6738 tg3_recycle_rx(tnapi, tpr, opaque_key,
6739 desc_idx, *post_ptr);
6740
6741 skb = netdev_alloc_skb(tp->dev,
6742 len + TG3_RAW_IP_ALIGN);
6743 if (skb == NULL)
6744 goto drop_it_no_recycle;
6745
6746 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6747 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6748 memcpy(skb->data,
6749 data + TG3_RX_OFFSET(tp),
6750 len);
6751 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6752 }
6753
6754 skb_put(skb, len);
6755 if (tstamp)
6756 tg3_hwclock_to_timestamp(tp, tstamp,
6757 skb_hwtstamps(skb));
6758
6759 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6760 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6761 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6762 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6763 skb->ip_summed = CHECKSUM_UNNECESSARY;
6764 else
6765 skb_checksum_none_assert(skb);
6766
6767 skb->protocol = eth_type_trans(skb, tp->dev);
6768
6769 if (len > (tp->dev->mtu + ETH_HLEN) &&
6770 skb->protocol != htons(ETH_P_8021Q)) {
6771 dev_kfree_skb(skb);
6772 goto drop_it_no_recycle;
6773 }
6774
6775 if (desc->type_flags & RXD_FLAG_VLAN &&
6776 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6777 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6778 desc->err_vlan & RXD_VLAN_MASK);
6779
6780 napi_gro_receive(&tnapi->napi, skb);
6781
6782 received++;
6783 budget--;
6784
6785 next_pkt:
6786 (*post_ptr)++;
6787
6788 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6789 tpr->rx_std_prod_idx = std_prod_idx &
6790 tp->rx_std_ring_mask;
6791 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6792 tpr->rx_std_prod_idx);
6793 work_mask &= ~RXD_OPAQUE_RING_STD;
6794 rx_std_posted = 0;
6795 }
6796 next_pkt_nopost:
6797 sw_idx++;
6798 sw_idx &= tp->rx_ret_ring_mask;
6799
6800 /* Refresh hw_idx to see if there is new work */
6801 if (sw_idx == hw_idx) {
6802 hw_idx = *(tnapi->rx_rcb_prod_idx);
6803 rmb();
6804 }
6805 }
6806
6807 /* ACK the status ring. */
6808 tnapi->rx_rcb_ptr = sw_idx;
6809 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6810
6811 /* Refill RX ring(s). */
6812 if (!tg3_flag(tp, ENABLE_RSS)) {
6813 /* Sync BD data before updating mailbox */
6814 wmb();
6815
6816 if (work_mask & RXD_OPAQUE_RING_STD) {
6817 tpr->rx_std_prod_idx = std_prod_idx &
6818 tp->rx_std_ring_mask;
6819 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6820 tpr->rx_std_prod_idx);
6821 }
6822 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6823 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6824 tp->rx_jmb_ring_mask;
6825 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6826 tpr->rx_jmb_prod_idx);
6827 }
6828 mmiowb();
6829 } else if (work_mask) {
6830 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6831 * updated before the producer indices can be updated.
6832 */
6833 smp_wmb();
6834
6835 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6836 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6837
6838 if (tnapi != &tp->napi[1]) {
6839 tp->rx_refill = true;
6840 napi_schedule(&tp->napi[1].napi);
6841 }
6842 }
6843
6844 return received;
6845 }
6846
6847 static void tg3_poll_link(struct tg3 *tp)
6848 {
6849 /* handle link change and other phy events */
6850 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6851 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6852
6853 if (sblk->status & SD_STATUS_LINK_CHG) {
6854 sblk->status = SD_STATUS_UPDATED |
6855 (sblk->status & ~SD_STATUS_LINK_CHG);
6856 spin_lock(&tp->lock);
6857 if (tg3_flag(tp, USE_PHYLIB)) {
6858 tw32_f(MAC_STATUS,
6859 (MAC_STATUS_SYNC_CHANGED |
6860 MAC_STATUS_CFG_CHANGED |
6861 MAC_STATUS_MI_COMPLETION |
6862 MAC_STATUS_LNKSTATE_CHANGED));
6863 udelay(40);
6864 } else
6865 tg3_setup_phy(tp, false);
6866 spin_unlock(&tp->lock);
6867 }
6868 }
6869 }
6870
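/* In RSS mode each rx vector refills its own shadowed producer ring;
 * this transfers those freshly allocated buffers into napi[0]'s ring,
 * the one the hardware actually consumes. Buffers are moved in
 * contiguous runs, and the copy stops early with -ENOSPC when a
 * destination slot is still occupied.
 */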
6871 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6872 struct tg3_rx_prodring_set *dpr,
6873 struct tg3_rx_prodring_set *spr)
6874 {
6875 u32 si, di, cpycnt, src_prod_idx;
6876 int i, err = 0;
6877
6878 while (1) {
6879 src_prod_idx = spr->rx_std_prod_idx;
6880
6881 /* Make sure updates to the rx_std_buffers[] entries and the
6882 * standard producer index are seen in the correct order.
6883 */
6884 smp_rmb();
6885
6886 if (spr->rx_std_cons_idx == src_prod_idx)
6887 break;
6888
6889 if (spr->rx_std_cons_idx < src_prod_idx)
6890 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6891 else
6892 cpycnt = tp->rx_std_ring_mask + 1 -
6893 spr->rx_std_cons_idx;
6894
6895 cpycnt = min(cpycnt,
6896 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6897
6898 si = spr->rx_std_cons_idx;
6899 di = dpr->rx_std_prod_idx;
6900
6901 for (i = di; i < di + cpycnt; i++) {
6902 if (dpr->rx_std_buffers[i].data) {
6903 cpycnt = i - di;
6904 err = -ENOSPC;
6905 break;
6906 }
6907 }
6908
6909 if (!cpycnt)
6910 break;
6911
6912 /* Ensure that updates to the rx_std_buffers ring and the
6913 * shadowed hardware producer ring from tg3_recycle_skb() are
6914 * ordered correctly WRT the skb check above.
6915 */
6916 smp_rmb();
6917
6918 memcpy(&dpr->rx_std_buffers[di],
6919 &spr->rx_std_buffers[si],
6920 cpycnt * sizeof(struct ring_info));
6921
6922 for (i = 0; i < cpycnt; i++, di++, si++) {
6923 struct tg3_rx_buffer_desc *sbd, *dbd;
6924 sbd = &spr->rx_std[si];
6925 dbd = &dpr->rx_std[di];
6926 dbd->addr_hi = sbd->addr_hi;
6927 dbd->addr_lo = sbd->addr_lo;
6928 }
6929
6930 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6931 tp->rx_std_ring_mask;
6932 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6933 tp->rx_std_ring_mask;
6934 }
6935
6936 while (1) {
6937 src_prod_idx = spr->rx_jmb_prod_idx;
6938
6939 /* Make sure updates to the rx_jmb_buffers[] entries and
6940 * the jumbo producer index are seen in the correct order.
6941 */
6942 smp_rmb();
6943
6944 if (spr->rx_jmb_cons_idx == src_prod_idx)
6945 break;
6946
6947 if (spr->rx_jmb_cons_idx < src_prod_idx)
6948 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6949 else
6950 cpycnt = tp->rx_jmb_ring_mask + 1 -
6951 spr->rx_jmb_cons_idx;
6952
6953 cpycnt = min(cpycnt,
6954 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6955
6956 si = spr->rx_jmb_cons_idx;
6957 di = dpr->rx_jmb_prod_idx;
6958
6959 for (i = di; i < di + cpycnt; i++) {
6960 if (dpr->rx_jmb_buffers[i].data) {
6961 cpycnt = i - di;
6962 err = -ENOSPC;
6963 break;
6964 }
6965 }
6966
6967 if (!cpycnt)
6968 break;
6969
6970 /* Ensure that updates to the rx_jmb_buffers ring and the
6971 * shadowed hardware producer ring from tg3_recycle_skb() are
6972 * ordered correctly WRT the skb check above.
6973 */
6974 smp_rmb();
6975
6976 memcpy(&dpr->rx_jmb_buffers[di],
6977 &spr->rx_jmb_buffers[si],
6978 cpycnt * sizeof(struct ring_info));
6979
6980 for (i = 0; i < cpycnt; i++, di++, si++) {
6981 struct tg3_rx_buffer_desc *sbd, *dbd;
6982 sbd = &spr->rx_jmb[si].std;
6983 dbd = &dpr->rx_jmb[di].std;
6984 dbd->addr_hi = sbd->addr_hi;
6985 dbd->addr_lo = sbd->addr_lo;
6986 }
6987
6988 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6989 tp->rx_jmb_ring_mask;
6990 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6991 tp->rx_jmb_ring_mask;
6992 }
6993
6994 return err;
6995 }
6996
6997 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6998 {
6999 struct tg3 *tp = tnapi->tp;
7000
7001 /* run TX completion thread */
7002 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7003 tg3_tx(tnapi);
7004 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7005 return work_done;
7006 }
7007
7008 if (!tnapi->rx_rcb_prod_idx)
7009 return work_done;
7010
7011 /* run RX thread, within the bounds set by NAPI.
7012 * All RX "locking" is done by ensuring outside
7013 * code synchronizes with tg3->napi.poll()
7014 */
7015 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7016 work_done += tg3_rx(tnapi, budget - work_done);
7017
7018 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7019 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7020 int i, err = 0;
7021 u32 std_prod_idx = dpr->rx_std_prod_idx;
7022 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7023
7024 tp->rx_refill = false;
7025 for (i = 1; i <= tp->rxq_cnt; i++)
7026 err |= tg3_rx_prodring_xfer(tp, dpr,
7027 &tp->napi[i].prodring);
7028
7029 wmb();
7030
7031 if (std_prod_idx != dpr->rx_std_prod_idx)
7032 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7033 dpr->rx_std_prod_idx);
7034
7035 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7036 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7037 dpr->rx_jmb_prod_idx);
7038
7039 mmiowb();
7040
7041 if (err)
7042 tw32_f(HOSTCC_MODE, tp->coal_now);
7043 }
7044
7045 return work_done;
7046 }
7047
7048 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7049 {
7050 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7051 schedule_work(&tp->reset_task);
7052 }
7053
7054 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7055 {
7056 cancel_work_sync(&tp->reset_task);
7057 tg3_flag_clear(tp, RESET_TASK_PENDING);
7058 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7059 }
7060
7061 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7062 {
7063 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7064 struct tg3 *tp = tnapi->tp;
7065 int work_done = 0;
7066 struct tg3_hw_status *sblk = tnapi->hw_status;
7067
7068 while (1) {
7069 work_done = tg3_poll_work(tnapi, work_done, budget);
7070
7071 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7072 goto tx_recovery;
7073
7074 if (unlikely(work_done >= budget))
7075 break;
7076
7077 /* tp->last_tag is used in tg3_int_reenable() below
7078 * to tell the hw how much work has been processed,
7079 * so we must read it before checking for more work.
7080 */
7081 tnapi->last_tag = sblk->status_tag;
7082 tnapi->last_irq_tag = tnapi->last_tag;
7083 rmb();
7084
7085 /* check for RX/TX work to do */
7086 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7087 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7088
7089 /* This test is not race-free, but it reduces
7090 * the number of interrupts by looping again.
7091 */
7092 if (tnapi == &tp->napi[1] && tp->rx_refill)
7093 continue;
7094
7095 napi_complete(napi);
7096 /* Reenable interrupts. */
7097 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7098
7099 /* This test is synchronized by napi_schedule()
7100 * and napi_complete() to close the race condition.
7101 */
7102 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7103 tw32(HOSTCC_MODE, tp->coalesce_mode |
7104 HOSTCC_MODE_ENABLE |
7105 tnapi->coal_now);
7106 }
7107 mmiowb();
7108 break;
7109 }
7110 }
7111
7112 return work_done;
7113
7114 tx_recovery:
7115 /* work_done is guaranteed to be less than budget. */
7116 napi_complete(napi);
7117 tg3_reset_task_schedule(tp);
7118 return work_done;
7119 }
7120
7121 static void tg3_process_error(struct tg3 *tp)
7122 {
7123 u32 val;
7124 bool real_error = false;
7125
7126 if (tg3_flag(tp, ERROR_PROCESSED))
7127 return;
7128
7129 /* Check Flow Attention register */
7130 val = tr32(HOSTCC_FLOW_ATTN);
7131 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7132 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7133 real_error = true;
7134 }
7135
7136 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7137 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7138 real_error = true;
7139 }
7140
7141 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7142 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7143 real_error = true;
7144 }
7145
7146 if (!real_error)
7147 return;
7148
7149 tg3_dump_state(tp);
7150
7151 tg3_flag_set(tp, ERROR_PROCESSED);
7152 tg3_reset_task_schedule(tp);
7153 }
7154
7155 static int tg3_poll(struct napi_struct *napi, int budget)
7156 {
7157 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7158 struct tg3 *tp = tnapi->tp;
7159 int work_done = 0;
7160 struct tg3_hw_status *sblk = tnapi->hw_status;
7161
7162 while (1) {
7163 if (sblk->status & SD_STATUS_ERROR)
7164 tg3_process_error(tp);
7165
7166 tg3_poll_link(tp);
7167
7168 work_done = tg3_poll_work(tnapi, work_done, budget);
7169
7170 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7171 goto tx_recovery;
7172
7173 if (unlikely(work_done >= budget))
7174 break;
7175
7176 if (tg3_flag(tp, TAGGED_STATUS)) {
7177 /* tp->last_tag is used in tg3_int_reenable() below
7178 * to tell the hw how much work has been processed,
7179 * so we must read it before checking for more work.
7180 */
7181 tnapi->last_tag = sblk->status_tag;
7182 tnapi->last_irq_tag = tnapi->last_tag;
7183 rmb();
7184 } else
7185 sblk->status &= ~SD_STATUS_UPDATED;
7186
7187 if (likely(!tg3_has_work(tnapi))) {
7188 napi_complete(napi);
7189 tg3_int_reenable(tnapi);
7190 break;
7191 }
7192 }
7193
7194 return work_done;
7195
7196 tx_recovery:
7197 /* work_done is guaranteed to be less than budget. */
7198 napi_complete(napi);
7199 tg3_reset_task_schedule(tp);
7200 return work_done;
7201 }
7202
7203 static void tg3_napi_disable(struct tg3 *tp)
7204 {
7205 int i;
7206
7207 for (i = tp->irq_cnt - 1; i >= 0; i--)
7208 napi_disable(&tp->napi[i].napi);
7209 }
7210
7211 static void tg3_napi_enable(struct tg3 *tp)
7212 {
7213 int i;
7214
7215 for (i = 0; i < tp->irq_cnt; i++)
7216 napi_enable(&tp->napi[i].napi);
7217 }
7218
7219 static void tg3_napi_init(struct tg3 *tp)
7220 {
7221 int i;
7222
7223 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7224 for (i = 1; i < tp->irq_cnt; i++)
7225 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7226 }
7227
7228 static void tg3_napi_fini(struct tg3 *tp)
7229 {
7230 int i;
7231
7232 for (i = 0; i < tp->irq_cnt; i++)
7233 netif_napi_del(&tp->napi[i].napi);
7234 }
7235
7236 static inline void tg3_netif_stop(struct tg3 *tp)
7237 {
7238 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7239 tg3_napi_disable(tp);
7240 netif_carrier_off(tp->dev);
7241 netif_tx_disable(tp->dev);
7242 }
7243
7244 /* tp->lock must be held */
7245 static inline void tg3_netif_start(struct tg3 *tp)
7246 {
7247 tg3_ptp_resume(tp);
7248
7249 /* NOTE: unconditional netif_tx_wake_all_queues is only
7250 * appropriate so long as all callers are assured to
7251 * have free tx slots (such as after tg3_init_hw)
7252 */
7253 netif_tx_wake_all_queues(tp->dev);
7254
7255 if (tp->link_up)
7256 netif_carrier_on(tp->dev);
7257
7258 tg3_napi_enable(tp);
7259 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7260 tg3_enable_ints(tp);
7261 }
7262
7263 static void tg3_irq_quiesce(struct tg3 *tp)
7264 {
7265 int i;
7266
7267 BUG_ON(tp->irq_sync);
7268
7269 tp->irq_sync = 1;
7270 smp_mb();
7271
7272 for (i = 0; i < tp->irq_cnt; i++)
7273 synchronize_irq(tp->napi[i].irq_vec);
7274 }
7275
7276 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7277 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7278 * with as well. Most of the time, this is not necessary except when
7279 * shutting down the device.
7280 */
7281 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7282 {
7283 spin_lock_bh(&tp->lock);
7284 if (irq_sync)
7285 tg3_irq_quiesce(tp);
7286 }
7287
7288 static inline void tg3_full_unlock(struct tg3 *tp)
7289 {
7290 spin_unlock_bh(&tp->lock);
7291 }
7292
7293 /* One-shot MSI handler - Chip automatically disables interrupt
7294 * after sending MSI so driver doesn't have to do it.
7295 */
7296 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7297 {
7298 struct tg3_napi *tnapi = dev_id;
7299 struct tg3 *tp = tnapi->tp;
7300
7301 prefetch(tnapi->hw_status);
7302 if (tnapi->rx_rcb)
7303 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7304
7305 if (likely(!tg3_irq_sync(tp)))
7306 napi_schedule(&tnapi->napi);
7307
7308 return IRQ_HANDLED;
7309 }
7310
7311 /* MSI ISR - No need to check for interrupt sharing and no need to
7312 * flush status block and interrupt mailbox. PCI ordering rules
7313 * guarantee that MSI will arrive after the status block.
7314 */
7315 static irqreturn_t tg3_msi(int irq, void *dev_id)
7316 {
7317 struct tg3_napi *tnapi = dev_id;
7318 struct tg3 *tp = tnapi->tp;
7319
7320 prefetch(tnapi->hw_status);
7321 if (tnapi->rx_rcb)
7322 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7323 /*
7324 * Writing any value to intr-mbox-0 clears PCI INTA# and
7325 * chip-internal interrupt pending events.
7326 * Writing non-zero to intr-mbox-0 additionally tells the
7327 * NIC to stop sending us irqs, engaging "in-intr-handler"
7328 * event coalescing.
7329 */
7330 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7331 if (likely(!tg3_irq_sync(tp)))
7332 napi_schedule(&tnapi->napi);
7333
7334 return IRQ_RETVAL(1);
7335 }
7336
7337 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7338 {
7339 struct tg3_napi *tnapi = dev_id;
7340 struct tg3 *tp = tnapi->tp;
7341 struct tg3_hw_status *sblk = tnapi->hw_status;
7342 unsigned int handled = 1;
7343
7344 /* In INTx mode, it is possible for the interrupt to arrive at
7345 * the CPU before the status block that was posted ahead of it.
7346 * Reading the PCI State register will confirm whether the
7347 * interrupt is ours and will flush the status block.
7348 */
7349 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7350 if (tg3_flag(tp, CHIP_RESETTING) ||
7351 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7352 handled = 0;
7353 goto out;
7354 }
7355 }
7356
7357 /*
7358 * Writing any value to intr-mbox-0 clears PCI INTA# and
7359 * chip-internal interrupt pending events.
7360 * Writing non-zero to intr-mbox-0 additionally tells the
7361 * NIC to stop sending us irqs, engaging "in-intr-handler"
7362 * event coalescing.
7363 *
7364 * Flush the mailbox to de-assert the IRQ immediately to prevent
7365 * spurious interrupts. The flush impacts performance but
7366 * excessive spurious interrupts can be worse in some cases.
7367 */
7368 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7369 if (tg3_irq_sync(tp))
7370 goto out;
7371 sblk->status &= ~SD_STATUS_UPDATED;
7372 if (likely(tg3_has_work(tnapi))) {
7373 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7374 napi_schedule(&tnapi->napi);
7375 } else {
7376 /* No work, shared interrupt perhaps? re-enable
7377 * interrupts, and flush that PCI write
7378 */
7379 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7380 0x00000000);
7381 }
7382 out:
7383 return IRQ_RETVAL(handled);
7384 }
7385
7386 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7387 {
7388 struct tg3_napi *tnapi = dev_id;
7389 struct tg3 *tp = tnapi->tp;
7390 struct tg3_hw_status *sblk = tnapi->hw_status;
7391 unsigned int handled = 1;
7392
7393 /* In INTx mode, it is possible for the interrupt to arrive at
7394 * the CPU before the status block that was posted ahead of it.
7395 * Reading the PCI State register will confirm whether the
7396 * interrupt is ours and will flush the status block.
7397 */
7398 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7399 if (tg3_flag(tp, CHIP_RESETTING) ||
7400 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7401 handled = 0;
7402 goto out;
7403 }
7404 }
7405
7406 /*
7407 * Writing any value to intr-mbox-0 clears PCI INTA# and
7408 * chip-internal interrupt pending events.
7409 * Writing non-zero to intr-mbox-0 additionally tells the
7410 * NIC to stop sending us irqs, engaging "in-intr-handler"
7411 * event coalescing.
7412 *
7413 * Flush the mailbox to de-assert the IRQ immediately to prevent
7414 * spurious interrupts. The flush impacts performance but
7415 * excessive spurious interrupts can be worse in some cases.
7416 */
7417 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7418
7419 /*
7420 * In a shared interrupt configuration, sometimes other devices'
7421 * interrupts will scream. We record the current status tag here
7422 * so that the above check can report that the screaming interrupts
7423 * are unhandled. Eventually they will be silenced.
7424 */
7425 tnapi->last_irq_tag = sblk->status_tag;
7426
7427 if (tg3_irq_sync(tp))
7428 goto out;
7429
7430 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7431
7432 napi_schedule(&tnapi->napi);
7433
7434 out:
7435 return IRQ_RETVAL(handled);
7436 }
7437
7438 /* ISR for interrupt test */
7439 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7440 {
7441 struct tg3_napi *tnapi = dev_id;
7442 struct tg3 *tp = tnapi->tp;
7443 struct tg3_hw_status *sblk = tnapi->hw_status;
7444
7445 if ((sblk->status & SD_STATUS_UPDATED) ||
7446 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7447 tg3_disable_ints(tp);
7448 return IRQ_RETVAL(1);
7449 }
7450 return IRQ_RETVAL(0);
7451 }
7452
7453 #ifdef CONFIG_NET_POLL_CONTROLLER
7454 static void tg3_poll_controller(struct net_device *dev)
7455 {
7456 int i;
7457 struct tg3 *tp = netdev_priv(dev);
7458
7459 if (tg3_irq_sync(tp))
7460 return;
7461
7462 for (i = 0; i < tp->irq_cnt; i++)
7463 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7464 }
7465 #endif
7466
7467 static void tg3_tx_timeout(struct net_device *dev)
7468 {
7469 struct tg3 *tp = netdev_priv(dev);
7470
7471 if (netif_msg_tx_err(tp)) {
7472 netdev_err(dev, "transmit timed out, resetting\n");
7473 tg3_dump_state(tp);
7474 }
7475
7476 tg3_reset_task_schedule(tp);
7477 }
7478
7479 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
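/* Worked example: base = 0xfffffff0 and len = 32 give
 * base + len + 8 = 0x18 after 32-bit wraparound, which is less than
 * base, so the buffer straddles a boundary. The extra 8 bytes
 * presumably guard against buffers ending too close to a boundary.
 */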
7480 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7481 {
7482 u32 base = (u32) mapping & 0xffffffff;
7483
7484 return base + len + 8 < base;
7485 }
7486
7487 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7488 * of any 4GB boundaries: 4G, 8G, etc
7489 */
7490 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7491 u32 len, u32 mss)
7492 {
7493 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7494 u32 base = (u32) mapping & 0xffffffff;
7495
7496 return ((base + len + (mss & 0x3fff)) < base);
7497 }
7498 return 0;
7499 }
7500
7501 /* Test for DMA addresses > 40-bit */
7502 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7503 int len)
7504 {
7505 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7506 if (tg3_flag(tp, 40BIT_DMA_BUG))
7507 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7508 return 0;
7509 #else
7510 return 0;
7511 #endif
7512 }
7513
7514 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7515 dma_addr_t mapping, u32 len, u32 flags,
7516 u32 mss, u32 vlan)
7517 {
7518 txbd->addr_hi = ((u64) mapping >> 32);
7519 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7520 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7521 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7522 }
7523
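/* Post one mapped buffer to the tx ring at *entry, splitting it into
 * chunks no larger than tp->dma_limit when that limit is set, and
 * never leaving a trailing chunk of 8 bytes or less. Returns true if
 * a hardware bug condition was hit, in which case the caller falls
 * back to tigon3_dma_hwbug_workaround().
 */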
7524 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7525 dma_addr_t map, u32 len, u32 flags,
7526 u32 mss, u32 vlan)
7527 {
7528 struct tg3 *tp = tnapi->tp;
7529 bool hwbug = false;
7530
7531 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7532 hwbug = true;
7533
7534 if (tg3_4g_overflow_test(map, len))
7535 hwbug = true;
7536
7537 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7538 hwbug = true;
7539
7540 if (tg3_40bit_overflow_test(tp, map, len))
7541 hwbug = true;
7542
7543 if (tp->dma_limit) {
7544 u32 prvidx = *entry;
7545 u32 tmp_flag = flags & ~TXD_FLAG_END;
7546 while (len > tp->dma_limit && *budget) {
7547 u32 frag_len = tp->dma_limit;
7548 len -= tp->dma_limit;
7549
7550 /* Avoid the 8-byte DMA problem */
7551 if (len <= 8) {
7552 len += tp->dma_limit / 2;
7553 frag_len = tp->dma_limit / 2;
7554 }
7555
7556 tnapi->tx_buffers[*entry].fragmented = true;
7557
7558 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7559 frag_len, tmp_flag, mss, vlan);
7560 *budget -= 1;
7561 prvidx = *entry;
7562 *entry = NEXT_TX(*entry);
7563
7564 map += frag_len;
7565 }
7566
7567 if (len) {
7568 if (*budget) {
7569 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7570 len, flags, mss, vlan);
7571 *budget -= 1;
7572 *entry = NEXT_TX(*entry);
7573 } else {
7574 hwbug = true;
7575 tnapi->tx_buffers[prvidx].fragmented = false;
7576 }
7577 }
7578 } else {
7579 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7580 len, flags, mss, vlan);
7581 *entry = NEXT_TX(*entry);
7582 }
7583
7584 return hwbug;
7585 }
7586
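/* Unmap a (possibly partially) posted skb: entry indexes the
 * descriptor holding the linear head, and last is the index of the
 * final fragment to unmap, or -1 if only the head was mapped.
 * Descriptors flagged as fragmented were chunk-split by
 * tg3_tx_frag_set() and are skipped over.
 */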
7587 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7588 {
7589 int i;
7590 struct sk_buff *skb;
7591 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7592
7593 skb = txb->skb;
7594 txb->skb = NULL;
7595
7596 pci_unmap_single(tnapi->tp->pdev,
7597 dma_unmap_addr(txb, mapping),
7598 skb_headlen(skb),
7599 PCI_DMA_TODEVICE);
7600
7601 while (txb->fragmented) {
7602 txb->fragmented = false;
7603 entry = NEXT_TX(entry);
7604 txb = &tnapi->tx_buffers[entry];
7605 }
7606
7607 for (i = 0; i <= last; i++) {
7608 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7609
7610 entry = NEXT_TX(entry);
7611 txb = &tnapi->tx_buffers[entry];
7612
7613 pci_unmap_page(tnapi->tp->pdev,
7614 dma_unmap_addr(txb, mapping),
7615 skb_frag_size(frag), PCI_DMA_TODEVICE);
7616
7617 while (txb->fragmented) {
7618 txb->fragmented = false;
7619 entry = NEXT_TX(entry);
7620 txb = &tnapi->tx_buffers[entry];
7621 }
7622 }
7623 }
7624
7625 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7626 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7627 struct sk_buff **pskb,
7628 u32 *entry, u32 *budget,
7629 u32 base_flags, u32 mss, u32 vlan)
7630 {
7631 struct tg3 *tp = tnapi->tp;
7632 struct sk_buff *new_skb, *skb = *pskb;
7633 dma_addr_t new_addr = 0;
7634 int ret = 0;
7635
7636 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7637 new_skb = skb_copy(skb, GFP_ATOMIC);
7638 else {
7639 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7640
7641 new_skb = skb_copy_expand(skb,
7642 skb_headroom(skb) + more_headroom,
7643 skb_tailroom(skb), GFP_ATOMIC);
7644 }
7645
7646 if (!new_skb) {
7647 ret = -1;
7648 } else {
7649 /* New SKB is guaranteed to be linear. */
7650 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7651 PCI_DMA_TODEVICE);
7652 /* Make sure the mapping succeeded */
7653 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7654 dev_kfree_skb(new_skb);
7655 ret = -1;
7656 } else {
7657 u32 save_entry = *entry;
7658
7659 base_flags |= TXD_FLAG_END;
7660
7661 tnapi->tx_buffers[*entry].skb = new_skb;
7662 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7663 mapping, new_addr);
7664
7665 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7666 new_skb->len, base_flags,
7667 mss, vlan)) {
7668 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7669 dev_kfree_skb(new_skb);
7670 ret = -1;
7671 }
7672 }
7673 }
7674
7675 dev_kfree_skb(skb);
7676 *pskb = new_skb;
7677 return ret;
7678 }
7679
7680 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7681
7682 /* Use GSO to work around a rare TSO bug that may be triggered when the
7683 * TSO header is greater than 80 bytes.
7684 */
7685 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7686 {
7687 struct sk_buff *segs, *nskb;
7688 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7689
7690 /* Estimate the number of fragments in the worst case */
7691 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7692 netif_stop_queue(tp->dev);
7693
7694 /* netif_tx_stop_queue() must be done before checking
7695 * the tx index in tg3_tx_avail() below, because in
7696 * tg3_tx(), we update tx index before checking for
7697 * netif_tx_queue_stopped().
7698 */
7699 smp_mb();
7700 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7701 return NETDEV_TX_BUSY;
7702
7703 netif_wake_queue(tp->dev);
7704 }
7705
7706 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7707 if (IS_ERR(segs))
7708 goto tg3_tso_bug_end;
7709
7710 do {
7711 nskb = segs;
7712 segs = segs->next;
7713 nskb->next = NULL;
7714 tg3_start_xmit(nskb, tp->dev);
7715 } while (segs);
7716
7717 tg3_tso_bug_end:
7718 dev_kfree_skb(skb);
7719
7720 return NETDEV_TX_OK;
7721 }
7722
7723 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7724 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7725 */
7726 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7727 {
7728 struct tg3 *tp = netdev_priv(dev);
7729 u32 len, entry, base_flags, mss, vlan = 0;
7730 u32 budget;
7731 int i = -1, would_hit_hwbug;
7732 dma_addr_t mapping;
7733 struct tg3_napi *tnapi;
7734 struct netdev_queue *txq;
7735 unsigned int last;
7736
7737 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7738 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7739 if (tg3_flag(tp, ENABLE_TSS))
7740 tnapi++;
7741
7742 budget = tg3_tx_avail(tnapi);
7743
7744 /* We are running in BH disabled context with netif_tx_lock
7745 * and TX reclaim runs via tp->napi.poll inside of a software
7746 * interrupt. Furthermore, IRQ processing runs lockless so we have
7747 * no IRQ context deadlocks to worry about either. Rejoice!
7748 */
7749 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7750 if (!netif_tx_queue_stopped(txq)) {
7751 netif_tx_stop_queue(txq);
7752
7753 /* This is a hard error, log it. */
7754 netdev_err(dev,
7755 "BUG! Tx Ring full when queue awake!\n");
7756 }
7757 return NETDEV_TX_BUSY;
7758 }
7759
7760 entry = tnapi->tx_prod;
7761 base_flags = 0;
7762 if (skb->ip_summed == CHECKSUM_PARTIAL)
7763 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7764
7765 mss = skb_shinfo(skb)->gso_size;
7766 if (mss) {
7767 struct iphdr *iph;
7768 u32 tcp_opt_len, hdr_len;
7769
7770 if (skb_header_cloned(skb) &&
7771 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7772 goto drop;
7773
7774 iph = ip_hdr(skb);
7775 tcp_opt_len = tcp_optlen(skb);
7776
7777 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7778
7779 if (!skb_is_gso_v6(skb)) {
7780 iph->check = 0;
7781 iph->tot_len = htons(mss + hdr_len);
7782 }
7783
7784 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7785 tg3_flag(tp, TSO_BUG))
7786 return tg3_tso_bug(tp, skb);
7787
7788 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7789 TXD_FLAG_CPU_POST_DMA);
7790
7791 if (tg3_flag(tp, HW_TSO_1) ||
7792 tg3_flag(tp, HW_TSO_2) ||
7793 tg3_flag(tp, HW_TSO_3)) {
7794 tcp_hdr(skb)->check = 0;
7795 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7796 } else
7797 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7798 iph->daddr, 0,
7799 IPPROTO_TCP,
7800 0);
7801
7802 if (tg3_flag(tp, HW_TSO_3)) {
7803 mss |= (hdr_len & 0xc) << 12;
7804 if (hdr_len & 0x10)
7805 base_flags |= 0x00000010;
7806 base_flags |= (hdr_len & 0x3e0) << 5;
7807 } else if (tg3_flag(tp, HW_TSO_2))
7808 mss |= hdr_len << 9;
7809 else if (tg3_flag(tp, HW_TSO_1) ||
7810 tg3_asic_rev(tp) == ASIC_REV_5705) {
7811 if (tcp_opt_len || iph->ihl > 5) {
7812 int tsflags;
7813
7814 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7815 mss |= (tsflags << 11);
7816 }
7817 } else {
7818 if (tcp_opt_len || iph->ihl > 5) {
7819 int tsflags;
7820
7821 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7822 base_flags |= tsflags << 12;
7823 }
7824 }
7825 }
7826
7827 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7828 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7829 base_flags |= TXD_FLAG_JMB_PKT;
7830
7831 if (vlan_tx_tag_present(skb)) {
7832 base_flags |= TXD_FLAG_VLAN;
7833 vlan = vlan_tx_tag_get(skb);
7834 }
7835
7836 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7837 tg3_flag(tp, TX_TSTAMP_EN)) {
7838 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7839 base_flags |= TXD_FLAG_HWTSTAMP;
7840 }
7841
7842 len = skb_headlen(skb);
7843
7844 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7845 if (pci_dma_mapping_error(tp->pdev, mapping))
7846 goto drop;
7847
7849 tnapi->tx_buffers[entry].skb = skb;
7850 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7851
7852 would_hit_hwbug = 0;
7853
7854 if (tg3_flag(tp, 5701_DMA_BUG))
7855 would_hit_hwbug = 1;
7856
7857 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7858 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7859 mss, vlan)) {
7860 would_hit_hwbug = 1;
7861 } else if (skb_shinfo(skb)->nr_frags > 0) {
7862 u32 tmp_mss = mss;
7863
7864 if (!tg3_flag(tp, HW_TSO_1) &&
7865 !tg3_flag(tp, HW_TSO_2) &&
7866 !tg3_flag(tp, HW_TSO_3))
7867 tmp_mss = 0;
7868
7869 /* Now loop through additional data
7870 * fragments, and queue them.
7871 */
7872 last = skb_shinfo(skb)->nr_frags - 1;
7873 for (i = 0; i <= last; i++) {
7874 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7875
7876 len = skb_frag_size(frag);
7877 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7878 len, DMA_TO_DEVICE);
7879
7880 tnapi->tx_buffers[entry].skb = NULL;
7881 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7882 mapping);
7883 if (dma_mapping_error(&tp->pdev->dev, mapping))
7884 goto dma_error;
7885
7886 if (!budget ||
7887 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7888 len, base_flags |
7889 ((i == last) ? TXD_FLAG_END : 0),
7890 tmp_mss, vlan)) {
7891 would_hit_hwbug = 1;
7892 break;
7893 }
7894 }
7895 }
7896
7897 if (would_hit_hwbug) {
7898 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7899
7900 /* If the workaround fails due to memory/mapping
7901 * failure, silently drop this packet.
7902 */
7903 entry = tnapi->tx_prod;
7904 budget = tg3_tx_avail(tnapi);
7905 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7906 base_flags, mss, vlan))
7907 goto drop_nofree;
7908 }
7909
7910 skb_tx_timestamp(skb);
7911 netdev_tx_sent_queue(txq, skb->len);
7912
7913 /* Sync BD data before updating mailbox */
7914 wmb();
7915
7916 /* Packets are ready, update Tx producer idx local and on card. */
7917 tw32_tx_mbox(tnapi->prodmbox, entry);
7918
7919 tnapi->tx_prod = entry;
7920 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7921 netif_tx_stop_queue(txq);
7922
7923 /* netif_tx_stop_queue() must be done before checking
7924 * the tx index in tg3_tx_avail() below, because in
7925 * tg3_tx(), we update tx index before checking for
7926 * netif_tx_queue_stopped().
7927 */
7928 smp_mb();
7929 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7930 netif_tx_wake_queue(txq);
7931 }
7932
7933 mmiowb();
7934 return NETDEV_TX_OK;
7935
7936 dma_error:
7937 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7938 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7939 drop:
7940 dev_kfree_skb(skb);
7941 drop_nofree:
7942 tp->tx_dropped++;
7943 return NETDEV_TX_OK;
7944 }
7945
7946 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7947 {
7948 if (enable) {
7949 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7950 MAC_MODE_PORT_MODE_MASK);
7951
7952 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7953
7954 if (!tg3_flag(tp, 5705_PLUS))
7955 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7956
7957 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7958 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7959 else
7960 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7961 } else {
7962 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7963
7964 if (tg3_flag(tp, 5705_PLUS) ||
7965 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7966 tg3_asic_rev(tp) == ASIC_REV_5700)
7967 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7968 }
7969
7970 tw32(MAC_MODE, tp->mac_mode);
7971 udelay(40);
7972 }
7973
7974 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7975 {
7976 u32 val, bmcr, mac_mode, ptest = 0;
7977
7978 tg3_phy_toggle_apd(tp, false);
7979 tg3_phy_toggle_automdix(tp, false);
7980
7981 if (extlpbk && tg3_phy_set_extloopbk(tp))
7982 return -EIO;
7983
7984 bmcr = BMCR_FULLDPLX;
7985 switch (speed) {
7986 case SPEED_10:
7987 break;
7988 case SPEED_100:
7989 bmcr |= BMCR_SPEED100;
7990 break;
7991 case SPEED_1000:
7992 default:
7993 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7994 speed = SPEED_100;
7995 bmcr |= BMCR_SPEED100;
7996 } else {
7997 speed = SPEED_1000;
7998 bmcr |= BMCR_SPEED1000;
7999 }
8000 }
8001
8002 if (extlpbk) {
8003 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8004 tg3_readphy(tp, MII_CTRL1000, &val);
8005 val |= CTL1000_AS_MASTER |
8006 CTL1000_ENABLE_MASTER;
8007 tg3_writephy(tp, MII_CTRL1000, val);
8008 } else {
8009 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8010 MII_TG3_FET_PTEST_TRIM_2;
8011 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8012 }
8013 } else
8014 bmcr |= BMCR_LOOPBACK;
8015
8016 tg3_writephy(tp, MII_BMCR, bmcr);
8017
8018 /* The write needs to be flushed for the FETs */
8019 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8020 tg3_readphy(tp, MII_BMCR, &bmcr);
8021
8022 udelay(40);
8023
8024 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8025 tg3_asic_rev(tp) == ASIC_REV_5785) {
8026 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8027 MII_TG3_FET_PTEST_FRC_TX_LINK |
8028 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8029
8030 /* The write needs to be flushed for the AC131 */
8031 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8032 }
8033
8034 /* Reset to prevent losing 1st rx packet intermittently */
8035 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8036 tg3_flag(tp, 5780_CLASS)) {
8037 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8038 udelay(10);
8039 tw32_f(MAC_RX_MODE, tp->rx_mode);
8040 }
8041
8042 mac_mode = tp->mac_mode &
8043 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8044 if (speed == SPEED_1000)
8045 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8046 else
8047 mac_mode |= MAC_MODE_PORT_MODE_MII;
8048
8049 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8050 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8051
8052 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8053 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8054 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8055 mac_mode |= MAC_MODE_LINK_POLARITY;
8056
8057 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8058 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8059 }
8060
8061 tw32(MAC_MODE, mac_mode);
8062 udelay(40);
8063
8064 return 0;
8065 }
8066
8067 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8068 {
8069 struct tg3 *tp = netdev_priv(dev);
8070
8071 if (features & NETIF_F_LOOPBACK) {
8072 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8073 return;
8074
8075 spin_lock_bh(&tp->lock);
8076 tg3_mac_loopback(tp, true);
8077 netif_carrier_on(tp->dev);
8078 spin_unlock_bh(&tp->lock);
8079 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8080 } else {
8081 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8082 return;
8083
8084 spin_lock_bh(&tp->lock);
8085 tg3_mac_loopback(tp, false);
8086 /* Force link status check */
8087 tg3_setup_phy(tp, true);
8088 spin_unlock_bh(&tp->lock);
8089 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8090 }
8091 }
8092
8093 static netdev_features_t tg3_fix_features(struct net_device *dev,
8094 netdev_features_t features)
8095 {
8096 struct tg3 *tp = netdev_priv(dev);
8097
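/* TSO is not usable together with jumbo frames on 5780-class
 * devices, so drop every TSO feature bit once the MTU exceeds the
 * standard Ethernet payload size.
 */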
8098 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8099 features &= ~NETIF_F_ALL_TSO;
8100
8101 return features;
8102 }
8103
8104 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8105 {
8106 netdev_features_t changed = dev->features ^ features;
8107
8108 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8109 tg3_set_loopback(dev, features);
8110
8111 return 0;
8112 }
8113
8114 static void tg3_rx_prodring_free(struct tg3 *tp,
8115 struct tg3_rx_prodring_set *tpr)
8116 {
8117 int i;
8118
8119 if (tpr != &tp->napi[0].prodring) {
8120 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8121 i = (i + 1) & tp->rx_std_ring_mask)
8122 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8123 tp->rx_pkt_map_sz);
8124
8125 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8126 for (i = tpr->rx_jmb_cons_idx;
8127 i != tpr->rx_jmb_prod_idx;
8128 i = (i + 1) & tp->rx_jmb_ring_mask) {
8129 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8130 TG3_RX_JMB_MAP_SZ);
8131 }
8132 }
8133
8134 return;
8135 }
8136
8137 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8138 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8139 tp->rx_pkt_map_sz);
8140
8141 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8142 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8143 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8144 TG3_RX_JMB_MAP_SZ);
8145 }
8146 }
8147
8148 /* Initialize rx rings for packet processing.
8149 *
8150 * The chip has been shut down and the driver detached from
8151 * the networking stack, so no interrupts or new tx packets will
8152 * end up in the driver. tp->{tx,}lock are held and thus
8153 * we may not sleep.
8154 */
8155 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8156 struct tg3_rx_prodring_set *tpr)
8157 {
8158 u32 i, rx_pkt_dma_sz;
8159
8160 tpr->rx_std_cons_idx = 0;
8161 tpr->rx_std_prod_idx = 0;
8162 tpr->rx_jmb_cons_idx = 0;
8163 tpr->rx_jmb_prod_idx = 0;
8164
8165 if (tpr != &tp->napi[0].prodring) {
8166 memset(&tpr->rx_std_buffers[0], 0,
8167 TG3_RX_STD_BUFF_RING_SIZE(tp));
8168 if (tpr->rx_jmb_buffers)
8169 memset(&tpr->rx_jmb_buffers[0], 0,
8170 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8171 goto done;
8172 }
8173
8174 /* Zero out all descriptors. */
8175 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8176
8177 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8178 if (tg3_flag(tp, 5780_CLASS) &&
8179 tp->dev->mtu > ETH_DATA_LEN)
8180 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8181 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8182
8183 /* Initialize invariants of the rings, we only set this
8184 * stuff once. This works because the card does not
8185 * write into the rx buffer posting rings.
8186 */
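/* The opaque cookie encodes the ring type and buffer index; the
 * chip echoes it back in RX return descriptors so the driver can
 * locate the buffer that completed.
 */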
8187 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8188 struct tg3_rx_buffer_desc *rxd;
8189
8190 rxd = &tpr->rx_std[i];
8191 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8192 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8193 rxd->opaque = (RXD_OPAQUE_RING_STD |
8194 (i << RXD_OPAQUE_INDEX_SHIFT));
8195 }
8196
8197 /* Now allocate fresh SKBs for each rx ring. */
8198 for (i = 0; i < tp->rx_pending; i++) {
8199 unsigned int frag_size;
8200
8201 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8202 &frag_size) < 0) {
8203 netdev_warn(tp->dev,
8204 "Using a smaller RX standard ring. Only "
8205 "%d out of %d buffers were allocated "
8206 "successfully\n", i, tp->rx_pending);
8207 if (i == 0)
8208 goto initfail;
8209 tp->rx_pending = i;
8210 break;
8211 }
8212 }
8213
8214 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8215 goto done;
8216
8217 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8218
8219 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8220 goto done;
8221
8222 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8223 struct tg3_rx_buffer_desc *rxd;
8224
8225 rxd = &tpr->rx_jmb[i].std;
8226 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8227 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8228 RXD_FLAG_JUMBO;
8229 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8230 (i << RXD_OPAQUE_INDEX_SHIFT));
8231 }
8232
8233 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8234 unsigned int frag_size;
8235
8236 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8237 &frag_size) < 0) {
8238 netdev_warn(tp->dev,
8239 "Using a smaller RX jumbo ring. Only %d "
8240 "out of %d buffers were allocated "
8241 "successfully\n", i, tp->rx_jumbo_pending);
8242 if (i == 0)
8243 goto initfail;
8244 tp->rx_jumbo_pending = i;
8245 break;
8246 }
8247 }
8248
8249 done:
8250 return 0;
8251
8252 initfail:
8253 tg3_rx_prodring_free(tp, tpr);
8254 return -ENOMEM;
8255 }
8256
8257 static void tg3_rx_prodring_fini(struct tg3 *tp,
8258 struct tg3_rx_prodring_set *tpr)
8259 {
8260 kfree(tpr->rx_std_buffers);
8261 tpr->rx_std_buffers = NULL;
8262 kfree(tpr->rx_jmb_buffers);
8263 tpr->rx_jmb_buffers = NULL;
8264 if (tpr->rx_std) {
8265 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8266 tpr->rx_std, tpr->rx_std_mapping);
8267 tpr->rx_std = NULL;
8268 }
8269 if (tpr->rx_jmb) {
8270 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8271 tpr->rx_jmb, tpr->rx_jmb_mapping);
8272 tpr->rx_jmb = NULL;
8273 }
8274 }
8275
8276 static int tg3_rx_prodring_init(struct tg3 *tp,
8277 struct tg3_rx_prodring_set *tpr)
8278 {
8279 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8280 GFP_KERNEL);
8281 if (!tpr->rx_std_buffers)
8282 return -ENOMEM;
8283
8284 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8285 TG3_RX_STD_RING_BYTES(tp),
8286 &tpr->rx_std_mapping,
8287 GFP_KERNEL);
8288 if (!tpr->rx_std)
8289 goto err_out;
8290
8291 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8292 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8293 GFP_KERNEL);
8294 if (!tpr->rx_jmb_buffers)
8295 goto err_out;
8296
8297 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8298 TG3_RX_JMB_RING_BYTES(tp),
8299 &tpr->rx_jmb_mapping,
8300 GFP_KERNEL);
8301 if (!tpr->rx_jmb)
8302 goto err_out;
8303 }
8304
8305 return 0;
8306
8307 err_out:
8308 tg3_rx_prodring_fini(tp, tpr);
8309 return -ENOMEM;
8310 }
8311
8312 /* Free up pending packets in all rx/tx rings.
8313 *
8314 * The chip has been shut down and the driver detached from
8315 * the networking stack, so no interrupts or new tx packets will
8316 * end up in the driver. tp->{tx,}lock is not held and we are not
8317 * in an interrupt context and thus may sleep.
8318 */
8319 static void tg3_free_rings(struct tg3 *tp)
8320 {
8321 int i, j;
8322
8323 for (j = 0; j < tp->irq_cnt; j++) {
8324 struct tg3_napi *tnapi = &tp->napi[j];
8325
8326 tg3_rx_prodring_free(tp, &tnapi->prodring);
8327
8328 if (!tnapi->tx_buffers)
8329 continue;
8330
8331 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8332 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8333
8334 if (!skb)
8335 continue;
8336
8337 tg3_tx_skb_unmap(tnapi, i,
8338 skb_shinfo(skb)->nr_frags - 1);
8339
8340 dev_kfree_skb_any(skb);
8341 }
8342 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8343 }
8344 }
8345
8346 /* Initialize tx/rx rings for packet processing.
8347 *
8348 * The chip has been shut down and the driver detached from
8349 * the networking stack, so no interrupts or new tx packets will
8350 * end up in the driver. tp->{tx,}lock are held and thus
8351 * we may not sleep.
8352 */
8353 static int tg3_init_rings(struct tg3 *tp)
8354 {
8355 int i;
8356
8357 /* Free up all the SKBs. */
8358 tg3_free_rings(tp);
8359
8360 for (i = 0; i < tp->irq_cnt; i++) {
8361 struct tg3_napi *tnapi = &tp->napi[i];
8362
8363 tnapi->last_tag = 0;
8364 tnapi->last_irq_tag = 0;
8365 tnapi->hw_status->status = 0;
8366 tnapi->hw_status->status_tag = 0;
8367 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8368
8369 tnapi->tx_prod = 0;
8370 tnapi->tx_cons = 0;
8371 if (tnapi->tx_ring)
8372 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8373
8374 tnapi->rx_rcb_ptr = 0;
8375 if (tnapi->rx_rcb)
8376 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8377
8378 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8379 tg3_free_rings(tp);
8380 return -ENOMEM;
8381 }
8382 }
8383
8384 return 0;
8385 }
8386
8387 static void tg3_mem_tx_release(struct tg3 *tp)
8388 {
8389 int i;
8390
8391 for (i = 0; i < tp->irq_max; i++) {
8392 struct tg3_napi *tnapi = &tp->napi[i];
8393
8394 if (tnapi->tx_ring) {
8395 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8396 tnapi->tx_ring, tnapi->tx_desc_mapping);
8397 tnapi->tx_ring = NULL;
8398 }
8399
8400 kfree(tnapi->tx_buffers);
8401 tnapi->tx_buffers = NULL;
8402 }
8403 }
8404
8405 static int tg3_mem_tx_acquire(struct tg3 *tp)
8406 {
8407 int i;
8408 struct tg3_napi *tnapi = &tp->napi[0];
8409
8410 /* If multivector TSS is enabled, vector 0 does not handle
8411 * tx interrupts. Don't allocate any resources for it.
8412 */
8413 if (tg3_flag(tp, ENABLE_TSS))
8414 tnapi++;
8415
8416 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8417 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8418 TG3_TX_RING_SIZE, GFP_KERNEL);
8419 if (!tnapi->tx_buffers)
8420 goto err_out;
8421
8422 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8423 TG3_TX_RING_BYTES,
8424 &tnapi->tx_desc_mapping,
8425 GFP_KERNEL);
8426 if (!tnapi->tx_ring)
8427 goto err_out;
8428 }
8429
8430 return 0;
8431
8432 err_out:
8433 tg3_mem_tx_release(tp);
8434 return -ENOMEM;
8435 }
8436
8437 static void tg3_mem_rx_release(struct tg3 *tp)
8438 {
8439 int i;
8440
8441 for (i = 0; i < tp->irq_max; i++) {
8442 struct tg3_napi *tnapi = &tp->napi[i];
8443
8444 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8445
8446 if (!tnapi->rx_rcb)
8447 continue;
8448
8449 dma_free_coherent(&tp->pdev->dev,
8450 TG3_RX_RCB_RING_BYTES(tp),
8451 tnapi->rx_rcb,
8452 tnapi->rx_rcb_mapping);
8453 tnapi->rx_rcb = NULL;
8454 }
8455 }
8456
8457 static int tg3_mem_rx_acquire(struct tg3 *tp)
8458 {
8459 unsigned int i, limit;
8460
8461 limit = tp->rxq_cnt;
8462
8463 /* If RSS is enabled, we need a (dummy) producer ring
8464 * set on vector zero. This is the true hw prodring.
8465 */
8466 if (tg3_flag(tp, ENABLE_RSS))
8467 limit++;
8468
8469 for (i = 0; i < limit; i++) {
8470 struct tg3_napi *tnapi = &tp->napi[i];
8471
8472 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8473 goto err_out;
8474
8475 /* If multivector RSS is enabled, vector 0
8476 * does not handle rx or tx interrupts.
8477 * Don't allocate any resources for it.
8478 */
8479 if (!i && tg3_flag(tp, ENABLE_RSS))
8480 continue;
8481
8482 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8483 TG3_RX_RCB_RING_BYTES(tp),
8484 &tnapi->rx_rcb_mapping,
8485 GFP_KERNEL | __GFP_ZERO);
8486 if (!tnapi->rx_rcb)
8487 goto err_out;
8488 }
8489
8490 return 0;
8491
8492 err_out:
8493 tg3_mem_rx_release(tp);
8494 return -ENOMEM;
8495 }
8496
8497 /*
8498 * Must not be invoked with interrupt sources disabled and
8499 * the hardware shut down.
8500 */
8501 static void tg3_free_consistent(struct tg3 *tp)
8502 {
8503 int i;
8504
8505 for (i = 0; i < tp->irq_cnt; i++) {
8506 struct tg3_napi *tnapi = &tp->napi[i];
8507
8508 if (tnapi->hw_status) {
8509 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8510 tnapi->hw_status,
8511 tnapi->status_mapping);
8512 tnapi->hw_status = NULL;
8513 }
8514 }
8515
8516 tg3_mem_rx_release(tp);
8517 tg3_mem_tx_release(tp);
8518
8519 if (tp->hw_stats) {
8520 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8521 tp->hw_stats, tp->stats_mapping);
8522 tp->hw_stats = NULL;
8523 }
8524 }
8525
8526 /*
8527 * Must not be invoked with interrupt sources disabled and
8528 * the hardware shut down. Can sleep.
8529 */
8530 static int tg3_alloc_consistent(struct tg3 *tp)
8531 {
8532 int i;
8533
8534 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8535 sizeof(struct tg3_hw_stats),
8536 &tp->stats_mapping,
8537 GFP_KERNEL | __GFP_ZERO);
8538 if (!tp->hw_stats)
8539 goto err_out;
8540
8541 for (i = 0; i < tp->irq_cnt; i++) {
8542 struct tg3_napi *tnapi = &tp->napi[i];
8543 struct tg3_hw_status *sblk;
8544
8545 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8546 TG3_HW_STATUS_SIZE,
8547 &tnapi->status_mapping,
8548 GFP_KERNEL | __GFP_ZERO);
8549 if (!tnapi->hw_status)
8550 goto err_out;
8551
8552 sblk = tnapi->hw_status;
8553
8554 if (tg3_flag(tp, ENABLE_RSS)) {
8555 u16 *prodptr = NULL;
8556
8557 /*
8558 * When RSS is enabled, the status block format changes
8559 * slightly. The "rx_jumbo_consumer", "reserved",
8560 * and "rx_mini_consumer" members get mapped to the
8561 * other three rx return ring producer indexes.
8562 */
8563 switch (i) {
8564 case 1:
8565 prodptr = &sblk->idx[0].rx_producer;
8566 break;
8567 case 2:
8568 prodptr = &sblk->rx_jumbo_consumer;
8569 break;
8570 case 3:
8571 prodptr = &sblk->reserved;
8572 break;
8573 case 4:
8574 prodptr = &sblk->rx_mini_consumer;
8575 break;
8576 }
8577 tnapi->rx_rcb_prod_idx = prodptr;
8578 } else {
8579 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8580 }
8581 }
8582
8583 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8584 goto err_out;
8585
8586 return 0;
8587
8588 err_out:
8589 tg3_free_consistent(tp);
8590 return -ENOMEM;
8591 }
8592
8593 #define MAX_WAIT_CNT 1000
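/* The polling loops below wait 100 us per iteration, so this caps
 * a single wait at roughly 100 ms.
 */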
8594
8595 /* To stop a block, clear the enable bit and poll until it
8596 * clears. tp->lock is held.
8597 */
8598 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8599 {
8600 unsigned int i;
8601 u32 val;
8602
8603 if (tg3_flag(tp, 5705_PLUS)) {
8604 switch (ofs) {
8605 case RCVLSC_MODE:
8606 case DMAC_MODE:
8607 case MBFREE_MODE:
8608 case BUFMGR_MODE:
8609 case MEMARB_MODE:
8610 /* We can't enable/disable these bits of the
8611 * 5705/5750, just say success.
8612 */
8613 return 0;
8614
8615 default:
8616 break;
8617 }
8618 }
8619
8620 val = tr32(ofs);
8621 val &= ~enable_bit;
8622 tw32_f(ofs, val);
8623
8624 for (i = 0; i < MAX_WAIT_CNT; i++) {
8625 if (pci_channel_offline(tp->pdev)) {
8626 dev_err(&tp->pdev->dev,
8627 "tg3_stop_block device offline, "
8628 "ofs=%lx enable_bit=%x\n",
8629 ofs, enable_bit);
8630 return -ENODEV;
8631 }
8632
8633 udelay(100);
8634 val = tr32(ofs);
8635 if ((val & enable_bit) == 0)
8636 break;
8637 }
8638
8639 if (i == MAX_WAIT_CNT && !silent) {
8640 dev_err(&tp->pdev->dev,
8641 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8642 ofs, enable_bit);
8643 return -ENODEV;
8644 }
8645
8646 return 0;
8647 }
8648
8649 /* tp->lock is held. */
8650 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8651 {
8652 int i, err;
8653
8654 tg3_disable_ints(tp);
8655
8656 if (pci_channel_offline(tp->pdev)) {
8657 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8658 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8659 err = -ENODEV;
8660 goto err_no_dev;
8661 }
8662
8663 tp->rx_mode &= ~RX_MODE_ENABLE;
8664 tw32_f(MAC_RX_MODE, tp->rx_mode);
8665 udelay(10);
8666
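/* Quiesce the receive-side blocks first, then the send and DMA
 * engines; host coalescing, the buffer manager, and the memory
 * arbiter are taken down last, further below.
 */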
8667 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8668 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8669 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8670 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8671 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8672 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8673
8674 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8675 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8676 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8677 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8678 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8679 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8680 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8681
8682 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8683 tw32_f(MAC_MODE, tp->mac_mode);
8684 udelay(40);
8685
8686 tp->tx_mode &= ~TX_MODE_ENABLE;
8687 tw32_f(MAC_TX_MODE, tp->tx_mode);
8688
8689 for (i = 0; i < MAX_WAIT_CNT; i++) {
8690 udelay(100);
8691 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8692 break;
8693 }
8694 if (i >= MAX_WAIT_CNT) {
8695 dev_err(&tp->pdev->dev,
8696 "%s timed out, TX_MODE_ENABLE will not clear "
8697 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8698 err |= -ENODEV;
8699 }
8700
8701 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8702 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8703 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8704
8705 tw32(FTQ_RESET, 0xffffffff);
8706 tw32(FTQ_RESET, 0x00000000);
8707
8708 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8709 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8710
8711 err_no_dev:
8712 for (i = 0; i < tp->irq_cnt; i++) {
8713 struct tg3_napi *tnapi = &tp->napi[i];
8714 if (tnapi->hw_status)
8715 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8716 }
8717
8718 return err;
8719 }
8720
8721 /* Save PCI command register before chip reset */
8722 static void tg3_save_pci_state(struct tg3 *tp)
8723 {
8724 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8725 }
8726
8727 /* Restore PCI state after chip reset */
8728 static void tg3_restore_pci_state(struct tg3 *tp)
8729 {
8730 u32 val;
8731
8732 /* Re-enable indirect register accesses. */
8733 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8734 tp->misc_host_ctrl);
8735
8736 /* Set MAX PCI retry to zero. */
8737 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8738 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8739 tg3_flag(tp, PCIX_MODE))
8740 val |= PCISTATE_RETRY_SAME_DMA;
8741 /* Allow reads and writes to the APE register and memory space. */
8742 if (tg3_flag(tp, ENABLE_APE))
8743 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8744 PCISTATE_ALLOW_APE_SHMEM_WR |
8745 PCISTATE_ALLOW_APE_PSPACE_WR;
8746 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8747
8748 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8749
8750 if (!tg3_flag(tp, PCI_EXPRESS)) {
8751 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8752 tp->pci_cacheline_sz);
8753 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8754 tp->pci_lat_timer);
8755 }
8756
8757 /* Make sure PCI-X relaxed ordering bit is clear. */
8758 if (tg3_flag(tp, PCIX_MODE)) {
8759 u16 pcix_cmd;
8760
8761 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8762 &pcix_cmd);
8763 pcix_cmd &= ~PCI_X_CMD_ERO;
8764 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8765 pcix_cmd);
8766 }
8767
8768 if (tg3_flag(tp, 5780_CLASS)) {
8769
8770 /* Chip reset on 5780 will reset MSI enable bit,
8771 * so need to restore it.
8772 */
8773 if (tg3_flag(tp, USING_MSI)) {
8774 u16 ctrl;
8775
8776 pci_read_config_word(tp->pdev,
8777 tp->msi_cap + PCI_MSI_FLAGS,
8778 &ctrl);
8779 pci_write_config_word(tp->pdev,
8780 tp->msi_cap + PCI_MSI_FLAGS,
8781 ctrl | PCI_MSI_FLAGS_ENABLE);
8782 val = tr32(MSGINT_MODE);
8783 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8784 }
8785 }
8786 }
8787
8788 /* tp->lock is held. */
8789 static int tg3_chip_reset(struct tg3 *tp)
8790 {
8791 u32 val;
8792 void (*write_op)(struct tg3 *, u32, u32);
8793 int i, err;
8794
8795 tg3_nvram_lock(tp);
8796
8797 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8798
8799 /* No matching tg3_nvram_unlock() after this because
8800 * chip reset below will undo the nvram lock.
8801 */
8802 tp->nvram_lock_cnt = 0;
8803
8804 /* GRC_MISC_CFG core clock reset will clear the memory
8805 * enable bit in PCI register 4 and the MSI enable bit
8806 * on some chips, so we save relevant registers here.
8807 */
8808 tg3_save_pci_state(tp);
8809
8810 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8811 tg3_flag(tp, 5755_PLUS))
8812 tw32(GRC_FASTBOOT_PC, 0);
8813
8814 /*
8815 * We must avoid the readl() that normally takes place.
8816 * It locks machines, causes machine checks, and other
8817 * fun things. So, temporarily disable the 5701
8818 * hardware workaround, while we do the reset.
8819 */
8820 write_op = tp->write32;
8821 if (write_op == tg3_write_flush_reg32)
8822 tp->write32 = tg3_write32;
8823
8824 /* Prevent the irq handler from reading or writing PCI registers
8825 * during chip reset when the memory enable bit in the PCI command
8826 * register may be cleared. The chip does not generate interrupt
8827 * at this time, but the irq handler may still be called due to irq
8828 * sharing or irqpoll.
8829 */
8830 tg3_flag_set(tp, CHIP_RESETTING);
8831 for (i = 0; i < tp->irq_cnt; i++) {
8832 struct tg3_napi *tnapi = &tp->napi[i];
8833 if (tnapi->hw_status) {
8834 tnapi->hw_status->status = 0;
8835 tnapi->hw_status->status_tag = 0;
8836 }
8837 tnapi->last_tag = 0;
8838 tnapi->last_irq_tag = 0;
8839 }
8840 smp_mb();
8841
8842 for (i = 0; i < tp->irq_cnt; i++)
8843 synchronize_irq(tp->napi[i].irq_vec);
8844
8845 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8846 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8847 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8848 }
8849
8850 /* do the reset */
8851 val = GRC_MISC_CFG_CORECLK_RESET;
8852
8853 if (tg3_flag(tp, PCI_EXPRESS)) {
8854 /* Force PCIe 1.0a mode */
8855 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8856 !tg3_flag(tp, 57765_PLUS) &&
8857 tr32(TG3_PCIE_PHY_TSTCTL) ==
8858 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8859 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8860
8861 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8862 tw32(GRC_MISC_CFG, (1 << 29));
8863 val |= (1 << 29);
8864 }
8865 }
8866
8867 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8868 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8869 tw32(GRC_VCPU_EXT_CTRL,
8870 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8871 }
8872
8873 /* Manage gphy power for all CPMU absent PCIe devices. */
8874 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8875 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8876
8877 tw32(GRC_MISC_CFG, val);
8878
8879 /* restore 5701 hardware bug workaround write method */
8880 tp->write32 = write_op;
8881
8882 /* Unfortunately, we have to delay before the PCI read back.
8883 * Some 575X chips will not even respond to a PCI cfg access
8884 * when the reset command is given to the chip.
8885 *
8886 * How do these hardware designers expect things to work
8887 * properly if the PCI write is posted for a long period
8888 * of time? It is always necessary to have some method by
8889 * which a register read back can occur to push the write
8890 * out which does the reset.
8891 *
8892 * For most tg3 variants the trick below was working.
8893 * Ho hum...
8894 */
8895 udelay(120);
8896
8897 /* Flush PCI posted writes. The normal MMIO registers
8898 * are inaccessible at this time so this is the only
8899 * way to do this reliably (actually, this is no longer
8900 * the case, see above). I tried to use indirect
8901 * register read/write but this upset some 5701 variants.
8902 */
8903 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8904
8905 udelay(120);
8906
8907 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8908 u16 val16;
8909
8910 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8911 int j;
8912 u32 cfg_val;
8913
8914 /* Wait for link training to complete. */
8915 for (j = 0; j < 5000; j++)
8916 udelay(100);
8917
8918 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8919 pci_write_config_dword(tp->pdev, 0xc4,
8920 cfg_val | (1 << 15));
8921 }
8922
8923 /* Clear the "no snoop" and "relaxed ordering" bits. */
8924 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8925 /*
8926 * Older PCIe devices only support the 128 byte
8927 * MPS setting. Enforce the restriction.
8928 */
8929 if (!tg3_flag(tp, CPMU_PRESENT))
8930 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8931 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8932
8933 /* Clear error status */
8934 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8935 PCI_EXP_DEVSTA_CED |
8936 PCI_EXP_DEVSTA_NFED |
8937 PCI_EXP_DEVSTA_FED |
8938 PCI_EXP_DEVSTA_URD);
8939 }
8940
8941 tg3_restore_pci_state(tp);
8942
8943 tg3_flag_clear(tp, CHIP_RESETTING);
8944 tg3_flag_clear(tp, ERROR_PROCESSED);
8945
8946 val = 0;
8947 if (tg3_flag(tp, 5780_CLASS))
8948 val = tr32(MEMARB_MODE);
8949 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8950
8951 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8952 tg3_stop_fw(tp);
8953 tw32(0x5000, 0x400);
8954 }
8955
8956 if (tg3_flag(tp, IS_SSB_CORE)) {
8957 /*
8958 * BCM4785: In order to avoid repercussions from using
8959 * potentially defective internal ROM, stop the Rx RISC CPU,
8960 * which is not required for normal operation.
8961 */
8962 tg3_stop_fw(tp);
8963 tg3_halt_cpu(tp, RX_CPU_BASE);
8964 }
8965
8966 err = tg3_poll_fw(tp);
8967 if (err)
8968 return err;
8969
8970 tw32(GRC_MODE, tp->grc_mode);
8971
8972 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8973 val = tr32(0xc4);
8974
8975 tw32(0xc4, val | (1 << 15));
8976 }
8977
8978 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8979 tg3_asic_rev(tp) == ASIC_REV_5705) {
8980 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8981 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8982 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8983 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8984 }
8985
8986 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8987 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8988 val = tp->mac_mode;
8989 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8990 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8991 val = tp->mac_mode;
8992 } else
8993 val = 0;
8994
8995 tw32_f(MAC_MODE, val);
8996 udelay(40);
8997
8998 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8999
9000 tg3_mdio_start(tp);
9001
9002 if (tg3_flag(tp, PCI_EXPRESS) &&
9003 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9004 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9005 !tg3_flag(tp, 57765_PLUS)) {
9006 val = tr32(0x7c00);
9007
9008 tw32(0x7c00, val | (1 << 25));
9009 }
9010
9011 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9012 val = tr32(TG3_CPMU_CLCK_ORIDE);
9013 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9014 }
9015
9016 /* Reprobe ASF enable state. */
9017 tg3_flag_clear(tp, ENABLE_ASF);
9018 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9019 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9020
9021 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9022 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9023 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9024 u32 nic_cfg;
9025
9026 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9027 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9028 tg3_flag_set(tp, ENABLE_ASF);
9029 tp->last_event_jiffies = jiffies;
9030 if (tg3_flag(tp, 5750_PLUS))
9031 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9032
9033 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9034 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9035 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9036 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9037 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9038 }
9039 }
9040
9041 return 0;
9042 }
9043
9044 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9045 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9046
9047 /* tp->lock is held. */
9048 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9049 {
9050 int err;
9051
9052 tg3_stop_fw(tp);
9053
9054 tg3_write_sig_pre_reset(tp, kind);
9055
9056 tg3_abort_hw(tp, silent);
9057 err = tg3_chip_reset(tp);
9058
9059 __tg3_set_mac_addr(tp, false);
9060
9061 tg3_write_sig_legacy(tp, kind);
9062 tg3_write_sig_post_reset(tp, kind);
9063
9064 if (tp->hw_stats) {
9065 /* Save the stats across chip resets... */
9066 tg3_get_nstats(tp, &tp->net_stats_prev);
9067 tg3_get_estats(tp, &tp->estats_prev);
9068
9069 /* And make sure the next sample is new data */
9070 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9071 }
9072
9073 if (err)
9074 return err;
9075
9076 return 0;
9077 }
9078
9079 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9080 {
9081 struct tg3 *tp = netdev_priv(dev);
9082 struct sockaddr *addr = p;
9083 int err = 0;
9084 bool skip_mac_1 = false;
9085
9086 if (!is_valid_ether_addr(addr->sa_data))
9087 return -EADDRNOTAVAIL;
9088
9089 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9090
9091 if (!netif_running(dev))
9092 return 0;
9093
9094 if (tg3_flag(tp, ENABLE_ASF)) {
9095 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9096
9097 addr0_high = tr32(MAC_ADDR_0_HIGH);
9098 addr0_low = tr32(MAC_ADDR_0_LOW);
9099 addr1_high = tr32(MAC_ADDR_1_HIGH);
9100 addr1_low = tr32(MAC_ADDR_1_LOW);
9101
9102 /* Skip MAC addr 1 if ASF is using it. */
9103 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9104 !(addr1_high == 0 && addr1_low == 0))
9105 skip_mac_1 = true;
9106 }
9107 spin_lock_bh(&tp->lock);
9108 __tg3_set_mac_addr(tp, skip_mac_1);
9109 spin_unlock_bh(&tp->lock);
9110
9111 return err;
9112 }
9113
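/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host ring
 * address split into high/low words, the ring length and flag bits,
 * and, on pre-5705 chips, the ring's location in NIC-local memory.
 */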
9114 /* tp->lock is held. */
9115 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9116 dma_addr_t mapping, u32 maxlen_flags,
9117 u32 nic_addr)
9118 {
9119 tg3_write_mem(tp,
9120 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9121 ((u64) mapping >> 32));
9122 tg3_write_mem(tp,
9123 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9124 ((u64) mapping & 0xffffffff));
9125 tg3_write_mem(tp,
9126 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9127 maxlen_flags);
9128
9129 if (!tg3_flag(tp, 5705_PLUS))
9130 tg3_write_mem(tp,
9131 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9132 nic_addr);
9133 }
9134
9135
9136 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9137 {
9138 int i = 0;
9139
9140 if (!tg3_flag(tp, ENABLE_TSS)) {
9141 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9142 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9143 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9144 } else {
9145 tw32(HOSTCC_TXCOL_TICKS, 0);
9146 tw32(HOSTCC_TXMAX_FRAMES, 0);
9147 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9148
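/* Per-vector coalescing registers are laid out in blocks spaced
 * 0x18 bytes apart, starting at the VEC1 addresses.
 */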
9149 for (; i < tp->txq_cnt; i++) {
9150 u32 reg;
9151
9152 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9153 tw32(reg, ec->tx_coalesce_usecs);
9154 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9155 tw32(reg, ec->tx_max_coalesced_frames);
9156 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9157 tw32(reg, ec->tx_max_coalesced_frames_irq);
9158 }
9159 }
9160
9161 for (; i < tp->irq_max - 1; i++) {
9162 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9163 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9164 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9165 }
9166 }
9167
9168 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9169 {
9170 int i = 0;
9171 u32 limit = tp->rxq_cnt;
9172
9173 if (!tg3_flag(tp, ENABLE_RSS)) {
9174 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9175 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9176 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9177 limit--;
9178 } else {
9179 tw32(HOSTCC_RXCOL_TICKS, 0);
9180 tw32(HOSTCC_RXMAX_FRAMES, 0);
9181 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9182 }
9183
9184 for (; i < limit; i++) {
9185 u32 reg;
9186
9187 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9188 tw32(reg, ec->rx_coalesce_usecs);
9189 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9190 tw32(reg, ec->rx_max_coalesced_frames);
9191 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9192 tw32(reg, ec->rx_max_coalesced_frames_irq);
9193 }
9194
9195 for (; i < tp->irq_max - 1; i++) {
9196 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9197 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9198 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9199 }
9200 }
9201
9202 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9203 {
9204 tg3_coal_tx_init(tp, ec);
9205 tg3_coal_rx_init(tp, ec);
9206
9207 if (!tg3_flag(tp, 5705_PLUS)) {
9208 u32 val = ec->stats_block_coalesce_usecs;
9209
9210 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9211 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9212
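/* Statistics DMA is useless without a link, so zero the tick
 * interval to pause it while the link is down.
 */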
9213 if (!tp->link_up)
9214 val = 0;
9215
9216 tw32(HOSTCC_STAT_COAL_TICKS, val);
9217 }
9218 }
9219
9220 /* tp->lock is held. */
9221 static void tg3_rings_reset(struct tg3 *tp)
9222 {
9223 int i;
9224 u32 stblk, txrcb, rxrcb, limit;
9225 struct tg3_napi *tnapi = &tp->napi[0];
9226
9227 /* Disable all transmit rings but the first. */
9228 if (!tg3_flag(tp, 5705_PLUS))
9229 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9230 else if (tg3_flag(tp, 5717_PLUS))
9231 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9232 else if (tg3_flag(tp, 57765_CLASS) ||
9233 tg3_asic_rev(tp) == ASIC_REV_5762)
9234 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9235 else
9236 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9237
9238 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9239 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9240 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9241 BDINFO_FLAGS_DISABLED);
9242
9243
9244 /* Disable all receive return rings but the first. */
9245 if (tg3_flag(tp, 5717_PLUS))
9246 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9247 else if (!tg3_flag(tp, 5705_PLUS))
9248 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9249 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9250 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9251 tg3_flag(tp, 57765_CLASS))
9252 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9253 else
9254 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9255
9256 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9257 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9258 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9259 BDINFO_FLAGS_DISABLED);
9260
9261 /* Disable interrupts */
9262 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9263 tp->napi[0].chk_msi_cnt = 0;
9264 tp->napi[0].last_rx_cons = 0;
9265 tp->napi[0].last_tx_cons = 0;
9266
9267 /* Zero mailbox registers. */
9268 if (tg3_flag(tp, SUPPORT_MSIX)) {
9269 for (i = 1; i < tp->irq_max; i++) {
9270 tp->napi[i].tx_prod = 0;
9271 tp->napi[i].tx_cons = 0;
9272 if (tg3_flag(tp, ENABLE_TSS))
9273 tw32_mailbox(tp->napi[i].prodmbox, 0);
9274 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9275 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9276 tp->napi[i].chk_msi_cnt = 0;
9277 tp->napi[i].last_rx_cons = 0;
9278 tp->napi[i].last_tx_cons = 0;
9279 }
9280 if (!tg3_flag(tp, ENABLE_TSS))
9281 tw32_mailbox(tp->napi[0].prodmbox, 0);
9282 } else {
9283 tp->napi[0].tx_prod = 0;
9284 tp->napi[0].tx_cons = 0;
9285 tw32_mailbox(tp->napi[0].prodmbox, 0);
9286 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9287 }
9288
9289 /* Make sure the NIC-based send BD rings are disabled. */
9290 if (!tg3_flag(tp, 5705_PLUS)) {
9291 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9292 for (i = 0; i < 16; i++)
9293 tw32_tx_mbox(mbox + i * 8, 0);
9294 }
9295
9296 txrcb = NIC_SRAM_SEND_RCB;
9297 rxrcb = NIC_SRAM_RCV_RET_RCB;
9298
9299 /* Clear status block in ram. */
9300 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9301
9302 /* Set status block DMA address */
9303 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9304 ((u64) tnapi->status_mapping >> 32));
9305 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9306 ((u64) tnapi->status_mapping & 0xffffffff));
9307
9308 if (tnapi->tx_ring) {
9309 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9310 (TG3_TX_RING_SIZE <<
9311 BDINFO_FLAGS_MAXLEN_SHIFT),
9312 NIC_SRAM_TX_BUFFER_DESC);
9313 txrcb += TG3_BDINFO_SIZE;
9314 }
9315
9316 if (tnapi->rx_rcb) {
9317 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9318 (tp->rx_ret_ring_mask + 1) <<
9319 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9320 rxrcb += TG3_BDINFO_SIZE;
9321 }
9322
9323 stblk = HOSTCC_STATBLCK_RING1;
9324
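/* Status block DMA addresses for the remaining vectors occupy
 * consecutive high/low register pairs, 8 bytes per vector, starting
 * at HOSTCC_STATBLCK_RING1.
 */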
9325 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9326 u64 mapping = (u64)tnapi->status_mapping;
9327 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9328 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9329
9330 /* Clear status block in ram. */
9331 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9332
9333 if (tnapi->tx_ring) {
9334 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9335 (TG3_TX_RING_SIZE <<
9336 BDINFO_FLAGS_MAXLEN_SHIFT),
9337 NIC_SRAM_TX_BUFFER_DESC);
9338 txrcb += TG3_BDINFO_SIZE;
9339 }
9340
9341 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9342 ((tp->rx_ret_ring_mask + 1) <<
9343 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9344
9345 stblk += 8;
9346 rxrcb += TG3_BDINFO_SIZE;
9347 }
9348 }
9349
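/* Program the standard and jumbo RX BD replenish thresholds; the
 * NIC fetches more host descriptors once its on-chip BD cache
 * drains below these watermarks.
 */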
9350 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9351 {
9352 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9353
9354 if (!tg3_flag(tp, 5750_PLUS) ||
9355 tg3_flag(tp, 5780_CLASS) ||
9356 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9357 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9358 tg3_flag(tp, 57765_PLUS))
9359 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9360 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9361 tg3_asic_rev(tp) == ASIC_REV_5787)
9362 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9363 else
9364 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9365
9366 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9367 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9368
9369 val = min(nic_rep_thresh, host_rep_thresh);
9370 tw32(RCVBDI_STD_THRESH, val);
9371
9372 if (tg3_flag(tp, 57765_PLUS))
9373 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9374
9375 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9376 return;
9377
9378 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9379
9380 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9381
9382 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9383 tw32(RCVBDI_JUMBO_THRESH, val);
9384
9385 if (tg3_flag(tp, 57765_PLUS))
9386 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9387 }
9388
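/* Bit-reflected CRC-32 (polynomial 0xedb88320), computed one bit
 * at a time and complemented at the end -- the same CRC used for
 * the Ethernet FCS, here feeding the multicast hash filter.
 */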
9389 static inline u32 calc_crc(unsigned char *buf, int len)
9390 {
9391 u32 reg;
9392 u32 tmp;
9393 int j, k;
9394
9395 reg = 0xffffffff;
9396
9397 for (j = 0; j < len; j++) {
9398 reg ^= buf[j];
9399
9400 for (k = 0; k < 8; k++) {
9401 tmp = reg & 0x01;
9402
9403 reg >>= 1;
9404
9405 if (tmp)
9406 reg ^= 0xedb88320;
9407 }
9408 }
9409
9410 return ~reg;
9411 }
9412
9413 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9414 {
9415 /* accept or reject all multicast frames */
9416 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9417 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9418 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9419 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9420 }
9421
9422 static void __tg3_set_rx_mode(struct net_device *dev)
9423 {
9424 struct tg3 *tp = netdev_priv(dev);
9425 u32 rx_mode;
9426
9427 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9428 RX_MODE_KEEP_VLAN_TAG);
9429
9430 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9431 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9432 * flag clear.
9433 */
9434 if (!tg3_flag(tp, ENABLE_ASF))
9435 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9436 #endif
9437
9438 if (dev->flags & IFF_PROMISC) {
9439 /* Promiscuous mode. */
9440 rx_mode |= RX_MODE_PROMISC;
9441 } else if (dev->flags & IFF_ALLMULTI) {
9442 /* Accept all multicast. */
9443 tg3_set_multi(tp, 1);
9444 } else if (netdev_mc_empty(dev)) {
9445 /* Reject all multicast. */
9446 tg3_set_multi(tp, 0);
9447 } else {
9448 /* Accept one or more multicast(s). */
9449 struct netdev_hw_addr *ha;
9450 u32 mc_filter[4] = { 0, };
9451 u32 regidx;
9452 u32 bit;
9453 u32 crc;
9454
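/* Each address hashes to one of 128 filter bits: invert the CRC,
 * keep the low 7 bits, then use bits 6:5 to select one of the four
 * 32-bit hash registers and bits 4:0 the bit within it.
 */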
9455 netdev_for_each_mc_addr(ha, dev) {
9456 crc = calc_crc(ha->addr, ETH_ALEN);
9457 bit = ~crc & 0x7f;
9458 regidx = (bit & 0x60) >> 5;
9459 bit &= 0x1f;
9460 mc_filter[regidx] |= (1 << bit);
9461 }
9462
9463 tw32(MAC_HASH_REG_0, mc_filter[0]);
9464 tw32(MAC_HASH_REG_1, mc_filter[1]);
9465 tw32(MAC_HASH_REG_2, mc_filter[2]);
9466 tw32(MAC_HASH_REG_3, mc_filter[3]);
9467 }
9468
9469 if (rx_mode != tp->rx_mode) {
9470 tp->rx_mode = rx_mode;
9471 tw32_f(MAC_RX_MODE, rx_mode);
9472 udelay(10);
9473 }
9474 }
9475
9476 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9477 {
9478 int i;
9479
9480 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9481 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9482 }
9483
9484 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9485 {
9486 int i;
9487
9488 if (!tg3_flag(tp, SUPPORT_MSIX))
9489 return;
9490
9491 if (tp->rxq_cnt == 1) {
9492 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9493 return;
9494 }
9495
9496 /* Validate table against current IRQ count */
9497 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9498 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9499 break;
9500 }
9501
9502 if (i != TG3_RSS_INDIR_TBL_SIZE)
9503 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9504 }
9505
9506 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9507 {
9508 int i = 0;
9509 u32 reg = MAC_RSS_INDIR_TBL_0;
9510
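/* Each 32-bit indirection register packs eight 4-bit queue
 * indices, first entry in the most significant nibble, in
 * consecutive registers starting at MAC_RSS_INDIR_TBL_0.
 */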
9511 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9512 u32 val = tp->rss_ind_tbl[i];
9513 i++;
9514 for (; i % 8; i++) {
9515 val <<= 4;
9516 val |= tp->rss_ind_tbl[i];
9517 }
9518 tw32(reg, val);
9519 reg += 4;
9520 }
9521 }
9522
9523 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9524 {
9525 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9526 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9527 else
9528 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9529 }
9530
9531 /* tp->lock is held. */
9532 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9533 {
9534 u32 val, rdmac_mode;
9535 int i, err, limit;
9536 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9537
9538 tg3_disable_ints(tp);
9539
9540 tg3_stop_fw(tp);
9541
9542 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9543
9544 if (tg3_flag(tp, INIT_COMPLETE))
9545 tg3_abort_hw(tp, 1);
9546
9547 /* Enable MAC control of LPI */
9548 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9549 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9550 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9551 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9552 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9553
9554 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9555
9556 tw32_f(TG3_CPMU_EEE_CTRL,
9557 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9558
9559 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9560 TG3_CPMU_EEEMD_LPI_IN_TX |
9561 TG3_CPMU_EEEMD_LPI_IN_RX |
9562 TG3_CPMU_EEEMD_EEE_ENABLE;
9563
9564 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9565 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9566
9567 if (tg3_flag(tp, ENABLE_APE))
9568 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9569
9570 tw32_f(TG3_CPMU_EEE_MODE, val);
9571
9572 tw32_f(TG3_CPMU_EEE_DBTMR1,
9573 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9574 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9575
9576 tw32_f(TG3_CPMU_EEE_DBTMR2,
9577 TG3_CPMU_DBTMR2_APE_TX_2047US |
9578 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9579 }
9580
9581 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9582 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9583 tg3_phy_pull_config(tp);
9584 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9585 }
9586
9587 if (reset_phy)
9588 tg3_phy_reset(tp);
9589
9590 err = tg3_chip_reset(tp);
9591 if (err)
9592 return err;
9593
9594 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9595
9596 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9597 val = tr32(TG3_CPMU_CTRL);
9598 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9599 tw32(TG3_CPMU_CTRL, val);
9600
9601 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9602 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9603 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9604 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9605
9606 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9607 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9608 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9609 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9610
9611 val = tr32(TG3_CPMU_HST_ACC);
9612 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9613 val |= CPMU_HST_ACC_MACCLK_6_25;
9614 tw32(TG3_CPMU_HST_ACC, val);
9615 }
9616
9617 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9618 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9619 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9620 PCIE_PWR_MGMT_L1_THRESH_4MS;
9621 tw32(PCIE_PWR_MGMT_THRESH, val);
9622
9623 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9624 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9625
9626 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9627
9628 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9629 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9630 }
9631
9632 if (tg3_flag(tp, L1PLLPD_EN)) {
9633 u32 grc_mode = tr32(GRC_MODE);
9634
9635 /* Access the lower 1K of PL PCIE block registers. */
9636 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9637 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9638
9639 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9640 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9641 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9642
9643 tw32(GRC_MODE, grc_mode);
9644 }
9645
9646 if (tg3_flag(tp, 57765_CLASS)) {
9647 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9648 u32 grc_mode = tr32(GRC_MODE);
9649
9650 /* Access the lower 1K of PL PCIE block registers. */
9651 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9652 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9653
9654 val = tr32(TG3_PCIE_TLDLPL_PORT +
9655 TG3_PCIE_PL_LO_PHYCTL5);
9656 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9657 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9658
9659 tw32(GRC_MODE, grc_mode);
9660 }
9661
9662 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9663 u32 grc_mode;
9664
9665 /* Fix transmit hangs */
9666 val = tr32(TG3_CPMU_PADRNG_CTL);
9667 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9668 tw32(TG3_CPMU_PADRNG_CTL, val);
9669
9670 grc_mode = tr32(GRC_MODE);
9671
9672 /* Access the lower 1K of DL PCIE block registers. */
9673 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9674 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9675
9676 val = tr32(TG3_PCIE_TLDLPL_PORT +
9677 TG3_PCIE_DL_LO_FTSMAX);
9678 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9679 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9680 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9681
9682 tw32(GRC_MODE, grc_mode);
9683 }
9684
9685 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9686 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9687 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9688 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9689 }
9690
9691 /* This works around an issue with Athlon chipsets on
9692 * B3 tigon3 silicon. This bit has no effect on any
9693 * other revision. But do not set this on PCI Express
9694 * chips and don't even touch the clocks if the CPMU is present.
9695 */
9696 if (!tg3_flag(tp, CPMU_PRESENT)) {
9697 if (!tg3_flag(tp, PCI_EXPRESS))
9698 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9699 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9700 }
9701
9702 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9703 tg3_flag(tp, PCIX_MODE)) {
9704 val = tr32(TG3PCI_PCISTATE);
9705 val |= PCISTATE_RETRY_SAME_DMA;
9706 tw32(TG3PCI_PCISTATE, val);
9707 }
9708
9709 if (tg3_flag(tp, ENABLE_APE)) {
9710 /* Allow reads and writes to the
9711 * APE register and memory space.
9712 */
9713 val = tr32(TG3PCI_PCISTATE);
9714 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9715 PCISTATE_ALLOW_APE_SHMEM_WR |
9716 PCISTATE_ALLOW_APE_PSPACE_WR;
9717 tw32(TG3PCI_PCISTATE, val);
9718 }
9719
9720 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9721 /* Enable some hw fixes. */
9722 val = tr32(TG3PCI_MSI_DATA);
9723 val |= (1 << 26) | (1 << 28) | (1 << 29);
9724 tw32(TG3PCI_MSI_DATA, val);
9725 }
9726
9727 /* Descriptor ring init may make accesses to the
9728 * NIC SRAM area to setup the TX descriptors, so we
9729 * can only do this after the hardware has been
9730 * successfully reset.
9731 */
9732 err = tg3_init_rings(tp);
9733 if (err)
9734 return err;
9735
9736 if (tg3_flag(tp, 57765_PLUS)) {
9737 val = tr32(TG3PCI_DMA_RW_CTRL) &
9738 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9739 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9740 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9741 if (!tg3_flag(tp, 57765_CLASS) &&
9742 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9743 tg3_asic_rev(tp) != ASIC_REV_5762)
9744 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9745 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9746 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9747 tg3_asic_rev(tp) != ASIC_REV_5761) {
9748 /* This value is determined during the probe time DMA
9749 * engine test, tg3_test_dma.
9750 */
9751 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9752 }
9753
9754 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9755 GRC_MODE_4X_NIC_SEND_RINGS |
9756 GRC_MODE_NO_TX_PHDR_CSUM |
9757 GRC_MODE_NO_RX_PHDR_CSUM);
9758 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9759
9760 /* Pseudo-header checksum is done by hardware logic and not
9761 * the offload processors, so make the chip do the pseudo-
9762 * header checksums on receive. For transmit it is more
9763 * convenient to do the pseudo-header checksum in software
9764 * as Linux does that on transmit for us in all cases.
9765 */
9766 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9767
9768 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9769 if (tp->rxptpctl)
9770 tw32(TG3_RX_PTP_CTL,
9771 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9772
9773 if (tg3_flag(tp, PTP_CAPABLE))
9774 val |= GRC_MODE_TIME_SYNC_ENABLE;
9775
9776 tw32(GRC_MODE, tp->grc_mode | val);
9777
9778 /* Set up the timer prescaler register. The clock is always 66 MHz. */
9779 val = tr32(GRC_MISC_CFG);
9780 val &= ~0xff;
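/* A value of 65 presumably divides the 66 MHz clock by 65 + 1,
 * yielding a 1 us timer tick.
 */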
9781 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9782 tw32(GRC_MISC_CFG, val);
9783
9784 /* Initialize MBUF/DESC pool. */
9785 if (tg3_flag(tp, 5750_PLUS)) {
9786 /* Do nothing. */
9787 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9788 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9789 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9790 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9791 else
9792 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9793 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9794 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9795 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9796 int fw_len;
9797
9798 fw_len = tp->fw_len;
9799 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9800 tw32(BUFMGR_MB_POOL_ADDR,
9801 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9802 tw32(BUFMGR_MB_POOL_SIZE,
9803 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9804 }
9805
9806 if (tp->dev->mtu <= ETH_DATA_LEN) {
9807 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9808 tp->bufmgr_config.mbuf_read_dma_low_water);
9809 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9810 tp->bufmgr_config.mbuf_mac_rx_low_water);
9811 tw32(BUFMGR_MB_HIGH_WATER,
9812 tp->bufmgr_config.mbuf_high_water);
9813 } else {
9814 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9815 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9816 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9817 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9818 tw32(BUFMGR_MB_HIGH_WATER,
9819 tp->bufmgr_config.mbuf_high_water_jumbo);
9820 }
9821 tw32(BUFMGR_DMA_LOW_WATER,
9822 tp->bufmgr_config.dma_low_water);
9823 tw32(BUFMGR_DMA_HIGH_WATER,
9824 tp->bufmgr_config.dma_high_water);
9825
9826 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9827 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9828 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9829 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9830 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9831 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9832 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9833 tw32(BUFMGR_MODE, val);
9834 for (i = 0; i < 2000; i++) {
9835 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9836 break;
9837 udelay(10);
9838 }
9839 if (i >= 2000) {
9840 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9841 return -ENODEV;
9842 }
9843
9844 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9845 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9846
9847 tg3_setup_rxbd_thresholds(tp);
9848
9849 /* Initialize TG3_BDINFO's at:
9850 * RCVDBDI_STD_BD: standard eth size rx ring
9851 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9852 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9853 *
9854 * like so:
9855 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9856 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9857 * ring attribute flags
9858 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9859 *
9860 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9861 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9862 *
9863 * The size of each ring is fixed in the firmware, but the location is
9864 * configurable.
9865 */
9866 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9867 ((u64) tpr->rx_std_mapping >> 32));
9868 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9869 ((u64) tpr->rx_std_mapping & 0xffffffff));
9870 if (!tg3_flag(tp, 5717_PLUS))
9871 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9872 NIC_SRAM_RX_BUFFER_DESC);
9873
9874 /* Disable the mini ring */
9875 if (!tg3_flag(tp, 5705_PLUS))
9876 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9877 BDINFO_FLAGS_DISABLED);
9878
9879 /* Program the jumbo buffer descriptor ring control
9880 * blocks on those devices that have them.
9881 */
9882 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9883 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9884
9885 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9886 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9887 ((u64) tpr->rx_jmb_mapping >> 32));
9888 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9889 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9890 val = TG3_RX_JMB_RING_SIZE(tp) <<
9891 BDINFO_FLAGS_MAXLEN_SHIFT;
9892 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9893 val | BDINFO_FLAGS_USE_EXT_RECV);
9894 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9895 tg3_flag(tp, 57765_CLASS) ||
9896 tg3_asic_rev(tp) == ASIC_REV_5762)
9897 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9898 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9899 } else {
9900 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9901 BDINFO_FLAGS_DISABLED);
9902 }
9903
9904 if (tg3_flag(tp, 57765_PLUS)) {
9905 val = TG3_RX_STD_RING_SIZE(tp);
9906 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9907 val |= (TG3_RX_STD_DMA_SZ << 2);
9908 } else
9909 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9910 } else
9911 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9912
9913 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9914
9915 tpr->rx_std_prod_idx = tp->rx_pending;
9916 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9917
9918 tpr->rx_jmb_prod_idx =
9919 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9920 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9921
9922 tg3_rings_reset(tp);
9923
9924 /* Initialize MAC address and backoff seed. */
9925 __tg3_set_mac_addr(tp, false);
9926
9927 /* MTU + ethernet header + FCS + optional VLAN tag */
9928 tw32(MAC_RX_MTU_SIZE,
9929 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9930
9931 /* The slot time is changed by tg3_setup_phy if we
9932 * run at gigabit with half duplex.
9933 */
9934 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9935 (6 << TX_LENGTHS_IPG_SHIFT) |
9936 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9937
9938 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9939 tg3_asic_rev(tp) == ASIC_REV_5762)
9940 val |= tr32(MAC_TX_LENGTHS) &
9941 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9942 TX_LENGTHS_CNT_DWN_VAL_MSK);
9943
9944 tw32(MAC_TX_LENGTHS, val);
9945
9946 /* Receive rules. */
9947 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9948 tw32(RCVLPC_CONFIG, 0x0181);
9949
9950 /* Calculate the RDMAC_MODE setting early; we need it to determine
9951 * the RCVLPC_STATS_ENABLE mask.
9952 */
9953 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9954 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9955 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9956 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9957 RDMAC_MODE_LNGREAD_ENAB);
9958
9959 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9960 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9961
9962 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9963 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9964 tg3_asic_rev(tp) == ASIC_REV_57780)
9965 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9966 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9967 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9968
9969 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9970 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9971 if (tg3_flag(tp, TSO_CAPABLE) &&
9972 tg3_asic_rev(tp) == ASIC_REV_5705) {
9973 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9974 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9975 !tg3_flag(tp, IS_5788)) {
9976 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9977 }
9978 }
9979
9980 if (tg3_flag(tp, PCI_EXPRESS))
9981 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9982
9983 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9984 tp->dma_limit = 0;
9985 if (tp->dev->mtu <= ETH_DATA_LEN) {
9986 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9987 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9988 }
9989 }
9990
9991 if (tg3_flag(tp, HW_TSO_1) ||
9992 tg3_flag(tp, HW_TSO_2) ||
9993 tg3_flag(tp, HW_TSO_3))
9994 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9995
9996 if (tg3_flag(tp, 57765_PLUS) ||
9997 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9998 tg3_asic_rev(tp) == ASIC_REV_57780)
9999 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10000
10001 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10002 tg3_asic_rev(tp) == ASIC_REV_5762)
10003 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10004
10005 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10006 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10007 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10008 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10009 tg3_flag(tp, 57765_PLUS)) {
10010 u32 tgtreg;
10011
10012 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10013 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10014 else
10015 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10016
10017 val = tr32(tgtreg);
10018 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10019 tg3_asic_rev(tp) == ASIC_REV_5762) {
10020 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10021 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10022 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10023 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10024 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10025 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10026 }
10027 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10028 }
10029
10030 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10031 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10032 tg3_asic_rev(tp) == ASIC_REV_5762) {
10033 u32 tgtreg;
10034
10035 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10036 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10037 else
10038 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10039
10040 val = tr32(tgtreg);
10041 tw32(tgtreg, val |
10042 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10043 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10044 }
10045
10046 /* Receive/send statistics. */
10047 if (tg3_flag(tp, 5750_PLUS)) {
10048 val = tr32(RCVLPC_STATS_ENABLE);
10049 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10050 tw32(RCVLPC_STATS_ENABLE, val);
10051 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10052 tg3_flag(tp, TSO_CAPABLE)) {
10053 val = tr32(RCVLPC_STATS_ENABLE);
10054 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10055 tw32(RCVLPC_STATS_ENABLE, val);
10056 } else {
10057 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10058 }
10059 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10060 tw32(SNDDATAI_STATSENAB, 0xffffff);
10061 tw32(SNDDATAI_STATSCTRL,
10062 (SNDDATAI_SCTRL_ENABLE |
10063 SNDDATAI_SCTRL_FASTUPD));
10064
10065 /* Setup host coalescing engine. */
10066 tw32(HOSTCC_MODE, 0);
10067 for (i = 0; i < 2000; i++) {
10068 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10069 break;
10070 udelay(10);
10071 }
10072
10073 __tg3_set_coalesce(tp, &tp->coal);
10074
10075 if (!tg3_flag(tp, 5705_PLUS)) {
10076 /* Status/statistics block address. See tg3_timer,
10077 * the tg3_periodic_fetch_stats call there, and
10078 * tg3_get_stats to see how this works for 5705/5750 chips.
10079 */
10080 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10081 ((u64) tp->stats_mapping >> 32));
10082 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10083 ((u64) tp->stats_mapping & 0xffffffff));
10084 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10085
10086 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10087
10088 /* Clear statistics and status block memory areas */
10089 for (i = NIC_SRAM_STATS_BLK;
10090 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10091 i += sizeof(u32)) {
10092 tg3_write_mem(tp, i, 0);
10093 udelay(40);
10094 }
10095 }
10096
10097 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10098
10099 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10100 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10101 if (!tg3_flag(tp, 5705_PLUS))
10102 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10103
10104 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10105 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10106 /* reset to prevent losing 1st rx packet intermittently */
10107 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10108 udelay(10);
10109 }
10110
10111 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10112 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10113 MAC_MODE_FHDE_ENABLE;
10114 if (tg3_flag(tp, ENABLE_APE))
10115 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10116 if (!tg3_flag(tp, 5705_PLUS) &&
10117 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10118 tg3_asic_rev(tp) != ASIC_REV_5700)
10119 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10120 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10121 udelay(40);
10122
10123 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10124 * If TG3_FLAG_IS_NIC is zero, we should read the
10125 * register to preserve the GPIO settings for LOMs. The GPIOs,
10126 * whether used as inputs or outputs, are set by boot code after
10127 * reset.
10128 */
10129 if (!tg3_flag(tp, IS_NIC)) {
10130 u32 gpio_mask;
10131
10132 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10133 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10134 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10135
10136 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10137 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10138 GRC_LCLCTRL_GPIO_OUTPUT3;
10139
10140 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10141 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10142
10143 tp->grc_local_ctrl &= ~gpio_mask;
10144 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10145
10146 /* GPIO1 must be driven high for eeprom write protect */
10147 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10148 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10149 GRC_LCLCTRL_GPIO_OUTPUT1);
10150 }
10151 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10152 udelay(100);
10153
10154 if (tg3_flag(tp, USING_MSIX)) {
10155 val = tr32(MSGINT_MODE);
10156 val |= MSGINT_MODE_ENABLE;
10157 if (tp->irq_cnt > 1)
10158 val |= MSGINT_MODE_MULTIVEC_EN;
10159 if (!tg3_flag(tp, 1SHOT_MSI))
10160 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10161 tw32(MSGINT_MODE, val);
10162 }
10163
10164 if (!tg3_flag(tp, 5705_PLUS)) {
10165 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10166 udelay(40);
10167 }
10168
10169 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10170 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10171 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10172 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10173 WDMAC_MODE_LNGREAD_ENAB);
10174
10175 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10176 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10177 if (tg3_flag(tp, TSO_CAPABLE) &&
10178 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10179 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10180 /* nothing */
10181 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10182 !tg3_flag(tp, IS_5788)) {
10183 val |= WDMAC_MODE_RX_ACCEL;
10184 }
10185 }
10186
10187 /* Enable host coalescing bug fix */
10188 if (tg3_flag(tp, 5755_PLUS))
10189 val |= WDMAC_MODE_STATUS_TAG_FIX;
10190
10191 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10192 val |= WDMAC_MODE_BURST_ALL_DATA;
10193
10194 tw32_f(WDMAC_MODE, val);
10195 udelay(40);
10196
10197 if (tg3_flag(tp, PCIX_MODE)) {
10198 u16 pcix_cmd;
10199
10200 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10201 &pcix_cmd);
10202 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10203 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10204 pcix_cmd |= PCI_X_CMD_READ_2K;
10205 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10206 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10207 pcix_cmd |= PCI_X_CMD_READ_2K;
10208 }
10209 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10210 pcix_cmd);
10211 }
10212
10213 tw32_f(RDMAC_MODE, rdmac_mode);
10214 udelay(40);
10215
10216 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10217 tg3_asic_rev(tp) == ASIC_REV_5720) {
10218 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10219 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10220 break;
10221 }
10222 if (i < TG3_NUM_RDMA_CHANNELS) {
10223 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10224 val |= tg3_lso_rd_dma_workaround_bit(tp);
10225 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10226 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10227 }
10228 }
10229
10230 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10231 if (!tg3_flag(tp, 5705_PLUS))
10232 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10233
10234 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10235 tw32(SNDDATAC_MODE,
10236 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10237 else
10238 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10239
10240 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10241 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10242 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10243 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10244 val |= RCVDBDI_MODE_LRG_RING_SZ;
10245 tw32(RCVDBDI_MODE, val);
10246 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10247 if (tg3_flag(tp, HW_TSO_1) ||
10248 tg3_flag(tp, HW_TSO_2) ||
10249 tg3_flag(tp, HW_TSO_3))
10250 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10251 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10252 if (tg3_flag(tp, ENABLE_TSS))
10253 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10254 tw32(SNDBDI_MODE, val);
10255 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10256
10257 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10258 err = tg3_load_5701_a0_firmware_fix(tp);
10259 if (err)
10260 return err;
10261 }
10262
10263 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10264 /* Ignore any errors for the firmware download. If download
10265 * fails, the device will operate with EEE disabled.
10266 */
10267 tg3_load_57766_firmware(tp);
10268 }
10269
10270 if (tg3_flag(tp, TSO_CAPABLE)) {
10271 err = tg3_load_tso_firmware(tp);
10272 if (err)
10273 return err;
10274 }
10275
10276 tp->tx_mode = TX_MODE_ENABLE;
10277
10278 if (tg3_flag(tp, 5755_PLUS) ||
10279 tg3_asic_rev(tp) == ASIC_REV_5906)
10280 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10281
10282 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10283 tg3_asic_rev(tp) == ASIC_REV_5762) {
10284 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10285 tp->tx_mode &= ~val;
10286 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10287 }
10288
10289 tw32_f(MAC_TX_MODE, tp->tx_mode);
10290 udelay(100);
10291
10292 if (tg3_flag(tp, ENABLE_RSS)) {
10293 tg3_rss_write_indir_tbl(tp);
10294
10295 /* Setup the "secret" hash key. */
10296 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10297 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10298 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10299 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10300 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10301 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10302 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10303 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10304 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10305 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10306 }
10307
10308 tp->rx_mode = RX_MODE_ENABLE;
10309 if (tg3_flag(tp, 5755_PLUS))
10310 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10311
10312 if (tg3_flag(tp, ENABLE_RSS))
10313 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10314 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10315 RX_MODE_RSS_IPV6_HASH_EN |
10316 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10317 RX_MODE_RSS_IPV4_HASH_EN |
10318 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10319
10320 tw32_f(MAC_RX_MODE, tp->rx_mode);
10321 udelay(10);
10322
10323 tw32(MAC_LED_CTRL, tp->led_ctrl);
10324
10325 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10326 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10327 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10328 udelay(10);
10329 }
10330 tw32_f(MAC_RX_MODE, tp->rx_mode);
10331 udelay(10);
10332
10333 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10334 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10335 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10336 /* Set drive transmission level to 1.2V only if the signal
10337 * pre-emphasis bit is not set. */
10338 val = tr32(MAC_SERDES_CFG);
10339 val &= 0xfffff000;
10340 val |= 0x880;
10341 tw32(MAC_SERDES_CFG, val);
10342 }
10343 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10344 tw32(MAC_SERDES_CFG, 0x616000);
10345 }
10346
10347 /* Prevent chip from dropping frames when flow control
10348 * is enabled.
10349 */
10350 if (tg3_flag(tp, 57765_CLASS))
10351 val = 1;
10352 else
10353 val = 2;
10354 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10355
10356 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10357 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10358 /* Use hardware link auto-negotiation */
10359 tg3_flag_set(tp, HW_AUTONEG);
10360 }
10361
10362 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10363 tg3_asic_rev(tp) == ASIC_REV_5714) {
10364 u32 tmp;
10365
10366 tmp = tr32(SERDES_RX_CTRL);
10367 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10368 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10369 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10370 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10371 }
10372
10373 if (!tg3_flag(tp, USE_PHYLIB)) {
10374 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10375 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10376
10377 err = tg3_setup_phy(tp, false);
10378 if (err)
10379 return err;
10380
10381 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10382 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10383 u32 tmp;
10384
10385 /* Clear CRC stats. */
10386 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10387 tg3_writephy(tp, MII_TG3_TEST1,
10388 tmp | MII_TG3_TEST1_CRC_EN);
10389 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10390 }
10391 }
10392 }
10393
10394 __tg3_set_rx_mode(tp->dev);
10395
10396 /* Initialize receive rules. */
10397 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10398 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10399 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10400 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10401
10402 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10403 limit = 8;
10404 else
10405 limit = 16;
10406 if (tg3_flag(tp, ENABLE_ASF))
10407 limit -= 4;
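/* Zero every receive rule above the computed limit; the cases below
 * intentionally fall through so each higher-numbered rule is cleared.
 */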
10408 switch (limit) {
10409 case 16:
10410 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10411 case 15:
10412 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10413 case 14:
10414 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10415 case 13:
10416 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10417 case 12:
10418 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10419 case 11:
10420 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10421 case 10:
10422 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10423 case 9:
10424 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10425 case 8:
10426 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10427 case 7:
10428 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10429 case 6:
10430 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10431 case 5:
10432 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10433 case 4:
10434 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10435 case 3:
10436 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10437 case 2:
10438 case 1:
10439
10440 default:
10441 break;
10442 }
10443
10444 if (tg3_flag(tp, ENABLE_APE))
10445 /* Write our heartbeat update interval to APE. */
10446 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10447 APE_HOST_HEARTBEAT_INT_DISABLE);
10448
10449 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10450
10451 return 0;
10452 }
10453
10454 /* Called at device open time to get the chip ready for
10455 * packet processing. Invoked with tp->lock held.
10456 */
10457 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10458 {
10459 /* Chip may have been just powered on. If so, the boot code may still
10460 * be running initialization. Wait for it to finish to avoid races in
10461 * accessing the hardware.
10462 */
10463 tg3_enable_register_access(tp);
10464 tg3_poll_fw(tp);
10465
10466 tg3_switch_clocks(tp);
10467
10468 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10469
10470 return tg3_reset_hw(tp, reset_phy);
10471 }
10472
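/* Read the sensor OCIR records out of the APE scratchpad, zeroing any
 * record that lacks the signature magic or is not marked active.
 */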
10473 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10474 {
10475 int i;
10476
10477 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10478 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10479
10480 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10481 off += len;
10482
10483 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10484 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10485 memset(ocir, 0, TG3_OCIR_LEN);
10486 }
10487 }
10488
10489 /* sysfs attributes for hwmon */
10490 static ssize_t tg3_show_temp(struct device *dev,
10491 struct device_attribute *devattr, char *buf)
10492 {
10493 struct pci_dev *pdev = to_pci_dev(dev);
10494 struct net_device *netdev = pci_get_drvdata(pdev);
10495 struct tg3 *tp = netdev_priv(netdev);
10496 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10497 u32 temperature;
10498
10499 spin_lock_bh(&tp->lock);
10500 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10501 sizeof(temperature));
10502 spin_unlock_bh(&tp->lock);
10503 return sprintf(buf, "%u\n", temperature);
10504 }
10505
10507 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10508 TG3_TEMP_SENSOR_OFFSET);
10509 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10510 TG3_TEMP_CAUTION_OFFSET);
10511 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10512 TG3_TEMP_MAX_OFFSET);
10513
10514 static struct attribute *tg3_attributes[] = {
10515 &sensor_dev_attr_temp1_input.dev_attr.attr,
10516 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10517 &sensor_dev_attr_temp1_max.dev_attr.attr,
10518 NULL
10519 };
10520
10521 static const struct attribute_group tg3_group = {
10522 .attrs = tg3_attributes,
10523 };
10524
10525 static void tg3_hwmon_close(struct tg3 *tp)
10526 {
10527 if (tp->hwmon_dev) {
10528 hwmon_device_unregister(tp->hwmon_dev);
10529 tp->hwmon_dev = NULL;
10530 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10531 }
10532 }
10533
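/* Scan the APE scratchpad for sensor records and, if any sensor data is
 * present, register the hwmon device and its temperature attributes.
 */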
10534 static void tg3_hwmon_open(struct tg3 *tp)
10535 {
10536 int i, err;
10537 u32 size = 0;
10538 struct pci_dev *pdev = tp->pdev;
10539 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10540
10541 tg3_sd_scan_scratchpad(tp, ocirs);
10542
10543 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10544 if (!ocirs[i].src_data_length)
10545 continue;
10546
10547 size += ocirs[i].src_hdr_length;
10548 size += ocirs[i].src_data_length;
10549 }
10550
10551 if (!size)
10552 return;
10553
10554 /* Register hwmon sysfs hooks */
10555 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10556 if (err) {
10557 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10558 return;
10559 }
10560
10561 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10562 if (IS_ERR(tp->hwmon_dev)) {
10563 tp->hwmon_dev = NULL;
10564 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10565 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10566 }
10567 }
10568
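/* Accumulate a 32-bit hardware counter into a 64-bit software total.
 * If the 32-bit sum wraps, the new low word ends up smaller than the
 * value just added, so carry into the high word.  Example: low =
 * 0xfffffff0 plus __val = 0x20 leaves low = 0x10 (< 0x20), and high
 * is incremented.
 */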
10570 #define TG3_STAT_ADD32(PSTAT, REG) \
10571 do { u32 __val = tr32(REG); \
10572 (PSTAT)->low += __val; \
10573 if ((PSTAT)->low < __val) \
10574 (PSTAT)->high += 1; \
10575 } while (0)
10576
10577 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10578 {
10579 struct tg3_hw_stats *sp = tp->hw_stats;
10580
10581 if (!tp->link_up)
10582 return;
10583
10584 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10585 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10586 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10587 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10588 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10589 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10590 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10591 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10592 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10593 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10594 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10595 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10596 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10597 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10598 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10599 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10600 u32 val;
10601
10602 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10603 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10604 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10605 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10606 }
10607
10608 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10609 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10610 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10611 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10612 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10613 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10614 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10615 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10616 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10617 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10618 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10619 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10620 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10621 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10622
10623 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10624 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10625 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10626 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10627 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10628 } else {
10629 u32 val = tr32(HOSTCC_FLOW_ATTN);
10630 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10631 if (val) {
10632 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10633 sp->rx_discards.low += val;
10634 if (sp->rx_discards.low < val)
10635 sp->rx_discards.high += 1;
10636 }
10637 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10638 }
10639 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10640 }
10641
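/* Work around occasional missed MSIs: if a vector still has work
 * pending but its consumer indices have not moved since the last
 * check, assume the interrupt was lost and, after one grace pass,
 * invoke the MSI handler directly.
 */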
10642 static void tg3_chk_missed_msi(struct tg3 *tp)
10643 {
10644 u32 i;
10645
10646 for (i = 0; i < tp->irq_cnt; i++) {
10647 struct tg3_napi *tnapi = &tp->napi[i];
10648
10649 if (tg3_has_work(tnapi)) {
10650 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10651 tnapi->last_tx_cons == tnapi->tx_cons) {
10652 if (tnapi->chk_msi_cnt < 1) {
10653 tnapi->chk_msi_cnt++;
10654 return;
10655 }
10656 tg3_msi(0, tnapi);
10657 }
10658 }
10659 tnapi->chk_msi_cnt = 0;
10660 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10661 tnapi->last_tx_cons = tnapi->tx_cons;
10662 }
10663 }
10664
10665 static void tg3_timer(unsigned long __opaque)
10666 {
10667 struct tg3 *tp = (struct tg3 *) __opaque;
10668
10669 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10670 goto restart_timer;
10671
10672 spin_lock(&tp->lock);
10673
10674 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10675 tg3_flag(tp, 57765_CLASS))
10676 tg3_chk_missed_msi(tp);
10677
10678 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10679 /* BCM4785: Flush posted writes from GbE to host memory. */
10680 tr32(HOSTCC_MODE);
10681 }
10682
10683 if (!tg3_flag(tp, TAGGED_STATUS)) {
10684 /* All of this garbage is because, when using non-tagged
10685 * IRQ status, the mailbox/status_block protocol the chip
10686 * uses with the CPU is race prone.
10687 */
10688 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10689 tw32(GRC_LOCAL_CTRL,
10690 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10691 } else {
10692 tw32(HOSTCC_MODE, tp->coalesce_mode |
10693 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10694 }
10695
10696 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10697 spin_unlock(&tp->lock);
10698 tg3_reset_task_schedule(tp);
10699 goto restart_timer;
10700 }
10701 }
10702
10703 /* This part only runs once per second. */
10704 if (!--tp->timer_counter) {
10705 if (tg3_flag(tp, 5705_PLUS))
10706 tg3_periodic_fetch_stats(tp);
10707
10708 if (tp->setlpicnt && !--tp->setlpicnt)
10709 tg3_phy_eee_enable(tp);
10710
10711 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10712 u32 mac_stat;
10713 int phy_event;
10714
10715 mac_stat = tr32(MAC_STATUS);
10716
10717 phy_event = 0;
10718 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10719 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10720 phy_event = 1;
10721 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10722 phy_event = 1;
10723
10724 if (phy_event)
10725 tg3_setup_phy(tp, false);
10726 } else if (tg3_flag(tp, POLL_SERDES)) {
10727 u32 mac_stat = tr32(MAC_STATUS);
10728 int need_setup = 0;
10729
10730 if (tp->link_up &&
10731 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10732 need_setup = 1;
10733 }
10734 if (!tp->link_up &&
10735 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10736 MAC_STATUS_SIGNAL_DET))) {
10737 need_setup = 1;
10738 }
10739 if (need_setup) {
10740 if (!tp->serdes_counter) {
10741 tw32_f(MAC_MODE,
10742 (tp->mac_mode &
10743 ~MAC_MODE_PORT_MODE_MASK));
10744 udelay(40);
10745 tw32_f(MAC_MODE, tp->mac_mode);
10746 udelay(40);
10747 }
10748 tg3_setup_phy(tp, false);
10749 }
10750 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10751 tg3_flag(tp, 5780_CLASS)) {
10752 tg3_serdes_parallel_detect(tp);
10753 }
10754
10755 tp->timer_counter = tp->timer_multiplier;
10756 }
10757
10758 /* Heartbeat is only sent once every 2 seconds.
10759 *
10760 * The heartbeat is to tell the ASF firmware that the host
10761 * driver is still alive. In the event that the OS crashes,
10762 * ASF needs to reset the hardware to free up the FIFO space
10763 * that may be filled with rx packets destined for the host.
10764 * If the FIFO is full, ASF will no longer function properly.
10765 *
10766 * Unintended resets have been reported on real-time kernels
10767 * where the timer doesn't run on time. Netpoll will have the
10768 * same problem.
10769 *
10770 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10771 * to check the ring condition when the heartbeat is expiring
10772 * before doing the reset. This will prevent most unintended
10773 * resets.
10774 */
10775 if (!--tp->asf_counter) {
10776 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10777 tg3_wait_for_event_ack(tp);
10778
10779 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10780 FWCMD_NICDRV_ALIVE3);
10781 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10782 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10783 TG3_FW_UPDATE_TIMEOUT_SEC);
10784
10785 tg3_generate_fw_event(tp);
10786 }
10787 tp->asf_counter = tp->asf_multiplier;
10788 }
10789
10790 spin_unlock(&tp->lock);
10791
10792 restart_timer:
10793 tp->timer.expires = jiffies + tp->timer_offset;
10794 add_timer(&tp->timer);
10795 }
10796
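/* Chips using tagged status (other than the 5717 and 57765 class,
 * which need the missed-MSI check) run the watchdog once a second;
 * everything else polls ten times a second.
 */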
10797 static void tg3_timer_init(struct tg3 *tp)
10798 {
10799 if (tg3_flag(tp, TAGGED_STATUS) &&
10800 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10801 !tg3_flag(tp, 57765_CLASS))
10802 tp->timer_offset = HZ;
10803 else
10804 tp->timer_offset = HZ / 10;
10805
10806 BUG_ON(tp->timer_offset > HZ);
10807
10808 tp->timer_multiplier = (HZ / tp->timer_offset);
10809 tp->asf_multiplier = (HZ / tp->timer_offset) *
10810 TG3_FW_UPDATE_FREQ_SEC;
10811
10812 init_timer(&tp->timer);
10813 tp->timer.data = (unsigned long) tp;
10814 tp->timer.function = tg3_timer;
10815 }
10816
10817 static void tg3_timer_start(struct tg3 *tp)
10818 {
10819 tp->asf_counter = tp->asf_multiplier;
10820 tp->timer_counter = tp->timer_multiplier;
10821
10822 tp->timer.expires = jiffies + tp->timer_offset;
10823 add_timer(&tp->timer);
10824 }
10825
10826 static void tg3_timer_stop(struct tg3 *tp)
10827 {
10828 del_timer_sync(&tp->timer);
10829 }
10830
10831 /* Restart hardware after configuration changes, self-test, etc.
10832 * Invoked with tp->lock held.
10833 */
10834 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10835 __releases(tp->lock)
10836 __acquires(tp->lock)
10837 {
10838 int err;
10839
10840 err = tg3_init_hw(tp, reset_phy);
10841 if (err) {
10842 netdev_err(tp->dev,
10843 "Failed to re-initialize device, aborting\n");
10844 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10845 tg3_full_unlock(tp);
10846 tg3_timer_stop(tp);
10847 tp->irq_sync = 0;
10848 tg3_napi_enable(tp);
10849 dev_close(tp->dev);
10850 tg3_full_lock(tp, 0);
10851 }
10852 return err;
10853 }
10854
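/* Deferred reset handler, scheduled (e.g. by the watchdog timer above)
 * when the hardware needs a full halt and re-init from process context.
 */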
10855 static void tg3_reset_task(struct work_struct *work)
10856 {
10857 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10858 int err;
10859
10860 tg3_full_lock(tp, 0);
10861
10862 if (!netif_running(tp->dev)) {
10863 tg3_flag_clear(tp, RESET_TASK_PENDING);
10864 tg3_full_unlock(tp);
10865 return;
10866 }
10867
10868 tg3_full_unlock(tp);
10869
10870 tg3_phy_stop(tp);
10871
10872 tg3_netif_stop(tp);
10873
10874 tg3_full_lock(tp, 1);
10875
10876 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10877 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10878 tp->write32_rx_mbox = tg3_write_flush_reg32;
10879 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10880 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10881 }
10882
10883 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10884 err = tg3_init_hw(tp, true);
10885 if (err)
10886 goto out;
10887
10888 tg3_netif_start(tp);
10889
10890 out:
10891 tg3_full_unlock(tp);
10892
10893 if (!err)
10894 tg3_phy_start(tp);
10895
10896 tg3_flag_clear(tp, RESET_TASK_PENDING);
10897 }
10898
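/* Select the handler that matches the interrupt scheme in use and
 * request the vector for the given NAPI instance; MSI/MSI-X vectors
 * are exclusive, while legacy INTx is shared.
 */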
10899 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10900 {
10901 irq_handler_t fn;
10902 unsigned long flags;
10903 char *name;
10904 struct tg3_napi *tnapi = &tp->napi[irq_num];
10905
10906 if (tp->irq_cnt == 1)
10907 name = tp->dev->name;
10908 else {
10909 name = &tnapi->irq_lbl[0];
10910 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10911 name[IFNAMSIZ-1] = 0;
10912 }
10913
10914 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10915 fn = tg3_msi;
10916 if (tg3_flag(tp, 1SHOT_MSI))
10917 fn = tg3_msi_1shot;
10918 flags = 0;
10919 } else {
10920 fn = tg3_interrupt;
10921 if (tg3_flag(tp, TAGGED_STATUS))
10922 fn = tg3_interrupt_tagged;
10923 flags = IRQF_SHARED;
10924 }
10925
10926 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10927 }
10928
10929 static int tg3_test_interrupt(struct tg3 *tp)
10930 {
10931 struct tg3_napi *tnapi = &tp->napi[0];
10932 struct net_device *dev = tp->dev;
10933 int err, i, intr_ok = 0;
10934 u32 val;
10935
10936 if (!netif_running(dev))
10937 return -ENODEV;
10938
10939 tg3_disable_ints(tp);
10940
10941 free_irq(tnapi->irq_vec, tnapi);
10942
10943 /*
10944 * Turn off MSI one-shot mode. Otherwise this test has no
10945 * observable way to tell whether the interrupt was delivered.
10946 */
10947 if (tg3_flag(tp, 57765_PLUS)) {
10948 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10949 tw32(MSGINT_MODE, val);
10950 }
10951
10952 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10953 IRQF_SHARED, dev->name, tnapi);
10954 if (err)
10955 return err;
10956
10957 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10958 tg3_enable_ints(tp);
10959
10960 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10961 tnapi->coal_now);
10962
10963 for (i = 0; i < 5; i++) {
10964 u32 int_mbox, misc_host_ctrl;
10965
10966 int_mbox = tr32_mailbox(tnapi->int_mbox);
10967 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10968
10969 if ((int_mbox != 0) ||
10970 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10971 intr_ok = 1;
10972 break;
10973 }
10974
10975 if (tg3_flag(tp, 57765_PLUS) &&
10976 tnapi->hw_status->status_tag != tnapi->last_tag)
10977 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10978
10979 msleep(10);
10980 }
10981
10982 tg3_disable_ints(tp);
10983
10984 free_irq(tnapi->irq_vec, tnapi);
10985
10986 err = tg3_request_irq(tp, 0);
10987
10988 if (err)
10989 return err;
10990
10991 if (intr_ok) {
10992 /* Reenable MSI one shot mode. */
10993 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10994 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10995 tw32(MSGINT_MODE, val);
10996 }
10997 return 0;
10998 }
10999
11000 return -EIO;
11001 }
11002
11003 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
11004 * successfully restored.
11005 */
11006 static int tg3_test_msi(struct tg3 *tp)
11007 {
11008 int err;
11009 u16 pci_cmd;
11010
11011 if (!tg3_flag(tp, USING_MSI))
11012 return 0;
11013
11014 /* Turn off SERR reporting in case MSI terminates with Master
11015 * Abort.
11016 */
11017 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11018 pci_write_config_word(tp->pdev, PCI_COMMAND,
11019 pci_cmd & ~PCI_COMMAND_SERR);
11020
11021 err = tg3_test_interrupt(tp);
11022
11023 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11024
11025 if (!err)
11026 return 0;
11027
11028 /* other failures */
11029 if (err != -EIO)
11030 return err;
11031
11032 /* MSI test failed, go back to INTx mode */
11033 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11034 "to INTx mode. Please report this failure to the PCI "
11035 "maintainer and include system chipset information\n");
11036
11037 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11038
11039 pci_disable_msi(tp->pdev);
11040
11041 tg3_flag_clear(tp, USING_MSI);
11042 tp->napi[0].irq_vec = tp->pdev->irq;
11043
11044 err = tg3_request_irq(tp, 0);
11045 if (err)
11046 return err;
11047
11048 /* Need to reset the chip because the MSI cycle may have terminated
11049 * with Master Abort.
11050 */
11051 tg3_full_lock(tp, 1);
11052
11053 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11054 err = tg3_init_hw(tp, true);
11055
11056 tg3_full_unlock(tp);
11057
11058 if (err)
11059 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11060
11061 return err;
11062 }
11063
11064 static int tg3_request_firmware(struct tg3 *tp)
11065 {
11066 const struct tg3_firmware_hdr *fw_hdr;
11067
11068 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11069 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11070 tp->fw_needed);
11071 return -ENOENT;
11072 }
11073
11074 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11075
11076 /* Firmware blob starts with version numbers, followed by
11077 * start address and _full_ length including BSS sections
11078 * (which must be at least as long as the actual data, of course).
11079 */
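/* The header itself (struct tg3_firmware_hdr) is three __be32 words:
 * the version, the base (load) address, and the full length read below.
 */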
11080
11081 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11082 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11083 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11084 tp->fw_len, tp->fw_needed);
11085 release_firmware(tp->fw);
11086 tp->fw = NULL;
11087 return -EINVAL;
11088 }
11089
11090 /* We no longer need firmware; we have it. */
11091 tp->fw_needed = NULL;
11092 return 0;
11093 }
11094
11095 static u32 tg3_irq_count(struct tg3 *tp)
11096 {
11097 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11098
11099 if (irq_cnt > 1) {
11100 /* We want as many rx rings enabled as there are cpus.
11101 * In multiqueue MSI-X mode, the first MSI-X vector
11102 * only deals with link interrupts, etc., so we add
11103 * one to the number of vectors we are requesting.
11104 */
11105 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11106 }
11107
11108 return irq_cnt;
11109 }
11110
11111 static bool tg3_enable_msix(struct tg3 *tp)
11112 {
11113 int i, rc;
11114 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11115
11116 tp->txq_cnt = tp->txq_req;
11117 tp->rxq_cnt = tp->rxq_req;
11118 if (!tp->rxq_cnt)
11119 tp->rxq_cnt = netif_get_num_default_rss_queues();
11120 if (tp->rxq_cnt > tp->rxq_max)
11121 tp->rxq_cnt = tp->rxq_max;
11122
11123 /* Disable multiple TX rings by default. Simple round-robin hardware
11124 * scheduling of the TX rings can cause starvation of rings with
11125 * small packets when other rings have TSO or jumbo packets.
11126 */
11127 if (!tp->txq_req)
11128 tp->txq_cnt = 1;
11129
11130 tp->irq_cnt = tg3_irq_count(tp);
11131
11132 for (i = 0; i < tp->irq_max; i++) {
11133 msix_ent[i].entry = i;
11134 msix_ent[i].vector = 0;
11135 }
11136
11137 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11138 if (rc < 0) {
11139 return false;
11140 } else if (rc != 0) {
11141 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11142 return false;
11143 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11144 tp->irq_cnt, rc);
11145 tp->irq_cnt = rc;
11146 tp->rxq_cnt = max(rc - 1, 1);
11147 if (tp->txq_cnt)
11148 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11149 }
11150
11151 for (i = 0; i < tp->irq_max; i++)
11152 tp->napi[i].irq_vec = msix_ent[i].vector;
11153
11154 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11155 pci_disable_msix(tp->pdev);
11156 return false;
11157 }
11158
11159 if (tp->irq_cnt == 1)
11160 return true;
11161
11162 tg3_flag_set(tp, ENABLE_RSS);
11163
11164 if (tp->txq_cnt > 1)
11165 tg3_flag_set(tp, ENABLE_TSS);
11166
11167 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11168
11169 return true;
11170 }
11171
11172 static void tg3_ints_init(struct tg3 *tp)
11173 {
11174 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11175 !tg3_flag(tp, TAGGED_STATUS)) {
11176 /* All MSI supporting chips should support tagged
11177 * status. Warn and fall back to INTx if that is not the case.
11178 */
11179 netdev_warn(tp->dev,
11180 "MSI without TAGGED_STATUS? Not using MSI\n");
11181 goto defcfg;
11182 }
11183
11184 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11185 tg3_flag_set(tp, USING_MSIX);
11186 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11187 tg3_flag_set(tp, USING_MSI);
11188
11189 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11190 u32 msi_mode = tr32(MSGINT_MODE);
11191 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11192 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11193 if (!tg3_flag(tp, 1SHOT_MSI))
11194 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11195 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11196 }
11197 defcfg:
11198 if (!tg3_flag(tp, USING_MSIX)) {
11199 tp->irq_cnt = 1;
11200 tp->napi[0].irq_vec = tp->pdev->irq;
11201 }
11202
11203 if (tp->irq_cnt == 1) {
11204 tp->txq_cnt = 1;
11205 tp->rxq_cnt = 1;
11206 netif_set_real_num_tx_queues(tp->dev, 1);
11207 netif_set_real_num_rx_queues(tp->dev, 1);
11208 }
11209 }
11210
11211 static void tg3_ints_fini(struct tg3 *tp)
11212 {
11213 if (tg3_flag(tp, USING_MSIX))
11214 pci_disable_msix(tp->pdev);
11215 else if (tg3_flag(tp, USING_MSI))
11216 pci_disable_msi(tp->pdev);
11217 tg3_flag_clear(tp, USING_MSI);
11218 tg3_flag_clear(tp, USING_MSIX);
11219 tg3_flag_clear(tp, ENABLE_RSS);
11220 tg3_flag_clear(tp, ENABLE_TSS);
11221 }
11222
11223 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11224 bool init)
11225 {
11226 struct net_device *dev = tp->dev;
11227 int i, err;
11228
11229 /*
11230 * Setup interrupts first so we know how
11231 * many NAPI resources to allocate
11232 */
11233 tg3_ints_init(tp);
11234
11235 tg3_rss_check_indir_tbl(tp);
11236
11237 /* The placement of this call is tied
11238 * to the setup and use of Host TX descriptors.
11239 */
11240 err = tg3_alloc_consistent(tp);
11241 if (err)
11242 goto err_out1;
11243
11244 tg3_napi_init(tp);
11245
11246 tg3_napi_enable(tp);
11247
11248 for (i = 0; i < tp->irq_cnt; i++) {
11249 struct tg3_napi *tnapi = &tp->napi[i];
11250 err = tg3_request_irq(tp, i);
11251 if (err) {
11252 for (i--; i >= 0; i--) {
11253 tnapi = &tp->napi[i];
11254 free_irq(tnapi->irq_vec, tnapi);
11255 }
11256 goto err_out2;
11257 }
11258 }
11259
11260 tg3_full_lock(tp, 0);
11261
11262 err = tg3_init_hw(tp, reset_phy);
11263 if (err) {
11264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11265 tg3_free_rings(tp);
11266 }
11267
11268 tg3_full_unlock(tp);
11269
11270 if (err)
11271 goto err_out3;
11272
11273 if (test_irq && tg3_flag(tp, USING_MSI)) {
11274 err = tg3_test_msi(tp);
11275
11276 if (err) {
11277 tg3_full_lock(tp, 0);
11278 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11279 tg3_free_rings(tp);
11280 tg3_full_unlock(tp);
11281
11282 goto err_out2;
11283 }
11284
11285 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11286 u32 val = tr32(PCIE_TRANSACTION_CFG);
11287
11288 tw32(PCIE_TRANSACTION_CFG,
11289 val | PCIE_TRANS_CFG_1SHOT_MSI);
11290 }
11291 }
11292
11293 tg3_phy_start(tp);
11294
11295 tg3_hwmon_open(tp);
11296
11297 tg3_full_lock(tp, 0);
11298
11299 tg3_timer_start(tp);
11300 tg3_flag_set(tp, INIT_COMPLETE);
11301 tg3_enable_ints(tp);
11302
11303 if (init)
11304 tg3_ptp_init(tp);
11305 else
11306 tg3_ptp_resume(tp);
11307
11308
11309 tg3_full_unlock(tp);
11310
11311 netif_tx_start_all_queues(dev);
11312
11313 /*
11314 * Reset the loopback feature if it was turned on while the device was
11315 * down, to make sure that it's reinstalled properly now.
11316 */
11317 if (dev->features & NETIF_F_LOOPBACK)
11318 tg3_set_loopback(dev, dev->features);
11319
11320 return 0;
11321
11322 err_out3:
11323 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11324 struct tg3_napi *tnapi = &tp->napi[i];
11325 free_irq(tnapi->irq_vec, tnapi);
11326 }
11327
11328 err_out2:
11329 tg3_napi_disable(tp);
11330 tg3_napi_fini(tp);
11331 tg3_free_consistent(tp);
11332
11333 err_out1:
11334 tg3_ints_fini(tp);
11335
11336 return err;
11337 }
11338
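/* Undo tg3_start(): stop the timer, hwmon, and PHY, halt the chip, and
 * release the IRQ vectors, NAPI contexts, and DMA memory.
 */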
11339 static void tg3_stop(struct tg3 *tp)
11340 {
11341 int i;
11342
11343 tg3_reset_task_cancel(tp);
11344 tg3_netif_stop(tp);
11345
11346 tg3_timer_stop(tp);
11347
11348 tg3_hwmon_close(tp);
11349
11350 tg3_phy_stop(tp);
11351
11352 tg3_full_lock(tp, 1);
11353
11354 tg3_disable_ints(tp);
11355
11356 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11357 tg3_free_rings(tp);
11358 tg3_flag_clear(tp, INIT_COMPLETE);
11359
11360 tg3_full_unlock(tp);
11361
11362 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11363 struct tg3_napi *tnapi = &tp->napi[i];
11364 free_irq(tnapi->irq_vec, tnapi);
11365 }
11366
11367 tg3_ints_fini(tp);
11368
11369 tg3_napi_fini(tp);
11370
11371 tg3_free_consistent(tp);
11372 }
11373
11374 static int tg3_open(struct net_device *dev)
11375 {
11376 struct tg3 *tp = netdev_priv(dev);
11377 int err;
11378
11379 if (tp->fw_needed) {
11380 err = tg3_request_firmware(tp);
11381 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11382 if (err) {
11383 netdev_warn(tp->dev, "EEE capability disabled\n");
11384 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11385 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11386 netdev_warn(tp->dev, "EEE capability restored\n");
11387 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11388 }
11389 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11390 if (err)
11391 return err;
11392 } else if (err) {
11393 netdev_warn(tp->dev, "TSO capability disabled\n");
11394 tg3_flag_clear(tp, TSO_CAPABLE);
11395 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11396 netdev_notice(tp->dev, "TSO capability restored\n");
11397 tg3_flag_set(tp, TSO_CAPABLE);
11398 }
11399 }
11400
11401 tg3_carrier_off(tp);
11402
11403 err = tg3_power_up(tp);
11404 if (err)
11405 return err;
11406
11407 tg3_full_lock(tp, 0);
11408
11409 tg3_disable_ints(tp);
11410 tg3_flag_clear(tp, INIT_COMPLETE);
11411
11412 tg3_full_unlock(tp);
11413
11414 err = tg3_start(tp,
11415 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11416 true, true);
11417 if (err) {
11418 tg3_frob_aux_power(tp, false);
11419 pci_set_power_state(tp->pdev, PCI_D3hot);
11420 }
11421
11422 if (tg3_flag(tp, PTP_CAPABLE)) {
11423 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11424 &tp->pdev->dev);
11425 if (IS_ERR(tp->ptp_clock))
11426 tp->ptp_clock = NULL;
11427 }
11428
11429 return err;
11430 }
11431
11432 static int tg3_close(struct net_device *dev)
11433 {
11434 struct tg3 *tp = netdev_priv(dev);
11435
11436 tg3_ptp_fini(tp);
11437
11438 tg3_stop(tp);
11439
11440 /* Clear stats across close / open calls */
11441 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11442 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11443
11444 tg3_power_down(tp);
11445
11446 tg3_carrier_off(tp);
11447
11448 return 0;
11449 }
11450
11451 static inline u64 get_stat64(tg3_stat64_t *val)
11452 {
11453 return ((u64)val->high << 32) | ((u64)val->low);
11454 }
11455
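/* On 5700/5701 devices with copper PHYs the FCS error count comes from
 * a PHY counter (MII_TG3_RXR_COUNTERS); other chips report it in the
 * MAC statistics block.
 */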
11456 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11457 {
11458 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11459
11460 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11461 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11462 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11463 u32 val;
11464
11465 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11466 tg3_writephy(tp, MII_TG3_TEST1,
11467 val | MII_TG3_TEST1_CRC_EN);
11468 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11469 } else
11470 val = 0;
11471
11472 tp->phy_crc_errors += val;
11473
11474 return tp->phy_crc_errors;
11475 }
11476
11477 return get_stat64(&hw_stats->rx_fcs_errors);
11478 }
11479
11480 #define ESTAT_ADD(member) \
11481 estats->member = old_estats->member + \
11482 get_stat64(&hw_stats->member)
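/* Each ethtool stat is the pre-reset total saved in estats_prev plus
 * the 64-bit counter currently in the hardware statistics block.
 */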
11483
11484 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11485 {
11486 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11487 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11488
11489 ESTAT_ADD(rx_octets);
11490 ESTAT_ADD(rx_fragments);
11491 ESTAT_ADD(rx_ucast_packets);
11492 ESTAT_ADD(rx_mcast_packets);
11493 ESTAT_ADD(rx_bcast_packets);
11494 ESTAT_ADD(rx_fcs_errors);
11495 ESTAT_ADD(rx_align_errors);
11496 ESTAT_ADD(rx_xon_pause_rcvd);
11497 ESTAT_ADD(rx_xoff_pause_rcvd);
11498 ESTAT_ADD(rx_mac_ctrl_rcvd);
11499 ESTAT_ADD(rx_xoff_entered);
11500 ESTAT_ADD(rx_frame_too_long_errors);
11501 ESTAT_ADD(rx_jabbers);
11502 ESTAT_ADD(rx_undersize_packets);
11503 ESTAT_ADD(rx_in_length_errors);
11504 ESTAT_ADD(rx_out_length_errors);
11505 ESTAT_ADD(rx_64_or_less_octet_packets);
11506 ESTAT_ADD(rx_65_to_127_octet_packets);
11507 ESTAT_ADD(rx_128_to_255_octet_packets);
11508 ESTAT_ADD(rx_256_to_511_octet_packets);
11509 ESTAT_ADD(rx_512_to_1023_octet_packets);
11510 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11511 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11512 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11513 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11514 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11515
11516 ESTAT_ADD(tx_octets);
11517 ESTAT_ADD(tx_collisions);
11518 ESTAT_ADD(tx_xon_sent);
11519 ESTAT_ADD(tx_xoff_sent);
11520 ESTAT_ADD(tx_flow_control);
11521 ESTAT_ADD(tx_mac_errors);
11522 ESTAT_ADD(tx_single_collisions);
11523 ESTAT_ADD(tx_mult_collisions);
11524 ESTAT_ADD(tx_deferred);
11525 ESTAT_ADD(tx_excessive_collisions);
11526 ESTAT_ADD(tx_late_collisions);
11527 ESTAT_ADD(tx_collide_2times);
11528 ESTAT_ADD(tx_collide_3times);
11529 ESTAT_ADD(tx_collide_4times);
11530 ESTAT_ADD(tx_collide_5times);
11531 ESTAT_ADD(tx_collide_6times);
11532 ESTAT_ADD(tx_collide_7times);
11533 ESTAT_ADD(tx_collide_8times);
11534 ESTAT_ADD(tx_collide_9times);
11535 ESTAT_ADD(tx_collide_10times);
11536 ESTAT_ADD(tx_collide_11times);
11537 ESTAT_ADD(tx_collide_12times);
11538 ESTAT_ADD(tx_collide_13times);
11539 ESTAT_ADD(tx_collide_14times);
11540 ESTAT_ADD(tx_collide_15times);
11541 ESTAT_ADD(tx_ucast_packets);
11542 ESTAT_ADD(tx_mcast_packets);
11543 ESTAT_ADD(tx_bcast_packets);
11544 ESTAT_ADD(tx_carrier_sense_errors);
11545 ESTAT_ADD(tx_discards);
11546 ESTAT_ADD(tx_errors);
11547
11548 ESTAT_ADD(dma_writeq_full);
11549 ESTAT_ADD(dma_write_prioq_full);
11550 ESTAT_ADD(rxbds_empty);
11551 ESTAT_ADD(rx_discards);
11552 ESTAT_ADD(rx_errors);
11553 ESTAT_ADD(rx_threshold_hit);
11554
11555 ESTAT_ADD(dma_readq_full);
11556 ESTAT_ADD(dma_read_prioq_full);
11557 ESTAT_ADD(tx_comp_queue_full);
11558
11559 ESTAT_ADD(ring_set_send_prod_index);
11560 ESTAT_ADD(ring_status_update);
11561 ESTAT_ADD(nic_irqs);
11562 ESTAT_ADD(nic_avoided_irqs);
11563 ESTAT_ADD(nic_tx_threshold_hit);
11564
11565 ESTAT_ADD(mbuf_lwm_thresh_hit);
11566 }
11567
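/* Fold the live hardware counters into the totals carried across the
 * last close/open cycle to produce the standard rtnl link stats.
 */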
11568 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11569 {
11570 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11571 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11572
11573 stats->rx_packets = old_stats->rx_packets +
11574 get_stat64(&hw_stats->rx_ucast_packets) +
11575 get_stat64(&hw_stats->rx_mcast_packets) +
11576 get_stat64(&hw_stats->rx_bcast_packets);
11577
11578 stats->tx_packets = old_stats->tx_packets +
11579 get_stat64(&hw_stats->tx_ucast_packets) +
11580 get_stat64(&hw_stats->tx_mcast_packets) +
11581 get_stat64(&hw_stats->tx_bcast_packets);
11582
11583 stats->rx_bytes = old_stats->rx_bytes +
11584 get_stat64(&hw_stats->rx_octets);
11585 stats->tx_bytes = old_stats->tx_bytes +
11586 get_stat64(&hw_stats->tx_octets);
11587
11588 stats->rx_errors = old_stats->rx_errors +
11589 get_stat64(&hw_stats->rx_errors);
11590 stats->tx_errors = old_stats->tx_errors +
11591 get_stat64(&hw_stats->tx_errors) +
11592 get_stat64(&hw_stats->tx_mac_errors) +
11593 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11594 get_stat64(&hw_stats->tx_discards);
11595
11596 stats->multicast = old_stats->multicast +
11597 get_stat64(&hw_stats->rx_mcast_packets);
11598 stats->collisions = old_stats->collisions +
11599 get_stat64(&hw_stats->tx_collisions);
11600
11601 stats->rx_length_errors = old_stats->rx_length_errors +
11602 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11603 get_stat64(&hw_stats->rx_undersize_packets);
11604
11605 stats->rx_over_errors = old_stats->rx_over_errors +
11606 get_stat64(&hw_stats->rxbds_empty);
11607 stats->rx_frame_errors = old_stats->rx_frame_errors +
11608 get_stat64(&hw_stats->rx_align_errors);
11609 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11610 get_stat64(&hw_stats->tx_discards);
11611 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11612 get_stat64(&hw_stats->tx_carrier_sense_errors);
11613
11614 stats->rx_crc_errors = old_stats->rx_crc_errors +
11615 tg3_calc_crc_errors(tp);
11616
11617 stats->rx_missed_errors = old_stats->rx_missed_errors +
11618 get_stat64(&hw_stats->rx_discards);
11619
11620 stats->rx_dropped = tp->rx_dropped;
11621 stats->tx_dropped = tp->tx_dropped;
11622 }
11623
11624 static int tg3_get_regs_len(struct net_device *dev)
11625 {
11626 return TG3_REG_BLK_SIZE;
11627 }
11628
11629 static void tg3_get_regs(struct net_device *dev,
11630 struct ethtool_regs *regs, void *_p)
11631 {
11632 struct tg3 *tp = netdev_priv(dev);
11633
11634 regs->version = 0;
11635
11636 memset(_p, 0, TG3_REG_BLK_SIZE);
11637
11638 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11639 return;
11640
11641 tg3_full_lock(tp, 0);
11642
11643 tg3_dump_legacy_regs(tp, (u32 *)_p);
11644
11645 tg3_full_unlock(tp);
11646 }
11647
11648 static int tg3_get_eeprom_len(struct net_device *dev)
11649 {
11650 struct tg3 *tp = netdev_priv(dev);
11651
11652 return tp->nvram_size;
11653 }
11654
11655 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11656 {
11657 struct tg3 *tp = netdev_priv(dev);
11658 int ret;
11659 u8 *pd;
11660 u32 i, offset, len, b_offset, b_count;
11661 __be32 val;
11662
11663 if (tg3_flag(tp, NO_NVRAM))
11664 return -EINVAL;
11665
11666 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11667 return -EAGAIN;
11668
11669 offset = eeprom->offset;
11670 len = eeprom->len;
11671 eeprom->len = 0;
11672
11673 eeprom->magic = TG3_EEPROM_MAGIC;
11674
11675 if (offset & 3) {
11676 /* adjustments to start on required 4 byte boundary */
11677 b_offset = offset & 3;
11678 b_count = 4 - b_offset;
11679 if (b_count > len) {
11680 /* i.e. offset=1 len=2 */
11681 b_count = len;
11682 }
11683 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11684 if (ret)
11685 return ret;
11686 memcpy(data, ((char *)&val) + b_offset, b_count);
11687 len -= b_count;
11688 offset += b_count;
11689 eeprom->len += b_count;
11690 }
11691
11692 /* read bytes up to the last 4 byte boundary */
11693 pd = &data[eeprom->len];
11694 for (i = 0; i < (len - (len & 3)); i += 4) {
11695 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11696 if (ret) {
11697 eeprom->len += i;
11698 return ret;
11699 }
11700 memcpy(pd + i, &val, 4);
11701 }
11702 eeprom->len += i;
11703
11704 if (len & 3) {
11705 /* read last bytes not ending on 4 byte boundary */
11706 pd = &data[eeprom->len];
11707 b_count = len & 3;
11708 b_offset = offset + len - b_count;
11709 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11710 if (ret)
11711 return ret;
11712 memcpy(pd, &val, b_count);
11713 eeprom->len += b_count;
11714 }
11715 return 0;
11716 }
11717
11718 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11719 {
11720 struct tg3 *tp = netdev_priv(dev);
11721 int ret;
11722 u32 offset, len, b_offset, odd_len;
11723 u8 *buf;
11724 __be32 start, end;
11725
11726 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11727 return -EAGAIN;
11728
11729 if (tg3_flag(tp, NO_NVRAM) ||
11730 eeprom->magic != TG3_EEPROM_MAGIC)
11731 return -EINVAL;
11732
11733 offset = eeprom->offset;
11734 len = eeprom->len;
11735
11736 if ((b_offset = (offset & 3))) {
11737 /* adjustments to start on required 4 byte boundary */
11738 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11739 if (ret)
11740 return ret;
11741 len += b_offset;
11742 offset &= ~3;
11743 if (len < 4)
11744 len = 4;
11745 }
11746
11747 odd_len = 0;
11748 if (len & 3) {
11749 /* adjustments to end on required 4 byte boundary */
11750 odd_len = 1;
11751 len = (len + 3) & ~3;
11752 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11753 if (ret)
11754 return ret;
11755 }
11756
11757 buf = data;
11758 if (b_offset || odd_len) {
11759 buf = kmalloc(len, GFP_KERNEL);
11760 if (!buf)
11761 return -ENOMEM;
11762 if (b_offset)
11763 memcpy(buf, &start, 4);
11764 if (odd_len)
11765 memcpy(buf+len-4, &end, 4);
11766 memcpy(buf + b_offset, data, eeprom->len);
11767 }
11768
11769 ret = tg3_nvram_write_block(tp, offset, len, buf);
11770
11771 if (buf != data)
11772 kfree(buf);
11773
11774 return ret;
11775 }
11776
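/* Report the current link settings to ethtool. When phylib manages
 * the PHY the query is forwarded to it; otherwise the answer is
 * assembled from tp->link_config and the PHY capability flags.
 */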
11777 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11778 {
11779 struct tg3 *tp = netdev_priv(dev);
11780
11781 if (tg3_flag(tp, USE_PHYLIB)) {
11782 struct phy_device *phydev;
11783 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11784 return -EAGAIN;
11785 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11786 return phy_ethtool_gset(phydev, cmd);
11787 }
11788
11789 cmd->supported = (SUPPORTED_Autoneg);
11790
11791 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11792 cmd->supported |= (SUPPORTED_1000baseT_Half |
11793 SUPPORTED_1000baseT_Full);
11794
11795 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11796 cmd->supported |= (SUPPORTED_100baseT_Half |
11797 SUPPORTED_100baseT_Full |
11798 SUPPORTED_10baseT_Half |
11799 SUPPORTED_10baseT_Full |
11800 SUPPORTED_TP);
11801 cmd->port = PORT_TP;
11802 } else {
11803 cmd->supported |= SUPPORTED_FIBRE;
11804 cmd->port = PORT_FIBRE;
11805 }
11806
11807 cmd->advertising = tp->link_config.advertising;
11808 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11809 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11810 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11811 cmd->advertising |= ADVERTISED_Pause;
11812 } else {
11813 cmd->advertising |= ADVERTISED_Pause |
11814 ADVERTISED_Asym_Pause;
11815 }
11816 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11817 cmd->advertising |= ADVERTISED_Asym_Pause;
11818 }
11819 }
11820 if (netif_running(dev) && tp->link_up) {
11821 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11822 cmd->duplex = tp->link_config.active_duplex;
11823 cmd->lp_advertising = tp->link_config.rmt_adv;
11824 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11825 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11826 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11827 else
11828 cmd->eth_tp_mdix = ETH_TP_MDI;
11829 }
11830 } else {
11831 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11832 cmd->duplex = DUPLEX_UNKNOWN;
11833 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11834 }
11835 cmd->phy_address = tp->phy_addr;
11836 cmd->transceiver = XCVR_INTERNAL;
11837 cmd->autoneg = tp->link_config.autoneg;
11838 cmd->maxtxpkt = 0;
11839 cmd->maxrxpkt = 0;
11840 return 0;
11841 }
11842
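/* Apply new link settings (e.g. "ethtool -s ethX speed 100 duplex
 * full autoneg off"). Advertised modes are validated against what the
 * PHY or SerDes can actually do before the config is committed and
 * the link is renegotiated.
 */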
11843 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11844 {
11845 struct tg3 *tp = netdev_priv(dev);
11846 u32 speed = ethtool_cmd_speed(cmd);
11847
11848 if (tg3_flag(tp, USE_PHYLIB)) {
11849 struct phy_device *phydev;
11850 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11851 return -EAGAIN;
11852 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11853 return phy_ethtool_sset(phydev, cmd);
11854 }
11855
11856 if (cmd->autoneg != AUTONEG_ENABLE &&
11857 cmd->autoneg != AUTONEG_DISABLE)
11858 return -EINVAL;
11859
11860 if (cmd->autoneg == AUTONEG_DISABLE &&
11861 cmd->duplex != DUPLEX_FULL &&
11862 cmd->duplex != DUPLEX_HALF)
11863 return -EINVAL;
11864
11865 if (cmd->autoneg == AUTONEG_ENABLE) {
11866 u32 mask = ADVERTISED_Autoneg |
11867 ADVERTISED_Pause |
11868 ADVERTISED_Asym_Pause;
11869
11870 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11871 mask |= ADVERTISED_1000baseT_Half |
11872 ADVERTISED_1000baseT_Full;
11873
11874 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11875 mask |= ADVERTISED_100baseT_Half |
11876 ADVERTISED_100baseT_Full |
11877 ADVERTISED_10baseT_Half |
11878 ADVERTISED_10baseT_Full |
11879 ADVERTISED_TP;
11880 else
11881 mask |= ADVERTISED_FIBRE;
11882
11883 if (cmd->advertising & ~mask)
11884 return -EINVAL;
11885
11886 mask &= (ADVERTISED_1000baseT_Half |
11887 ADVERTISED_1000baseT_Full |
11888 ADVERTISED_100baseT_Half |
11889 ADVERTISED_100baseT_Full |
11890 ADVERTISED_10baseT_Half |
11891 ADVERTISED_10baseT_Full);
11892
11893 cmd->advertising &= mask;
11894 } else {
11895 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11896 if (speed != SPEED_1000)
11897 return -EINVAL;
11898
11899 if (cmd->duplex != DUPLEX_FULL)
11900 return -EINVAL;
11901 } else {
11902 if (speed != SPEED_100 &&
11903 speed != SPEED_10)
11904 return -EINVAL;
11905 }
11906 }
11907
11908 tg3_full_lock(tp, 0);
11909
11910 tp->link_config.autoneg = cmd->autoneg;
11911 if (cmd->autoneg == AUTONEG_ENABLE) {
11912 tp->link_config.advertising = (cmd->advertising |
11913 ADVERTISED_Autoneg);
11914 tp->link_config.speed = SPEED_UNKNOWN;
11915 tp->link_config.duplex = DUPLEX_UNKNOWN;
11916 } else {
11917 tp->link_config.advertising = 0;
11918 tp->link_config.speed = speed;
11919 tp->link_config.duplex = cmd->duplex;
11920 }
11921
11922 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11923
11924 tg3_warn_mgmt_link_flap(tp);
11925
11926 if (netif_running(dev))
11927 tg3_setup_phy(tp, true);
11928
11929 tg3_full_unlock(tp);
11930
11931 return 0;
11932 }
11933
11934 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11935 {
11936 struct tg3 *tp = netdev_priv(dev);
11937
11938 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11939 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11940 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11941 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11942 }
11943
11944 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11945 {
11946 struct tg3 *tp = netdev_priv(dev);
11947
11948 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11949 wol->supported = WAKE_MAGIC;
11950 else
11951 wol->supported = 0;
11952 wol->wolopts = 0;
11953 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11954 wol->wolopts = WAKE_MAGIC;
11955 memset(&wol->sopass, 0, sizeof(wol->sopass));
11956 }
11957
11958 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11959 {
11960 struct tg3 *tp = netdev_priv(dev);
11961 struct device *dp = &tp->pdev->dev;
11962
11963 if (wol->wolopts & ~WAKE_MAGIC)
11964 return -EINVAL;
11965 if ((wol->wolopts & WAKE_MAGIC) &&
11966 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11967 return -EINVAL;
11968
11969 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11970
11971 spin_lock_bh(&tp->lock);
11972 if (device_may_wakeup(dp))
11973 tg3_flag_set(tp, WOL_ENABLE);
11974 else
11975 tg3_flag_clear(tp, WOL_ENABLE);
11976 spin_unlock_bh(&tp->lock);
11977
11978 return 0;
11979 }
11980
11981 static u32 tg3_get_msglevel(struct net_device *dev)
11982 {
11983 struct tg3 *tp = netdev_priv(dev);
11984 return tp->msg_enable;
11985 }
11986
11987 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11988 {
11989 struct tg3 *tp = netdev_priv(dev);
11990 tp->msg_enable = value;
11991 }
11992
11993 static int tg3_nway_reset(struct net_device *dev)
11994 {
11995 struct tg3 *tp = netdev_priv(dev);
11996 int r;
11997
11998 if (!netif_running(dev))
11999 return -EAGAIN;
12000
12001 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12002 return -EINVAL;
12003
12004 tg3_warn_mgmt_link_flap(tp);
12005
12006 if (tg3_flag(tp, USE_PHYLIB)) {
12007 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12008 return -EAGAIN;
12009 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12010 } else {
12011 u32 bmcr;
12012
12013 spin_lock_bh(&tp->lock);
12014 r = -EINVAL;
12016 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12017 ((bmcr & BMCR_ANENABLE) ||
12018 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12019 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12020 BMCR_ANENABLE);
12021 r = 0;
12022 }
12023 spin_unlock_bh(&tp->lock);
12024 }
12025
12026 return r;
12027 }
12028
12029 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12030 {
12031 struct tg3 *tp = netdev_priv(dev);
12032
12033 ering->rx_max_pending = tp->rx_std_ring_mask;
12034 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12035 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12036 else
12037 ering->rx_jumbo_max_pending = 0;
12038
12039 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12040
12041 ering->rx_pending = tp->rx_pending;
12042 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12043 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12044 else
12045 ering->rx_jumbo_pending = 0;
12046
12047 ering->tx_pending = tp->napi[0].tx_pending;
12048 }
12049
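/* Validate the requested ring sizes against the hardware ring masks
 * before applying them (e.g. "ethtool -G ethX rx 511 tx 511"). The tx
 * ring must stay larger than MAX_SKB_FRAGS - three times that on
 * TSO_BUG chips - so a maximally fragmented skb can still be queued.
 * Changing sizes on a running interface needs a full halt/restart.
 */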
12050 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12051 {
12052 struct tg3 *tp = netdev_priv(dev);
12053 int i, irq_sync = 0, err = 0;
12054
12055 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12056 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12057 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12058 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12059 (tg3_flag(tp, TSO_BUG) &&
12060 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12061 return -EINVAL;
12062
12063 if (netif_running(dev)) {
12064 tg3_phy_stop(tp);
12065 tg3_netif_stop(tp);
12066 irq_sync = 1;
12067 }
12068
12069 tg3_full_lock(tp, irq_sync);
12070
12071 tp->rx_pending = ering->rx_pending;
12072
12073 if (tg3_flag(tp, MAX_RXPEND_64) &&
12074 tp->rx_pending > 63)
12075 tp->rx_pending = 63;
12076
12077 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12078 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12079
12080 for (i = 0; i < tp->irq_max; i++)
12081 tp->napi[i].tx_pending = ering->tx_pending;
12082
12083 if (netif_running(dev)) {
12084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12085 err = tg3_restart_hw(tp, false);
12086 if (!err)
12087 tg3_netif_start(tp);
12088 }
12089
12090 tg3_full_unlock(tp);
12091
12092 if (irq_sync && !err)
12093 tg3_phy_start(tp);
12094
12095 return err;
12096 }
12097
12098 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12099 {
12100 struct tg3 *tp = netdev_priv(dev);
12101
12102 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12103
12104 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12105 epause->rx_pause = 1;
12106 else
12107 epause->rx_pause = 0;
12108
12109 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12110 epause->tx_pause = 1;
12111 else
12112 epause->tx_pause = 0;
12113 }
12114
12115 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12116 {
12117 struct tg3 *tp = netdev_priv(dev);
12118 int err = 0;
12119
12120 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12121 tg3_warn_mgmt_link_flap(tp);
12122
12123 if (tg3_flag(tp, USE_PHYLIB)) {
12124 u32 newadv;
12125 struct phy_device *phydev;
12126
12127 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12128
12129 if (!(phydev->supported & SUPPORTED_Pause) ||
12130 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12131 (epause->rx_pause != epause->tx_pause)))
12132 return -EINVAL;
12133
12134 tp->link_config.flowctrl = 0;
12135 if (epause->rx_pause) {
12136 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12137
12138 if (epause->tx_pause) {
12139 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12140 newadv = ADVERTISED_Pause;
12141 } else {
12142 newadv = ADVERTISED_Pause |
12143 ADVERTISED_Asym_Pause;
}
12144 } else if (epause->tx_pause) {
12145 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12146 newadv = ADVERTISED_Asym_Pause;
12147 } else
12148 newadv = 0;
12149
12150 if (epause->autoneg)
12151 tg3_flag_set(tp, PAUSE_AUTONEG);
12152 else
12153 tg3_flag_clear(tp, PAUSE_AUTONEG);
12154
12155 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12156 u32 oldadv = phydev->advertising &
12157 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12158 if (oldadv != newadv) {
12159 phydev->advertising &=
12160 ~(ADVERTISED_Pause |
12161 ADVERTISED_Asym_Pause);
12162 phydev->advertising |= newadv;
12163 if (phydev->autoneg) {
12164 /*
12165 * Always renegotiate the link to
12166 * inform our link partner of our
12167 * flow control settings, even if the
12168 * flow control is forced. Let
12169 * tg3_adjust_link() do the final
12170 * flow control setup.
12171 */
12172 return phy_start_aneg(phydev);
12173 }
12174 }
12175
12176 if (!epause->autoneg)
12177 tg3_setup_flow_control(tp, 0, 0);
12178 } else {
12179 tp->link_config.advertising &=
12180 ~(ADVERTISED_Pause |
12181 ADVERTISED_Asym_Pause);
12182 tp->link_config.advertising |= newadv;
12183 }
12184 } else {
12185 int irq_sync = 0;
12186
12187 if (netif_running(dev)) {
12188 tg3_netif_stop(tp);
12189 irq_sync = 1;
12190 }
12191
12192 tg3_full_lock(tp, irq_sync);
12193
12194 if (epause->autoneg)
12195 tg3_flag_set(tp, PAUSE_AUTONEG);
12196 else
12197 tg3_flag_clear(tp, PAUSE_AUTONEG);
12198 if (epause->rx_pause)
12199 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12200 else
12201 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12202 if (epause->tx_pause)
12203 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12204 else
12205 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12206
12207 if (netif_running(dev)) {
12208 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12209 err = tg3_restart_hw(tp, false);
12210 if (!err)
12211 tg3_netif_start(tp);
12212 }
12213
12214 tg3_full_unlock(tp);
12215 }
12216
12217 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12218
12219 return err;
12220 }
12221
12222 static int tg3_get_sset_count(struct net_device *dev, int sset)
12223 {
12224 switch (sset) {
12225 case ETH_SS_TEST:
12226 return TG3_NUM_TEST;
12227 case ETH_SS_STATS:
12228 return TG3_NUM_STATS;
12229 default:
12230 return -EOPNOTSUPP;
12231 }
12232 }
12233
12234 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12235 u32 *rules __always_unused)
12236 {
12237 struct tg3 *tp = netdev_priv(dev);
12238
12239 if (!tg3_flag(tp, SUPPORT_MSIX))
12240 return -EOPNOTSUPP;
12241
12242 switch (info->cmd) {
12243 case ETHTOOL_GRXRINGS:
12244 if (netif_running(tp->dev)) {
12245 info->data = tp->rxq_cnt;
12246 } else {
12247 info->data = num_online_cpus();
12248 if (info->data > TG3_RSS_MAX_NUM_QS)
12249 info->data = TG3_RSS_MAX_NUM_QS;
12250 }
12251
12252 /* The first interrupt vector only
12253 * handles link interrupts.
12254 */
12255 info->data -= 1;
12256 return 0;
12257
12258 default:
12259 return -EOPNOTSUPP;
12260 }
12261 }
12262
12263 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12264 {
12265 u32 size = 0;
12266 struct tg3 *tp = netdev_priv(dev);
12267
12268 if (tg3_flag(tp, SUPPORT_MSIX))
12269 size = TG3_RSS_INDIR_TBL_SIZE;
12270
12271 return size;
12272 }
12273
12274 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12275 {
12276 struct tg3 *tp = netdev_priv(dev);
12277 int i;
12278
12279 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12280 indir[i] = tp->rss_ind_tbl[i];
12281
12282 return 0;
12283 }
12284
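/* Install a new RSS indirection table (e.g. "ethtool -X ethX
 * equal N"). The table is cached in tp->rss_ind_tbl and, when RSS is
 * active, immediately written through to the hardware.
 */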
12285 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12286 {
12287 struct tg3 *tp = netdev_priv(dev);
12288 size_t i;
12289
12290 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12291 tp->rss_ind_tbl[i] = indir[i];
12292
12293 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12294 return 0;
12295
12296 /* It is legal to write the indirection
12297 * table while the device is running.
12298 */
12299 tg3_full_lock(tp, 0);
12300 tg3_rss_write_indir_tbl(tp);
12301 tg3_full_unlock(tp);
12302
12303 return 0;
12304 }
12305
12306 static void tg3_get_channels(struct net_device *dev,
12307 struct ethtool_channels *channel)
12308 {
12309 struct tg3 *tp = netdev_priv(dev);
12310 u32 deflt_qs = netif_get_num_default_rss_queues();
12311
12312 channel->max_rx = tp->rxq_max;
12313 channel->max_tx = tp->txq_max;
12314
12315 if (netif_running(dev)) {
12316 channel->rx_count = tp->rxq_cnt;
12317 channel->tx_count = tp->txq_cnt;
12318 } else {
12319 if (tp->rxq_req)
12320 channel->rx_count = tp->rxq_req;
12321 else
12322 channel->rx_count = min(deflt_qs, tp->rxq_max);
12323
12324 if (tp->txq_req)
12325 channel->tx_count = tp->txq_req;
12326 else
12327 channel->tx_count = min(deflt_qs, tp->txq_max);
12328 }
12329 }
12330
12331 static int tg3_set_channels(struct net_device *dev,
12332 struct ethtool_channels *channel)
12333 {
12334 struct tg3 *tp = netdev_priv(dev);
12335
12336 if (!tg3_flag(tp, SUPPORT_MSIX))
12337 return -EOPNOTSUPP;
12338
12339 if (channel->rx_count > tp->rxq_max ||
12340 channel->tx_count > tp->txq_max)
12341 return -EINVAL;
12342
12343 tp->rxq_req = channel->rx_count;
12344 tp->txq_req = channel->tx_count;
12345
12346 if (!netif_running(dev))
12347 return 0;
12348
12349 tg3_stop(tp);
12350
12351 tg3_carrier_off(tp);
12352
12353 tg3_start(tp, true, false, false);
12354
12355 return 0;
12356 }
12357
12358 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12359 {
12360 switch (stringset) {
12361 case ETH_SS_STATS:
12362 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12363 break;
12364 case ETH_SS_TEST:
12365 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12366 break;
12367 default:
12368 WARN_ON(1); /* unhandled stringset */
12369 break;
12370 }
12371 }
12372
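/* Identify-LED support for "ethtool -p ethX": force the LED on or
 * off under the core's one-second cycle and restore tp->led_ctrl
 * when the blinking ends.
 */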
12373 static int tg3_set_phys_id(struct net_device *dev,
12374 enum ethtool_phys_id_state state)
12375 {
12376 struct tg3 *tp = netdev_priv(dev);
12377
12378 if (!netif_running(tp->dev))
12379 return -EAGAIN;
12380
12381 switch (state) {
12382 case ETHTOOL_ID_ACTIVE:
12383 return 1; /* cycle on/off once per second */
12384
12385 case ETHTOOL_ID_ON:
12386 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12387 LED_CTRL_1000MBPS_ON |
12388 LED_CTRL_100MBPS_ON |
12389 LED_CTRL_10MBPS_ON |
12390 LED_CTRL_TRAFFIC_OVERRIDE |
12391 LED_CTRL_TRAFFIC_BLINK |
12392 LED_CTRL_TRAFFIC_LED);
12393 break;
12394
12395 case ETHTOOL_ID_OFF:
12396 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12397 LED_CTRL_TRAFFIC_OVERRIDE);
12398 break;
12399
12400 case ETHTOOL_ID_INACTIVE:
12401 tw32(MAC_LED_CTRL, tp->led_ctrl);
12402 break;
12403 }
12404
12405 return 0;
12406 }
12407
12408 static void tg3_get_ethtool_stats(struct net_device *dev,
12409 struct ethtool_stats *estats, u64 *tmp_stats)
12410 {
12411 struct tg3 *tp = netdev_priv(dev);
12412
12413 if (tp->hw_stats)
12414 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12415 else
12416 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12417 }
12418
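/* Locate and read the VPD block. Newer images publish an extended
 * VPD area through the NVRAM directory; otherwise the fixed default
 * offset/length is used. Images that are not in EEPROM format are
 * read through the PCI VPD capability instead. Returns a kmalloc()ed
 * buffer the caller must free, or NULL on failure.
 */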
12419 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12420 {
12421 int i;
12422 __be32 *buf;
12423 u32 offset = 0, len = 0;
12424 u32 magic, val;
12425
12426 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12427 return NULL;
12428
12429 if (magic == TG3_EEPROM_MAGIC) {
12430 for (offset = TG3_NVM_DIR_START;
12431 offset < TG3_NVM_DIR_END;
12432 offset += TG3_NVM_DIRENT_SIZE) {
12433 if (tg3_nvram_read(tp, offset, &val))
12434 return NULL;
12435
12436 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12437 TG3_NVM_DIRTYPE_EXTVPD)
12438 break;
12439 }
12440
12441 if (offset != TG3_NVM_DIR_END) {
12442 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12443 if (tg3_nvram_read(tp, offset + 4, &offset))
12444 return NULL;
12445
12446 offset = tg3_nvram_logical_addr(tp, offset);
12447 }
12448 }
12449
12450 if (!offset || !len) {
12451 offset = TG3_NVM_VPD_OFF;
12452 len = TG3_NVM_VPD_LEN;
12453 }
12454
12455 buf = kmalloc(len, GFP_KERNEL);
12456 if (buf == NULL)
12457 return NULL;
12458
12459 if (magic == TG3_EEPROM_MAGIC) {
12460 for (i = 0; i < len; i += 4) {
12461 /* The data is in little-endian format in NVRAM.
12462 * Use the big-endian read routines to preserve
12463 * the byte order as it exists in NVRAM.
12464 */
12465 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12466 goto error;
12467 }
12468 } else {
12469 u8 *ptr;
12470 ssize_t cnt;
12471 unsigned int pos = 0;
12472
12473 ptr = (u8 *)&buf[0];
12474 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12475 cnt = pci_read_vpd(tp->pdev, pos,
12476 len - pos, ptr);
12477 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12478 cnt = 0;
12479 else if (cnt < 0)
12480 goto error;
12481 }
12482 if (pos != len)
12483 goto error;
12484 }
12485
12486 *vpdlen = len;
12487
12488 return buf;
12489
12490 error:
12491 kfree(buf);
12492 return NULL;
12493 }
12494
12495 #define NVRAM_TEST_SIZE 0x100
12496 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12497 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12498 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12499 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12500 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12501 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12502 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12503 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12504
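/* NVRAM self test: size the image from its header magic, read it in,
 * and verify whichever integrity scheme that format uses - legacy
 * CRCs, a selfboot byte checksum, selfboot hardware parity bits, or
 * the VPD checksum keyword.
 */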
12505 static int tg3_test_nvram(struct tg3 *tp)
12506 {
12507 u32 csum, magic, len;
12508 __be32 *buf;
12509 int i, j, k, err = 0, size;
12510
12511 if (tg3_flag(tp, NO_NVRAM))
12512 return 0;
12513
12514 if (tg3_nvram_read(tp, 0, &magic) != 0)
12515 return -EIO;
12516
12517 if (magic == TG3_EEPROM_MAGIC)
12518 size = NVRAM_TEST_SIZE;
12519 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12520 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12521 TG3_EEPROM_SB_FORMAT_1) {
12522 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12523 case TG3_EEPROM_SB_REVISION_0:
12524 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12525 break;
12526 case TG3_EEPROM_SB_REVISION_2:
12527 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12528 break;
12529 case TG3_EEPROM_SB_REVISION_3:
12530 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12531 break;
12532 case TG3_EEPROM_SB_REVISION_4:
12533 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12534 break;
12535 case TG3_EEPROM_SB_REVISION_5:
12536 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12537 break;
12538 case TG3_EEPROM_SB_REVISION_6:
12539 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12540 break;
12541 default:
12542 return -EIO;
12543 }
12544 } else
12545 return 0;
12546 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12547 size = NVRAM_SELFBOOT_HW_SIZE;
12548 else
12549 return -EIO;
12550
12551 buf = kmalloc(size, GFP_KERNEL);
12552 if (buf == NULL)
12553 return -ENOMEM;
12554
12555 err = -EIO;
12556 for (i = 0, j = 0; i < size; i += 4, j++) {
12557 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12558 if (err)
12559 break;
12560 }
12561 if (i < size)
12562 goto out;
12563
12564 /* Selfboot format */
12565 magic = be32_to_cpu(buf[0]);
12566 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12567 TG3_EEPROM_MAGIC_FW) {
12568 u8 *buf8 = (u8 *) buf, csum8 = 0;
12569
12570 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12571 TG3_EEPROM_SB_REVISION_2) {
12572 /* For rev 2, the csum doesn't include the MBA (Multi-Boot Agent) image. */
12573 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12574 csum8 += buf8[i];
12575 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12576 csum8 += buf8[i];
12577 } else {
12578 for (i = 0; i < size; i++)
12579 csum8 += buf8[i];
12580 }
12581
12582 if (csum8 == 0) {
12583 err = 0;
12584 goto out;
12585 }
12586
12587 err = -EIO;
12588 goto out;
12589 }
12590
12591 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12592 TG3_EEPROM_MAGIC_HW) {
12593 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12594 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12595 u8 *buf8 = (u8 *) buf;
12596
12597 /* Separate the parity bits and the data bytes. */
12598 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12599 if ((i == 0) || (i == 8)) {
12600 int l;
12601 u8 msk;
12602
12603 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12604 parity[k++] = buf8[i] & msk;
12605 i++;
12606 } else if (i == 16) {
12607 int l;
12608 u8 msk;
12609
12610 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12611 parity[k++] = buf8[i] & msk;
12612 i++;
12613
12614 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12615 parity[k++] = buf8[i] & msk;
12616 i++;
12617 }
12618 data[j++] = buf8[i];
12619 }
12620
12621 err = -EIO;
12622 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12623 u8 hw8 = hweight8(data[i]);
12624
12625 if ((hw8 & 0x1) && parity[i])
12626 goto out;
12627 else if (!(hw8 & 0x1) && !parity[i])
12628 goto out;
12629 }
12630 err = 0;
12631 goto out;
12632 }
12633
12634 err = -EIO;
12635
12636 /* Bootstrap checksum at offset 0x10 */
12637 csum = calc_crc((unsigned char *) buf, 0x10);
12638 if (csum != le32_to_cpu(buf[0x10/4]))
12639 goto out;
12640
12641 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12642 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12643 if (csum != le32_to_cpu(buf[0xfc/4]))
12644 goto out;
12645
12646 kfree(buf);
12647
12648 buf = tg3_vpd_readblock(tp, &len);
12649 if (!buf)
12650 return -ENOMEM;
12651
12652 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12653 if (i > 0) {
12654 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12655 if (j < 0)
12656 goto out;
12657
12658 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12659 goto out;
12660
12661 i += PCI_VPD_LRDT_TAG_SIZE;
12662 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12663 PCI_VPD_RO_KEYWORD_CHKSUM);
12664 if (j > 0) {
12665 u8 csum8 = 0;
12666
12667 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12668
12669 for (i = 0; i <= j; i++)
12670 csum8 += ((u8 *)buf)[i];
12671
12672 if (csum8)
12673 goto out;
12674 }
12675 }
12676
12677 err = 0;
12678
12679 out:
12680 kfree(buf);
12681 return err;
12682 }
12683
12684 #define TG3_SERDES_TIMEOUT_SEC 2
12685 #define TG3_COPPER_TIMEOUT_SEC 6
12686
12687 static int tg3_test_link(struct tg3 *tp)
12688 {
12689 int i, max;
12690
12691 if (!netif_running(tp->dev))
12692 return -ENODEV;
12693
12694 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12695 max = TG3_SERDES_TIMEOUT_SEC;
12696 else
12697 max = TG3_COPPER_TIMEOUT_SEC;
12698
12699 for (i = 0; i < max; i++) {
12700 if (tp->link_up)
12701 return 0;
12702
12703 if (msleep_interruptible(1000))
12704 break;
12705 }
12706
12707 return -EIO;
12708 }
12709
12710 /* Only test the commonly used registers */
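/* Each entry pairs a read_mask (read-only bits that must keep their
 * original value across writes) with a write_mask (read/write bits
 * that must accept both all-zeros and all-ones patterns).
 */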
12711 static int tg3_test_registers(struct tg3 *tp)
12712 {
12713 int i, is_5705, is_5750;
12714 u32 offset, read_mask, write_mask, val, save_val, read_val;
12715 static struct {
12716 u16 offset;
12717 u16 flags;
12718 #define TG3_FL_5705 0x1
12719 #define TG3_FL_NOT_5705 0x2
12720 #define TG3_FL_NOT_5788 0x4
12721 #define TG3_FL_NOT_5750 0x8
12722 u32 read_mask;
12723 u32 write_mask;
12724 } reg_tbl[] = {
12725 /* MAC Control Registers */
12726 { MAC_MODE, TG3_FL_NOT_5705,
12727 0x00000000, 0x00ef6f8c },
12728 { MAC_MODE, TG3_FL_5705,
12729 0x00000000, 0x01ef6b8c },
12730 { MAC_STATUS, TG3_FL_NOT_5705,
12731 0x03800107, 0x00000000 },
12732 { MAC_STATUS, TG3_FL_5705,
12733 0x03800100, 0x00000000 },
12734 { MAC_ADDR_0_HIGH, 0x0000,
12735 0x00000000, 0x0000ffff },
12736 { MAC_ADDR_0_LOW, 0x0000,
12737 0x00000000, 0xffffffff },
12738 { MAC_RX_MTU_SIZE, 0x0000,
12739 0x00000000, 0x0000ffff },
12740 { MAC_TX_MODE, 0x0000,
12741 0x00000000, 0x00000070 },
12742 { MAC_TX_LENGTHS, 0x0000,
12743 0x00000000, 0x00003fff },
12744 { MAC_RX_MODE, TG3_FL_NOT_5705,
12745 0x00000000, 0x000007fc },
12746 { MAC_RX_MODE, TG3_FL_5705,
12747 0x00000000, 0x000007dc },
12748 { MAC_HASH_REG_0, 0x0000,
12749 0x00000000, 0xffffffff },
12750 { MAC_HASH_REG_1, 0x0000,
12751 0x00000000, 0xffffffff },
12752 { MAC_HASH_REG_2, 0x0000,
12753 0x00000000, 0xffffffff },
12754 { MAC_HASH_REG_3, 0x0000,
12755 0x00000000, 0xffffffff },
12756
12757 /* Receive Data and Receive BD Initiator Control Registers. */
12758 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12759 0x00000000, 0xffffffff },
12760 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12761 0x00000000, 0xffffffff },
12762 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12763 0x00000000, 0x00000003 },
12764 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12765 0x00000000, 0xffffffff },
12766 { RCVDBDI_STD_BD+0, 0x0000,
12767 0x00000000, 0xffffffff },
12768 { RCVDBDI_STD_BD+4, 0x0000,
12769 0x00000000, 0xffffffff },
12770 { RCVDBDI_STD_BD+8, 0x0000,
12771 0x00000000, 0xffff0002 },
12772 { RCVDBDI_STD_BD+0xc, 0x0000,
12773 0x00000000, 0xffffffff },
12774
12775 /* Receive BD Initiator Control Registers. */
12776 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12777 0x00000000, 0xffffffff },
12778 { RCVBDI_STD_THRESH, TG3_FL_5705,
12779 0x00000000, 0x000003ff },
12780 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12781 0x00000000, 0xffffffff },
12782
12783 /* Host Coalescing Control Registers. */
12784 { HOSTCC_MODE, TG3_FL_NOT_5705,
12785 0x00000000, 0x00000004 },
12786 { HOSTCC_MODE, TG3_FL_5705,
12787 0x00000000, 0x000000f6 },
12788 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12789 0x00000000, 0xffffffff },
12790 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12791 0x00000000, 0x000003ff },
12792 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12793 0x00000000, 0xffffffff },
12794 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12795 0x00000000, 0x000003ff },
12796 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12797 0x00000000, 0xffffffff },
12798 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12799 0x00000000, 0x000000ff },
12800 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12801 0x00000000, 0xffffffff },
12802 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12803 0x00000000, 0x000000ff },
12804 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12805 0x00000000, 0xffffffff },
12806 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12807 0x00000000, 0xffffffff },
12808 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12809 0x00000000, 0xffffffff },
12810 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12811 0x00000000, 0x000000ff },
12812 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12813 0x00000000, 0xffffffff },
12814 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12815 0x00000000, 0x000000ff },
12816 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12817 0x00000000, 0xffffffff },
12818 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12819 0x00000000, 0xffffffff },
12820 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12821 0x00000000, 0xffffffff },
12822 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12823 0x00000000, 0xffffffff },
12824 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12825 0x00000000, 0xffffffff },
12826 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12827 0xffffffff, 0x00000000 },
12828 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12829 0xffffffff, 0x00000000 },
12830
12831 /* Buffer Manager Control Registers. */
12832 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12833 0x00000000, 0x007fff80 },
12834 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12835 0x00000000, 0x007fffff },
12836 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12837 0x00000000, 0x0000003f },
12838 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12839 0x00000000, 0x000001ff },
12840 { BUFMGR_MB_HIGH_WATER, 0x0000,
12841 0x00000000, 0x000001ff },
12842 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12843 0xffffffff, 0x00000000 },
12844 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12845 0xffffffff, 0x00000000 },
12846
12847 /* Mailbox Registers */
12848 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12849 0x00000000, 0x000001ff },
12850 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12851 0x00000000, 0x000001ff },
12852 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12853 0x00000000, 0x000007ff },
12854 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12855 0x00000000, 0x000001ff },
12856
12857 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12858 };
12859
12860 is_5705 = is_5750 = 0;
12861 if (tg3_flag(tp, 5705_PLUS)) {
12862 is_5705 = 1;
12863 if (tg3_flag(tp, 5750_PLUS))
12864 is_5750 = 1;
12865 }
12866
12867 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12868 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12869 continue;
12870
12871 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12872 continue;
12873
12874 if (tg3_flag(tp, IS_5788) &&
12875 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12876 continue;
12877
12878 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12879 continue;
12880
12881 offset = (u32) reg_tbl[i].offset;
12882 read_mask = reg_tbl[i].read_mask;
12883 write_mask = reg_tbl[i].write_mask;
12884
12885 /* Save the original register content */
12886 save_val = tr32(offset);
12887
12888 /* Determine the read-only value. */
12889 read_val = save_val & read_mask;
12890
12891 /* Write zero to the register, then make sure the read-only bits
12892 * are not changed and the read/write bits are all zeros.
12893 */
12894 tw32(offset, 0);
12895
12896 val = tr32(offset);
12897
12898 /* Test the read-only and read/write bits. */
12899 if (((val & read_mask) != read_val) || (val & write_mask))
12900 goto out;
12901
12902 /* Write ones to all the bits defined by RdMask and WrMask, then
12903 * make sure the read-only bits are not changed and the
12904 * read/write bits are all ones.
12905 */
12906 tw32(offset, read_mask | write_mask);
12907
12908 val = tr32(offset);
12909
12910 /* Test the read-only bits. */
12911 if ((val & read_mask) != read_val)
12912 goto out;
12913
12914 /* Test the read/write bits. */
12915 if ((val & write_mask) != write_mask)
12916 goto out;
12917
12918 tw32(offset, save_val);
12919 }
12920
12921 return 0;
12922
12923 out:
12924 if (netif_msg_hw(tp))
12925 netdev_err(tp->dev,
12926 "Register test failed at offset %x\n", offset);
12927 tw32(offset, save_val);
12928 return -EIO;
12929 }
12930
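/* Write each test pattern to every word of [offset, offset + len) in
 * the chip's internal SRAM and read it back through the memory
 * window, failing on the first mismatch.
 */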
12931 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12932 {
12933 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12934 int i;
12935 u32 j;
12936
12937 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12938 for (j = 0; j < len; j += 4) {
12939 u32 val;
12940
12941 tg3_write_mem(tp, offset + j, test_pattern[i]);
12942 tg3_read_mem(tp, offset + j, &val);
12943 if (val != test_pattern[i])
12944 return -EIO;
12945 }
12946 }
12947 return 0;
12948 }
12949
12950 static int tg3_test_memory(struct tg3 *tp)
12951 {
12952 static struct mem_entry {
12953 u32 offset;
12954 u32 len;
12955 } mem_tbl_570x[] = {
12956 { 0x00000000, 0x00b50},
12957 { 0x00002000, 0x1c000},
12958 { 0xffffffff, 0x00000}
12959 }, mem_tbl_5705[] = {
12960 { 0x00000100, 0x0000c},
12961 { 0x00000200, 0x00008},
12962 { 0x00004000, 0x00800},
12963 { 0x00006000, 0x01000},
12964 { 0x00008000, 0x02000},
12965 { 0x00010000, 0x0e000},
12966 { 0xffffffff, 0x00000}
12967 }, mem_tbl_5755[] = {
12968 { 0x00000200, 0x00008},
12969 { 0x00004000, 0x00800},
12970 { 0x00006000, 0x00800},
12971 { 0x00008000, 0x02000},
12972 { 0x00010000, 0x0c000},
12973 { 0xffffffff, 0x00000}
12974 }, mem_tbl_5906[] = {
12975 { 0x00000200, 0x00008},
12976 { 0x00004000, 0x00400},
12977 { 0x00006000, 0x00400},
12978 { 0x00008000, 0x01000},
12979 { 0x00010000, 0x01000},
12980 { 0xffffffff, 0x00000}
12981 }, mem_tbl_5717[] = {
12982 { 0x00000200, 0x00008},
12983 { 0x00010000, 0x0a000},
12984 { 0x00020000, 0x13c00},
12985 { 0xffffffff, 0x00000}
12986 }, mem_tbl_57765[] = {
12987 { 0x00000200, 0x00008},
12988 { 0x00004000, 0x00800},
12989 { 0x00006000, 0x09800},
12990 { 0x00010000, 0x0a000},
12991 { 0xffffffff, 0x00000}
12992 };
12993 struct mem_entry *mem_tbl;
12994 int err = 0;
12995 int i;
12996
12997 if (tg3_flag(tp, 5717_PLUS))
12998 mem_tbl = mem_tbl_5717;
12999 else if (tg3_flag(tp, 57765_CLASS) ||
13000 tg3_asic_rev(tp) == ASIC_REV_5762)
13001 mem_tbl = mem_tbl_57765;
13002 else if (tg3_flag(tp, 5755_PLUS))
13003 mem_tbl = mem_tbl_5755;
13004 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13005 mem_tbl = mem_tbl_5906;
13006 else if (tg3_flag(tp, 5705_PLUS))
13007 mem_tbl = mem_tbl_5705;
13008 else
13009 mem_tbl = mem_tbl_570x;
13010
13011 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13012 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13013 if (err)
13014 break;
13015 }
13016
13017 return err;
13018 }
13019
13020 #define TG3_TSO_MSS 500
13021
13022 #define TG3_TSO_IP_HDR_LEN 20
13023 #define TG3_TSO_TCP_HDR_LEN 20
13024 #define TG3_TSO_TCP_OPT_LEN 12
13025
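/* Canned Ethernet type + IPv4 + TCP header template for the TSO
 * loopback test frame. The IP total length is patched at run time,
 * and the TCP checksum field is cleared when hardware TSO computes
 * it.
 */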
13026 static const u8 tg3_tso_header[] = {
13027 0x08, 0x00,
13028 0x45, 0x00, 0x00, 0x00,
13029 0x00, 0x00, 0x40, 0x00,
13030 0x40, 0x06, 0x00, 0x00,
13031 0x0a, 0x00, 0x00, 0x01,
13032 0x0a, 0x00, 0x00, 0x02,
13033 0x0d, 0x00, 0xe0, 0x00,
13034 0x00, 0x00, 0x01, 0x00,
13035 0x00, 0x00, 0x02, 0x00,
13036 0x80, 0x10, 0x10, 0x00,
13037 0x14, 0x09, 0x00, 0x00,
13038 0x01, 0x01, 0x08, 0x0a,
13039 0x11, 0x11, 0x11, 0x11,
13040 0x11, 0x11, 0x11, 0x11,
13041 };
13042
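/* Send one test frame (optionally a multi-segment TSO frame) on
 * tnapi, poll the status block until it is consumed and looped back
 * on rnapi, then check the rx descriptor flags and the payload byte
 * pattern. Returns 0 on success or a negative errno.
 */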
13043 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13044 {
13045 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13046 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13047 u32 budget;
13048 struct sk_buff *skb;
13049 u8 *tx_data, *rx_data;
13050 dma_addr_t map;
13051 int num_pkts, tx_len, rx_len, i, err;
13052 struct tg3_rx_buffer_desc *desc;
13053 struct tg3_napi *tnapi, *rnapi;
13054 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13055
13056 tnapi = &tp->napi[0];
13057 rnapi = &tp->napi[0];
13058 if (tp->irq_cnt > 1) {
13059 if (tg3_flag(tp, ENABLE_RSS))
13060 rnapi = &tp->napi[1];
13061 if (tg3_flag(tp, ENABLE_TSS))
13062 tnapi = &tp->napi[1];
13063 }
13064 coal_now = tnapi->coal_now | rnapi->coal_now;
13065
13066 err = -EIO;
13067
13068 tx_len = pktsz;
13069 skb = netdev_alloc_skb(tp->dev, tx_len);
13070 if (!skb)
13071 return -ENOMEM;
13072
13073 tx_data = skb_put(skb, tx_len);
13074 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13075 memset(tx_data + ETH_ALEN, 0x0, 8);
13076
13077 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13078
13079 if (tso_loopback) {
13080 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13081
13082 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13083 TG3_TSO_TCP_OPT_LEN;
13084
13085 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13086 sizeof(tg3_tso_header));
13087 mss = TG3_TSO_MSS;
13088
13089 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13090 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13091
13092 /* Set the total length field in the IP header */
13093 iph->tot_len = htons((u16)(mss + hdr_len));
13094
13095 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13096 TXD_FLAG_CPU_POST_DMA);
13097
13098 if (tg3_flag(tp, HW_TSO_1) ||
13099 tg3_flag(tp, HW_TSO_2) ||
13100 tg3_flag(tp, HW_TSO_3)) {
13101 struct tcphdr *th;
13102 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13103 th = (struct tcphdr *)&tx_data[val];
13104 th->check = 0;
13105 } else
13106 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13107
13108 if (tg3_flag(tp, HW_TSO_3)) {
13109 mss |= (hdr_len & 0xc) << 12;
13110 if (hdr_len & 0x10)
13111 base_flags |= 0x00000010;
13112 base_flags |= (hdr_len & 0x3e0) << 5;
13113 } else if (tg3_flag(tp, HW_TSO_2))
13114 mss |= hdr_len << 9;
13115 else if (tg3_flag(tp, HW_TSO_1) ||
13116 tg3_asic_rev(tp) == ASIC_REV_5705) {
13117 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13118 } else {
13119 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13120 }
13121
13122 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13123 } else {
13124 num_pkts = 1;
13125 data_off = ETH_HLEN;
13126
13127 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13128 tx_len > VLAN_ETH_FRAME_LEN)
13129 base_flags |= TXD_FLAG_JMB_PKT;
13130 }
13131
13132 for (i = data_off; i < tx_len; i++)
13133 tx_data[i] = (u8) (i & 0xff);
13134
13135 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13136 if (pci_dma_mapping_error(tp->pdev, map)) {
13137 dev_kfree_skb(skb);
13138 return -EIO;
13139 }
13140
13141 val = tnapi->tx_prod;
13142 tnapi->tx_buffers[val].skb = skb;
13143 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13144
13145 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13146 rnapi->coal_now);
13147
13148 udelay(10);
13149
13150 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13151
13152 budget = tg3_tx_avail(tnapi);
13153 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13154 base_flags | TXD_FLAG_END, mss, 0)) {
13155 tnapi->tx_buffers[val].skb = NULL;
13156 dev_kfree_skb(skb);
13157 return -EIO;
13158 }
13159
13160 tnapi->tx_prod++;
13161
13162 /* Sync BD data before updating mailbox */
13163 wmb();
13164
13165 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13166 tr32_mailbox(tnapi->prodmbox);
13167
13168 udelay(10);
13169
13170 /* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
13171 for (i = 0; i < 35; i++) {
13172 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13173 coal_now);
13174
13175 udelay(10);
13176
13177 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13178 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13179 if ((tx_idx == tnapi->tx_prod) &&
13180 (rx_idx == (rx_start_idx + num_pkts)))
13181 break;
13182 }
13183
13184 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13185 dev_kfree_skb(skb);
13186
13187 if (tx_idx != tnapi->tx_prod)
13188 goto out;
13189
13190 if (rx_idx != rx_start_idx + num_pkts)
13191 goto out;
13192
13193 val = data_off;
13194 while (rx_idx != rx_start_idx) {
13195 desc = &rnapi->rx_rcb[rx_start_idx++];
13196 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13197 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13198
13199 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13200 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13201 goto out;
13202
13203 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13204 - ETH_FCS_LEN;
13205
13206 if (!tso_loopback) {
13207 if (rx_len != tx_len)
13208 goto out;
13209
13210 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13211 if (opaque_key != RXD_OPAQUE_RING_STD)
13212 goto out;
13213 } else {
13214 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13215 goto out;
13216 }
13217 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13218 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13219 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13220 goto out;
13221 }
13222
13223 if (opaque_key == RXD_OPAQUE_RING_STD) {
13224 rx_data = tpr->rx_std_buffers[desc_idx].data;
13225 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13226 mapping);
13227 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13228 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13229 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13230 mapping);
13231 } else
13232 goto out;
13233
13234 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13235 PCI_DMA_FROMDEVICE);
13236
13237 rx_data += TG3_RX_OFFSET(tp);
13238 for (i = data_off; i < rx_len; i++, val++) {
13239 if (*(rx_data + i) != (u8) (val & 0xff))
13240 goto out;
13241 }
13242 }
13243
13244 err = 0;
13245
13246 /* tg3_free_rings will unmap and free the rx_data */
13247 out:
13248 return err;
13249 }
13250
13251 #define TG3_STD_LOOPBACK_FAILED 1
13252 #define TG3_JMB_LOOPBACK_FAILED 2
13253 #define TG3_TSO_LOOPBACK_FAILED 4
13254 #define TG3_LOOPBACK_FAILED \
13255 (TG3_STD_LOOPBACK_FAILED | \
13256 TG3_JMB_LOOPBACK_FAILED | \
13257 TG3_TSO_LOOPBACK_FAILED)
13258
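/* Run the loopback matrix: MAC, PHY, and optionally external
 * loopback, each with standard, TSO, and jumbo frames where the
 * hardware supports them. Failures accumulate as the
 * TG3_*_LOOPBACK_FAILED bits in the per-test data[] slots.
 */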
13259 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13260 {
13261 int err = -EIO;
13262 u32 eee_cap;
13263 u32 jmb_pkt_sz = 9000;
13264
13265 if (tp->dma_limit)
13266 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13267
13268 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13269 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13270
13271 if (!netif_running(tp->dev)) {
13272 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13273 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13274 if (do_extlpbk)
13275 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13276 goto done;
13277 }
13278
13279 err = tg3_reset_hw(tp, true);
13280 if (err) {
13281 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13282 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13283 if (do_extlpbk)
13284 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13285 goto done;
13286 }
13287
13288 if (tg3_flag(tp, ENABLE_RSS)) {
13289 int i;
13290
13291 /* Reroute all rx packets to the 1st queue */
13292 for (i = MAC_RSS_INDIR_TBL_0;
13293 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13294 tw32(i, 0x0);
13295 }
13296
13297 /* HW errata - MAC loopback fails in some cases on 5780.
13298 * Normal traffic and PHY loopback are not affected by the
13299 * errata. Also, the MAC loopback test is deprecated for
13300 * all newer ASIC revisions.
13301 */
13302 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13303 !tg3_flag(tp, CPMU_PRESENT)) {
13304 tg3_mac_loopback(tp, true);
13305
13306 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13307 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13308
13309 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13310 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13311 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13312
13313 tg3_mac_loopback(tp, false);
13314 }
13315
13316 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13317 !tg3_flag(tp, USE_PHYLIB)) {
13318 int i;
13319
13320 tg3_phy_lpbk_set(tp, 0, false);
13321
13322 /* Wait for link */
13323 for (i = 0; i < 100; i++) {
13324 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13325 break;
13326 mdelay(1);
13327 }
13328
13329 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13330 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13331 if (tg3_flag(tp, TSO_CAPABLE) &&
13332 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13333 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13334 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13335 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13336 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13337
13338 if (do_extlpbk) {
13339 tg3_phy_lpbk_set(tp, 0, true);
13340
13341 /* All link indications report up, but the hardware
13342 * isn't really ready for about 20 msec. Double it
13343 * to be sure.
13344 */
13345 mdelay(40);
13346
13347 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13348 data[TG3_EXT_LOOPB_TEST] |=
13349 TG3_STD_LOOPBACK_FAILED;
13350 if (tg3_flag(tp, TSO_CAPABLE) &&
13351 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13352 data[TG3_EXT_LOOPB_TEST] |=
13353 TG3_TSO_LOOPBACK_FAILED;
13354 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13355 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13356 data[TG3_EXT_LOOPB_TEST] |=
13357 TG3_JMB_LOOPBACK_FAILED;
13358 }
13359
13360 /* Re-enable gphy autopowerdown. */
13361 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13362 tg3_phy_toggle_apd(tp, true);
13363 }
13364
13365 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13366 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13367
13368 done:
13369 tp->phy_flags |= eee_cap;
13370
13371 return err;
13372 }
13373
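/* ETHTOOL_TEST entry point (e.g. "ethtool -t ethX offline"). Offline
 * testing halts the chip, runs the register, memory, and loopback
 * tests with the on-chip CPUs stopped, then restarts the hardware.
 */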
13374 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13375 u64 *data)
13376 {
13377 struct tg3 *tp = netdev_priv(dev);
13378 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13379
13380 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13381 tg3_power_up(tp)) {
13382 etest->flags |= ETH_TEST_FL_FAILED;
13383 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13384 return;
13385 }
13386
13387 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13388
13389 if (tg3_test_nvram(tp) != 0) {
13390 etest->flags |= ETH_TEST_FL_FAILED;
13391 data[TG3_NVRAM_TEST] = 1;
13392 }
13393 if (!doextlpbk && tg3_test_link(tp)) {
13394 etest->flags |= ETH_TEST_FL_FAILED;
13395 data[TG3_LINK_TEST] = 1;
13396 }
13397 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13398 int err, err2 = 0, irq_sync = 0;
13399
13400 if (netif_running(dev)) {
13401 tg3_phy_stop(tp);
13402 tg3_netif_stop(tp);
13403 irq_sync = 1;
13404 }
13405
13406 tg3_full_lock(tp, irq_sync);
13407 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13408 err = tg3_nvram_lock(tp);
13409 tg3_halt_cpu(tp, RX_CPU_BASE);
13410 if (!tg3_flag(tp, 5705_PLUS))
13411 tg3_halt_cpu(tp, TX_CPU_BASE);
13412 if (!err)
13413 tg3_nvram_unlock(tp);
13414
13415 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13416 tg3_phy_reset(tp);
13417
13418 if (tg3_test_registers(tp) != 0) {
13419 etest->flags |= ETH_TEST_FL_FAILED;
13420 data[TG3_REGISTER_TEST] = 1;
13421 }
13422
13423 if (tg3_test_memory(tp) != 0) {
13424 etest->flags |= ETH_TEST_FL_FAILED;
13425 data[TG3_MEMORY_TEST] = 1;
13426 }
13427
13428 if (doextlpbk)
13429 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13430
13431 if (tg3_test_loopback(tp, data, doextlpbk))
13432 etest->flags |= ETH_TEST_FL_FAILED;
13433
13434 tg3_full_unlock(tp);
13435
13436 if (tg3_test_interrupt(tp) != 0) {
13437 etest->flags |= ETH_TEST_FL_FAILED;
13438 data[TG3_INTERRUPT_TEST] = 1;
13439 }
13440
13441 tg3_full_lock(tp, 0);
13442
13443 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13444 if (netif_running(dev)) {
13445 tg3_flag_set(tp, INIT_COMPLETE);
13446 err2 = tg3_restart_hw(tp, true);
13447 if (!err2)
13448 tg3_netif_start(tp);
13449 }
13450
13451 tg3_full_unlock(tp);
13452
13453 if (irq_sync && !err2)
13454 tg3_phy_start(tp);
13455 }
13456 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13457 tg3_power_down(tp);
13459 }
13460
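/* SIOCSHWTSTAMP handler: translate the requested hwtstamp_config
 * into TG3_RX_PTP_CTL filter bits and switch tx timestamping on or
 * off. Modes the hardware cannot provide are rejected with -ERANGE.
 */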
13461 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13462 struct ifreq *ifr, int cmd)
13463 {
13464 struct tg3 *tp = netdev_priv(dev);
13465 struct hwtstamp_config stmpconf;
13466
13467 if (!tg3_flag(tp, PTP_CAPABLE))
13468 return -EINVAL;
13469
13470 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13471 return -EFAULT;
13472
13473 if (stmpconf.flags)
13474 return -EINVAL;
13475
13476 switch (stmpconf.tx_type) {
13477 case HWTSTAMP_TX_ON:
13478 tg3_flag_set(tp, TX_TSTAMP_EN);
13479 break;
13480 case HWTSTAMP_TX_OFF:
13481 tg3_flag_clear(tp, TX_TSTAMP_EN);
13482 break;
13483 default:
13484 return -ERANGE;
13485 }
13486
13487 switch (stmpconf.rx_filter) {
13488 case HWTSTAMP_FILTER_NONE:
13489 tp->rxptpctl = 0;
13490 break;
13491 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13492 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13493 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13494 break;
13495 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13496 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13497 TG3_RX_PTP_CTL_SYNC_EVNT;
13498 break;
13499 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13500 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13501 TG3_RX_PTP_CTL_DELAY_REQ;
13502 break;
13503 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13504 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13505 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13506 break;
13507 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13508 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13509 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13510 break;
13511 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13512 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13513 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13514 break;
13515 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13516 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13517 TG3_RX_PTP_CTL_SYNC_EVNT;
13518 break;
13519 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13520 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13521 TG3_RX_PTP_CTL_SYNC_EVNT;
13522 break;
13523 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13524 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13525 TG3_RX_PTP_CTL_SYNC_EVNT;
13526 break;
13527 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13528 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13529 TG3_RX_PTP_CTL_DELAY_REQ;
13530 break;
13531 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13532 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13533 TG3_RX_PTP_CTL_DELAY_REQ;
13534 break;
13535 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13536 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13537 TG3_RX_PTP_CTL_DELAY_REQ;
13538 break;
13539 default:
13540 return -ERANGE;
13541 }
13542
13543 if (netif_running(dev) && tp->rxptpctl)
13544 tw32(TG3_RX_PTP_CTL,
13545 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13546
13547 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13548 -EFAULT : 0;
13549 }
13550
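/* Generic ioctl handler: MII register access through SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG (forwarded to phylib when it manages the
 * PHY), plus hardware timestamping via SIOCSHWTSTAMP.
 */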
13551 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13552 {
13553 struct mii_ioctl_data *data = if_mii(ifr);
13554 struct tg3 *tp = netdev_priv(dev);
13555 int err;
13556
13557 if (tg3_flag(tp, USE_PHYLIB)) {
13558 struct phy_device *phydev;
13559 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13560 return -EAGAIN;
13561 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13562 return phy_mii_ioctl(phydev, ifr, cmd);
13563 }
13564
13565 switch (cmd) {
13566 case SIOCGMIIPHY:
13567 data->phy_id = tp->phy_addr;
13568
13569 /* fall through */
13570 case SIOCGMIIREG: {
13571 u32 mii_regval;
13572
13573 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13574 break; /* We have no PHY */
13575
13576 if (!netif_running(dev))
13577 return -EAGAIN;
13578
13579 spin_lock_bh(&tp->lock);
13580 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13581 data->reg_num & 0x1f, &mii_regval);
13582 spin_unlock_bh(&tp->lock);
13583
13584 data->val_out = mii_regval;
13585
13586 return err;
13587 }
13588
13589 case SIOCSMIIREG:
13590 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13591 break; /* We have no PHY */
13592
13593 if (!netif_running(dev))
13594 return -EAGAIN;
13595
13596 spin_lock_bh(&tp->lock);
13597 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13598 data->reg_num & 0x1f, data->val_in);
13599 spin_unlock_bh(&tp->lock);
13600
13601 return err;
13602
13603 case SIOCSHWTSTAMP:
13604 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13605
13606 default:
13607 /* do nothing */
13608 break;
13609 }
13610 return -EOPNOTSUPP;
13611 }
13612
13613 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13614 {
13615 struct tg3 *tp = netdev_priv(dev);
13616
13617 memcpy(ec, &tp->coal, sizeof(*ec));
13618 return 0;
13619 }
13620
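/* Interrupt coalescing setup (e.g. "ethtool -C ethX rx-usecs 20
 * rx-frames 5"). Only pre-5705 parts expose the per-irq tick and
 * statistics-block coalescing knobs; on 5705+ those limits stay
 * zero, so any request to use them fails the range check.
 */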
13621 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13622 {
13623 struct tg3 *tp = netdev_priv(dev);
13624 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13625 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13626
13627 if (!tg3_flag(tp, 5705_PLUS)) {
13628 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13629 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13630 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13631 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13632 }
13633
13634 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13635 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13636 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13637 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13638 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13639 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13640 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13641 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13642 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13643 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13644 return -EINVAL;
13645
13646 /* No rx interrupts will be generated if both are zero */
13647 if ((ec->rx_coalesce_usecs == 0) &&
13648 (ec->rx_max_coalesced_frames == 0))
13649 return -EINVAL;
13650
13651 /* No tx interrupts will be generated if both are zero */
13652 if ((ec->tx_coalesce_usecs == 0) &&
13653 (ec->tx_max_coalesced_frames == 0))
13654 return -EINVAL;
13655
13656 /* Only copy relevant parameters, ignore all others. */
13657 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13658 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13659 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13660 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13661 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13662 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13663 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13664 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13665 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13666
13667 if (netif_running(dev)) {
13668 tg3_full_lock(tp, 0);
13669 __tg3_set_coalesce(tp, &tp->coal);
13670 tg3_full_unlock(tp);
13671 }
13672 return 0;
13673 }
13674
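/* ethtool entry points for the driver, hooked up to the net_device
 * at probe time.
 */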
13675 static const struct ethtool_ops tg3_ethtool_ops = {
13676 .get_settings = tg3_get_settings,
13677 .set_settings = tg3_set_settings,
13678 .get_drvinfo = tg3_get_drvinfo,
13679 .get_regs_len = tg3_get_regs_len,
13680 .get_regs = tg3_get_regs,
13681 .get_wol = tg3_get_wol,
13682 .set_wol = tg3_set_wol,
13683 .get_msglevel = tg3_get_msglevel,
13684 .set_msglevel = tg3_set_msglevel,
13685 .nway_reset = tg3_nway_reset,
13686 .get_link = ethtool_op_get_link,
13687 .get_eeprom_len = tg3_get_eeprom_len,
13688 .get_eeprom = tg3_get_eeprom,
13689 .set_eeprom = tg3_set_eeprom,
13690 .get_ringparam = tg3_get_ringparam,
13691 .set_ringparam = tg3_set_ringparam,
13692 .get_pauseparam = tg3_get_pauseparam,
13693 .set_pauseparam = tg3_set_pauseparam,
13694 .self_test = tg3_self_test,
13695 .get_strings = tg3_get_strings,
13696 .set_phys_id = tg3_set_phys_id,
13697 .get_ethtool_stats = tg3_get_ethtool_stats,
13698 .get_coalesce = tg3_get_coalesce,
13699 .set_coalesce = tg3_set_coalesce,
13700 .get_sset_count = tg3_get_sset_count,
13701 .get_rxnfc = tg3_get_rxnfc,
13702 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13703 .get_rxfh_indir = tg3_get_rxfh_indir,
13704 .set_rxfh_indir = tg3_set_rxfh_indir,
13705 .get_channels = tg3_get_channels,
13706 .set_channels = tg3_set_channels,
13707 .get_ts_info = tg3_get_ts_info,
13708 };
13709
13710 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13711 struct rtnl_link_stats64 *stats)
13712 {
13713 struct tg3 *tp = netdev_priv(dev);
13714
13715 spin_lock_bh(&tp->lock);
13716 if (!tp->hw_stats) {
13717 spin_unlock_bh(&tp->lock);
13718 return &tp->net_stats_prev;
13719 }
13720
13721 tg3_get_nstats(tp, stats);
13722 spin_unlock_bh(&tp->lock);
13723
13724 return stats;
13725 }
13726
13727 static void tg3_set_rx_mode(struct net_device *dev)
13728 {
13729 struct tg3 *tp = netdev_priv(dev);
13730
13731 if (!netif_running(dev))
13732 return;
13733
13734 tg3_full_lock(tp, 0);
13735 __tg3_set_rx_mode(dev);
13736 tg3_full_unlock(tp);
13737 }
13738
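/* Enable or disable the jumbo rx ring to match the new MTU. On
 * 5780-class chips TSO cannot be used with jumbo frames, so
 * TSO_CAPABLE is toggled instead and netdev_update_features()
 * re-evaluates the advertised feature flags.
 */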
13739 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13740 int new_mtu)
13741 {
13742 dev->mtu = new_mtu;
13743
13744 if (new_mtu > ETH_DATA_LEN) {
13745 if (tg3_flag(tp, 5780_CLASS)) {
13746 netdev_update_features(dev);
13747 tg3_flag_clear(tp, TSO_CAPABLE);
13748 } else {
13749 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13750 }
13751 } else {
13752 if (tg3_flag(tp, 5780_CLASS)) {
13753 tg3_flag_set(tp, TSO_CAPABLE);
13754 netdev_update_features(dev);
13755 }
13756 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13757 }
13758 }
13759
13760 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13761 {
13762 struct tg3 *tp = netdev_priv(dev);
13763 int err;
13764 bool reset_phy = false;
13765
13766 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13767 return -EINVAL;
13768
13769 if (!netif_running(dev)) {
13770 		/* We'll just catch it later when the
13771 		 * device is brought up.
13772 		 */
13773 tg3_set_mtu(dev, tp, new_mtu);
13774 return 0;
13775 }
13776
13777 tg3_phy_stop(tp);
13778
13779 tg3_netif_stop(tp);
13780
13781 tg3_set_mtu(dev, tp, new_mtu);
13782
13783 tg3_full_lock(tp, 1);
13784
13785 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13786
13787 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
13788 	 * breaks all requests into 256-byte chunks.
13789 	 */
13790 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13791 reset_phy = true;
13792
13793 err = tg3_restart_hw(tp, reset_phy);
13794
13795 if (!err)
13796 tg3_netif_start(tp);
13797
13798 tg3_full_unlock(tp);
13799
13800 if (!err)
13801 tg3_phy_start(tp);
13802
13803 return err;
13804 }
13805
13806 static const struct net_device_ops tg3_netdev_ops = {
13807 .ndo_open = tg3_open,
13808 .ndo_stop = tg3_close,
13809 .ndo_start_xmit = tg3_start_xmit,
13810 .ndo_get_stats64 = tg3_get_stats64,
13811 .ndo_validate_addr = eth_validate_addr,
13812 .ndo_set_rx_mode = tg3_set_rx_mode,
13813 .ndo_set_mac_address = tg3_set_mac_addr,
13814 .ndo_do_ioctl = tg3_ioctl,
13815 .ndo_tx_timeout = tg3_tx_timeout,
13816 .ndo_change_mtu = tg3_change_mtu,
13817 .ndo_fix_features = tg3_fix_features,
13818 .ndo_set_features = tg3_set_features,
13819 #ifdef CONFIG_NET_POLL_CONTROLLER
13820 .ndo_poll_controller = tg3_poll_controller,
13821 #endif
13822 };
13823
13824 static void tg3_get_eeprom_size(struct tg3 *tp)
13825 {
13826 u32 cursize, val, magic;
13827
13828 tp->nvram_size = EEPROM_CHIP_SIZE;
13829
13830 if (tg3_nvram_read(tp, 0, &magic) != 0)
13831 return;
13832
13833 if ((magic != TG3_EEPROM_MAGIC) &&
13834 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13835 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13836 return;
13837
13838 /*
13839 * Size the chip by reading offsets at increasing powers of two.
13840 * When we encounter our validation signature, we know the addressing
13841 * has wrapped around, and thus have our chip size.
13842 */
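	/* E.g. on a 1 KB part the reads at 0x10, 0x20, 0x40, ... return
	 * other data until cursize reaches 0x400, where the access wraps
	 * back to offset 0 and yields the magic word read above.
	 */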
13843 cursize = 0x10;
13844
13845 while (cursize < tp->nvram_size) {
13846 if (tg3_nvram_read(tp, cursize, &val) != 0)
13847 return;
13848
13849 if (val == magic)
13850 break;
13851
13852 cursize <<= 1;
13853 }
13854
13855 tp->nvram_size = cursize;
13856 }
13857
13858 static void tg3_get_nvram_size(struct tg3 *tp)
13859 {
13860 u32 val;
13861
13862 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13863 return;
13864
13865 /* Selfboot format */
13866 if (val != TG3_EEPROM_MAGIC) {
13867 tg3_get_eeprom_size(tp);
13868 return;
13869 }
13870
13871 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13872 if (val != 0) {
13873 /* This is confusing. We want to operate on the
13874 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13875 * call will read from NVRAM and byteswap the data
13876 * according to the byteswapping settings for all
13877 * other register accesses. This ensures the data we
13878 * want will always reside in the lower 16-bits.
13879 * However, the data in NVRAM is in LE format, which
13880 * means the data from the NVRAM read will always be
13881 * opposite the endianness of the CPU. The 16-bit
13882 * byteswap then brings the data to CPU endianness.
13883 */
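			/* Illustration: a 512 KB part stores 512 (0x0200)
			 * at 0xf2; after tg3_nvram_read() the low 16 bits
			 * of val hold 0x0002, and swab16(0x0002) == 0x0200,
			 * giving 0x0200 * 1024 == 512 KB.
			 */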
13884 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13885 return;
13886 }
13887 }
13888 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13889 }
13890
13891 static void tg3_get_nvram_info(struct tg3 *tp)
13892 {
13893 u32 nvcfg1;
13894
13895 nvcfg1 = tr32(NVRAM_CFG1);
13896 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13897 tg3_flag_set(tp, FLASH);
13898 } else {
13899 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13900 tw32(NVRAM_CFG1, nvcfg1);
13901 }
13902
13903 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13904 tg3_flag(tp, 5780_CLASS)) {
13905 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13906 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13907 tp->nvram_jedecnum = JEDEC_ATMEL;
13908 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13909 tg3_flag_set(tp, NVRAM_BUFFERED);
13910 break;
13911 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13912 tp->nvram_jedecnum = JEDEC_ATMEL;
13913 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13914 break;
13915 case FLASH_VENDOR_ATMEL_EEPROM:
13916 tp->nvram_jedecnum = JEDEC_ATMEL;
13917 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13918 tg3_flag_set(tp, NVRAM_BUFFERED);
13919 break;
13920 case FLASH_VENDOR_ST:
13921 tp->nvram_jedecnum = JEDEC_ST;
13922 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13923 tg3_flag_set(tp, NVRAM_BUFFERED);
13924 break;
13925 case FLASH_VENDOR_SAIFUN:
13926 tp->nvram_jedecnum = JEDEC_SAIFUN;
13927 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13928 break;
13929 case FLASH_VENDOR_SST_SMALL:
13930 case FLASH_VENDOR_SST_LARGE:
13931 tp->nvram_jedecnum = JEDEC_SST;
13932 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13933 break;
13934 }
13935 } else {
13936 tp->nvram_jedecnum = JEDEC_ATMEL;
13937 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13938 tg3_flag_set(tp, NVRAM_BUFFERED);
13939 }
13940 }
13941
13942 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13943 {
13944 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13945 case FLASH_5752PAGE_SIZE_256:
13946 tp->nvram_pagesize = 256;
13947 break;
13948 case FLASH_5752PAGE_SIZE_512:
13949 tp->nvram_pagesize = 512;
13950 break;
13951 case FLASH_5752PAGE_SIZE_1K:
13952 tp->nvram_pagesize = 1024;
13953 break;
13954 case FLASH_5752PAGE_SIZE_2K:
13955 tp->nvram_pagesize = 2048;
13956 break;
13957 case FLASH_5752PAGE_SIZE_4K:
13958 tp->nvram_pagesize = 4096;
13959 break;
13960 case FLASH_5752PAGE_SIZE_264:
13961 tp->nvram_pagesize = 264;
13962 break;
13963 case FLASH_5752PAGE_SIZE_528:
13964 tp->nvram_pagesize = 528;
13965 break;
13966 }
13967 }
13968
13969 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13970 {
13971 u32 nvcfg1;
13972
13973 nvcfg1 = tr32(NVRAM_CFG1);
13974
13975 /* NVRAM protection for TPM */
13976 if (nvcfg1 & (1 << 27))
13977 tg3_flag_set(tp, PROTECTED_NVRAM);
13978
13979 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13980 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13981 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13982 tp->nvram_jedecnum = JEDEC_ATMEL;
13983 tg3_flag_set(tp, NVRAM_BUFFERED);
13984 break;
13985 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13986 tp->nvram_jedecnum = JEDEC_ATMEL;
13987 tg3_flag_set(tp, NVRAM_BUFFERED);
13988 tg3_flag_set(tp, FLASH);
13989 break;
13990 case FLASH_5752VENDOR_ST_M45PE10:
13991 case FLASH_5752VENDOR_ST_M45PE20:
13992 case FLASH_5752VENDOR_ST_M45PE40:
13993 tp->nvram_jedecnum = JEDEC_ST;
13994 tg3_flag_set(tp, NVRAM_BUFFERED);
13995 tg3_flag_set(tp, FLASH);
13996 break;
13997 }
13998
13999 if (tg3_flag(tp, FLASH)) {
14000 tg3_nvram_get_pagesize(tp, nvcfg1);
14001 } else {
14002 /* For eeprom, set pagesize to maximum eeprom size */
14003 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14004
14005 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14006 tw32(NVRAM_CFG1, nvcfg1);
14007 }
14008 }
14009
14010 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14011 {
14012 u32 nvcfg1, protect = 0;
14013
14014 nvcfg1 = tr32(NVRAM_CFG1);
14015
14016 /* NVRAM protection for TPM */
14017 if (nvcfg1 & (1 << 27)) {
14018 tg3_flag_set(tp, PROTECTED_NVRAM);
14019 protect = 1;
14020 }
14021
14022 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14023 switch (nvcfg1) {
14024 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14025 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14026 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14027 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14028 tp->nvram_jedecnum = JEDEC_ATMEL;
14029 tg3_flag_set(tp, NVRAM_BUFFERED);
14030 tg3_flag_set(tp, FLASH);
14031 tp->nvram_pagesize = 264;
14032 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14033 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14034 tp->nvram_size = (protect ? 0x3e200 :
14035 TG3_NVRAM_SIZE_512KB);
14036 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14037 tp->nvram_size = (protect ? 0x1f200 :
14038 TG3_NVRAM_SIZE_256KB);
14039 else
14040 tp->nvram_size = (protect ? 0x1f200 :
14041 TG3_NVRAM_SIZE_128KB);
14042 break;
14043 case FLASH_5752VENDOR_ST_M45PE10:
14044 case FLASH_5752VENDOR_ST_M45PE20:
14045 case FLASH_5752VENDOR_ST_M45PE40:
14046 tp->nvram_jedecnum = JEDEC_ST;
14047 tg3_flag_set(tp, NVRAM_BUFFERED);
14048 tg3_flag_set(tp, FLASH);
14049 tp->nvram_pagesize = 256;
14050 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14051 tp->nvram_size = (protect ?
14052 TG3_NVRAM_SIZE_64KB :
14053 TG3_NVRAM_SIZE_128KB);
14054 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14055 tp->nvram_size = (protect ?
14056 TG3_NVRAM_SIZE_64KB :
14057 TG3_NVRAM_SIZE_256KB);
14058 else
14059 tp->nvram_size = (protect ?
14060 TG3_NVRAM_SIZE_128KB :
14061 TG3_NVRAM_SIZE_512KB);
14062 break;
14063 }
14064 }
14065
14066 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14067 {
14068 u32 nvcfg1;
14069
14070 nvcfg1 = tr32(NVRAM_CFG1);
14071
14072 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14073 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14074 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14075 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14076 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14077 tp->nvram_jedecnum = JEDEC_ATMEL;
14078 tg3_flag_set(tp, NVRAM_BUFFERED);
14079 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14080
14081 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14082 tw32(NVRAM_CFG1, nvcfg1);
14083 break;
14084 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14085 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14086 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14087 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14088 tp->nvram_jedecnum = JEDEC_ATMEL;
14089 tg3_flag_set(tp, NVRAM_BUFFERED);
14090 tg3_flag_set(tp, FLASH);
14091 tp->nvram_pagesize = 264;
14092 break;
14093 case FLASH_5752VENDOR_ST_M45PE10:
14094 case FLASH_5752VENDOR_ST_M45PE20:
14095 case FLASH_5752VENDOR_ST_M45PE40:
14096 tp->nvram_jedecnum = JEDEC_ST;
14097 tg3_flag_set(tp, NVRAM_BUFFERED);
14098 tg3_flag_set(tp, FLASH);
14099 tp->nvram_pagesize = 256;
14100 break;
14101 }
14102 }
14103
14104 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14105 {
14106 u32 nvcfg1, protect = 0;
14107
14108 nvcfg1 = tr32(NVRAM_CFG1);
14109
14110 /* NVRAM protection for TPM */
14111 if (nvcfg1 & (1 << 27)) {
14112 tg3_flag_set(tp, PROTECTED_NVRAM);
14113 protect = 1;
14114 }
14115
14116 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14117 switch (nvcfg1) {
14118 case FLASH_5761VENDOR_ATMEL_ADB021D:
14119 case FLASH_5761VENDOR_ATMEL_ADB041D:
14120 case FLASH_5761VENDOR_ATMEL_ADB081D:
14121 case FLASH_5761VENDOR_ATMEL_ADB161D:
14122 case FLASH_5761VENDOR_ATMEL_MDB021D:
14123 case FLASH_5761VENDOR_ATMEL_MDB041D:
14124 case FLASH_5761VENDOR_ATMEL_MDB081D:
14125 case FLASH_5761VENDOR_ATMEL_MDB161D:
14126 tp->nvram_jedecnum = JEDEC_ATMEL;
14127 tg3_flag_set(tp, NVRAM_BUFFERED);
14128 tg3_flag_set(tp, FLASH);
14129 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14130 tp->nvram_pagesize = 256;
14131 break;
14132 case FLASH_5761VENDOR_ST_A_M45PE20:
14133 case FLASH_5761VENDOR_ST_A_M45PE40:
14134 case FLASH_5761VENDOR_ST_A_M45PE80:
14135 case FLASH_5761VENDOR_ST_A_M45PE16:
14136 case FLASH_5761VENDOR_ST_M_M45PE20:
14137 case FLASH_5761VENDOR_ST_M_M45PE40:
14138 case FLASH_5761VENDOR_ST_M_M45PE80:
14139 case FLASH_5761VENDOR_ST_M_M45PE16:
14140 tp->nvram_jedecnum = JEDEC_ST;
14141 tg3_flag_set(tp, NVRAM_BUFFERED);
14142 tg3_flag_set(tp, FLASH);
14143 tp->nvram_pagesize = 256;
14144 break;
14145 }
14146
14147 if (protect) {
14148 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14149 } else {
14150 switch (nvcfg1) {
14151 case FLASH_5761VENDOR_ATMEL_ADB161D:
14152 case FLASH_5761VENDOR_ATMEL_MDB161D:
14153 case FLASH_5761VENDOR_ST_A_M45PE16:
14154 case FLASH_5761VENDOR_ST_M_M45PE16:
14155 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14156 break;
14157 case FLASH_5761VENDOR_ATMEL_ADB081D:
14158 case FLASH_5761VENDOR_ATMEL_MDB081D:
14159 case FLASH_5761VENDOR_ST_A_M45PE80:
14160 case FLASH_5761VENDOR_ST_M_M45PE80:
14161 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14162 break;
14163 case FLASH_5761VENDOR_ATMEL_ADB041D:
14164 case FLASH_5761VENDOR_ATMEL_MDB041D:
14165 case FLASH_5761VENDOR_ST_A_M45PE40:
14166 case FLASH_5761VENDOR_ST_M_M45PE40:
14167 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14168 break;
14169 case FLASH_5761VENDOR_ATMEL_ADB021D:
14170 case FLASH_5761VENDOR_ATMEL_MDB021D:
14171 case FLASH_5761VENDOR_ST_A_M45PE20:
14172 case FLASH_5761VENDOR_ST_M_M45PE20:
14173 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14174 break;
14175 }
14176 }
14177 }
14178
14179 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14180 {
14181 tp->nvram_jedecnum = JEDEC_ATMEL;
14182 tg3_flag_set(tp, NVRAM_BUFFERED);
14183 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14184 }
14185
14186 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14187 {
14188 u32 nvcfg1;
14189
14190 nvcfg1 = tr32(NVRAM_CFG1);
14191
14192 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14193 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14194 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14195 tp->nvram_jedecnum = JEDEC_ATMEL;
14196 tg3_flag_set(tp, NVRAM_BUFFERED);
14197 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14198
14199 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14200 tw32(NVRAM_CFG1, nvcfg1);
14201 return;
14202 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14203 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14204 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14205 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14206 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14207 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14208 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14209 tp->nvram_jedecnum = JEDEC_ATMEL;
14210 tg3_flag_set(tp, NVRAM_BUFFERED);
14211 tg3_flag_set(tp, FLASH);
14212
14213 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14214 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14215 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14216 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14217 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14218 break;
14219 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14220 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14221 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14222 break;
14223 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14224 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14225 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14226 break;
14227 }
14228 break;
14229 case FLASH_5752VENDOR_ST_M45PE10:
14230 case FLASH_5752VENDOR_ST_M45PE20:
14231 case FLASH_5752VENDOR_ST_M45PE40:
14232 tp->nvram_jedecnum = JEDEC_ST;
14233 tg3_flag_set(tp, NVRAM_BUFFERED);
14234 tg3_flag_set(tp, FLASH);
14235
14236 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14237 case FLASH_5752VENDOR_ST_M45PE10:
14238 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14239 break;
14240 case FLASH_5752VENDOR_ST_M45PE20:
14241 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14242 break;
14243 case FLASH_5752VENDOR_ST_M45PE40:
14244 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14245 break;
14246 }
14247 break;
14248 default:
14249 tg3_flag_set(tp, NO_NVRAM);
14250 return;
14251 }
14252
14253 tg3_nvram_get_pagesize(tp, nvcfg1);
14254 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14255 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14256 }
14257 
14259 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14260 {
14261 u32 nvcfg1;
14262
14263 nvcfg1 = tr32(NVRAM_CFG1);
14264
14265 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14266 case FLASH_5717VENDOR_ATMEL_EEPROM:
14267 case FLASH_5717VENDOR_MICRO_EEPROM:
14268 tp->nvram_jedecnum = JEDEC_ATMEL;
14269 tg3_flag_set(tp, NVRAM_BUFFERED);
14270 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14271
14272 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14273 tw32(NVRAM_CFG1, nvcfg1);
14274 return;
14275 case FLASH_5717VENDOR_ATMEL_MDB011D:
14276 case FLASH_5717VENDOR_ATMEL_ADB011B:
14277 case FLASH_5717VENDOR_ATMEL_ADB011D:
14278 case FLASH_5717VENDOR_ATMEL_MDB021D:
14279 case FLASH_5717VENDOR_ATMEL_ADB021B:
14280 case FLASH_5717VENDOR_ATMEL_ADB021D:
14281 case FLASH_5717VENDOR_ATMEL_45USPT:
14282 tp->nvram_jedecnum = JEDEC_ATMEL;
14283 tg3_flag_set(tp, NVRAM_BUFFERED);
14284 tg3_flag_set(tp, FLASH);
14285
14286 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14287 case FLASH_5717VENDOR_ATMEL_MDB021D:
14288 			/* Detect size with tg3_get_nvram_size() */
14289 break;
14290 case FLASH_5717VENDOR_ATMEL_ADB021B:
14291 case FLASH_5717VENDOR_ATMEL_ADB021D:
14292 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14293 break;
14294 default:
14295 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14296 break;
14297 }
14298 break;
14299 case FLASH_5717VENDOR_ST_M_M25PE10:
14300 case FLASH_5717VENDOR_ST_A_M25PE10:
14301 case FLASH_5717VENDOR_ST_M_M45PE10:
14302 case FLASH_5717VENDOR_ST_A_M45PE10:
14303 case FLASH_5717VENDOR_ST_M_M25PE20:
14304 case FLASH_5717VENDOR_ST_A_M25PE20:
14305 case FLASH_5717VENDOR_ST_M_M45PE20:
14306 case FLASH_5717VENDOR_ST_A_M45PE20:
14307 case FLASH_5717VENDOR_ST_25USPT:
14308 case FLASH_5717VENDOR_ST_45USPT:
14309 tp->nvram_jedecnum = JEDEC_ST;
14310 tg3_flag_set(tp, NVRAM_BUFFERED);
14311 tg3_flag_set(tp, FLASH);
14312
14313 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14314 case FLASH_5717VENDOR_ST_M_M25PE20:
14315 case FLASH_5717VENDOR_ST_M_M45PE20:
14316 			/* Detect size with tg3_get_nvram_size() */
14317 break;
14318 case FLASH_5717VENDOR_ST_A_M25PE20:
14319 case FLASH_5717VENDOR_ST_A_M45PE20:
14320 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14321 break;
14322 default:
14323 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14324 break;
14325 }
14326 break;
14327 default:
14328 tg3_flag_set(tp, NO_NVRAM);
14329 return;
14330 }
14331
14332 tg3_nvram_get_pagesize(tp, nvcfg1);
14333 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14334 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14335 }
14336
14337 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14338 {
14339 u32 nvcfg1, nvmpinstrp;
14340
14341 nvcfg1 = tr32(NVRAM_CFG1);
14342 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14343
14344 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14345 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14346 tg3_flag_set(tp, NO_NVRAM);
14347 return;
14348 }
14349
14350 switch (nvmpinstrp) {
14351 case FLASH_5762_EEPROM_HD:
14352 nvmpinstrp = FLASH_5720_EEPROM_HD;
14353 break;
14354 case FLASH_5762_EEPROM_LD:
14355 nvmpinstrp = FLASH_5720_EEPROM_LD;
14356 break;
14357 case FLASH_5720VENDOR_M_ST_M45PE20:
14358 /* This pinstrap supports multiple sizes, so force it
14359 * to read the actual size from location 0xf0.
14360 */
14361 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14362 break;
14363 }
14364 }
14365
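	/* By this point any 5762-specific strap values have been remapped
	 * onto their 5720 equivalents above, so a single switch serves
	 * both ASICs.
	 */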
14366 switch (nvmpinstrp) {
14367 case FLASH_5720_EEPROM_HD:
14368 case FLASH_5720_EEPROM_LD:
14369 tp->nvram_jedecnum = JEDEC_ATMEL;
14370 tg3_flag_set(tp, NVRAM_BUFFERED);
14371
14372 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14373 tw32(NVRAM_CFG1, nvcfg1);
14374 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14375 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14376 else
14377 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14378 return;
14379 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14380 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14381 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14382 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14383 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14384 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14385 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14386 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14387 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14388 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14389 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14390 case FLASH_5720VENDOR_ATMEL_45USPT:
14391 tp->nvram_jedecnum = JEDEC_ATMEL;
14392 tg3_flag_set(tp, NVRAM_BUFFERED);
14393 tg3_flag_set(tp, FLASH);
14394
14395 switch (nvmpinstrp) {
14396 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14397 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14398 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14399 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14400 break;
14401 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14402 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14403 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14404 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14405 break;
14406 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14407 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14408 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14409 break;
14410 default:
14411 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14412 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14413 break;
14414 }
14415 break;
14416 case FLASH_5720VENDOR_M_ST_M25PE10:
14417 case FLASH_5720VENDOR_M_ST_M45PE10:
14418 case FLASH_5720VENDOR_A_ST_M25PE10:
14419 case FLASH_5720VENDOR_A_ST_M45PE10:
14420 case FLASH_5720VENDOR_M_ST_M25PE20:
14421 case FLASH_5720VENDOR_M_ST_M45PE20:
14422 case FLASH_5720VENDOR_A_ST_M25PE20:
14423 case FLASH_5720VENDOR_A_ST_M45PE20:
14424 case FLASH_5720VENDOR_M_ST_M25PE40:
14425 case FLASH_5720VENDOR_M_ST_M45PE40:
14426 case FLASH_5720VENDOR_A_ST_M25PE40:
14427 case FLASH_5720VENDOR_A_ST_M45PE40:
14428 case FLASH_5720VENDOR_M_ST_M25PE80:
14429 case FLASH_5720VENDOR_M_ST_M45PE80:
14430 case FLASH_5720VENDOR_A_ST_M25PE80:
14431 case FLASH_5720VENDOR_A_ST_M45PE80:
14432 case FLASH_5720VENDOR_ST_25USPT:
14433 case FLASH_5720VENDOR_ST_45USPT:
14434 tp->nvram_jedecnum = JEDEC_ST;
14435 tg3_flag_set(tp, NVRAM_BUFFERED);
14436 tg3_flag_set(tp, FLASH);
14437
14438 switch (nvmpinstrp) {
14439 case FLASH_5720VENDOR_M_ST_M25PE20:
14440 case FLASH_5720VENDOR_M_ST_M45PE20:
14441 case FLASH_5720VENDOR_A_ST_M25PE20:
14442 case FLASH_5720VENDOR_A_ST_M45PE20:
14443 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14444 break;
14445 case FLASH_5720VENDOR_M_ST_M25PE40:
14446 case FLASH_5720VENDOR_M_ST_M45PE40:
14447 case FLASH_5720VENDOR_A_ST_M25PE40:
14448 case FLASH_5720VENDOR_A_ST_M45PE40:
14449 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14450 break;
14451 case FLASH_5720VENDOR_M_ST_M25PE80:
14452 case FLASH_5720VENDOR_M_ST_M45PE80:
14453 case FLASH_5720VENDOR_A_ST_M25PE80:
14454 case FLASH_5720VENDOR_A_ST_M45PE80:
14455 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14456 break;
14457 default:
14458 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14459 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14460 break;
14461 }
14462 break;
14463 default:
14464 tg3_flag_set(tp, NO_NVRAM);
14465 return;
14466 }
14467
14468 tg3_nvram_get_pagesize(tp, nvcfg1);
14469 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14470 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14471
14472 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14473 u32 val;
14474
14475 if (tg3_nvram_read(tp, 0, &val))
14476 return;
14477
14478 if (val != TG3_EEPROM_MAGIC &&
14479 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14480 tg3_flag_set(tp, NO_NVRAM);
14481 }
14482 }
14483
14484 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14485 static void tg3_nvram_init(struct tg3 *tp)
14486 {
14487 if (tg3_flag(tp, IS_SSB_CORE)) {
14488 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14489 tg3_flag_clear(tp, NVRAM);
14490 tg3_flag_clear(tp, NVRAM_BUFFERED);
14491 tg3_flag_set(tp, NO_NVRAM);
14492 return;
14493 }
14494
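	/* Reset the EEPROM access state machine and program the default
	 * clock period before enabling accesses.
	 */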
14495 tw32_f(GRC_EEPROM_ADDR,
14496 (EEPROM_ADDR_FSM_RESET |
14497 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14498 EEPROM_ADDR_CLKPERD_SHIFT)));
14499
14500 msleep(1);
14501
14502 /* Enable seeprom accesses. */
14503 tw32_f(GRC_LOCAL_CTRL,
14504 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14505 udelay(100);
14506
14507 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14508 tg3_asic_rev(tp) != ASIC_REV_5701) {
14509 tg3_flag_set(tp, NVRAM);
14510
14511 if (tg3_nvram_lock(tp)) {
14512 netdev_warn(tp->dev,
14513 "Cannot get nvram lock, %s failed\n",
14514 __func__);
14515 return;
14516 }
14517 tg3_enable_nvram_access(tp);
14518
14519 tp->nvram_size = 0;
14520
14521 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14522 tg3_get_5752_nvram_info(tp);
14523 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14524 tg3_get_5755_nvram_info(tp);
14525 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14526 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14527 tg3_asic_rev(tp) == ASIC_REV_5785)
14528 tg3_get_5787_nvram_info(tp);
14529 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14530 tg3_get_5761_nvram_info(tp);
14531 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14532 tg3_get_5906_nvram_info(tp);
14533 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14534 tg3_flag(tp, 57765_CLASS))
14535 tg3_get_57780_nvram_info(tp);
14536 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14537 tg3_asic_rev(tp) == ASIC_REV_5719)
14538 tg3_get_5717_nvram_info(tp);
14539 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14540 tg3_asic_rev(tp) == ASIC_REV_5762)
14541 tg3_get_5720_nvram_info(tp);
14542 else
14543 tg3_get_nvram_info(tp);
14544
14545 if (tp->nvram_size == 0)
14546 tg3_get_nvram_size(tp);
14547
14548 tg3_disable_nvram_access(tp);
14549 tg3_nvram_unlock(tp);
14550
14551 } else {
14552 tg3_flag_clear(tp, NVRAM);
14553 tg3_flag_clear(tp, NVRAM_BUFFERED);
14554
14555 tg3_get_eeprom_size(tp);
14556 }
14557 }
14558
14559 struct subsys_tbl_ent {
14560 u16 subsys_vendor, subsys_devid;
14561 u32 phy_id;
14562 };
14563
14564 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14565 /* Broadcom boards. */
14566 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14567 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14568 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14569 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14570 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14571 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14572 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14573 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14574 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14575 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14576 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14577 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14578 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14579 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14580 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14581 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14582 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14583 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14584 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14585 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14586 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14587 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14588
14589 /* 3com boards. */
14590 { TG3PCI_SUBVENDOR_ID_3COM,
14591 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14592 { TG3PCI_SUBVENDOR_ID_3COM,
14593 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14594 { TG3PCI_SUBVENDOR_ID_3COM,
14595 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14596 { TG3PCI_SUBVENDOR_ID_3COM,
14597 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14598 { TG3PCI_SUBVENDOR_ID_3COM,
14599 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14600
14601 /* DELL boards. */
14602 { TG3PCI_SUBVENDOR_ID_DELL,
14603 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14604 { TG3PCI_SUBVENDOR_ID_DELL,
14605 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14606 { TG3PCI_SUBVENDOR_ID_DELL,
14607 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14608 { TG3PCI_SUBVENDOR_ID_DELL,
14609 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14610
14611 /* Compaq boards. */
14612 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14613 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14614 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14615 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14616 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14617 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14618 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14619 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14620 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14621 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14622
14623 /* IBM boards. */
14624 { TG3PCI_SUBVENDOR_ID_IBM,
14625 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14626 };
14627
14628 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14629 {
14630 int i;
14631
14632 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14633 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14634 tp->pdev->subsystem_vendor) &&
14635 (subsys_id_to_phy_id[i].subsys_devid ==
14636 tp->pdev->subsystem_device))
14637 return &subsys_id_to_phy_id[i];
14638 }
14639 return NULL;
14640 }
14641
14642 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14643 {
14644 u32 val;
14645
14646 tp->phy_id = TG3_PHY_ID_INVALID;
14647 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14648
14649 /* Assume an onboard device and WOL capable by default. */
14650 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14651 tg3_flag_set(tp, WOL_CAP);
14652
14653 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14654 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14655 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14656 tg3_flag_set(tp, IS_NIC);
14657 }
14658 val = tr32(VCPU_CFGSHDW);
14659 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14660 tg3_flag_set(tp, ASPM_WORKAROUND);
14661 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14662 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14663 tg3_flag_set(tp, WOL_ENABLE);
14664 device_set_wakeup_enable(&tp->pdev->dev, true);
14665 }
14666 goto done;
14667 }
14668
14669 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14670 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14671 u32 nic_cfg, led_cfg;
14672 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14673 int eeprom_phy_serdes = 0;
14674
14675 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14676 tp->nic_sram_data_cfg = nic_cfg;
14677
14678 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14679 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14680 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14681 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14682 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14683 (ver > 0) && (ver < 0x100))
14684 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14685
14686 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14687 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14688
14689 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14690 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14691 eeprom_phy_serdes = 1;
14692
14693 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14694 if (nic_phy_id != 0) {
14695 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14696 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14697
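			/* Assemble the PHY ID from the two fields of the
			 * SRAM word, using the same layout tg3_phy_probe()
			 * builds from the MII_PHYSID1/2 registers.
			 */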
14698 eeprom_phy_id = (id1 >> 16) << 10;
14699 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14700 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14701 } else
14702 eeprom_phy_id = 0;
14703
14704 tp->phy_id = eeprom_phy_id;
14705 if (eeprom_phy_serdes) {
14706 if (!tg3_flag(tp, 5705_PLUS))
14707 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14708 else
14709 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14710 }
14711
14712 if (tg3_flag(tp, 5750_PLUS))
14713 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14714 SHASTA_EXT_LED_MODE_MASK);
14715 else
14716 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14717
14718 switch (led_cfg) {
14719 default:
14720 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14721 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14722 break;
14723
14724 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14725 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14726 break;
14727
14728 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14729 tp->led_ctrl = LED_CTRL_MODE_MAC;
14730
14731 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14732 * read on some older 5700/5701 bootcode.
14733 */
14734 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14735 tg3_asic_rev(tp) == ASIC_REV_5701)
14736 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14737
14738 break;
14739
14740 case SHASTA_EXT_LED_SHARED:
14741 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14742 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14743 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14744 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14745 LED_CTRL_MODE_PHY_2);
14746 break;
14747
14748 case SHASTA_EXT_LED_MAC:
14749 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14750 break;
14751
14752 case SHASTA_EXT_LED_COMBO:
14753 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14754 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14755 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14756 LED_CTRL_MODE_PHY_2);
14757 break;
14758
14759 }
14760
14761 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14762 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14763 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14764 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14765
14766 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14767 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14768
14769 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14770 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14771 if ((tp->pdev->subsystem_vendor ==
14772 PCI_VENDOR_ID_ARIMA) &&
14773 (tp->pdev->subsystem_device == 0x205a ||
14774 tp->pdev->subsystem_device == 0x2063))
14775 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14776 } else {
14777 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14778 tg3_flag_set(tp, IS_NIC);
14779 }
14780
14781 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14782 tg3_flag_set(tp, ENABLE_ASF);
14783 if (tg3_flag(tp, 5750_PLUS))
14784 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14785 }
14786
14787 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14788 tg3_flag(tp, 5750_PLUS))
14789 tg3_flag_set(tp, ENABLE_APE);
14790
14791 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14792 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14793 tg3_flag_clear(tp, WOL_CAP);
14794
14795 if (tg3_flag(tp, WOL_CAP) &&
14796 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14797 tg3_flag_set(tp, WOL_ENABLE);
14798 device_set_wakeup_enable(&tp->pdev->dev, true);
14799 }
14800
14801 if (cfg2 & (1 << 17))
14802 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14803
14804 		/* SerDes signal pre-emphasis in register 0x590 is set by
14805 		 * the bootcode if bit 18 is set. */
14806 if (cfg2 & (1 << 18))
14807 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14808
14809 if ((tg3_flag(tp, 57765_PLUS) ||
14810 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14811 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14812 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14813 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14814
14815 if (tg3_flag(tp, PCI_EXPRESS)) {
14816 u32 cfg3;
14817
14818 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14819 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14820 !tg3_flag(tp, 57765_PLUS) &&
14821 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14822 tg3_flag_set(tp, ASPM_WORKAROUND);
14823 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14824 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14825 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14826 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14827 }
14828
14829 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14830 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14831 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14832 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14833 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14834 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14835 }
14836 done:
14837 if (tg3_flag(tp, WOL_CAP))
14838 device_set_wakeup_enable(&tp->pdev->dev,
14839 tg3_flag(tp, WOL_ENABLE));
14840 else
14841 device_set_wakeup_capable(&tp->pdev->dev, false);
14842 }
14843
14844 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14845 {
14846 int i, err;
14847 u32 val2, off = offset * 8;
14848
14849 err = tg3_nvram_lock(tp);
14850 if (err)
14851 return err;
14852
14853 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14854 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14855 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14856 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14857 udelay(10);
14858
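	/* Poll for command completion: up to 100 * 10 us = ~1 ms. */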
14859 for (i = 0; i < 100; i++) {
14860 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14861 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14862 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14863 break;
14864 }
14865 udelay(10);
14866 }
14867
14868 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14869
14870 tg3_nvram_unlock(tp);
14871 if (val2 & APE_OTP_STATUS_CMD_DONE)
14872 return 0;
14873
14874 return -EBUSY;
14875 }
14876
14877 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14878 {
14879 int i;
14880 u32 val;
14881
14882 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14883 tw32(OTP_CTRL, cmd);
14884
14885 /* Wait for up to 1 ms for command to execute. */
14886 for (i = 0; i < 100; i++) {
14887 val = tr32(OTP_STATUS);
14888 if (val & OTP_STATUS_CMD_DONE)
14889 break;
14890 udelay(10);
14891 }
14892
14893 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14894 }
14895
14896 /* Read the gphy configuration from the OTP region of the chip. The gphy
14897 * configuration is a 32-bit value that straddles the alignment boundary.
14898 * We do two 32-bit reads and then shift and merge the results.
14899 */
14900 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14901 {
14902 u32 bhalf_otp, thalf_otp;
14903
14904 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14905
14906 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14907 return 0;
14908
14909 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14910
14911 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14912 return 0;
14913
14914 thalf_otp = tr32(OTP_READ_DATA);
14915
14916 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14917
14918 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14919 return 0;
14920
14921 bhalf_otp = tr32(OTP_READ_DATA);
14922
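	/* The 32-bit gphy config straddles the two words read above: its
	 * upper half is the low half of the first word, its lower half
	 * the high half of the second.
	 */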
14923 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14924 }
14925
14926 static void tg3_phy_init_link_config(struct tg3 *tp)
14927 {
14928 u32 adv = ADVERTISED_Autoneg;
14929
14930 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14931 adv |= ADVERTISED_1000baseT_Half |
14932 ADVERTISED_1000baseT_Full;
14933
14934 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14935 adv |= ADVERTISED_100baseT_Half |
14936 ADVERTISED_100baseT_Full |
14937 ADVERTISED_10baseT_Half |
14938 ADVERTISED_10baseT_Full |
14939 ADVERTISED_TP;
14940 else
14941 adv |= ADVERTISED_FIBRE;
14942
14943 tp->link_config.advertising = adv;
14944 tp->link_config.speed = SPEED_UNKNOWN;
14945 tp->link_config.duplex = DUPLEX_UNKNOWN;
14946 tp->link_config.autoneg = AUTONEG_ENABLE;
14947 tp->link_config.active_speed = SPEED_UNKNOWN;
14948 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14949
14950 tp->old_link = -1;
14951 }
14952
14953 static int tg3_phy_probe(struct tg3 *tp)
14954 {
14955 u32 hw_phy_id_1, hw_phy_id_2;
14956 u32 hw_phy_id, hw_phy_id_masked;
14957 int err;
14958
14959 /* flow control autonegotiation is default behavior */
14960 tg3_flag_set(tp, PAUSE_AUTONEG);
14961 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14962
14963 if (tg3_flag(tp, ENABLE_APE)) {
14964 switch (tp->pci_fn) {
14965 case 0:
14966 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14967 break;
14968 case 1:
14969 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14970 break;
14971 case 2:
14972 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14973 break;
14974 case 3:
14975 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14976 break;
14977 }
14978 }
14979
14980 if (!tg3_flag(tp, ENABLE_ASF) &&
14981 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14982 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14983 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14984 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14985
14986 if (tg3_flag(tp, USE_PHYLIB))
14987 return tg3_phy_init(tp);
14988
14989 /* Reading the PHY ID register can conflict with ASF
14990 * firmware access to the PHY hardware.
14991 */
14992 err = 0;
14993 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14994 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14995 } else {
14996 		/* Now read the physical PHY_ID from the chip and verify
14997 		 * that it is sane. If it doesn't look good, we fall back
14998 		 * to the value found in the eeprom area and, failing
14999 		 * that, the hard-coded subsystem-ID table.
15000 		 */
15001 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15002 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15003
15004 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15005 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15006 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15007
15008 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15009 }
15010
15011 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15012 tp->phy_id = hw_phy_id;
15013 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15014 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15015 else
15016 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15017 } else {
15018 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15019 /* Do nothing, phy ID already set up in
15020 * tg3_get_eeprom_hw_cfg().
15021 */
15022 } else {
15023 struct subsys_tbl_ent *p;
15024
15025 /* No eeprom signature? Try the hardcoded
15026 * subsys device table.
15027 */
15028 p = tg3_lookup_by_subsys(tp);
15029 if (p) {
15030 tp->phy_id = p->phy_id;
15031 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15032 				/* So far the IDs 0xbc050cd0,
15033 				 * 0xbc050f80 and 0xbc050c30 have been seen
15034 				 * on devices connected to a BCM4785, and
15035 				 * there are probably more. For now, just
15036 				 * assume that the phy is supported when it
15037 				 * is connected to an SSB core.
15038 				 */
15039 return -ENODEV;
15040 }
15041
15042 if (!tp->phy_id ||
15043 tp->phy_id == TG3_PHY_ID_BCM8002)
15044 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15045 }
15046 }
15047
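	/* Chips whose PHYs are EEE-capable; the A0 steppings of the 5717
	 * and 57765 are explicitly excluded.
	 */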
15048 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15049 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15050 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15051 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15052 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15053 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15054 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15055 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15056 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
15057 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15058
15059 tg3_phy_init_link_config(tp);
15060
15061 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15062 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15063 !tg3_flag(tp, ENABLE_APE) &&
15064 !tg3_flag(tp, ENABLE_ASF)) {
15065 u32 bmsr, dummy;
15066
15067 tg3_readphy(tp, MII_BMSR, &bmsr);
15068 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15069 (bmsr & BMSR_LSTATUS))
15070 goto skip_phy_reset;
15071
15072 err = tg3_phy_reset(tp);
15073 if (err)
15074 return err;
15075
15076 tg3_phy_set_wirespeed(tp);
15077
15078 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15079 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15080 tp->link_config.flowctrl);
15081
15082 tg3_writephy(tp, MII_BMCR,
15083 BMCR_ANENABLE | BMCR_ANRESTART);
15084 }
15085 }
15086
15087 skip_phy_reset:
15088 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15089 err = tg3_init_5401phy_dsp(tp);
15090 if (err)
15091 return err;
15092
15093 err = tg3_init_5401phy_dsp(tp);
15094 }
15095
15096 return err;
15097 }
15098
15099 static void tg3_read_vpd(struct tg3 *tp)
15100 {
15101 u8 *vpd_data;
15102 unsigned int block_end, rosize, len;
15103 u32 vpdlen;
15104 int j, i = 0;
15105
15106 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15107 if (!vpd_data)
15108 goto out_no_vpd;
15109
15110 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15111 if (i < 0)
15112 goto out_not_found;
15113
15114 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15115 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15116 i += PCI_VPD_LRDT_TAG_SIZE;
15117
15118 if (block_end > vpdlen)
15119 goto out_not_found;
15120
15121 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15122 PCI_VPD_RO_KEYWORD_MFR_ID);
15123 if (j > 0) {
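		/* The MFR_ID field carries the PCI vendor ID as ASCII;
		 * "1028" is Dell, the only vendor handled here.
		 */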
15124 len = pci_vpd_info_field_size(&vpd_data[j]);
15125
15126 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15127 if (j + len > block_end || len != 4 ||
15128 memcmp(&vpd_data[j], "1028", 4))
15129 goto partno;
15130
15131 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15132 PCI_VPD_RO_KEYWORD_VENDOR0);
15133 if (j < 0)
15134 goto partno;
15135
15136 len = pci_vpd_info_field_size(&vpd_data[j]);
15137
15138 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15139 if (j + len > block_end)
15140 goto partno;
15141
15142 if (len >= sizeof(tp->fw_ver))
15143 len = sizeof(tp->fw_ver) - 1;
15144 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15145 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15146 &vpd_data[j]);
15147 }
15148
15149 partno:
15150 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15151 PCI_VPD_RO_KEYWORD_PARTNO);
15152 if (i < 0)
15153 goto out_not_found;
15154
15155 len = pci_vpd_info_field_size(&vpd_data[i]);
15156
15157 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15158 if (len > TG3_BPN_SIZE ||
15159 (len + i) > vpdlen)
15160 goto out_not_found;
15161
15162 memcpy(tp->board_part_number, &vpd_data[i], len);
15163
15164 out_not_found:
15165 kfree(vpd_data);
15166 if (tp->board_part_number[0])
15167 return;
15168
15169 out_no_vpd:
15170 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15171 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15172 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15173 strcpy(tp->board_part_number, "BCM5717");
15174 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15175 strcpy(tp->board_part_number, "BCM5718");
15176 else
15177 goto nomatch;
15178 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15179 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15180 strcpy(tp->board_part_number, "BCM57780");
15181 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15182 strcpy(tp->board_part_number, "BCM57760");
15183 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15184 strcpy(tp->board_part_number, "BCM57790");
15185 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15186 strcpy(tp->board_part_number, "BCM57788");
15187 else
15188 goto nomatch;
15189 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15190 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15191 strcpy(tp->board_part_number, "BCM57761");
15192 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15193 strcpy(tp->board_part_number, "BCM57765");
15194 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15195 strcpy(tp->board_part_number, "BCM57781");
15196 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15197 strcpy(tp->board_part_number, "BCM57785");
15198 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15199 strcpy(tp->board_part_number, "BCM57791");
15200 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15201 strcpy(tp->board_part_number, "BCM57795");
15202 else
15203 goto nomatch;
15204 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15205 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15206 strcpy(tp->board_part_number, "BCM57762");
15207 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15208 strcpy(tp->board_part_number, "BCM57766");
15209 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15210 strcpy(tp->board_part_number, "BCM57782");
15211 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15212 strcpy(tp->board_part_number, "BCM57786");
15213 else
15214 goto nomatch;
15215 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15216 strcpy(tp->board_part_number, "BCM95906");
15217 } else {
15218 nomatch:
15219 strcpy(tp->board_part_number, "none");
15220 }
15221 }
15222
15223 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15224 {
15225 u32 val;
15226
15227 if (tg3_nvram_read(tp, offset, &val) ||
15228 (val & 0xfc000000) != 0x0c000000 ||
15229 tg3_nvram_read(tp, offset + 4, &val) ||
15230 val != 0)
15231 return 0;
15232
15233 return 1;
15234 }
15235
15236 static void tg3_read_bc_ver(struct tg3 *tp)
15237 {
15238 u32 val, offset, start, ver_offset;
15239 int i, dst_off;
15240 bool newver = false;
15241
15242 if (tg3_nvram_read(tp, 0xc, &offset) ||
15243 tg3_nvram_read(tp, 0x4, &start))
15244 return;
15245
15246 offset = tg3_nvram_logical_addr(tp, offset);
15247
15248 if (tg3_nvram_read(tp, offset, &val))
15249 return;
15250
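	/* A first word matching 0x0c000000 under mask 0xfc000000,
	 * followed by a zero word, marks the newer image format (the
	 * same test tg3_fw_img_is_valid() applies).
	 */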
15251 if ((val & 0xfc000000) == 0x0c000000) {
15252 if (tg3_nvram_read(tp, offset + 4, &val))
15253 return;
15254
15255 if (val == 0)
15256 newver = true;
15257 }
15258
15259 dst_off = strlen(tp->fw_ver);
15260
15261 if (newver) {
15262 if (TG3_VER_SIZE - dst_off < 16 ||
15263 tg3_nvram_read(tp, offset + 8, &ver_offset))
15264 return;
15265
15266 offset = offset + ver_offset - start;
15267 for (i = 0; i < 16; i += 4) {
15268 __be32 v;
15269 if (tg3_nvram_read_be32(tp, offset + i, &v))
15270 return;
15271
15272 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15273 }
15274 } else {
15275 u32 major, minor;
15276
15277 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15278 return;
15279
15280 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15281 TG3_NVM_BCVER_MAJSFT;
15282 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15283 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15284 "v%d.%02d", major, minor);
15285 }
15286 }
15287
15288 static void tg3_read_hwsb_ver(struct tg3 *tp)
15289 {
15290 u32 val, major, minor;
15291
15292 /* Use native endian representation */
15293 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15294 return;
15295
15296 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15297 TG3_NVM_HWSB_CFG1_MAJSFT;
15298 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15299 TG3_NVM_HWSB_CFG1_MINSFT;
15300
15301 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15302 }
15303
15304 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15305 {
15306 u32 offset, major, minor, build;
15307
15308 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15309
15310 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15311 return;
15312
15313 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15314 case TG3_EEPROM_SB_REVISION_0:
15315 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15316 break;
15317 case TG3_EEPROM_SB_REVISION_2:
15318 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15319 break;
15320 case TG3_EEPROM_SB_REVISION_3:
15321 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15322 break;
15323 case TG3_EEPROM_SB_REVISION_4:
15324 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15325 break;
15326 case TG3_EEPROM_SB_REVISION_5:
15327 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15328 break;
15329 case TG3_EEPROM_SB_REVISION_6:
15330 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15331 break;
15332 default:
15333 return;
15334 }
15335
15336 if (tg3_nvram_read(tp, offset, &val))
15337 return;
15338
15339 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15340 TG3_EEPROM_SB_EDH_BLD_SHFT;
15341 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15342 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15343 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15344
15345 if (minor > 99 || build > 26)
15346 return;
15347
15348 offset = strlen(tp->fw_ver);
15349 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15350 " v%d.%02d", major, minor);
15351
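	/* Builds 1-26 map to an alphabetic suffix, 'a' through 'z',
	 * which is why builds above 26 were rejected earlier.
	 */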
15352 if (build > 0) {
15353 offset = strlen(tp->fw_ver);
15354 if (offset < TG3_VER_SIZE - 1)
15355 tp->fw_ver[offset] = 'a' + build - 1;
15356 }
15357 }
15358
15359 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15360 {
15361 u32 val, offset, start;
15362 int i, vlen;
15363
15364 for (offset = TG3_NVM_DIR_START;
15365 offset < TG3_NVM_DIR_END;
15366 offset += TG3_NVM_DIRENT_SIZE) {
15367 if (tg3_nvram_read(tp, offset, &val))
15368 return;
15369
15370 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15371 break;
15372 }
15373
15374 if (offset == TG3_NVM_DIR_END)
15375 return;
15376
15377 if (!tg3_flag(tp, 5705_PLUS))
15378 start = 0x08000000;
15379 else if (tg3_nvram_read(tp, offset - 4, &start))
15380 return;
15381
15382 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15383 !tg3_fw_img_is_valid(tp, offset) ||
15384 tg3_nvram_read(tp, offset + 8, &val))
15385 return;
15386
15387 offset += val - start;
15388
15389 vlen = strlen(tp->fw_ver);
15390
15391 tp->fw_ver[vlen++] = ',';
15392 tp->fw_ver[vlen++] = ' ';
15393
15394 for (i = 0; i < 4; i++) {
15395 __be32 v;
15396 if (tg3_nvram_read_be32(tp, offset, &v))
15397 return;
15398
15399 offset += sizeof(v);
15400
15401 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15402 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15403 break;
15404 }
15405
15406 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15407 vlen += sizeof(v);
15408 }
15409 }
15410
15411 static void tg3_probe_ncsi(struct tg3 *tp)
15412 {
15413 u32 apedata;
15414
15415 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15416 if (apedata != APE_SEG_SIG_MAGIC)
15417 return;
15418
15419 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15420 if (!(apedata & APE_FW_STATUS_READY))
15421 return;
15422
15423 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15424 tg3_flag_set(tp, APE_HAS_NCSI);
15425 }
15426
15427 static void tg3_read_dash_ver(struct tg3 *tp)
15428 {
15429 int vlen;
15430 u32 apedata;
15431 char *fwtype;
15432
15433 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15434
15435 if (tg3_flag(tp, APE_HAS_NCSI))
15436 fwtype = "NCSI";
15437 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15438 fwtype = "SMASH";
15439 else
15440 fwtype = "DASH";
15441
15442 vlen = strlen(tp->fw_ver);
15443
15444 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15445 fwtype,
15446 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15447 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15448 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15449 (apedata & APE_FW_VERSION_BLDMSK));
15450 }
15451
15452 static void tg3_read_otp_ver(struct tg3 *tp)
15453 {
15454 u32 val, val2;
15455
15456 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15457 return;
15458
15459 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15460 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15461 TG3_OTP_MAGIC0_VALID(val)) {
15462 u64 val64 = (u64) val << 32 | val2;
15463 u32 ver = 0;
15464 int i, vlen;
15465
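		/* Scan val64 a byte at a time from the least significant
		 * end; ver ends up holding the last non-zero byte before
		 * the first zero byte (at most 7 bytes are examined).
		 */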
15466 for (i = 0; i < 7; i++) {
15467 if ((val64 & 0xff) == 0)
15468 break;
15469 ver = val64 & 0xff;
15470 val64 >>= 8;
15471 }
15472 vlen = strlen(tp->fw_ver);
15473 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15474 }
15475 }
15476
15477 static void tg3_read_fw_ver(struct tg3 *tp)
15478 {
15479 u32 val;
15480 bool vpd_vers = false;
15481
15482 if (tp->fw_ver[0] != 0)
15483 vpd_vers = true;
15484
15485 if (tg3_flag(tp, NO_NVRAM)) {
15486 strcat(tp->fw_ver, "sb");
15487 tg3_read_otp_ver(tp);
15488 return;
15489 }
15490
15491 if (tg3_nvram_read(tp, 0, &val))
15492 return;
15493
15494 if (val == TG3_EEPROM_MAGIC)
15495 tg3_read_bc_ver(tp);
15496 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15497 tg3_read_sb_ver(tp, val);
15498 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15499 tg3_read_hwsb_ver(tp);
15500
15501 if (tg3_flag(tp, ENABLE_ASF)) {
15502 if (tg3_flag(tp, ENABLE_APE)) {
15503 tg3_probe_ncsi(tp);
15504 if (!vpd_vers)
15505 tg3_read_dash_ver(tp);
15506 } else if (!vpd_vers) {
15507 tg3_read_mgmtfw_ver(tp);
15508 }
15509 }
15510
15511 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15512 }
15513
15514 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15515 {
15516 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15517 return TG3_RX_RET_MAX_SIZE_5717;
15518 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15519 return TG3_RX_RET_MAX_SIZE_5700;
15520 else
15521 return TG3_RX_RET_MAX_SIZE_5705;
15522 }
15523
15524 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15525 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15526 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15527 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15528 { },
15529 };
15530
15531 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15532 {
15533 struct pci_dev *peer;
15534 unsigned int func, devnr = tp->pdev->devfn & ~7;
15535
15536 for (func = 0; func < 8; func++) {
15537 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15538 if (peer && peer != tp->pdev)
15539 break;
15540 pci_dev_put(peer);
15541 }
15542 	/* 5704 can be configured in single-port mode; set peer to
15543 	 * tp->pdev in that case.
15544 	 */
15545 if (!peer) {
15546 peer = tp->pdev;
15547 return peer;
15548 }
15549
15550 	/*
15551 	 * We don't need to keep the refcount elevated; there's no way
15552 	 * to remove one half of this device without removing the other.
15553 	 */
15554 pci_dev_put(peer);
15555
15556 return peer;
15557 }
15558
15559 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15560 {
15561 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15562 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15563 u32 reg;
15564
15565 /* All devices that use the alternate
15566 * ASIC REV location have a CPMU.
15567 */
15568 tg3_flag_set(tp, CPMU_PRESENT);
15569
15570 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15576 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15577 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15578 reg = TG3PCI_GEN2_PRODID_ASICREV;
15579 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15580 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15581 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15582 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15583 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15584 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15585 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15586 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15587 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15588 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15589 reg = TG3PCI_GEN15_PRODID_ASICREV;
15590 else
15591 reg = TG3PCI_PRODID_ASICREV;
15592
15593 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15594 }
15595
15596 /* Wrong chip ID in 5752 A0. This code can be removed later
15597 * as A0 is not in production.
15598 */
15599 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15600 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15601
15602 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15603 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15604
15605 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15606 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15607 tg3_asic_rev(tp) == ASIC_REV_5720)
15608 tg3_flag_set(tp, 5717_PLUS);
15609
15610 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15611 tg3_asic_rev(tp) == ASIC_REV_57766)
15612 tg3_flag_set(tp, 57765_CLASS);
15613
15614 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15615 tg3_asic_rev(tp) == ASIC_REV_5762)
15616 tg3_flag_set(tp, 57765_PLUS);
15617
15618 /* Intentionally exclude ASIC_REV_5906 */
15619 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15620 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15621 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15622 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15623 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15624 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15625 tg3_flag(tp, 57765_PLUS))
15626 tg3_flag_set(tp, 5755_PLUS);
15627
15628 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15629 tg3_asic_rev(tp) == ASIC_REV_5714)
15630 tg3_flag_set(tp, 5780_CLASS);
15631
15632 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15633 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15634 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15635 tg3_flag(tp, 5755_PLUS) ||
15636 tg3_flag(tp, 5780_CLASS))
15637 tg3_flag_set(tp, 5750_PLUS);
15638
15639 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15640 tg3_flag(tp, 5750_PLUS))
15641 tg3_flag_set(tp, 5705_PLUS);
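	/* Note the cumulative hierarchy built above: 57765_PLUS implies
	 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS,
	 * so later code can test the broadest applicable flag instead of
	 * enumerating ASIC revs.
	 */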
15642 }
15643
15644 static bool tg3_10_100_only_device(struct tg3 *tp,
15645 const struct pci_device_id *ent)
15646 {
15647 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15648
15649 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15650 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15651 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15652 return true;
15653
15654 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15655 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15656 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15657 return true;
15658 } else {
15659 return true;
15660 }
15661 }
15662
15663 return false;
15664 }
15665
15666 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15667 {
15668 u32 misc_ctrl_reg;
15669 u32 pci_state_reg, grc_misc_cfg;
15670 u32 val;
15671 u16 pci_cmd;
15672 int err;
15673
15674 /* Force memory write invalidate off. If we leave it on,
15675 * then on 5700_BX chips we have to enable a workaround.
15676 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15677 * to match the cacheline size. The Broadcom driver has this
15678 * workaround but turns MWI off all the time, so it never uses
15679 * it. This seems to suggest that the workaround is insufficient.
15680 */
15681 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15682 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15683 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15684
15685 /* Important! -- Make sure register accesses are byteswapped
15686 * correctly. Also, for those chips that require it, make
15687 * sure that indirect register accesses are enabled before
15688 * the first operation.
15689 */
15690 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15691 &misc_ctrl_reg);
15692 tp->misc_host_ctrl |= (misc_ctrl_reg &
15693 MISC_HOST_CTRL_CHIPREV);
15694 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15695 tp->misc_host_ctrl);
15696
15697 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15698
15699 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15700 * we need to disable memory and use config. cycles
15701 * only to access all registers. The 5702/03 chips
15702 * can mistakenly decode the special cycles from the
15703 * ICH chipsets as memory write cycles, causing corruption
15704 * of register and memory space. Only certain ICH bridges
15705 * will drive special cycles with non-zero data during the
15706 * address phase which can fall within the 5703's address
15707 * range. This is not an ICH bug as the PCI spec allows
15708 * non-zero address during special cycles. However, only
15709 * these ICH bridges are known to drive non-zero addresses
15710 * during special cycles.
15711 *
15712 * Since special cycles do not cross PCI bridges, we only
15713 * enable this workaround if the 5703 is on the secondary
15714 * bus of these ICH bridges.
15715 */
15716 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15717 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15718 static struct tg3_dev_id {
15719 u32 vendor;
15720 u32 device;
15721 u32 rev;
15722 } ich_chipsets[] = {
15723 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15724 PCI_ANY_ID },
15725 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15726 PCI_ANY_ID },
15727 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15728 0xa },
15729 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15730 PCI_ANY_ID },
15731 { },
15732 };
15733 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15734 struct pci_dev *bridge = NULL;
15735
15736 while (pci_id->vendor != 0) {
15737 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15738 bridge);
15739 if (!bridge) {
15740 pci_id++;
15741 continue;
15742 }
15743 if (pci_id->rev != PCI_ANY_ID) {
15744 if (bridge->revision > pci_id->rev)
15745 continue;
15746 }
15747 if (bridge->subordinate &&
15748 (bridge->subordinate->number ==
15749 tp->pdev->bus->number)) {
15750 tg3_flag_set(tp, ICH_WORKAROUND);
15751 pci_dev_put(bridge);
15752 break;
15753 }
15754 }
15755 }
15756
15757 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15758 static struct tg3_dev_id {
15759 u32 vendor;
15760 u32 device;
15761 } bridge_chipsets[] = {
15762 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15763 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15764 { },
15765 };
15766 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15767 struct pci_dev *bridge = NULL;
15768
15769 while (pci_id->vendor != 0) {
15770 bridge = pci_get_device(pci_id->vendor,
15771 pci_id->device,
15772 bridge);
15773 if (!bridge) {
15774 pci_id++;
15775 continue;
15776 }
15777 if (bridge->subordinate &&
15778 (bridge->subordinate->number <=
15779 tp->pdev->bus->number) &&
15780 (bridge->subordinate->busn_res.end >=
15781 tp->pdev->bus->number)) {
15782 tg3_flag_set(tp, 5701_DMA_BUG);
15783 pci_dev_put(bridge);
15784 break;
15785 }
15786 }
15787 }
15788
15789 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15790 * DMA addresses > 40-bit. This bridge may have other additional
15791 * 57xx devices behind it in some 4-port NIC designs for example.
15792 * Any tg3 device found behind the bridge will also need the 40-bit
15793 * DMA workaround.
15794 */
15795 if (tg3_flag(tp, 5780_CLASS)) {
15796 tg3_flag_set(tp, 40BIT_DMA_BUG);
15797 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15798 } else {
15799 struct pci_dev *bridge = NULL;
15800
15801 do {
15802 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15803 PCI_DEVICE_ID_SERVERWORKS_EPB,
15804 bridge);
15805 if (bridge && bridge->subordinate &&
15806 (bridge->subordinate->number <=
15807 tp->pdev->bus->number) &&
15808 (bridge->subordinate->busn_res.end >=
15809 tp->pdev->bus->number)) {
15810 tg3_flag_set(tp, 40BIT_DMA_BUG);
15811 pci_dev_put(bridge);
15812 break;
15813 }
15814 } while (bridge);
15815 }
15816
15817 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15818 tg3_asic_rev(tp) == ASIC_REV_5714)
15819 tp->pdev_peer = tg3_find_peer(tp);
15820
15821 /* Determine TSO capabilities */
15822 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15823 ; /* Do nothing. HW bug. */
15824 else if (tg3_flag(tp, 57765_PLUS))
15825 tg3_flag_set(tp, HW_TSO_3);
15826 else if (tg3_flag(tp, 5755_PLUS) ||
15827 tg3_asic_rev(tp) == ASIC_REV_5906)
15828 tg3_flag_set(tp, HW_TSO_2);
15829 else if (tg3_flag(tp, 5750_PLUS)) {
15830 tg3_flag_set(tp, HW_TSO_1);
15831 tg3_flag_set(tp, TSO_BUG);
15832 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15833 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15834 tg3_flag_clear(tp, TSO_BUG);
15835 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15836 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15837 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15838 tg3_flag_set(tp, FW_TSO);
15839 tg3_flag_set(tp, TSO_BUG);
15840 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15841 tp->fw_needed = FIRMWARE_TG3TSO5;
15842 else
15843 tp->fw_needed = FIRMWARE_TG3TSO;
15844 }
15845
15846 /* Selectively allow TSO based on operating conditions */
15847 if (tg3_flag(tp, HW_TSO_1) ||
15848 tg3_flag(tp, HW_TSO_2) ||
15849 tg3_flag(tp, HW_TSO_3) ||
15850 tg3_flag(tp, FW_TSO)) {
15851 /* For firmware TSO, assume ASF is disabled.
15852 * We'll disable TSO later if we discover ASF
15853 * is enabled in tg3_get_eeprom_hw_cfg().
15854 */
15855 tg3_flag_set(tp, TSO_CAPABLE);
15856 } else {
15857 tg3_flag_clear(tp, TSO_CAPABLE);
15858 tg3_flag_clear(tp, TSO_BUG);
15859 tp->fw_needed = NULL;
15860 }
15861
15862 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15863 tp->fw_needed = FIRMWARE_TG3;
15864
15865 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15866 tp->fw_needed = FIRMWARE_TG357766;
15867
15868 tp->irq_max = 1;
15869
15870 if (tg3_flag(tp, 5750_PLUS)) {
15871 tg3_flag_set(tp, SUPPORT_MSI);
15872 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15873 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15874 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15875 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15876 tp->pdev_peer == tp->pdev))
15877 tg3_flag_clear(tp, SUPPORT_MSI);
15878
15879 if (tg3_flag(tp, 5755_PLUS) ||
15880 tg3_asic_rev(tp) == ASIC_REV_5906) {
15881 tg3_flag_set(tp, 1SHOT_MSI);
15882 }
15883
15884 if (tg3_flag(tp, 57765_PLUS)) {
15885 tg3_flag_set(tp, SUPPORT_MSIX);
15886 tp->irq_max = TG3_IRQ_MAX_VECS;
15887 }
15888 }
15889
15890 tp->txq_max = 1;
15891 tp->rxq_max = 1;
15892 if (tp->irq_max > 1) {
15893 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15894 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15895
15896 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15897 tg3_asic_rev(tp) == ASIC_REV_5720)
15898 tp->txq_max = tp->irq_max - 1;
15899 }
15900
15901 if (tg3_flag(tp, 5755_PLUS) ||
15902 tg3_asic_rev(tp) == ASIC_REV_5906)
15903 tg3_flag_set(tp, SHORT_DMA_BUG);
15904
15905 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15906 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15907
15908 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15909 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15910 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15911 tg3_asic_rev(tp) == ASIC_REV_5762)
15912 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15913
15914 if (tg3_flag(tp, 57765_PLUS) &&
15915 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15916 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15917
15918 if (!tg3_flag(tp, 5705_PLUS) ||
15919 tg3_flag(tp, 5780_CLASS) ||
15920 tg3_flag(tp, USE_JUMBO_BDFLAG))
15921 tg3_flag_set(tp, JUMBO_CAPABLE);
15922
15923 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15924 &pci_state_reg);
15925
15926 if (pci_is_pcie(tp->pdev)) {
15927 u16 lnkctl;
15928
15929 tg3_flag_set(tp, PCI_EXPRESS);
15930
15931 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15932 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15933 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15934 tg3_flag_clear(tp, HW_TSO_2);
15935 tg3_flag_clear(tp, TSO_CAPABLE);
15936 }
15937 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15938 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15939 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15940 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15941 tg3_flag_set(tp, CLKREQ_BUG);
15942 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15943 tg3_flag_set(tp, L1PLLPD_EN);
15944 }
15945 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15946 /* BCM5785 devices are effectively PCIe devices, and should
15947 * follow PCIe codepaths, but do not have a PCIe capabilities
15948 * section.
15949 */
15950 tg3_flag_set(tp, PCI_EXPRESS);
15951 } else if (!tg3_flag(tp, 5705_PLUS) ||
15952 tg3_flag(tp, 5780_CLASS)) {
15953 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15954 if (!tp->pcix_cap) {
15955 dev_err(&tp->pdev->dev,
15956 "Cannot find PCI-X capability, aborting\n");
15957 return -EIO;
15958 }
15959
15960 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15961 tg3_flag_set(tp, PCIX_MODE);
15962 }
15963
15964 /* If we have an AMD 762 or VIA K8T800 chipset, write
15965 * reordering to the mailbox registers done by the host
15966 * controller can cause major troubles. We read back from
15967 * every mailbox register write to force the writes to be
15968 * posted to the chip in order.
15969 */
15970 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15971 !tg3_flag(tp, PCI_EXPRESS))
15972 tg3_flag_set(tp, MBOX_WRITE_REORDER);
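	/* A sketch of the flush write used for this workaround (the real
	 * helper, tg3_write_flush_reg32(), is defined earlier in this
	 * file): the value is written with writel() and then immediately
	 * read back with readl() from the same offset, which forces the
	 * posted write out to the chip before the next one is issued.
	 */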
15973
15974 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15975 &tp->pci_cacheline_sz);
15976 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15977 &tp->pci_lat_timer);
15978 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15979 tp->pci_lat_timer < 64) {
15980 tp->pci_lat_timer = 64;
15981 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15982 tp->pci_lat_timer);
15983 }
15984
15985 /* Important! -- It is critical that the PCI-X hw workaround
15986 * situation is decided before the first MMIO register access.
15987 */
15988 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15989 /* 5700 BX chips need to have their TX producer index
15990 * mailboxes written twice to workaround a bug.
15991 */
15992 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15993
15994 /* If we are in PCI-X mode, enable register write workaround.
15995 *
15996 * The workaround is to use indirect register accesses
15997 * for all chip writes not to mailbox registers.
15998 */
15999 if (tg3_flag(tp, PCIX_MODE)) {
16000 u32 pm_reg;
16001
16002 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16003
16004 /* The chip can have its power management PCI config
16005 * space registers clobbered due to this bug.
16006 * So explicitly force the chip into D0 here.
16007 */
16008 pci_read_config_dword(tp->pdev,
16009 tp->pm_cap + PCI_PM_CTRL,
16010 &pm_reg);
16011 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16012 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16013 pci_write_config_dword(tp->pdev,
16014 tp->pm_cap + PCI_PM_CTRL,
16015 pm_reg);
16016
16017 /* Also, force SERR#/PERR# in PCI command. */
16018 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16019 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16020 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16021 }
16022 }
16023
16024 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16025 tg3_flag_set(tp, PCI_HIGH_SPEED);
16026 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16027 tg3_flag_set(tp, PCI_32BIT);
16028
16029 /* Chip-specific fixup from Broadcom driver */
16030 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16031 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16032 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16033 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16034 }
16035
16036 /* Default fast path register access methods */
16037 tp->read32 = tg3_read32;
16038 tp->write32 = tg3_write32;
16039 tp->read32_mbox = tg3_read32;
16040 tp->write32_mbox = tg3_write32;
16041 tp->write32_tx_mbox = tg3_write32;
16042 tp->write32_rx_mbox = tg3_write32;
16043
16044 /* Various workaround register access methods */
16045 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16046 tp->write32 = tg3_write_indirect_reg32;
16047 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16048 (tg3_flag(tp, PCI_EXPRESS) &&
16049 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16050 /*
16051 * Back to back register writes can cause problems on these
16052 * chips, the workaround is to read back all reg writes
16053 * except those to mailbox regs.
16054 *
16055 * See tg3_write_indirect_reg32().
16056 */
16057 tp->write32 = tg3_write_flush_reg32;
16058 }
16059
16060 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16061 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16062 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16063 tp->write32_rx_mbox = tg3_write_flush_reg32;
16064 }
16065
16066 if (tg3_flag(tp, ICH_WORKAROUND)) {
16067 tp->read32 = tg3_read_indirect_reg32;
16068 tp->write32 = tg3_write_indirect_reg32;
16069 tp->read32_mbox = tg3_read_indirect_mbox;
16070 tp->write32_mbox = tg3_write_indirect_mbox;
16071 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16072 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16073
16074 iounmap(tp->regs);
16075 tp->regs = NULL;
16076
16077 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16078 pci_cmd &= ~PCI_COMMAND_MEMORY;
16079 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16080 }
16081 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16082 tp->read32_mbox = tg3_read32_mbox_5906;
16083 tp->write32_mbox = tg3_write32_mbox_5906;
16084 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16085 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16086 }
16087
16088 if (tp->write32 == tg3_write_indirect_reg32 ||
16089 (tg3_flag(tp, PCIX_MODE) &&
16090 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16091 tg3_asic_rev(tp) == ASIC_REV_5701)))
16092 tg3_flag_set(tp, SRAM_USE_CONFIG);
16093
16094 /* The memory arbiter has to be enabled in order for SRAM accesses
16095 * to succeed. Normally on powerup the tg3 chip firmware will make
16096 * sure it is enabled, but other entities such as system netboot
16097 * code might disable it.
16098 */
16099 val = tr32(MEMARB_MODE);
16100 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16101
16102 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
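	/* tp->pci_fn is the port index used later for per-function NVRAM
	 * offsets (see tg3_get_device_address()).  On the parts handled
	 * below, the PCI function number may not match the actual port,
	 * so it is corrected from the PCI-X or CPMU status registers.
	 */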
16103 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16104 tg3_flag(tp, 5780_CLASS)) {
16105 if (tg3_flag(tp, PCIX_MODE)) {
16106 pci_read_config_dword(tp->pdev,
16107 tp->pcix_cap + PCI_X_STATUS,
16108 &val);
16109 tp->pci_fn = val & 0x7;
16110 }
16111 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16112 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16113 tg3_asic_rev(tp) == ASIC_REV_5720) {
16114 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16115 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16116 val = tr32(TG3_CPMU_STATUS);
16117
16118 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16119 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16120 else
16121 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16122 TG3_CPMU_STATUS_FSHFT_5719;
16123 }
16124
16125 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16126 tp->write32_tx_mbox = tg3_write_flush_reg32;
16127 tp->write32_rx_mbox = tg3_write_flush_reg32;
16128 }
16129
16130 /* Get eeprom hw config before calling tg3_set_power_state().
16131 * In particular, the TG3_FLAG_IS_NIC flag must be
16132 * determined before calling tg3_set_power_state() so that
16133 * we know whether or not to switch out of Vaux power.
16134 * When the flag is set, it means that GPIO1 is used for eeprom
16135 * write protect and also implies that it is a LOM where GPIOs
16136 * are not used to switch power.
16137 */
16138 tg3_get_eeprom_hw_cfg(tp);
16139
16140 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16141 tg3_flag_clear(tp, TSO_CAPABLE);
16142 tg3_flag_clear(tp, TSO_BUG);
16143 tp->fw_needed = NULL;
16144 }
16145
16146 if (tg3_flag(tp, ENABLE_APE)) {
16147 /* Allow reads and writes to the
16148 * APE register and memory space.
16149 */
16150 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16151 PCISTATE_ALLOW_APE_SHMEM_WR |
16152 PCISTATE_ALLOW_APE_PSPACE_WR;
16153 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16154 pci_state_reg);
16155
16156 tg3_ape_lock_init(tp);
16157 }
16158
16159 /* Set up tp->grc_local_ctrl before calling
16160 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16161 * will bring 5700's external PHY out of reset.
16162 * It is also used as eeprom write protect on LOMs.
16163 */
16164 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16165 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16166 tg3_flag(tp, EEPROM_WRITE_PROT))
16167 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16168 GRC_LCLCTRL_GPIO_OUTPUT1);
16169 /* Unused GPIO3 must be driven as output on 5752 because there
16170 * are no pull-up resistors on unused GPIO pins.
16171 */
16172 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16173 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16174
16175 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16176 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16177 tg3_flag(tp, 57765_CLASS))
16178 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16179
16180 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16181 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16182 /* Turn off the debug UART. */
16183 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16184 if (tg3_flag(tp, IS_NIC))
16185 /* Keep VMain power. */
16186 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16187 GRC_LCLCTRL_GPIO_OUTPUT0;
16188 }
16189
16190 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16191 tp->grc_local_ctrl |=
16192 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16193
16194 /* Switch out of Vaux if it is a NIC */
16195 tg3_pwrsrc_switch_to_vmain(tp);
16196
16197 /* Derive initial jumbo mode from MTU assigned in
16198 * ether_setup() via the alloc_etherdev() call.
16199 */
16200 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16201 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16202
16203 /* Determine WakeOnLan speed to use. */
16204 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16205 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16206 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16207 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16208 tg3_flag_clear(tp, WOL_SPEED_100MB);
16209 } else {
16210 tg3_flag_set(tp, WOL_SPEED_100MB);
16211 }
16212
16213 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16214 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16215
16216 /* A few boards don't want Ethernet@WireSpeed phy feature */
16217 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16218 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16219 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16220 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16221 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16222 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16223 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16224
16225 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16226 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16227 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16228 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16229 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16230
16231 if (tg3_flag(tp, 5705_PLUS) &&
16232 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16233 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16234 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16235 !tg3_flag(tp, 57765_PLUS)) {
16236 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16237 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16238 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16239 tg3_asic_rev(tp) == ASIC_REV_5761) {
16240 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16241 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16242 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16243 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16244 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16245 } else
16246 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16247 }
16248
16249 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16250 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16251 tp->phy_otp = tg3_read_otp_phycfg(tp);
16252 if (tp->phy_otp == 0)
16253 tp->phy_otp = TG3_OTP_DEFAULT;
16254 }
16255
16256 if (tg3_flag(tp, CPMU_PRESENT))
16257 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16258 else
16259 tp->mi_mode = MAC_MI_MODE_BASE;
16260
16261 tp->coalesce_mode = 0;
16262 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16263 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16264 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16265
16266 /* Set these bits to enable the statistics workaround. */
16267 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16268 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16269 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16270 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16271 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16272 }
16273
16274 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16275 tg3_asic_rev(tp) == ASIC_REV_57780)
16276 tg3_flag_set(tp, USE_PHYLIB);
16277
16278 err = tg3_mdio_init(tp);
16279 if (err)
16280 return err;
16281
16282 /* Initialize data/descriptor byte/word swapping. */
16283 val = tr32(GRC_MODE);
16284 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16285 tg3_asic_rev(tp) == ASIC_REV_5762)
16286 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16287 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16288 GRC_MODE_B2HRX_ENABLE |
16289 GRC_MODE_HTX2B_ENABLE |
16290 GRC_MODE_HOST_STACKUP);
16291 else
16292 val &= GRC_MODE_HOST_STACKUP;
16293
16294 tw32(GRC_MODE, val | tp->grc_mode);
16295
16296 tg3_switch_clocks(tp);
16297
16298 /* Clear this out for sanity. */
16299 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16300
16301 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16302 tw32(TG3PCI_REG_BASE_ADDR, 0);
16303
16304 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16305 &pci_state_reg);
16306 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16307 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16308 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16309 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16310 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16311 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16312 void __iomem *sram_base;
16313
16314 /* Write some dummy words into the SRAM status block
16315 * area and see if they read back correctly. If the
16316 * readback is bad, force enable the PCIX workaround.
16317 */
16318 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16319
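			/* If the bug is present, the 0xffffffff write to
			 * the second word bleeds into the first, so the
			 * first word reads back nonzero.
			 */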
16320 writel(0x00000000, sram_base);
16321 writel(0x00000000, sram_base + 4);
16322 writel(0xffffffff, sram_base + 4);
16323 if (readl(sram_base) != 0x00000000)
16324 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16325 }
16326 }
16327
16328 udelay(50);
16329 tg3_nvram_init(tp);
16330
16331 /* If the device has an NVRAM, no need to load patch firmware */
16332 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16333 !tg3_flag(tp, NO_NVRAM))
16334 tp->fw_needed = NULL;
16335
16336 grc_misc_cfg = tr32(GRC_MISC_CFG);
16337 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16338
16339 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16340 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16341 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16342 tg3_flag_set(tp, IS_5788);
16343
16344 if (!tg3_flag(tp, IS_5788) &&
16345 tg3_asic_rev(tp) != ASIC_REV_5700)
16346 tg3_flag_set(tp, TAGGED_STATUS);
16347 if (tg3_flag(tp, TAGGED_STATUS)) {
16348 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16349 HOSTCC_MODE_CLRTICK_TXBD);
16350
16351 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16352 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16353 tp->misc_host_ctrl);
16354 }
16355
16356 /* Preserve the APE MAC_MODE bits */
16357 if (tg3_flag(tp, ENABLE_APE))
16358 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16359 else
16360 tp->mac_mode = 0;
16361
16362 if (tg3_10_100_only_device(tp, ent))
16363 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16364
16365 err = tg3_phy_probe(tp);
16366 if (err) {
16367 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16368 /* ... but do not return immediately ... */
16369 tg3_mdio_fini(tp);
16370 }
16371
16372 tg3_read_vpd(tp);
16373 tg3_read_fw_ver(tp);
16374
16375 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16376 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16377 } else {
16378 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16379 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16380 else
16381 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16382 }
16383
16384 /* 5700 {AX,BX} chips have a broken status block link
16385 * change bit implementation, so we must use the
16386 * status register in those cases.
16387 */
16388 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16389 tg3_flag_set(tp, USE_LINKCHG_REG);
16390 else
16391 tg3_flag_clear(tp, USE_LINKCHG_REG);
16392
16393 /* The led_ctrl is set during tg3_phy_probe; here we might
16394 * have to force the link status polling mechanism based
16395 * upon subsystem IDs.
16396 */
16397 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16398 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16399 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16400 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16401 tg3_flag_set(tp, USE_LINKCHG_REG);
16402 }
16403
16404 /* For all SERDES we poll the MAC status register. */
16405 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16406 tg3_flag_set(tp, POLL_SERDES);
16407 else
16408 tg3_flag_clear(tp, POLL_SERDES);
16409
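	/* Receive buffers normally reserve NET_IP_ALIGN extra bytes so
	 * the IP header lands on a 4-byte boundary.  The 5701 in PCI-X
	 * mode gets no such padding (a hardware limitation implied by the
	 * workaround below) and, on architectures without efficient
	 * unaligned access, every packet is copied instead
	 * (rx_copy_thresh = ~0).
	 */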
16410 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16411 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16412 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16413 tg3_flag(tp, PCIX_MODE)) {
16414 tp->rx_offset = NET_SKB_PAD;
16415 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16416 tp->rx_copy_thresh = ~(u16)0;
16417 #endif
16418 }
16419
16420 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16421 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16422 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16423
16424 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16425
16426 /* Increment the rx prod index on the rx std ring by at most
16427 * 8 for these chips to work around hw errata.
16428 */
16429 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16430 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16431 tg3_asic_rev(tp) == ASIC_REV_5755)
16432 tp->rx_std_max_post = 8;
16433
16434 if (tg3_flag(tp, ASPM_WORKAROUND))
16435 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16436 PCIE_PWR_MGMT_L1_THRESH_MSK;
16437
16438 return err;
16439 }
16440
16441 #ifdef CONFIG_SPARC
16442 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16443 {
16444 struct net_device *dev = tp->dev;
16445 struct pci_dev *pdev = tp->pdev;
16446 struct device_node *dp = pci_device_to_OF_node(pdev);
16447 const unsigned char *addr;
16448 int len;
16449
16450 addr = of_get_property(dp, "local-mac-address", &len);
16451 if (addr && len == 6) {
16452 memcpy(dev->dev_addr, addr, 6);
16453 return 0;
16454 }
16455 return -ENODEV;
16456 }
16457
16458 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16459 {
16460 struct net_device *dev = tp->dev;
16461
16462 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16463 return 0;
16464 }
16465 #endif
16466
16467 static int tg3_get_device_address(struct tg3 *tp)
16468 {
16469 struct net_device *dev = tp->dev;
16470 u32 hi, lo, mac_offset;
16471 int addr_ok = 0;
16472 int err;
16473
16474 #ifdef CONFIG_SPARC
16475 if (!tg3_get_macaddr_sparc(tp))
16476 return 0;
16477 #endif
16478
16479 if (tg3_flag(tp, IS_SSB_CORE)) {
16480 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16481 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16482 return 0;
16483 }
16484
16485 mac_offset = 0x7c;
16486 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16487 tg3_flag(tp, 5780_CLASS)) {
16488 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16489 mac_offset = 0xcc;
16490 if (tg3_nvram_lock(tp))
16491 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16492 else
16493 tg3_nvram_unlock(tp);
16494 } else if (tg3_flag(tp, 5717_PLUS)) {
16495 if (tp->pci_fn & 1)
16496 mac_offset = 0xcc;
16497 if (tp->pci_fn > 1)
16498 mac_offset += 0x18c;
16499 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16500 mac_offset = 0x10;
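	/* mac_offset now selects this port's MAC address within NVRAM:
	 * 0x7c by default, 0xcc for the second port of dual-MAC parts,
	 * a further 0x18c stride for functions 2/3 on 5717-class
	 * devices, and 0x10 on the 5906.
	 */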
16501
16502 /* First try to get it from MAC address mailbox. */
16503 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
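	/* 0x484b is ASCII "HK", the signature the bootcode leaves in the
	 * mailbox when a valid MAC address follows (interpretation
	 * inferred from the check below, not from Broadcom docs).
	 */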
16504 if ((hi >> 16) == 0x484b) {
16505 dev->dev_addr[0] = (hi >> 8) & 0xff;
16506 dev->dev_addr[1] = (hi >> 0) & 0xff;
16507
16508 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16509 dev->dev_addr[2] = (lo >> 24) & 0xff;
16510 dev->dev_addr[3] = (lo >> 16) & 0xff;
16511 dev->dev_addr[4] = (lo >> 8) & 0xff;
16512 dev->dev_addr[5] = (lo >> 0) & 0xff;
16513
16514 /* Some old bootcode may report a 0 MAC address in SRAM */
16515 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16516 }
16517 if (!addr_ok) {
16518 /* Next, try NVRAM. */
16519 if (!tg3_flag(tp, NO_NVRAM) &&
16520 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16521 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16522 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16523 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16524 }
16525 /* Finally just fetch it out of the MAC control regs. */
16526 else {
16527 hi = tr32(MAC_ADDR_0_HIGH);
16528 lo = tr32(MAC_ADDR_0_LOW);
16529
16530 dev->dev_addr[5] = lo & 0xff;
16531 dev->dev_addr[4] = (lo >> 8) & 0xff;
16532 dev->dev_addr[3] = (lo >> 16) & 0xff;
16533 dev->dev_addr[2] = (lo >> 24) & 0xff;
16534 dev->dev_addr[1] = hi & 0xff;
16535 dev->dev_addr[0] = (hi >> 8) & 0xff;
16536 }
16537 }
16538
16539 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16540 #ifdef CONFIG_SPARC
16541 if (!tg3_get_default_macaddr_sparc(tp))
16542 return 0;
16543 #endif
16544 return -EINVAL;
16545 }
16546 return 0;
16547 }
16548
16549 #define BOUNDARY_SINGLE_CACHELINE 1
16550 #define BOUNDARY_MULTI_CACHELINE 2
16551
16552 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16553 {
16554 int cacheline_size;
16555 u8 byte;
16556 int goal;
16557
16558 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16559 if (byte == 0)
16560 cacheline_size = 1024;
16561 else
16562 cacheline_size = (int) byte * 4;
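	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so e.g. a
	 * register value of 16 means a 64-byte cache line; an unset (0)
	 * register is treated as the 1024-byte worst case above.
	 */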
16563
16564 /* On 5703 and later conventional-PCI chips, the boundary bits
16565 * have no effect, so only 5700/5701 and PCI Express devices
16566 * are handled below. */
16567 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16568 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16569 !tg3_flag(tp, PCI_EXPRESS))
16570 goto out;
16571
16572 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16573 goal = BOUNDARY_MULTI_CACHELINE;
16574 #else
16575 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16576 goal = BOUNDARY_SINGLE_CACHELINE;
16577 #else
16578 goal = 0;
16579 #endif
16580 #endif
16581
16582 if (tg3_flag(tp, 57765_PLUS)) {
16583 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16584 goto out;
16585 }
16586
16587 if (!goal)
16588 goto out;
16589
16590 /* PCI controllers on most RISC systems tend to disconnect
16591 * when a device tries to burst across a cache-line boundary.
16592 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16593 *
16594 * Unfortunately, for PCI-E there are only limited
16595 * write-side controls for this, and thus for reads
16596 * we will still get the disconnects. We'll also waste
16597 * these PCI cycles for both read and write for chips
16598 * other than 5700 and 5701 which do not implement the
16599 * boundary bits.
16600 */
16601 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16602 switch (cacheline_size) {
16603 case 16:
16604 case 32:
16605 case 64:
16606 case 128:
16607 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16608 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16609 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16610 } else {
16611 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16612 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16613 }
16614 break;
16615
16616 case 256:
16617 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16618 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16619 break;
16620
16621 default:
16622 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16623 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16624 break;
16625 }
16626 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16627 switch (cacheline_size) {
16628 case 16:
16629 case 32:
16630 case 64:
16631 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16632 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16633 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16634 break;
16635 }
16636 /* fallthrough */
16637 case 128:
16638 default:
16639 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16640 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16641 break;
16642 }
16643 } else {
16644 switch (cacheline_size) {
16645 case 16:
16646 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16647 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16648 DMA_RWCTRL_WRITE_BNDRY_16);
16649 break;
16650 }
16651 /* fallthrough */
16652 case 32:
16653 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16654 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16655 DMA_RWCTRL_WRITE_BNDRY_32);
16656 break;
16657 }
16658 /* fallthrough */
16659 case 64:
16660 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16661 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16662 DMA_RWCTRL_WRITE_BNDRY_64);
16663 break;
16664 }
16665 /* fallthrough */
16666 case 128:
16667 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16668 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16669 DMA_RWCTRL_WRITE_BNDRY_128);
16670 break;
16671 }
16672 /* fallthrough */
16673 case 256:
16674 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16675 DMA_RWCTRL_WRITE_BNDRY_256);
16676 break;
16677 case 512:
16678 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16679 DMA_RWCTRL_WRITE_BNDRY_512);
16680 break;
16681 case 1024:
16682 default:
16683 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16684 DMA_RWCTRL_WRITE_BNDRY_1024);
16685 break;
16686 }
16687 }
16688
16689 out:
16690 return val;
16691 }
16692
16693 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16694 int size, bool to_device)
16695 {
16696 struct tg3_internal_buffer_desc test_desc;
16697 u32 sram_dma_descs;
16698 int i, ret;
16699
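	/* Build one internal buffer descriptor by hand, copy it into NIC
	 * SRAM one word at a time through the PCI memory window, then
	 * kick the appropriate FTQ so the read/write DMA engine executes
	 * it, and poll the completion FIFO for the descriptor address.
	 */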
16700 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16701
16702 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16703 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16704 tw32(RDMAC_STATUS, 0);
16705 tw32(WDMAC_STATUS, 0);
16706
16707 tw32(BUFMGR_MODE, 0);
16708 tw32(FTQ_RESET, 0);
16709
16710 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16711 test_desc.addr_lo = buf_dma & 0xffffffff;
16712 test_desc.nic_mbuf = 0x00002100;
16713 test_desc.len = size;
16714
16715 /*
16716 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16717 * the *second* time the tg3 driver was loaded after an
16718 * initial scan.
16719 *
16720 * Broadcom tells me:
16721 * ...the DMA engine is connected to the GRC block and a DMA
16722 * reset may affect the GRC block in some unpredictable way...
16723 * The behavior of resets to individual blocks has not been tested.
16724 *
16725 * Broadcom noted the GRC reset will also reset all sub-components.
16726 */
16727 if (to_device) {
16728 test_desc.cqid_sqid = (13 << 8) | 2;
16729
16730 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16731 udelay(40);
16732 } else {
16733 test_desc.cqid_sqid = (16 << 8) | 7;
16734
16735 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16736 udelay(40);
16737 }
16738 test_desc.flags = 0x00000005;
16739
16740 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16741 u32 val;
16742
16743 val = *(((u32 *)&test_desc) + i);
16744 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16745 sram_dma_descs + (i * sizeof(u32)));
16746 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16747 }
16748 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16749
16750 if (to_device)
16751 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16752 else
16753 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16754
16755 ret = -ENODEV;
16756 for (i = 0; i < 40; i++) {
16757 u32 val;
16758
16759 if (to_device)
16760 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16761 else
16762 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16763 if ((val & 0xffff) == sram_dma_descs) {
16764 ret = 0;
16765 break;
16766 }
16767
16768 udelay(100);
16769 }
16770
16771 return ret;
16772 }
16773
16774 #define TEST_BUFFER_SIZE 0x2000
16775
16776 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16777 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16778 { },
16779 };
16780
16781 static int tg3_test_dma(struct tg3 *tp)
16782 {
16783 dma_addr_t buf_dma;
16784 u32 *buf, saved_dma_rwctrl;
16785 int ret = 0;
16786
16787 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16788 &buf_dma, GFP_KERNEL);
16789 if (!buf) {
16790 ret = -ENOMEM;
16791 goto out_nofree;
16792 }
16793
16794 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16795 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16796
16797 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16798
16799 if (tg3_flag(tp, 57765_PLUS))
16800 goto out;
16801
16802 if (tg3_flag(tp, PCI_EXPRESS)) {
16803 /* DMA read watermark not used on PCIE */
16804 tp->dma_rwctrl |= 0x00180000;
16805 } else if (!tg3_flag(tp, PCIX_MODE)) {
16806 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16807 tg3_asic_rev(tp) == ASIC_REV_5750)
16808 tp->dma_rwctrl |= 0x003f0000;
16809 else
16810 tp->dma_rwctrl |= 0x003f000f;
16811 } else {
16812 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16813 tg3_asic_rev(tp) == ASIC_REV_5704) {
16814 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16815 u32 read_water = 0x7;
16816
16817 /* If the 5704 is behind the EPB bridge, we can
16818 * do the less restrictive ONE_DMA workaround for
16819 * better performance.
16820 */
16821 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16822 tg3_asic_rev(tp) == ASIC_REV_5704)
16823 tp->dma_rwctrl |= 0x8000;
16824 else if (ccval == 0x6 || ccval == 0x7)
16825 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16826
16827 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16828 read_water = 4;
16829 /* Set bit 23 to enable PCIX hw bug fix */
16830 tp->dma_rwctrl |=
16831 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16832 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16833 (1 << 23);
16834 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16835 /* 5780 always in PCIX mode */
16836 tp->dma_rwctrl |= 0x00144000;
16837 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16838 /* 5714 always in PCIX mode */
16839 tp->dma_rwctrl |= 0x00148000;
16840 } else {
16841 tp->dma_rwctrl |= 0x001b000f;
16842 }
16843 }
16844 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16845 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16846
16847 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16848 tg3_asic_rev(tp) == ASIC_REV_5704)
16849 tp->dma_rwctrl &= 0xfffffff0;
16850
16851 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16852 tg3_asic_rev(tp) == ASIC_REV_5701) {
16853 /* Remove this if it causes problems for some boards. */
16854 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16855
16856 /* On 5700/5701 chips, we need to set this bit.
16857 * Otherwise the chip will issue cacheline transactions
16858 * to streamable DMA memory with not all the byte
16859 * enables turned on. This is an error on several
16860 * RISC PCI controllers, in particular sparc64.
16861 *
16862 * On 5703/5704 chips, this bit has been reassigned
16863 * a different meaning. In particular, it is used
16864 * on those chips to enable a PCI-X workaround.
16865 */
16866 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16867 }
16868
16869 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16870
16871 #if 0
16872 /* Unneeded, already done by tg3_get_invariants. */
16873 tg3_switch_clocks(tp);
16874 #endif
16875
16876 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16877 tg3_asic_rev(tp) != ASIC_REV_5701)
16878 goto out;
16879
16880 /* It is best to perform the DMA test with maximum write burst size
16881 * to expose the 5700/5701 write DMA bug.
16882 */
16883 saved_dma_rwctrl = tp->dma_rwctrl;
16884 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16885 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16886
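	/* Probe loop: fill the buffer with a known pattern, DMA it to
	 * the chip and back, and verify it.  On corruption, fall back to
	 * the conservative 16-byte write boundary and retry; a clean
	 * pass exits with ret == 0.
	 */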
16887 while (1) {
16888 u32 *p = buf, i;
16889
16890 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16891 p[i] = i;
16892
16893 /* Send the buffer to the chip. */
16894 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16895 if (ret) {
16896 dev_err(&tp->pdev->dev,
16897 "%s: Buffer write failed. err = %d\n",
16898 __func__, ret);
16899 break;
16900 }
16901
16902 #if 0
16903 /* validate data reached card RAM correctly. */
16904 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16905 u32 val;
16906 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16907 if (le32_to_cpu(val) != p[i]) {
16908 dev_err(&tp->pdev->dev,
16909 "%s: Buffer corrupted on device! "
16910 "(%d != %d)\n", __func__, val, i);
16911 /* ret = -ENODEV here? */
16912 }
16913 p[i] = 0;
16914 }
16915 #endif
16916 /* Now read it back. */
16917 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16918 if (ret) {
16919 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16920 "err = %d\n", __func__, ret);
16921 break;
16922 }
16923
16924 /* Verify it. */
16925 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16926 if (p[i] == i)
16927 continue;
16928
16929 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16930 DMA_RWCTRL_WRITE_BNDRY_16) {
16931 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16932 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16933 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16934 break;
16935 } else {
16936 dev_err(&tp->pdev->dev,
16937 "%s: Buffer corrupted on read back! "
16938 "(%d != %d)\n", __func__, p[i], i);
16939 ret = -ENODEV;
16940 goto out;
16941 }
16942 }
16943
16944 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16945 /* Success. */
16946 ret = 0;
16947 break;
16948 }
16949 }
16950 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16951 DMA_RWCTRL_WRITE_BNDRY_16) {
16952 /* DMA test passed without adjusting DMA boundary,
16953 * now look for chipsets that are known to expose the
16954 * DMA bug without failing the test.
16955 */
16956 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16957 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16958 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16959 } else {
16960 /* Safe to use the calculated DMA boundary. */
16961 tp->dma_rwctrl = saved_dma_rwctrl;
16962 }
16963
16964 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16965 }
16966
16967 out:
16968 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16969 out_nofree:
16970 return ret;
16971 }
16972
16973 static void tg3_init_bufmgr_config(struct tg3 *tp)
16974 {
16975 if (tg3_flag(tp, 57765_PLUS)) {
16976 tp->bufmgr_config.mbuf_read_dma_low_water =
16977 DEFAULT_MB_RDMA_LOW_WATER_5705;
16978 tp->bufmgr_config.mbuf_mac_rx_low_water =
16979 DEFAULT_MB_MACRX_LOW_WATER_57765;
16980 tp->bufmgr_config.mbuf_high_water =
16981 DEFAULT_MB_HIGH_WATER_57765;
16982
16983 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16984 DEFAULT_MB_RDMA_LOW_WATER_5705;
16985 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16986 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16987 tp->bufmgr_config.mbuf_high_water_jumbo =
16988 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16989 } else if (tg3_flag(tp, 5705_PLUS)) {
16990 tp->bufmgr_config.mbuf_read_dma_low_water =
16991 DEFAULT_MB_RDMA_LOW_WATER_5705;
16992 tp->bufmgr_config.mbuf_mac_rx_low_water =
16993 DEFAULT_MB_MACRX_LOW_WATER_5705;
16994 tp->bufmgr_config.mbuf_high_water =
16995 DEFAULT_MB_HIGH_WATER_5705;
16996 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16997 tp->bufmgr_config.mbuf_mac_rx_low_water =
16998 DEFAULT_MB_MACRX_LOW_WATER_5906;
16999 tp->bufmgr_config.mbuf_high_water =
17000 DEFAULT_MB_HIGH_WATER_5906;
17001 }
17002
17003 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17004 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17005 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17006 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17007 tp->bufmgr_config.mbuf_high_water_jumbo =
17008 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17009 } else {
17010 tp->bufmgr_config.mbuf_read_dma_low_water =
17011 DEFAULT_MB_RDMA_LOW_WATER;
17012 tp->bufmgr_config.mbuf_mac_rx_low_water =
17013 DEFAULT_MB_MACRX_LOW_WATER;
17014 tp->bufmgr_config.mbuf_high_water =
17015 DEFAULT_MB_HIGH_WATER;
17016
17017 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17018 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17019 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17020 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17021 tp->bufmgr_config.mbuf_high_water_jumbo =
17022 DEFAULT_MB_HIGH_WATER_JUMBO;
17023 }
17024
17025 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17026 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17027 }
17028
17029 static char *tg3_phy_string(struct tg3 *tp)
17030 {
17031 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17032 case TG3_PHY_ID_BCM5400: return "5400";
17033 case TG3_PHY_ID_BCM5401: return "5401";
17034 case TG3_PHY_ID_BCM5411: return "5411";
17035 case TG3_PHY_ID_BCM5701: return "5701";
17036 case TG3_PHY_ID_BCM5703: return "5703";
17037 case TG3_PHY_ID_BCM5704: return "5704";
17038 case TG3_PHY_ID_BCM5705: return "5705";
17039 case TG3_PHY_ID_BCM5750: return "5750";
17040 case TG3_PHY_ID_BCM5752: return "5752";
17041 case TG3_PHY_ID_BCM5714: return "5714";
17042 case TG3_PHY_ID_BCM5780: return "5780";
17043 case TG3_PHY_ID_BCM5755: return "5755";
17044 case TG3_PHY_ID_BCM5787: return "5787";
17045 case TG3_PHY_ID_BCM5784: return "5784";
17046 case TG3_PHY_ID_BCM5756: return "5722/5756";
17047 case TG3_PHY_ID_BCM5906: return "5906";
17048 case TG3_PHY_ID_BCM5761: return "5761";
17049 case TG3_PHY_ID_BCM5718C: return "5718C";
17050 case TG3_PHY_ID_BCM5718S: return "5718S";
17051 case TG3_PHY_ID_BCM57765: return "57765";
17052 case TG3_PHY_ID_BCM5719C: return "5719C";
17053 case TG3_PHY_ID_BCM5720C: return "5720C";
17054 case TG3_PHY_ID_BCM5762: return "5762C";
17055 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17056 case 0: return "serdes";
17057 default: return "unknown";
17058 }
17059 }
17060
17061 static char *tg3_bus_string(struct tg3 *tp, char *str)
17062 {
17063 if (tg3_flag(tp, PCI_EXPRESS)) {
17064 strcpy(str, "PCI Express");
17065 return str;
17066 } else if (tg3_flag(tp, PCIX_MODE)) {
17067 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17068
17069 strcpy(str, "PCIX:");
17070
17071 if ((clock_ctrl == 7) ||
17072 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17073 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17074 strcat(str, "133MHz");
17075 else if (clock_ctrl == 0)
17076 strcat(str, "33MHz");
17077 else if (clock_ctrl == 2)
17078 strcat(str, "50MHz");
17079 else if (clock_ctrl == 4)
17080 strcat(str, "66MHz");
17081 else if (clock_ctrl == 6)
17082 strcat(str, "100MHz");
17083 } else {
17084 strcpy(str, "PCI:");
17085 if (tg3_flag(tp, PCI_HIGH_SPEED))
17086 strcat(str, "66MHz");
17087 else
17088 strcat(str, "33MHz");
17089 }
17090 if (tg3_flag(tp, PCI_32BIT))
17091 strcat(str, ":32-bit");
17092 else
17093 strcat(str, ":64-bit");
17094 return str;
17095 }
17096
17097 static void tg3_init_coal(struct tg3 *tp)
17098 {
17099 struct ethtool_coalesce *ec = &tp->coal;
17100
17101 memset(ec, 0, sizeof(*ec));
17102 ec->cmd = ETHTOOL_GCOALESCE;
17103 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17104 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17105 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17106 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17107 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17108 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17109 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17110 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17111 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17112
17113 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17114 HOSTCC_MODE_CLRTICK_TXBD)) {
17115 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17116 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17117 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17118 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17119 }
17120
17121 if (tg3_flag(tp, 5705_PLUS)) {
17122 ec->rx_coalesce_usecs_irq = 0;
17123 ec->tx_coalesce_usecs_irq = 0;
17124 ec->stats_block_coalesce_usecs = 0;
17125 }
17126 }
17127
17128 static int tg3_init_one(struct pci_dev *pdev,
17129 const struct pci_device_id *ent)
17130 {
17131 struct net_device *dev;
17132 struct tg3 *tp;
17133 int i, err, pm_cap;
17134 u32 sndmbx, rcvmbx, intmbx;
17135 char str[40];
17136 u64 dma_mask, persist_dma_mask;
17137 netdev_features_t features = 0;
17138
17139 printk_once(KERN_INFO "%s\n", version);
17140
17141 err = pci_enable_device(pdev);
17142 if (err) {
17143 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17144 return err;
17145 }
17146
17147 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17148 if (err) {
17149 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17150 goto err_out_disable_pdev;
17151 }
17152
17153 pci_set_master(pdev);
17154
17155 /* Find power-management capability. */
17156 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17157 if (pm_cap == 0) {
17158 dev_err(&pdev->dev,
17159 "Cannot find Power Management capability, aborting\n");
17160 err = -EIO;
17161 goto err_out_free_res;
17162 }
17163
17164 err = pci_set_power_state(pdev, PCI_D0);
17165 if (err) {
17166 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17167 goto err_out_free_res;
17168 }
17169
17170 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17171 if (!dev) {
17172 err = -ENOMEM;
17173 goto err_out_power_down;
17174 }
17175
17176 SET_NETDEV_DEV(dev, &pdev->dev);
17177
17178 tp = netdev_priv(dev);
17179 tp->pdev = pdev;
17180 tp->dev = dev;
17181 tp->pm_cap = pm_cap;
17182 tp->rx_mode = TG3_DEF_RX_MODE;
17183 tp->tx_mode = TG3_DEF_TX_MODE;
17184 tp->irq_sync = 1;
17185
17186 if (tg3_debug > 0)
17187 tp->msg_enable = tg3_debug;
17188 else
17189 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17190
17191 if (pdev_is_ssb_gige_core(pdev)) {
17192 tg3_flag_set(tp, IS_SSB_CORE);
17193 if (ssb_gige_must_flush_posted_writes(pdev))
17194 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17195 if (ssb_gige_one_dma_at_once(pdev))
17196 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17197 if (ssb_gige_have_roboswitch(pdev))
17198 tg3_flag_set(tp, ROBOSWITCH);
17199 if (ssb_gige_is_rgmii(pdev))
17200 tg3_flag_set(tp, RGMII_MODE);
17201 }
17202
17203 /* The word/byte swap controls here control register access byte
17204 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17205 * setting below.
17206 */
17207 tp->misc_host_ctrl =
17208 MISC_HOST_CTRL_MASK_PCI_INT |
17209 MISC_HOST_CTRL_WORD_SWAP |
17210 MISC_HOST_CTRL_INDIR_ACCESS |
17211 MISC_HOST_CTRL_PCISTATE_RW;
17212
17213 /* The NONFRM (non-frame) byte/word swap controls take effect
17214 * on descriptor entries, anything which isn't packet data.
17215 *
17216 * The StrongARM chips on the board (one for tx, one for rx)
17217 * are running in big-endian mode.
17218 */
17219 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17220 GRC_MODE_WSWAP_NONFRM_DATA);
17221 #ifdef __BIG_ENDIAN
17222 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17223 #endif
17224 spin_lock_init(&tp->lock);
17225 spin_lock_init(&tp->indirect_lock);
17226 INIT_WORK(&tp->reset_task, tg3_reset_task);
17227
17228 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17229 if (!tp->regs) {
17230 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17231 err = -ENOMEM;
17232 goto err_out_free_dev;
17233 }
17234
17235 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17236 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17237 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17238 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17239 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17240 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17241 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17242 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17243 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17244 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17245 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17246 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17247 tg3_flag_set(tp, ENABLE_APE);
17248 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17249 if (!tp->aperegs) {
17250 dev_err(&pdev->dev,
17251 "Cannot map APE registers, aborting\n");
17252 err = -ENOMEM;
17253 goto err_out_iounmap;
17254 }
17255 }
17256
17257 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17258 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17259
17260 dev->ethtool_ops = &tg3_ethtool_ops;
17261 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17262 dev->netdev_ops = &tg3_netdev_ops;
17263 dev->irq = pdev->irq;
17264
17265 err = tg3_get_invariants(tp, ent);
17266 if (err) {
17267 dev_err(&pdev->dev,
17268 "Problem fetching invariants of chip, aborting\n");
17269 goto err_out_apeunmap;
17270 }
17271
17272 /* The EPB bridge inside 5714, 5715, and 5780 and any
17273 * device behind the EPB cannot support DMA addresses > 40-bit.
17274 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17275 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17276 * do DMA address check in tg3_start_xmit().
17277 */
17278 if (tg3_flag(tp, IS_5788))
17279 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17280 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17281 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17282 #ifdef CONFIG_HIGHMEM
17283 dma_mask = DMA_BIT_MASK(64);
17284 #endif
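		/* Streaming buffers above 40 bits are caught by the
		 * address checks in tg3_start_xmit(), but coherent
		 * allocations cannot be remapped later, so
		 * persist_dma_mask stays at 40 bits here.
		 */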
17285 } else
17286 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17287
17288 /* Configure DMA attributes. */
17289 if (dma_mask > DMA_BIT_MASK(32)) {
17290 err = pci_set_dma_mask(pdev, dma_mask);
17291 if (!err) {
17292 features |= NETIF_F_HIGHDMA;
17293 err = pci_set_consistent_dma_mask(pdev,
17294 persist_dma_mask);
17295 if (err < 0) {
17296 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17297 "DMA for consistent allocations\n");
17298 goto err_out_apeunmap;
17299 }
17300 }
17301 }
17302 if (err || dma_mask == DMA_BIT_MASK(32)) {
17303 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17304 if (err) {
17305 dev_err(&pdev->dev,
17306 "No usable DMA configuration, aborting\n");
17307 goto err_out_apeunmap;
17308 }
17309 }
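
	/* Aside, for illustration only (not part of this driver): on later
	 * kernels the same negotiate-then-fall-back pattern collapses into
	 * the consolidated DMA API.  A minimal sketch, assuming a generic
	 * struct pci_dev *pdev:
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
	 *		return -EIO;	// no usable DMA configuration
	 */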

	tg3_init_bufmgr_config(tp);

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
		    tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
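
	/* Note: where firmware TSO is left off by default, it can be toggled
	 * from userspace with ethtool, e.g. (interface name hypothetical):
	 *
	 *	ethtool -K eth0 tso on
	 *	ethtool -k eth0 | grep segmentation	# verify the setting
	 */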

	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
	 * INT-PHY loopback for the remaining devices.
	 */
	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset the chip in case the UNDI or EFI driver did not shut down;
	 * the DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we set
		 * up above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}
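
	/* To make the mailbox zig-zag above concrete: assuming, purely for
	 * illustration, a producer mailbox base of 0x308, successive vectors
	 * see:
	 *
	 *	i:       0      1      2      3      4
	 *	sndmbx:  0x308  0x308  0x314  0x310  0x31c
	 *
	 * Vectors 0 and 1 deliberately share mailboxes: under MSI-X/RSS the
	 * first vector handles only link interrupts, so the single-vector
	 * defaults are reused by the first rx/tx vector.
	 */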

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, PTP_CAPABLE);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	tg3_timer_init(tp);

	tg3_carrier_off(tp);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	pci_save_state(pdev);

	return 0;

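	/* Error unwind: each label below undoes one acquisition and falls
	 * through to the next, so a goto from any failure point releases
	 * everything obtained up to that point, in reverse order.
	 */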
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
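
/* For reference: SIMPLE_DEV_PM_OPS above expands (roughly, and only when
 * CONFIG_PM_SLEEP is defined) to a dev_pm_ops routing every system-sleep
 * transition through the two callbacks:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend  = tg3_suspend, .resume  = tg3_resume,
 *		.freeze   = tg3_suspend, .thaw    = tg3_resume,
 *		.poweroff = tg3_suspend, .restore = tg3_resume,
 *	};
 */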

/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
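	/* Restore the config space captured by pci_save_state() at probe
	 * time, then re-save it so a later slot reset starts from the same
	 * known-good state.
	 */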
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
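
/* Recovery flow for the handlers above: on an uncorrectable PCI error the
 * error-recovery core first calls error_detected() (choose reset vs.
 * disconnect), then, once the link has been reset, slot_reset() to
 * re-enable and restore the device, and finally resume() when traffic may
 * flow again.
 */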

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
};

static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);
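
/* Usage note (illustrative): the driver normally binds automatically to
 * NICs matching tg3_pci_tbl; when built as a module it can also be loaded
 * by hand, e.g. "modprobe tg3".
 */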