tg3: Don't check undefined error bits in RXBD
1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
16 */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50
51 #include <net/checksum.h>
52 #include <net/ip.h>
53
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65
66 #define BAR_0 0
67 #define BAR_2 2
68
69 #include "tg3.h"
70
71 /* Functions & macros to verify TG3_FLAGS types */
72
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 return test_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 set_bit(flag, bits);
81 }
82
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 clear_bit(flag, bits);
86 }
87
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
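
/* Example (illustrative): tg3_flag(tp, ENABLE_APE) pastes the flag name
 * onto the TG3_FLAG_ prefix, expanding to
 *
 *	_tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags)
 *
 * i.e. an atomic test_bit() on the adapter's flag bitmap, so flags can
 * be tested without extra locking.
 */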
94
95 #define DRV_MODULE_NAME "tg3"
96 #define TG3_MAJ_NUM 3
97 #define TG3_MIN_NUM 132
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "May 21, 2013"
101
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
105
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
109 (NETIF_MSG_DRV | \
110 NETIF_MSG_PROBE | \
111 NETIF_MSG_LINK | \
112 NETIF_MSG_TIMER | \
113 NETIF_MSG_IFDOWN | \
114 NETIF_MSG_IFUP | \
115 NETIF_MSG_RX_ERR | \
116 NETIF_MSG_TX_ERR)
117
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
119
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
122 */
123
124 #define TG3_TX_TIMEOUT (5 * HZ)
125
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
134 */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
143
144 /* Do not place this n-ring entries value into the tp struct itself;
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)', as the sketch below shows.
149 */
150
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
153
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
161 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
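
/* Illustrative sketch, not part of the original driver: because
 * TG3_TX_RING_SIZE is a power of two, the '& (foo - 1)' trick mentioned
 * above is exactly what NEXT_TX() computes, and it matches a modulo.
 * tg3_next_tx_equiv() is a hypothetical helper for demonstration only.
 */
static inline u32 tg3_next_tx_equiv(u32 n)
{
	/* Guard the assumption that the ring size is a power of two. */
	BUILD_BUG_ON(TG3_TX_RING_SIZE & (TG3_TX_RING_SIZE - 1));
	return (n + 1) % TG3_TX_RING_SIZE;	/* == NEXT_TX(n) */
}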
163
164 #define TG3_DMA_BYTE_ENAB 64
165
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
168
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
170
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
194 #else
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #endif
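
/* Illustrative sketch, not the driver's actual receive path: it only
 * demonstrates the copy-threshold policy described above.  A packet at
 * or below the threshold is copied into a freshly allocated buffer so
 * the mapped DMA buffer can be reposted unchanged; larger packets are
 * handed up in place.  tg3_rx_would_copy() is a hypothetical name.
 */
static inline bool tg3_rx_would_copy(struct tg3 *tp, unsigned int len)
{
	return len <= TG3_RX_COPY_THRESH(tp);
}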
197
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 #endif
203
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
208
209 #define TG3_RAW_IP_ALIGN 2
210
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
218
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
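
/* Example (illustrative): "modprobe tg3 tg3_debug=7" enables the
 * NETIF_MSG_DRV (0x01), NETIF_MSG_PROBE (0x02) and NETIF_MSG_LINK
 * (0x04) classes only; the default of -1 selects TG3_DEF_MSG_ENABLE.
 */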
233
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
236
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348 {}
349 };
350
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356 { "rx_octets" },
357 { "rx_fragments" },
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
361 { "rx_fcs_errors" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
368 { "rx_jabbers" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
382
383 { "tx_octets" },
384 { "tx_collisions" },
385
386 { "tx_xon_sent" },
387 { "tx_xoff_sent" },
388 { "tx_flow_control" },
389 { "tx_mac_errors" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
392 { "tx_deferred" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
413 { "tx_discards" },
414 { "tx_errors" },
415
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
418 { "rxbds_empty" },
419 { "rx_discards" },
420 { "rx_errors" },
421 { "rx_threshold_hit" },
422
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
426
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
429 { "nic_irqs" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
432
433 { "mbuf_lwm_thresh_hit" },
434 };
435
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
445
446
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
458 };
459
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
461
462
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465 writel(val, tp->regs + off);
466 }
467
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470 return readl(tp->regs + off);
471 }
472
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475 writel(val, tp->aperegs + off);
476 }
477
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480 return readl(tp->aperegs + off);
481 }
482
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485 unsigned long flags;
486
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
497 }
498
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501 unsigned long flags;
502 u32 val;
503
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 return val;
509 }
510
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
512 {
513 unsigned long flags;
514
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
518 return;
519 }
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
523 return;
524 }
525
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
530
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
533 */
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 (val == 0x1)) {
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
538 }
539 }
540
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543 unsigned long flags;
544 u32 val;
545
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 return val;
551 }
552
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
557 */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
563 else {
564 /* Posted method */
565 tg3_write32(tp, off, val);
566 if (usec_wait)
567 udelay(usec_wait);
568 tp->read32(tp, off);
569 }
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
572 */
573 if (usec_wait)
574 udelay(usec_wait);
575 }
576
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
584 }
585
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 {
588 void __iomem *mbox = tp->regs + off;
589 writel(val, mbox);
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 writel(val, mbox);
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
594 readl(mbox);
595 }
596
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599 return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604 writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
612
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
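
/* Example usage (illustrative): toggling a GPIO through GRC_LOCAL_CTRL
 * is one of the cases the comment above _tw32_flush() describes; the
 * write is flushed, then given time to settle before the register is
 * touched again:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL,
 *		    tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */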
617
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
619 {
620 unsigned long flags;
621
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
624 return;
625
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 } else {
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
636
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 }
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
641 }
642
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
644 {
645 unsigned long flags;
646
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
649 *val = 0;
650 return;
651 }
652
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 } else {
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
663
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 }
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
668 }
669
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672 int i;
673 u32 regbase, bit;
674
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
677 else
678 regbase = TG3_APE_PER_LOCK_GRANT;
679
680 /* Make sure the driver isn't holding any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 switch (i) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
688 break;
689 default:
690 if (!tp->pci_fn)
691 bit = APE_LOCK_GRANT_DRIVER;
692 else
693 bit = 1 << tp->pci_fn;
694 }
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
696 }
697
698 }
699
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
701 {
702 int i, off;
703 int ret = 0;
704 u32 status, req, gnt, bit;
705
706 if (!tg3_flag(tp, ENABLE_APE))
707 return 0;
708
709 switch (locknum) {
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
712 return 0; /* else: fall through */
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
715 if (!tp->pci_fn)
716 bit = APE_LOCK_REQ_DRIVER;
717 else
718 bit = 1 << tp->pci_fn;
719 break;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
725 break;
726 default:
727 return -EINVAL;
728 }
729
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
733 } else {
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
736 }
737
738 off = 4 * locknum;
739
740 tg3_ape_write32(tp, req + off, bit);
741
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
745 if (status == bit)
746 break;
747 if (pci_channel_offline(tp->pdev))
748 break;
749
750 udelay(10);
751 }
752
753 if (status != bit) {
754 /* Revoke the lock request. */
755 tg3_ape_write32(tp, gnt + off, bit);
756 ret = -EBUSY;
757 }
758
759 return ret;
760 }
761
762 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 {
764 u32 gnt, bit;
765
766 if (!tg3_flag(tp, ENABLE_APE))
767 return;
768
769 switch (locknum) {
770 case TG3_APE_LOCK_GPIO:
771 if (tg3_asic_rev(tp) == ASIC_REV_5761)
772 return; /* else: fall through */
773 case TG3_APE_LOCK_GRC:
774 case TG3_APE_LOCK_MEM:
775 if (!tp->pci_fn)
776 bit = APE_LOCK_GRANT_DRIVER;
777 else
778 bit = 1 << tp->pci_fn;
779 break;
780 case TG3_APE_LOCK_PHY0:
781 case TG3_APE_LOCK_PHY1:
782 case TG3_APE_LOCK_PHY2:
783 case TG3_APE_LOCK_PHY3:
784 bit = APE_LOCK_GRANT_DRIVER;
785 break;
786 default:
787 return;
788 }
789
790 if (tg3_asic_rev(tp) == ASIC_REV_5761)
791 gnt = TG3_APE_LOCK_GRANT;
792 else
793 gnt = TG3_APE_PER_LOCK_GRANT;
794
795 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
796 }
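
/* Typical usage of the lock pair above (illustrative sketch):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... access APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_event_lock() below wraps exactly this pattern in a retry
 * loop.
 */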
797
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 {
800 u32 apedata;
801
802 while (timeout_us) {
803 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
804 return -EBUSY;
805
806 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
808 break;
809
810 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
811
812 udelay(10);
813 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
814 }
815
816 return timeout_us ? 0 : -EBUSY;
817 }
818
819 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 {
821 u32 i, apedata;
822
823 for (i = 0; i < timeout_us / 10; i++) {
824 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825
826 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
827 break;
828
829 udelay(10);
830 }
831
832 return i == timeout_us / 10;
833 }
834
835 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 len)
837 {
838 int err;
839 u32 i, bufoff, msgoff, maxlen, apedata;
840
841 if (!tg3_flag(tp, APE_HAS_NCSI))
842 return 0;
843
844 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
845 if (apedata != APE_SEG_SIG_MAGIC)
846 return -ENODEV;
847
848 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
849 if (!(apedata & APE_FW_STATUS_READY))
850 return -EAGAIN;
851
852 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
853 TG3_APE_SHMEM_BASE;
854 msgoff = bufoff + 2 * sizeof(u32);
855 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
856
857 while (len) {
858 u32 length;
859
860 /* Cap xfer sizes to scratchpad limits. */
861 length = (len > maxlen) ? maxlen : len;
862 len -= length;
863
864 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 if (!(apedata & APE_FW_STATUS_READY))
866 return -EAGAIN;
867
868 /* Wait for up to 1 msec for APE to service previous event. */
869 err = tg3_ape_event_lock(tp, 1000);
870 if (err)
871 return err;
872
873 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
874 APE_EVENT_STATUS_SCRTCHPD_READ |
875 APE_EVENT_STATUS_EVENT_PENDING;
876 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
877
878 tg3_ape_write32(tp, bufoff, base_off);
879 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
880
881 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
882 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883
884 base_off += length;
885
886 if (tg3_ape_wait_for_event(tp, 30000))
887 return -EAGAIN;
888
889 for (i = 0; length; i += 4, length -= 4) {
890 u32 val = tg3_ape_read32(tp, msgoff + i);
891 memcpy(data, &val, sizeof(u32));
892 data++;
893 }
894 }
895
896 return 0;
897 }
898
899 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
900 {
901 int err;
902 u32 apedata;
903
904 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
905 if (apedata != APE_SEG_SIG_MAGIC)
906 return -EAGAIN;
907
908 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
909 if (!(apedata & APE_FW_STATUS_READY))
910 return -EAGAIN;
911
912 /* Wait for up to 1 millisecond for APE to service previous event. */
913 err = tg3_ape_event_lock(tp, 1000);
914 if (err)
915 return err;
916
917 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
918 event | APE_EVENT_STATUS_EVENT_PENDING);
919
920 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
921 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
922
923 return 0;
924 }
925
926 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
927 {
928 u32 event;
929 u32 apedata;
930
931 if (!tg3_flag(tp, ENABLE_APE))
932 return;
933
934 switch (kind) {
935 case RESET_KIND_INIT:
936 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
937 APE_HOST_SEG_SIG_MAGIC);
938 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
939 APE_HOST_SEG_LEN_MAGIC);
940 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
941 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
942 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
943 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
944 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
945 APE_HOST_BEHAV_NO_PHYLOCK);
946 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
947 TG3_APE_HOST_DRVR_STATE_START);
948
949 event = APE_EVENT_STATUS_STATE_START;
950 break;
951 case RESET_KIND_SHUTDOWN:
952 /* With the interface we are currently using,
953 * APE does not track driver state. Wiping
954 * out the HOST SEGMENT SIGNATURE forces
955 * the APE to assume OS absent status.
956 */
957 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
958
959 if (device_may_wakeup(&tp->pdev->dev) &&
960 tg3_flag(tp, WOL_ENABLE)) {
961 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
962 TG3_APE_HOST_WOL_SPEED_AUTO);
963 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
964 } else
965 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
966
967 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
968
969 event = APE_EVENT_STATUS_STATE_UNLOAD;
970 break;
971 case RESET_KIND_SUSPEND:
972 event = APE_EVENT_STATUS_STATE_SUSPEND;
973 break;
974 default:
975 return;
976 }
977
978 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
979
980 tg3_ape_send_event(tp, event);
981 }
982
983 static void tg3_disable_ints(struct tg3 *tp)
984 {
985 int i;
986
987 tw32(TG3PCI_MISC_HOST_CTRL,
988 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
989 for (i = 0; i < tp->irq_max; i++)
990 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
991 }
992
993 static void tg3_enable_ints(struct tg3 *tp)
994 {
995 int i;
996
997 tp->irq_sync = 0;
998 wmb();
999
1000 tw32(TG3PCI_MISC_HOST_CTRL,
1001 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1002
1003 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1004 for (i = 0; i < tp->irq_cnt; i++) {
1005 struct tg3_napi *tnapi = &tp->napi[i];
1006
1007 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 if (tg3_flag(tp, 1SHOT_MSI))
1009 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1010
1011 tp->coal_now |= tnapi->coal_now;
1012 }
1013
1014 /* Force an initial interrupt */
1015 if (!tg3_flag(tp, TAGGED_STATUS) &&
1016 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1017 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1018 else
1019 tw32(HOSTCC_MODE, tp->coal_now);
1020
1021 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1022 }
1023
1024 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1025 {
1026 struct tg3 *tp = tnapi->tp;
1027 struct tg3_hw_status *sblk = tnapi->hw_status;
1028 unsigned int work_exists = 0;
1029
1030 /* check for phy events */
1031 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1032 if (sblk->status & SD_STATUS_LINK_CHG)
1033 work_exists = 1;
1034 }
1035
1036 /* check for TX work to do */
1037 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1038 work_exists = 1;
1039
1040 /* check for RX work to do */
1041 if (tnapi->rx_rcb_prod_idx &&
1042 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1043 work_exists = 1;
1044
1045 return work_exists;
1046 }
1047
1048 /* tg3_int_reenable
1049 * similar to tg3_enable_ints, but it accurately determines whether there
1050 * is new work pending and can return without flushing the PIO write
1051 * which reenables interrupts
1052 */
1053 static void tg3_int_reenable(struct tg3_napi *tnapi)
1054 {
1055 struct tg3 *tp = tnapi->tp;
1056
1057 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1058 mmiowb();
1059
1060 /* When doing tagged status, this work check is unnecessary.
1061 * The last_tag we write above tells the chip which piece of
1062 * work we've completed.
1063 */
1064 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1065 tw32(HOSTCC_MODE, tp->coalesce_mode |
1066 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1067 }
1068
1069 static void tg3_switch_clocks(struct tg3 *tp)
1070 {
1071 u32 clock_ctrl;
1072 u32 orig_clock_ctrl;
1073
1074 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1075 return;
1076
1077 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1078
1079 orig_clock_ctrl = clock_ctrl;
1080 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1081 CLOCK_CTRL_CLKRUN_OENABLE |
1082 0x1f);
1083 tp->pci_clock_ctrl = clock_ctrl;
1084
1085 if (tg3_flag(tp, 5705_PLUS)) {
1086 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1088 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1089 }
1090 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1092 clock_ctrl |
1093 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1094 40);
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1096 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1097 40);
1098 }
1099 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1100 }
1101
1102 #define PHY_BUSY_LOOPS 5000
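
/* With PHY_BUSY_LOOPS iterations of udelay(10) in the helpers below,
 * each MDIO transaction is bounded to roughly 5000 * 10 usec = 50 msec
 * before -EBUSY is returned (illustrative arithmetic).
 */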
1103
1104 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1105 u32 *val)
1106 {
1107 u32 frame_val;
1108 unsigned int loops;
1109 int ret;
1110
1111 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1112 tw32_f(MAC_MI_MODE,
1113 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1114 udelay(80);
1115 }
1116
1117 tg3_ape_lock(tp, tp->phy_ape_lock);
1118
1119 *val = 0x0;
1120
1121 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1122 MI_COM_PHY_ADDR_MASK);
1123 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1124 MI_COM_REG_ADDR_MASK);
1125 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1126
1127 tw32_f(MAC_MI_COM, frame_val);
1128
1129 loops = PHY_BUSY_LOOPS;
1130 while (loops != 0) {
1131 udelay(10);
1132 frame_val = tr32(MAC_MI_COM);
1133
1134 if ((frame_val & MI_COM_BUSY) == 0) {
1135 udelay(5);
1136 frame_val = tr32(MAC_MI_COM);
1137 break;
1138 }
1139 loops -= 1;
1140 }
1141
1142 ret = -EBUSY;
1143 if (loops != 0) {
1144 *val = frame_val & MI_COM_DATA_MASK;
1145 ret = 0;
1146 }
1147
1148 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1149 tw32_f(MAC_MI_MODE, tp->mi_mode);
1150 udelay(80);
1151 }
1152
1153 tg3_ape_unlock(tp, tp->phy_ape_lock);
1154
1155 return ret;
1156 }
1157
1158 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1159 {
1160 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1161 }
1162
1163 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1164 u32 val)
1165 {
1166 u32 frame_val;
1167 unsigned int loops;
1168 int ret;
1169
1170 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1171 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1172 return 0;
1173
1174 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1175 tw32_f(MAC_MI_MODE,
1176 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1177 udelay(80);
1178 }
1179
1180 tg3_ape_lock(tp, tp->phy_ape_lock);
1181
1182 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1183 MI_COM_PHY_ADDR_MASK);
1184 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1185 MI_COM_REG_ADDR_MASK);
1186 frame_val |= (val & MI_COM_DATA_MASK);
1187 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1188
1189 tw32_f(MAC_MI_COM, frame_val);
1190
1191 loops = PHY_BUSY_LOOPS;
1192 while (loops != 0) {
1193 udelay(10);
1194 frame_val = tr32(MAC_MI_COM);
1195 if ((frame_val & MI_COM_BUSY) == 0) {
1196 udelay(5);
1197 frame_val = tr32(MAC_MI_COM);
1198 break;
1199 }
1200 loops -= 1;
1201 }
1202
1203 ret = -EBUSY;
1204 if (loops != 0)
1205 ret = 0;
1206
1207 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1208 tw32_f(MAC_MI_MODE, tp->mi_mode);
1209 udelay(80);
1210 }
1211
1212 tg3_ape_unlock(tp, tp->phy_ape_lock);
1213
1214 return ret;
1215 }
1216
1217 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1218 {
1219 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1220 }
1221
1222 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1223 {
1224 int err;
1225
1226 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1227 if (err)
1228 goto done;
1229
1230 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1231 if (err)
1232 goto done;
1233
1234 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1235 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1236 if (err)
1237 goto done;
1238
1239 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1240
1241 done:
1242 return err;
1243 }
1244
1245 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1246 {
1247 int err;
1248
1249 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1250 if (err)
1251 goto done;
1252
1253 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1254 if (err)
1255 goto done;
1256
1257 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1259 if (err)
1260 goto done;
1261
1262 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1263
1264 done:
1265 return err;
1266 }
1267
1268 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1269 {
1270 int err;
1271
1272 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1273 if (!err)
1274 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1275
1276 return err;
1277 }
1278
1279 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1280 {
1281 int err;
1282
1283 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1284 if (!err)
1285 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1286
1287 return err;
1288 }
1289
1290 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1291 {
1292 int err;
1293
1294 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1295 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1296 MII_TG3_AUXCTL_SHDWSEL_MISC);
1297 if (!err)
1298 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1299
1300 return err;
1301 }
1302
1303 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1304 {
1305 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1306 set |= MII_TG3_AUXCTL_MISC_WREN;
1307
1308 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1309 }
1310
1311 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1312 {
1313 u32 val;
1314 int err;
1315
1316 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1317
1318 if (err)
1319 return err;
1320
1321 if (enable)
1322 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1323 else
1324 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1325
1326 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1327 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1328
1329 return err;
1330 }
1331
1332 static int tg3_bmcr_reset(struct tg3 *tp)
1333 {
1334 u32 phy_control;
1335 int limit, err;
1336
1337 /* OK, reset it, and poll the BMCR_RESET bit until it
1338 * clears or we time out.
1339 */
1340 phy_control = BMCR_RESET;
1341 err = tg3_writephy(tp, MII_BMCR, phy_control);
1342 if (err != 0)
1343 return -EBUSY;
1344
1345 limit = 5000;
1346 while (limit--) {
1347 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348 if (err != 0)
1349 return -EBUSY;
1350
1351 if ((phy_control & BMCR_RESET) == 0) {
1352 udelay(40);
1353 break;
1354 }
1355 udelay(10);
1356 }
1357 if (limit < 0)
1358 return -EBUSY;
1359
1360 return 0;
1361 }
1362
1363 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1364 {
1365 struct tg3 *tp = bp->priv;
1366 u32 val;
1367
1368 spin_lock_bh(&tp->lock);
1369
1370 if (tg3_readphy(tp, reg, &val))
1371 val = -EIO;
1372
1373 spin_unlock_bh(&tp->lock);
1374
1375 return val;
1376 }
1377
1378 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1379 {
1380 struct tg3 *tp = bp->priv;
1381 u32 ret = 0;
1382
1383 spin_lock_bh(&tp->lock);
1384
1385 if (tg3_writephy(tp, reg, val))
1386 ret = -EIO;
1387
1388 spin_unlock_bh(&tp->lock);
1389
1390 return ret;
1391 }
1392
1393 static int tg3_mdio_reset(struct mii_bus *bp)
1394 {
1395 return 0;
1396 }
1397
1398 static void tg3_mdio_config_5785(struct tg3 *tp)
1399 {
1400 u32 val;
1401 struct phy_device *phydev;
1402
1403 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1404 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1405 case PHY_ID_BCM50610:
1406 case PHY_ID_BCM50610M:
1407 val = MAC_PHYCFG2_50610_LED_MODES;
1408 break;
1409 case PHY_ID_BCMAC131:
1410 val = MAC_PHYCFG2_AC131_LED_MODES;
1411 break;
1412 case PHY_ID_RTL8211C:
1413 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1414 break;
1415 case PHY_ID_RTL8201E:
1416 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1417 break;
1418 default:
1419 return;
1420 }
1421
1422 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1423 tw32(MAC_PHYCFG2, val);
1424
1425 val = tr32(MAC_PHYCFG1);
1426 val &= ~(MAC_PHYCFG1_RGMII_INT |
1427 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1428 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1429 tw32(MAC_PHYCFG1, val);
1430
1431 return;
1432 }
1433
1434 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1435 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1436 MAC_PHYCFG2_FMODE_MASK_MASK |
1437 MAC_PHYCFG2_GMODE_MASK_MASK |
1438 MAC_PHYCFG2_ACT_MASK_MASK |
1439 MAC_PHYCFG2_QUAL_MASK_MASK |
1440 MAC_PHYCFG2_INBAND_ENABLE;
1441
1442 tw32(MAC_PHYCFG2, val);
1443
1444 val = tr32(MAC_PHYCFG1);
1445 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1446 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1447 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1448 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1449 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1450 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1451 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1452 }
1453 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1454 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1455 tw32(MAC_PHYCFG1, val);
1456
1457 val = tr32(MAC_EXT_RGMII_MODE);
1458 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1459 MAC_RGMII_MODE_RX_QUALITY |
1460 MAC_RGMII_MODE_RX_ACTIVITY |
1461 MAC_RGMII_MODE_RX_ENG_DET |
1462 MAC_RGMII_MODE_TX_ENABLE |
1463 MAC_RGMII_MODE_TX_LOWPWR |
1464 MAC_RGMII_MODE_TX_RESET);
1465 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467 val |= MAC_RGMII_MODE_RX_INT_B |
1468 MAC_RGMII_MODE_RX_QUALITY |
1469 MAC_RGMII_MODE_RX_ACTIVITY |
1470 MAC_RGMII_MODE_RX_ENG_DET;
1471 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472 val |= MAC_RGMII_MODE_TX_ENABLE |
1473 MAC_RGMII_MODE_TX_LOWPWR |
1474 MAC_RGMII_MODE_TX_RESET;
1475 }
1476 tw32(MAC_EXT_RGMII_MODE, val);
1477 }
1478
1479 static void tg3_mdio_start(struct tg3 *tp)
1480 {
1481 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1482 tw32_f(MAC_MI_MODE, tp->mi_mode);
1483 udelay(80);
1484
1485 if (tg3_flag(tp, MDIOBUS_INITED) &&
1486 tg3_asic_rev(tp) == ASIC_REV_5785)
1487 tg3_mdio_config_5785(tp);
1488 }
1489
1490 static int tg3_mdio_init(struct tg3 *tp)
1491 {
1492 int i;
1493 u32 reg;
1494 struct phy_device *phydev;
1495
1496 if (tg3_flag(tp, 5717_PLUS)) {
1497 u32 is_serdes;
1498
1499 tp->phy_addr = tp->pci_fn + 1;
1500
1501 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1503 else
1504 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505 TG3_CPMU_PHY_STRAP_IS_SERDES;
1506 if (is_serdes)
1507 tp->phy_addr += 7;
1508 } else
1509 tp->phy_addr = TG3_PHY_MII_ADDR;
1510
1511 tg3_mdio_start(tp);
1512
1513 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1514 return 0;
1515
1516 tp->mdio_bus = mdiobus_alloc();
1517 if (tp->mdio_bus == NULL)
1518 return -ENOMEM;
1519
1520 tp->mdio_bus->name = "tg3 mdio bus";
1521 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1522 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1523 tp->mdio_bus->priv = tp;
1524 tp->mdio_bus->parent = &tp->pdev->dev;
1525 tp->mdio_bus->read = &tg3_mdio_read;
1526 tp->mdio_bus->write = &tg3_mdio_write;
1527 tp->mdio_bus->reset = &tg3_mdio_reset;
1528 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1529 tp->mdio_bus->irq = &tp->mdio_irq[0];
1530
1531 for (i = 0; i < PHY_MAX_ADDR; i++)
1532 tp->mdio_bus->irq[i] = PHY_POLL;
1533
1534 /* The bus registration will look for all the PHYs on the mdio bus.
1535 * Unfortunately, it does not ensure the PHY is powered up before
1536 * accessing the PHY ID registers. A chip reset is the
1537 * quickest way to bring the device back to an operational state.
1538 */
1539 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1540 tg3_bmcr_reset(tp);
1541
1542 i = mdiobus_register(tp->mdio_bus);
1543 if (i) {
1544 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1545 mdiobus_free(tp->mdio_bus);
1546 return i;
1547 }
1548
1549 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1550
1551 if (!phydev || !phydev->drv) {
1552 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1553 mdiobus_unregister(tp->mdio_bus);
1554 mdiobus_free(tp->mdio_bus);
1555 return -ENODEV;
1556 }
1557
1558 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1559 case PHY_ID_BCM57780:
1560 phydev->interface = PHY_INTERFACE_MODE_GMII;
1561 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1562 break;
1563 case PHY_ID_BCM50610:
1564 case PHY_ID_BCM50610M:
1565 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1566 PHY_BRCM_RX_REFCLK_UNUSED |
1567 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1568 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1569 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1570 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1571 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1572 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1573 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1574 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1575 /* fallthru */
1576 case PHY_ID_RTL8211C:
1577 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1578 break;
1579 case PHY_ID_RTL8201E:
1580 case PHY_ID_BCMAC131:
1581 phydev->interface = PHY_INTERFACE_MODE_MII;
1582 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584 break;
1585 }
1586
1587 tg3_flag_set(tp, MDIOBUS_INITED);
1588
1589 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1590 tg3_mdio_config_5785(tp);
1591
1592 return 0;
1593 }
1594
1595 static void tg3_mdio_fini(struct tg3 *tp)
1596 {
1597 if (tg3_flag(tp, MDIOBUS_INITED)) {
1598 tg3_flag_clear(tp, MDIOBUS_INITED);
1599 mdiobus_unregister(tp->mdio_bus);
1600 mdiobus_free(tp->mdio_bus);
1601 }
1602 }
1603
1604 /* tp->lock is held. */
1605 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 {
1607 u32 val;
1608
1609 val = tr32(GRC_RX_CPU_EVENT);
1610 val |= GRC_RX_CPU_DRIVER_EVENT;
1611 tw32_f(GRC_RX_CPU_EVENT, val);
1612
1613 tp->last_event_jiffies = jiffies;
1614 }
1615
1616 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1617
1618 /* tp->lock is held. */
1619 static void tg3_wait_for_event_ack(struct tg3 *tp)
1620 {
1621 int i;
1622 unsigned int delay_cnt;
1623 long time_remain;
1624
1625 /* If enough time has passed, no wait is necessary. */
1626 time_remain = (long)(tp->last_event_jiffies + 1 +
1627 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1628 (long)jiffies;
1629 if (time_remain < 0)
1630 return;
1631
1632 /* Check if we can shorten the wait time. */
1633 delay_cnt = jiffies_to_usecs(time_remain);
1634 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1635 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1636 delay_cnt = (delay_cnt >> 3) + 1;
1637
1638 for (i = 0; i < delay_cnt; i++) {
1639 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1640 break;
1641 if (pci_channel_offline(tp->pdev))
1642 break;
1643
1644 udelay(8);
1645 }
1646 }
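
/* Worked example (illustrative): if 1000 usec of the 2500 usec event
 * window remain, delay_cnt becomes (1000 >> 3) + 1 = 126, and 126
 * iterations of udelay(8) wait about 1008 usec, just covering the
 * rest of the window.
 */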
1647
1648 /* tp->lock is held. */
1649 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1650 {
1651 u32 reg, val;
1652
1653 val = 0;
1654 if (!tg3_readphy(tp, MII_BMCR, &reg))
1655 val = reg << 16;
1656 if (!tg3_readphy(tp, MII_BMSR, &reg))
1657 val |= (reg & 0xffff);
1658 *data++ = val;
1659
1660 val = 0;
1661 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1662 val = reg << 16;
1663 if (!tg3_readphy(tp, MII_LPA, &reg))
1664 val |= (reg & 0xffff);
1665 *data++ = val;
1666
1667 val = 0;
1668 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1669 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1670 val = reg << 16;
1671 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1672 val |= (reg & 0xffff);
1673 }
1674 *data++ = val;
1675
1676 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1677 val = reg << 16;
1678 else
1679 val = 0;
1680 *data++ = val;
1681 }
1682
1683 /* tp->lock is held. */
1684 static void tg3_ump_link_report(struct tg3 *tp)
1685 {
1686 u32 data[4];
1687
1688 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1689 return;
1690
1691 tg3_phy_gather_ump_data(tp, data);
1692
1693 tg3_wait_for_event_ack(tp);
1694
1695 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1696 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1697 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1698 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1699 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1700 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1701
1702 tg3_generate_fw_event(tp);
1703 }
1704
1705 /* tp->lock is held. */
1706 static void tg3_stop_fw(struct tg3 *tp)
1707 {
1708 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1709 /* Wait for RX cpu to ACK the previous event. */
1710 tg3_wait_for_event_ack(tp);
1711
1712 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1713
1714 tg3_generate_fw_event(tp);
1715
1716 /* Wait for RX cpu to ACK this event. */
1717 tg3_wait_for_event_ack(tp);
1718 }
1719 }
1720
1721 /* tp->lock is held. */
1722 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1723 {
1724 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1725 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1726
1727 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1728 switch (kind) {
1729 case RESET_KIND_INIT:
1730 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1731 DRV_STATE_START);
1732 break;
1733
1734 case RESET_KIND_SHUTDOWN:
1735 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736 DRV_STATE_UNLOAD);
1737 break;
1738
1739 case RESET_KIND_SUSPEND:
1740 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 DRV_STATE_SUSPEND);
1742 break;
1743
1744 default:
1745 break;
1746 }
1747 }
1748
1749 if (kind == RESET_KIND_INIT ||
1750 kind == RESET_KIND_SUSPEND)
1751 tg3_ape_driver_state_change(tp, kind);
1752 }
1753
1754 /* tp->lock is held. */
1755 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1756 {
1757 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1758 switch (kind) {
1759 case RESET_KIND_INIT:
1760 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761 DRV_STATE_START_DONE);
1762 break;
1763
1764 case RESET_KIND_SHUTDOWN:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_UNLOAD_DONE);
1767 break;
1768
1769 default:
1770 break;
1771 }
1772 }
1773
1774 if (kind == RESET_KIND_SHUTDOWN)
1775 tg3_ape_driver_state_change(tp, kind);
1776 }
1777
1778 /* tp->lock is held. */
1779 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1780 {
1781 if (tg3_flag(tp, ENABLE_ASF)) {
1782 switch (kind) {
1783 case RESET_KIND_INIT:
1784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785 DRV_STATE_START);
1786 break;
1787
1788 case RESET_KIND_SHUTDOWN:
1789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790 DRV_STATE_UNLOAD);
1791 break;
1792
1793 case RESET_KIND_SUSPEND:
1794 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795 DRV_STATE_SUSPEND);
1796 break;
1797
1798 default:
1799 break;
1800 }
1801 }
1802 }
1803
1804 static int tg3_poll_fw(struct tg3 *tp)
1805 {
1806 int i;
1807 u32 val;
1808
1809 if (tg3_flag(tp, NO_FWARE_REPORTED))
1810 return 0;
1811
1812 if (tg3_flag(tp, IS_SSB_CORE)) {
1813 /* We don't use firmware. */
1814 return 0;
1815 }
1816
1817 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1818 /* Wait up to 20ms for init done. */
1819 for (i = 0; i < 200; i++) {
1820 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1821 return 0;
1822 if (pci_channel_offline(tp->pdev))
1823 return -ENODEV;
1824
1825 udelay(100);
1826 }
1827 return -ENODEV;
1828 }
1829
1830 /* Wait for firmware initialization to complete. */
1831 for (i = 0; i < 100000; i++) {
1832 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1833 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1834 break;
1835 if (pci_channel_offline(tp->pdev)) {
1836 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838 netdev_info(tp->dev, "No firmware running\n");
1839 }
1840
1841 break;
1842 }
1843
1844 udelay(10);
1845 }
1846
1847 /* Chip might not be fitted with firmware. Some Sun onboard
1848 * parts are configured like that. So don't signal the timeout
1849 * of the above loop as an error, but do report the lack of
1850 * running firmware once.
1851 */
1852 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1853 tg3_flag_set(tp, NO_FWARE_REPORTED);
1854
1855 netdev_info(tp->dev, "No firmware running\n");
1856 }
1857
1858 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1859 /* The 57765 A0 needs a little more
1860 * time to do some important work.
1861 */
1862 mdelay(10);
1863 }
1864
1865 return 0;
1866 }
1867
1868 static void tg3_link_report(struct tg3 *tp)
1869 {
1870 if (!netif_carrier_ok(tp->dev)) {
1871 netif_info(tp, link, tp->dev, "Link is down\n");
1872 tg3_ump_link_report(tp);
1873 } else if (netif_msg_link(tp)) {
1874 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1875 (tp->link_config.active_speed == SPEED_1000 ?
1876 1000 :
1877 (tp->link_config.active_speed == SPEED_100 ?
1878 100 : 10)),
1879 (tp->link_config.active_duplex == DUPLEX_FULL ?
1880 "full" : "half"));
1881
1882 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1883 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1884 "on" : "off",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1886 "on" : "off");
1887
1888 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1889 netdev_info(tp->dev, "EEE is %s\n",
1890 tp->setlpicnt ? "enabled" : "disabled");
1891
1892 tg3_ump_link_report(tp);
1893 }
1894
1895 tp->link_up = netif_carrier_ok(tp->dev);
1896 }
1897
1898 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1899 {
1900 u32 flowctrl = 0;
1901
1902 if (adv & ADVERTISE_PAUSE_CAP) {
1903 flowctrl |= FLOW_CTRL_RX;
1904 if (!(adv & ADVERTISE_PAUSE_ASYM))
1905 flowctrl |= FLOW_CTRL_TX;
1906 } else if (adv & ADVERTISE_PAUSE_ASYM)
1907 flowctrl |= FLOW_CTRL_TX;
1908
1909 return flowctrl;
1910 }
1911
1912 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1913 {
1914 u16 miireg;
1915
1916 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1917 miireg = ADVERTISE_1000XPAUSE;
1918 else if (flow_ctrl & FLOW_CTRL_TX)
1919 miireg = ADVERTISE_1000XPSE_ASYM;
1920 else if (flow_ctrl & FLOW_CTRL_RX)
1921 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1922 else
1923 miireg = 0;
1924
1925 return miireg;
1926 }
1927
1928 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1929 {
1930 u32 flowctrl = 0;
1931
1932 if (adv & ADVERTISE_1000XPAUSE) {
1933 flowctrl |= FLOW_CTRL_RX;
1934 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1935 flowctrl |= FLOW_CTRL_TX;
1936 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1937 flowctrl |= FLOW_CTRL_TX;
1938
1939 return flowctrl;
1940 }
1941
1942 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1943 {
1944 u8 cap = 0;
1945
1946 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1947 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1948 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1949 if (lcladv & ADVERTISE_1000XPAUSE)
1950 cap = FLOW_CTRL_RX;
1951 if (rmtadv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_TX;
1953 }
1954
1955 return cap;
1956 }
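
/* For reference, the 1000BASE-X pause resolution implemented above, as
 * described in IEEE 802.3 Annex 28B (lcl = local advertisement, rmt =
 * link partner advertisement):
 *
 *	lcl PAUSE  lcl ASYM  rmt PAUSE  rmt ASYM  =>  resolved cap
 *	    1         x          1         x          FLOW_CTRL_TX | FLOW_CTRL_RX
 *	    0         1          1         1          FLOW_CTRL_TX
 *	    1         1          0         1          FLOW_CTRL_RX
 *	    anything else                             0 (no pause)
 */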
1957
1958 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1959 {
1960 u8 autoneg;
1961 u8 flowctrl = 0;
1962 u32 old_rx_mode = tp->rx_mode;
1963 u32 old_tx_mode = tp->tx_mode;
1964
1965 if (tg3_flag(tp, USE_PHYLIB))
1966 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1967 else
1968 autoneg = tp->link_config.autoneg;
1969
1970 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1971 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1972 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1973 else
1974 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1975 } else
1976 flowctrl = tp->link_config.flowctrl;
1977
1978 tp->link_config.active_flowctrl = flowctrl;
1979
1980 if (flowctrl & FLOW_CTRL_RX)
1981 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1982 else
1983 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1984
1985 if (old_rx_mode != tp->rx_mode)
1986 tw32_f(MAC_RX_MODE, tp->rx_mode);
1987
1988 if (flowctrl & FLOW_CTRL_TX)
1989 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1990 else
1991 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1992
1993 if (old_tx_mode != tp->tx_mode)
1994 tw32_f(MAC_TX_MODE, tp->tx_mode);
1995 }
1996
1997 static void tg3_adjust_link(struct net_device *dev)
1998 {
1999 u8 oldflowctrl, linkmesg = 0;
2000 u32 mac_mode, lcl_adv, rmt_adv;
2001 struct tg3 *tp = netdev_priv(dev);
2002 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2003
2004 spin_lock_bh(&tp->lock);
2005
2006 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2007 MAC_MODE_HALF_DUPLEX);
2008
2009 oldflowctrl = tp->link_config.active_flowctrl;
2010
2011 if (phydev->link) {
2012 lcl_adv = 0;
2013 rmt_adv = 0;
2014
2015 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2016 mac_mode |= MAC_MODE_PORT_MODE_MII;
2017 else if (phydev->speed == SPEED_1000 ||
2018 tg3_asic_rev(tp) != ASIC_REV_5785)
2019 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2020 else
2021 mac_mode |= MAC_MODE_PORT_MODE_MII;
2022
2023 if (phydev->duplex == DUPLEX_HALF)
2024 mac_mode |= MAC_MODE_HALF_DUPLEX;
2025 else {
2026 lcl_adv = mii_advertise_flowctrl(
2027 tp->link_config.flowctrl);
2028
2029 if (phydev->pause)
2030 rmt_adv = LPA_PAUSE_CAP;
2031 if (phydev->asym_pause)
2032 rmt_adv |= LPA_PAUSE_ASYM;
2033 }
2034
2035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2036 } else
2037 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2038
2039 if (mac_mode != tp->mac_mode) {
2040 tp->mac_mode = mac_mode;
2041 tw32_f(MAC_MODE, tp->mac_mode);
2042 udelay(40);
2043 }
2044
2045 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2046 if (phydev->speed == SPEED_10)
2047 tw32(MAC_MI_STAT,
2048 MAC_MI_STAT_10MBPS_MODE |
2049 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2050 else
2051 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 }
2053
2054 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2055 tw32(MAC_TX_LENGTHS,
2056 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057 (6 << TX_LENGTHS_IPG_SHIFT) |
2058 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2059 else
2060 tw32(MAC_TX_LENGTHS,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 (6 << TX_LENGTHS_IPG_SHIFT) |
2063 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064
2065 if (phydev->link != tp->old_link ||
2066 phydev->speed != tp->link_config.active_speed ||
2067 phydev->duplex != tp->link_config.active_duplex ||
2068 oldflowctrl != tp->link_config.active_flowctrl)
2069 linkmesg = 1;
2070
2071 tp->old_link = phydev->link;
2072 tp->link_config.active_speed = phydev->speed;
2073 tp->link_config.active_duplex = phydev->duplex;
2074
2075 spin_unlock_bh(&tp->lock);
2076
2077 if (linkmesg)
2078 tg3_link_report(tp);
2079 }
2080
2081 static int tg3_phy_init(struct tg3 *tp)
2082 {
2083 struct phy_device *phydev;
2084
2085 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2086 return 0;
2087
2088 /* Bring the PHY back to a known state. */
2089 tg3_bmcr_reset(tp);
2090
2091 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2092
2093 /* Attach the MAC to the PHY. */
2094 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2095 tg3_adjust_link, phydev->interface);
2096 if (IS_ERR(phydev)) {
2097 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2098 return PTR_ERR(phydev);
2099 }
2100
2101 /* Mask with MAC supported features. */
2102 switch (phydev->interface) {
2103 case PHY_INTERFACE_MODE_GMII:
2104 case PHY_INTERFACE_MODE_RGMII:
2105 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2106 phydev->supported &= (PHY_GBIT_FEATURES |
2107 SUPPORTED_Pause |
2108 SUPPORTED_Asym_Pause);
2109 break;
2110 }
2111 /* fallthru */
2112 case PHY_INTERFACE_MODE_MII:
2113 phydev->supported &= (PHY_BASIC_FEATURES |
2114 SUPPORTED_Pause |
2115 SUPPORTED_Asym_Pause);
2116 break;
2117 default:
2118 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phydev->advertising = phydev->supported;
2125
2126 return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 phydev->advertising = tp->link_config.advertising;
2144 }
2145
2146 phy_start(phydev);
2147
2148 phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154 return;
2155
2156 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164 }
2165 }
2166
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169 int err;
2170 u32 val;
2171
2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173 return 0;
2174
2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 /* Cannot do read-modify-write on 5401 */
2177 err = tg3_phy_auxctl_write(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180 0x4c20);
2181 goto done;
2182 }
2183
2184 err = tg3_phy_auxctl_read(tp,
2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186 if (err)
2187 return err;
2188
2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 err = tg3_phy_auxctl_write(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194 return err;
2195 }
2196
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199 u32 phytest;
2200
2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202 u32 phy;
2203
2204 tg3_writephy(tp, MII_TG3_FET_TEST,
2205 phytest | MII_TG3_FET_SHADOW_EN);
2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207 if (enable)
2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209 else
2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212 }
2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214 }
2215 }
2216
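/* Toggle the PHY's Auto Power-Down (APD) feature. With APD enabled the
 * PHY powers most of itself down while the link is idle and wakes
 * periodically (an 84 ms wake timer is programmed below) to check for a
 * link partner. FET-style PHYs use a different shadow register, handled
 * in tg3_phy_fet_toggle_apd() above.
 */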
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219 u32 reg;
2220
2221 if (!tg3_flag(tp, 5705_PLUS) ||
2222 (tg3_flag(tp, 5717_PLUS) &&
2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224 return;
2225
2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 tg3_phy_fet_toggle_apd(tp, enable);
2228 return;
2229 }
2230
2231 reg = MII_TG3_MISC_SHDW_WREN |
2232 MII_TG3_MISC_SHDW_SCR5_SEL |
2233 MII_TG3_MISC_SHDW_SCR5_LPED |
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2241
2243 reg = MII_TG3_MISC_SHDW_WREN |
2244 MII_TG3_MISC_SHDW_APD_SEL |
2245 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2246 if (enable)
2247 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2248
2249 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2250 }
2251
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 {
2254 u32 phy;
2255
2256 if (!tg3_flag(tp, 5705_PLUS) ||
2257 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2258 return;
2259
2260 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2261 u32 ephy;
2262
2263 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2265
2266 tg3_writephy(tp, MII_TG3_FET_TEST,
2267 ephy | MII_TG3_FET_SHADOW_EN);
2268 if (!tg3_readphy(tp, reg, &phy)) {
2269 if (enable)
2270 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 else
2272 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273 tg3_writephy(tp, reg, phy);
2274 }
2275 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276 }
2277 } else {
2278 int ret;
2279
2280 ret = tg3_phy_auxctl_read(tp,
2281 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2282 if (!ret) {
2283 if (enable)
2284 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 else
2286 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287 tg3_phy_auxctl_write(tp,
2288 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289 }
2290 }
2291 }
2292
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 {
2295 int ret;
2296 u32 val;
2297
2298 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2299 return;
2300
2301 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2302 if (!ret)
2303 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2305 }
2306
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2308 {
2309 u32 otp, phy;
2310
2311 if (!tp->phy_otp)
2312 return;
2313
2314 otp = tp->phy_otp;
2315
2316 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2317 return;
2318
2319 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2322
2323 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2326
2327 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2330
2331 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2333
2334 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2336
2337 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2340
2341 tg3_phy_toggle_auxctl_smdsp(tp, false);
2342 }
2343
2344 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2345 {
2346 u32 val;
2347
2348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349 return;
2350
2351 tp->setlpicnt = 0;
2352
2353 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2354 current_link_up &&
2355 tp->link_config.active_duplex == DUPLEX_FULL &&
2356 (tp->link_config.active_speed == SPEED_100 ||
2357 tp->link_config.active_speed == SPEED_1000)) {
2358 u32 eeectl;
2359
2360 if (tp->link_config.active_speed == SPEED_1000)
2361 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2362 else
2363 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2364
2365 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2366
2367 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2368 TG3_CL45_D7_EEERES_STAT, &val);
2369
2370 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2372 tp->setlpicnt = 2;
2373 }
2374
2375 if (!tp->setlpicnt) {
2376 if (current_link_up &&
2377 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2378 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2379 tg3_phy_toggle_auxctl_smdsp(tp, false);
2380 }
2381
2382 val = tr32(TG3_CPMU_EEE_MODE);
2383 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2384 }
2385 }
2386
2387 static void tg3_phy_eee_enable(struct tg3 *tp)
2388 {
2389 u32 val;
2390
2391 if (tp->link_config.active_speed == SPEED_1000 &&
2392 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2393 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2394 tg3_flag(tp, 57765_CLASS)) &&
2395 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396 val = MII_TG3_DSP_TAP26_ALNOKO |
2397 MII_TG3_DSP_TAP26_RMRXSTO;
2398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2399 tg3_phy_toggle_auxctl_smdsp(tp, false);
2400 }
2401
2402 val = tr32(TG3_CPMU_EEE_MODE);
2403 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2404 }
2405
2406 static int tg3_wait_macro_done(struct tg3 *tp)
2407 {
2408 int limit = 100;
2409
2410 while (limit--) {
2411 u32 tmp32;
2412
2413 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2414 if ((tmp32 & 0x1000) == 0)
2415 break;
2416 }
2417 }
2418 if (limit < 0)
2419 return -EBUSY;
2420
2421 return 0;
2422 }
2423
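/* Part of the 5703/5704/5705 PHY reset workaround: write a known test
 * pattern into each of the four DSP channel blocks, read it back, and
 * ask the caller (via *resetp) to reset the PHY again on any mismatch.
 */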
2424 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2425 {
2426 static const u32 test_pat[4][6] = {
2427 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2431 };
2432 int chan;
2433
2434 for (chan = 0; chan < 4; chan++) {
2435 int i;
2436
2437 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2438 (chan * 0x2000) | 0x0200);
2439 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2440
2441 for (i = 0; i < 6; i++)
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2443 test_pat[chan][i]);
2444
2445 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2446 if (tg3_wait_macro_done(tp)) {
2447 *resetp = 1;
2448 return -EBUSY;
2449 }
2450
2451 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2452 (chan * 0x2000) | 0x0200);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2454 if (tg3_wait_macro_done(tp)) {
2455 *resetp = 1;
2456 return -EBUSY;
2457 }
2458
2459 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2460 if (tg3_wait_macro_done(tp)) {
2461 *resetp = 1;
2462 return -EBUSY;
2463 }
2464
2465 for (i = 0; i < 6; i += 2) {
2466 u32 low, high;
2467
2468 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2469 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2470 tg3_wait_macro_done(tp)) {
2471 *resetp = 1;
2472 return -EBUSY;
2473 }
2474 low &= 0x7fff;
2475 high &= 0x000f;
2476 if (low != test_pat[chan][i] ||
2477 high != test_pat[chan][i+1]) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2479 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2481
2482 return -EBUSY;
2483 }
2484 }
2485 }
2486
2487 return 0;
2488 }
2489
2490 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2491 {
2492 int chan;
2493
2494 for (chan = 0; chan < 4; chan++) {
2495 int i;
2496
2497 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498 (chan * 0x2000) | 0x0200);
2499 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2500 for (i = 0; i < 6; i++)
2501 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2502 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2503 if (tg3_wait_macro_done(tp))
2504 return -EBUSY;
2505 }
2506
2507 return 0;
2508 }
2509
2510 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2511 {
2512 u32 reg32, phy9_orig;
2513 int retries, do_phy_reset, err;
2514
2515 retries = 10;
2516 do_phy_reset = 1;
2517 do {
2518 if (do_phy_reset) {
2519 err = tg3_bmcr_reset(tp);
2520 if (err)
2521 return err;
2522 do_phy_reset = 0;
2523 }
2524
2525 /* Disable transmitter and interrupt. */
2526 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2527 continue;
2528
2529 reg32 |= 0x3000;
2530 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2531
/* Set full-duplex, 1000 Mbps. */
2533 tg3_writephy(tp, MII_BMCR,
2534 BMCR_FULLDPLX | BMCR_SPEED1000);
2535
2536 /* Set to master mode. */
2537 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2538 continue;
2539
2540 tg3_writephy(tp, MII_CTRL1000,
2541 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2542
2543 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2544 if (err)
2545 return err;
2546
2547 /* Block the PHY control access. */
2548 tg3_phydsp_write(tp, 0x8005, 0x0800);
2549
2550 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2551 if (!err)
2552 break;
2553 } while (--retries);
2554
2555 err = tg3_phy_reset_chanpat(tp);
2556 if (err)
2557 return err;
2558
2559 tg3_phydsp_write(tp, 0x8005, 0x0000);
2560
2561 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2562 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2563
2564 tg3_phy_toggle_auxctl_smdsp(tp, false);
2565
2566 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2567
2568 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2569 reg32 &= ~0x3000;
2570 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2571 } else if (!err)
2572 err = -EBUSY;
2573
2574 return err;
2575 }
2576
2577 static void tg3_carrier_off(struct tg3 *tp)
2578 {
2579 netif_carrier_off(tp->dev);
2580 tp->link_up = false;
2581 }
2582
2583 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2584 {
2585 if (tg3_flag(tp, ENABLE_ASF))
2586 netdev_warn(tp->dev,
2587 "Management side-band traffic will be interrupted during phy settings change\n");
2588 }
2589
/* Reset the tigon3 PHY and apply the chip-specific workarounds that
 * must follow every PHY reset.
 */
2593 static int tg3_phy_reset(struct tg3 *tp)
2594 {
2595 u32 val, cpmuctrl;
2596 int err;
2597
2598 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2599 val = tr32(GRC_MISC_CFG);
2600 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2601 udelay(40);
2602 }
2603 err = tg3_readphy(tp, MII_BMSR, &val);
2604 err |= tg3_readphy(tp, MII_BMSR, &val);
2605 if (err != 0)
2606 return -EBUSY;
2607
2608 if (netif_running(tp->dev) && tp->link_up) {
2609 netif_carrier_off(tp->dev);
2610 tg3_link_report(tp);
2611 }
2612
2613 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2614 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2615 tg3_asic_rev(tp) == ASIC_REV_5705) {
2616 err = tg3_phy_reset_5703_4_5(tp);
2617 if (err)
2618 return err;
2619 goto out;
2620 }
2621
2622 cpmuctrl = 0;
2623 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2624 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2625 cpmuctrl = tr32(TG3_CPMU_CTRL);
2626 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2627 tw32(TG3_CPMU_CTRL,
2628 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2629 }
2630
2631 err = tg3_bmcr_reset(tp);
2632 if (err)
2633 return err;
2634
2635 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2636 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2637 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2638
2639 tw32(TG3_CPMU_CTRL, cpmuctrl);
2640 }
2641
2642 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2643 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2644 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2645 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2646 CPMU_LSPD_1000MB_MACCLK_12_5) {
2647 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2648 udelay(40);
2649 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2650 }
2651 }
2652
2653 if (tg3_flag(tp, 5717_PLUS) &&
2654 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2655 return 0;
2656
2657 tg3_phy_apply_otp(tp);
2658
2659 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2660 tg3_phy_toggle_apd(tp, true);
2661 else
2662 tg3_phy_toggle_apd(tp, false);
2663
2664 out:
2665 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2666 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2667 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2668 tg3_phydsp_write(tp, 0x000a, 0x0323);
2669 tg3_phy_toggle_auxctl_smdsp(tp, false);
2670 }
2671
2672 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2673 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2674 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2675 }
2676
2677 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2678 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2679 tg3_phydsp_write(tp, 0x000a, 0x310b);
2680 tg3_phydsp_write(tp, 0x201f, 0x9506);
2681 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2682 tg3_phy_toggle_auxctl_smdsp(tp, false);
2683 }
2684 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2685 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2686 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2687 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2688 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2689 tg3_writephy(tp, MII_TG3_TEST1,
2690 MII_TG3_TEST1_TRIM_EN | 0x4);
2691 } else
2692 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2693
2694 tg3_phy_toggle_auxctl_smdsp(tp, false);
2695 }
2696 }
2697
/* Set Extended packet length bit (bit 14) on all chips that
 * support jumbo frames.
 */
2700 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2701 /* Cannot do read-modify-write on 5401 */
2702 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2703 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2704 /* Set bit 14 with read-modify-write to preserve other bits */
2705 err = tg3_phy_auxctl_read(tp,
2706 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2707 if (!err)
2708 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2709 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2710 }
2711
/* Set phy register 0x10 bit 0 to high fifo elasticity to support
 * jumbo frame transmission.
 */
2715 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2716 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2717 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2718 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2719 }
2720
2721 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2722 /* adjust output voltage */
2723 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2724 }
2725
2726 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2727 tg3_phydsp_write(tp, 0xffb, 0x4000);
2728
2729 tg3_phy_toggle_automdix(tp, true);
2730 tg3_phy_set_wirespeed(tp);
2731 return 0;
2732 }
2733
2734 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2736 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2737 TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742 (TG3_GPIO_MSG_DRVR_PRES << 12))
2743
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748 (TG3_GPIO_MSG_NEED_VAUX << 12))
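
/* Each PCI function owns one 4-bit nibble in the GPIO message word:
 * function 0 uses bits 3:0, function 1 bits 7:4, and so on. The
 * *_ALL_* masks above simply replicate a single flag across all four
 * nibbles so that one mask test covers every function.
 */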
2749
2750 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2751 {
2752 u32 status, shift;
2753
2754 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2755 tg3_asic_rev(tp) == ASIC_REV_5719)
2756 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2757 else
2758 status = tr32(TG3_CPMU_DRV_STATUS);
2759
2760 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2761 status &= ~(TG3_GPIO_MSG_MASK << shift);
2762 status |= (newstat << shift);
2763
2764 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2765 tg3_asic_rev(tp) == ASIC_REV_5719)
2766 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2767 else
2768 tw32(TG3_CPMU_DRV_STATUS, status);
2769
2770 return status >> TG3_APE_GPIO_MSG_SHIFT;
2771 }
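
/* Example: on PCI function 1, newstat = TG3_GPIO_MSG_NEED_VAUX clears
 * that function's nibble (4 bits starting at TG3_APE_GPIO_MSG_SHIFT + 4)
 * and writes the NEED_VAUX flag there, leaving the other functions'
 * nibbles untouched. The return value is all four nibbles shifted
 * right-aligned, ready for the *_ALL_* mask tests above.
 */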
2772
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2774 {
2775 if (!tg3_flag(tp, IS_NIC))
2776 return 0;
2777
2778 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2779 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2780 tg3_asic_rev(tp) == ASIC_REV_5720) {
2781 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2782 return -EIO;
2783
2784 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2785
2786 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2787 TG3_GRC_LCLCTL_PWRSW_DELAY);
2788
2789 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2790 } else {
2791 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2792 TG3_GRC_LCLCTL_PWRSW_DELAY);
2793 }
2794
2795 return 0;
2796 }
2797
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2799 {
2800 u32 grc_local_ctrl;
2801
2802 if (!tg3_flag(tp, IS_NIC) ||
2803 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2804 tg3_asic_rev(tp) == ASIC_REV_5701)
2805 return;
2806
2807 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2808
2809 tw32_wait_f(GRC_LOCAL_CTRL,
2810 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2812
2813 tw32_wait_f(GRC_LOCAL_CTRL,
2814 grc_local_ctrl,
2815 TG3_GRC_LCLCTL_PWRSW_DELAY);
2816
2817 tw32_wait_f(GRC_LOCAL_CTRL,
2818 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 }
2821
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2823 {
2824 if (!tg3_flag(tp, IS_NIC))
2825 return;
2826
2827 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2828 tg3_asic_rev(tp) == ASIC_REV_5701) {
2829 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2830 (GRC_LCLCTRL_GPIO_OE0 |
2831 GRC_LCLCTRL_GPIO_OE1 |
2832 GRC_LCLCTRL_GPIO_OE2 |
2833 GRC_LCLCTRL_GPIO_OUTPUT0 |
2834 GRC_LCLCTRL_GPIO_OUTPUT1),
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2837 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2838 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2840 GRC_LCLCTRL_GPIO_OE1 |
2841 GRC_LCLCTRL_GPIO_OE2 |
2842 GRC_LCLCTRL_GPIO_OUTPUT0 |
2843 GRC_LCLCTRL_GPIO_OUTPUT1 |
2844 tp->grc_local_ctrl;
2845 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2849 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2853 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 } else {
2856 u32 no_gpio2;
2857 u32 grc_local_ctrl = 0;
2858
/* Workaround to prevent drawing too much current. */
2860 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2861 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2862 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2863 grc_local_ctrl,
2864 TG3_GRC_LCLCTL_PWRSW_DELAY);
2865 }
2866
2867 /* On 5753 and variants, GPIO2 cannot be used. */
2868 no_gpio2 = tp->nic_sram_data_cfg &
2869 NIC_SRAM_DATA_CFG_NO_GPIO2;
2870
2871 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2872 GRC_LCLCTRL_GPIO_OE1 |
2873 GRC_LCLCTRL_GPIO_OE2 |
2874 GRC_LCLCTRL_GPIO_OUTPUT1 |
2875 GRC_LCLCTRL_GPIO_OUTPUT2;
2876 if (no_gpio2) {
2877 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT2);
2879 }
2880 tw32_wait_f(GRC_LOCAL_CTRL,
2881 tp->grc_local_ctrl | grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2885
2886 tw32_wait_f(GRC_LOCAL_CTRL,
2887 tp->grc_local_ctrl | grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890 if (!no_gpio2) {
2891 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2892 tw32_wait_f(GRC_LOCAL_CTRL,
2893 tp->grc_local_ctrl | grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2895 }
2896 }
2897 }
2898
2899 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2900 {
2901 u32 msg = 0;
2902
2903 /* Serialize power state transitions */
2904 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2905 return;
2906
2907 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2908 msg = TG3_GPIO_MSG_NEED_VAUX;
2909
2910 msg = tg3_set_function_status(tp, msg);
2911
2912 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2913 goto done;
2914
2915 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2916 tg3_pwrsrc_switch_to_vaux(tp);
2917 else
2918 tg3_pwrsrc_die_with_vmain(tp);
2919
2920 done:
2921 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2922 }
2923
2924 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2925 {
2926 bool need_vaux = false;
2927
2928 /* The GPIOs do something completely different on 57765. */
2929 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2930 return;
2931
2932 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2933 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2934 tg3_asic_rev(tp) == ASIC_REV_5720) {
tg3_frob_aux_power_5717(tp, include_wol ?
tg3_flag(tp, WOL_ENABLE) != 0 : false);
2937 return;
2938 }
2939
2940 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2941 struct net_device *dev_peer;
2942
2943 dev_peer = pci_get_drvdata(tp->pdev_peer);
2944
2945 /* remove_one() may have been run on the peer. */
2946 if (dev_peer) {
2947 struct tg3 *tp_peer = netdev_priv(dev_peer);
2948
2949 if (tg3_flag(tp_peer, INIT_COMPLETE))
2950 return;
2951
2952 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2953 tg3_flag(tp_peer, ENABLE_ASF))
2954 need_vaux = true;
2955 }
2956 }
2957
2958 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2959 tg3_flag(tp, ENABLE_ASF))
2960 need_vaux = true;
2961
2962 if (need_vaux)
2963 tg3_pwrsrc_switch_to_vaux(tp);
2964 else
2965 tg3_pwrsrc_die_with_vmain(tp);
2966 }
2967
2968 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2969 {
2970 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2971 return 1;
2972 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2973 if (speed != SPEED_10)
2974 return 1;
2975 } else if (speed == SPEED_10)
2976 return 1;
2977
2978 return 0;
2979 }
2980
2981 static bool tg3_phy_power_bug(struct tg3 *tp)
2982 {
2983 switch (tg3_asic_rev(tp)) {
2984 case ASIC_REV_5700:
2985 case ASIC_REV_5704:
2986 return true;
2987 case ASIC_REV_5780:
2988 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2989 return true;
2990 return false;
2991 case ASIC_REV_5717:
2992 if (!tp->pci_fn)
2993 return true;
2994 return false;
2995 case ASIC_REV_5719:
2996 case ASIC_REV_5720:
2997 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2998 !tp->pci_fn)
2999 return true;
3000 return false;
3001 }
3002
3003 return false;
3004 }
3005
3006 static bool tg3_phy_led_bug(struct tg3 *tp)
3007 {
3008 switch (tg3_asic_rev(tp)) {
3009 case ASIC_REV_5719:
3010 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3011 !tp->pci_fn)
3012 return true;
3013 return false;
3014 }
3015
3016 return false;
3017 }
3018
3019 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3020 {
3021 u32 val;
3022
3023 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3024 return;
3025
3026 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3027 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3028 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3029 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3030
3031 sg_dig_ctrl |=
3032 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3033 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3034 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3035 }
3036 return;
3037 }
3038
3039 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3040 tg3_bmcr_reset(tp);
3041 val = tr32(GRC_MISC_CFG);
3042 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3043 udelay(40);
3044 return;
3045 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3046 u32 phytest;
3047 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3048 u32 phy;
3049
3050 tg3_writephy(tp, MII_ADVERTISE, 0);
3051 tg3_writephy(tp, MII_BMCR,
3052 BMCR_ANENABLE | BMCR_ANRESTART);
3053
3054 tg3_writephy(tp, MII_TG3_FET_TEST,
3055 phytest | MII_TG3_FET_SHADOW_EN);
3056 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3057 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3058 tg3_writephy(tp,
3059 MII_TG3_FET_SHDW_AUXMODE4,
3060 phy);
3061 }
3062 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3063 }
3064 return;
3065 } else if (do_low_power) {
3066 if (!tg3_phy_led_bug(tp))
3067 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3068 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3069
3070 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3071 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3072 MII_TG3_AUXCTL_PCTL_VREG_11V;
3073 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3074 }
3075
3076 /* The PHY should not be powered down on some chips because
3077 * of bugs.
3078 */
3079 if (tg3_phy_power_bug(tp))
3080 return;
3081
3082 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3083 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3084 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3085 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3086 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3088 }
3089
3090 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3091 }
3092
3093 /* tp->lock is held. */
3094 static int tg3_nvram_lock(struct tg3 *tp)
3095 {
3096 if (tg3_flag(tp, NVRAM)) {
3097 int i;
3098
3099 if (tp->nvram_lock_cnt == 0) {
3100 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3101 for (i = 0; i < 8000; i++) {
3102 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3103 break;
3104 udelay(20);
3105 }
3106 if (i == 8000) {
3107 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3108 return -ENODEV;
3109 }
3110 }
3111 tp->nvram_lock_cnt++;
3112 }
3113 return 0;
3114 }
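
/* tg3_nvram_lock()/tg3_nvram_unlock() nest: only the outermost request
 * touches the hardware arbiter (NVRAM_SWARB); inner calls just bump
 * nvram_lock_cnt. The grant poll above waits at most 8000 * 20 us =
 * 160 ms before giving up.
 */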
3115
3116 /* tp->lock is held. */
3117 static void tg3_nvram_unlock(struct tg3 *tp)
3118 {
3119 if (tg3_flag(tp, NVRAM)) {
3120 if (tp->nvram_lock_cnt > 0)
3121 tp->nvram_lock_cnt--;
3122 if (tp->nvram_lock_cnt == 0)
3123 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3124 }
3125 }
3126
3127 /* tp->lock is held. */
3128 static void tg3_enable_nvram_access(struct tg3 *tp)
3129 {
3130 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3131 u32 nvaccess = tr32(NVRAM_ACCESS);
3132
3133 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3134 }
3135 }
3136
3137 /* tp->lock is held. */
3138 static void tg3_disable_nvram_access(struct tg3 *tp)
3139 {
3140 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3141 u32 nvaccess = tr32(NVRAM_ACCESS);
3142
3143 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3144 }
3145 }
3146
3147 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3148 u32 offset, u32 *val)
3149 {
3150 u32 tmp;
3151 int i;
3152
3153 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3154 return -EINVAL;
3155
3156 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3157 EEPROM_ADDR_DEVID_MASK |
3158 EEPROM_ADDR_READ);
3159 tw32(GRC_EEPROM_ADDR,
3160 tmp |
3161 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3162 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3163 EEPROM_ADDR_ADDR_MASK) |
3164 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3165
3166 for (i = 0; i < 1000; i++) {
3167 tmp = tr32(GRC_EEPROM_ADDR);
3168
3169 if (tmp & EEPROM_ADDR_COMPLETE)
3170 break;
3171 msleep(1);
3172 }
3173 if (!(tmp & EEPROM_ADDR_COMPLETE))
3174 return -EBUSY;
3175
3176 tmp = tr32(GRC_EEPROM_DATA);
3177
3178 /*
3179 * The data will always be opposite the native endian
3180 * format. Perform a blind byteswap to compensate.
3181 */
3182 *val = swab32(tmp);
3183
3184 return 0;
3185 }
3186
3187 #define NVRAM_CMD_TIMEOUT 10000
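/* Worst case wait in tg3_nvram_exec_cmd(): 10,000 polls * 10 us = 100 ms. */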
3188
3189 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3190 {
3191 int i;
3192
3193 tw32(NVRAM_CMD, nvram_cmd);
3194 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3195 udelay(10);
3196 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3197 udelay(10);
3198 break;
3199 }
3200 }
3201
3202 if (i == NVRAM_CMD_TIMEOUT)
3203 return -EBUSY;
3204
3205 return 0;
3206 }
3207
3208 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3209 {
3210 if (tg3_flag(tp, NVRAM) &&
3211 tg3_flag(tp, NVRAM_BUFFERED) &&
3212 tg3_flag(tp, FLASH) &&
3213 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3214 (tp->nvram_jedecnum == JEDEC_ATMEL))
3215
3216 addr = ((addr / tp->nvram_pagesize) <<
3217 ATMEL_AT45DB0X1B_PAGE_POS) +
3218 (addr % tp->nvram_pagesize);
3219
3220 return addr;
3221 }
3222
3223 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3224 {
3225 if (tg3_flag(tp, NVRAM) &&
3226 tg3_flag(tp, NVRAM_BUFFERED) &&
3227 tg3_flag(tp, FLASH) &&
3228 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3229 (tp->nvram_jedecnum == JEDEC_ATMEL))
3230
3231 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3232 tp->nvram_pagesize) +
3233 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3234
3235 return addr;
3236 }
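
/* Worked example of the two translations above, assuming an Atmel
 * AT45DB011B-style part with 264-byte pages and a 9-bit in-page offset
 * (ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *	logical 600  ->  page 600 / 264 = 2, offset 600 % 264 = 72
 *	physical     =   (2 << 9) + 72 = 1096
 *
 * tg3_nvram_logical_addr() is the exact inverse of tg3_nvram_phys_addr().
 */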
3237
3238 /* NOTE: Data read in from NVRAM is byteswapped according to
3239 * the byteswapping settings for all other register accesses.
3240 * tg3 devices are BE devices, so on a BE machine, the data
3241 * returned will be exactly as it is seen in NVRAM. On a LE
3242 * machine, the 32-bit value will be byteswapped.
3243 */
3244 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3245 {
3246 int ret;
3247
3248 if (!tg3_flag(tp, NVRAM))
3249 return tg3_nvram_read_using_eeprom(tp, offset, val);
3250
3251 offset = tg3_nvram_phys_addr(tp, offset);
3252
3253 if (offset > NVRAM_ADDR_MSK)
3254 return -EINVAL;
3255
3256 ret = tg3_nvram_lock(tp);
3257 if (ret)
3258 return ret;
3259
3260 tg3_enable_nvram_access(tp);
3261
3262 tw32(NVRAM_ADDR, offset);
3263 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3264 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3265
3266 if (ret == 0)
3267 *val = tr32(NVRAM_RDDATA);
3268
3269 tg3_disable_nvram_access(tp);
3270
3271 tg3_nvram_unlock(tp);
3272
3273 return ret;
3274 }
3275
3276 /* Ensures NVRAM data is in bytestream format. */
3277 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3278 {
3279 u32 v;
3280 int res = tg3_nvram_read(tp, offset, &v);
3281 if (!res)
3282 *val = cpu_to_be32(v);
3283 return res;
3284 }
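
/* Typical usage (sketch): read a word in bytestream order so the result
 * can be memcpy()'d into a byte buffer regardless of host endianness:
 *
 *	__be32 word;
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(buf, &word, sizeof(word));
 */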
3285
3286 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3287 u32 offset, u32 len, u8 *buf)
3288 {
3289 int i, j, rc = 0;
3290 u32 val;
3291
3292 for (i = 0; i < len; i += 4) {
3293 u32 addr;
3294 __be32 data;
3295
3296 addr = offset + i;
3297
3298 memcpy(&data, buf + i, 4);
3299
3300 /*
3301 * The SEEPROM interface expects the data to always be opposite
3302 * the native endian format. We accomplish this by reversing
3303 * all the operations that would have been performed on the
3304 * data from a call to tg3_nvram_read_be32().
3305 */
3306 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3307
3308 val = tr32(GRC_EEPROM_ADDR);
3309 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3310
3311 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3312 EEPROM_ADDR_READ);
3313 tw32(GRC_EEPROM_ADDR, val |
3314 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3315 (addr & EEPROM_ADDR_ADDR_MASK) |
3316 EEPROM_ADDR_START |
3317 EEPROM_ADDR_WRITE);
3318
3319 for (j = 0; j < 1000; j++) {
3320 val = tr32(GRC_EEPROM_ADDR);
3321
3322 if (val & EEPROM_ADDR_COMPLETE)
3323 break;
3324 msleep(1);
3325 }
3326 if (!(val & EEPROM_ADDR_COMPLETE)) {
3327 rc = -EBUSY;
3328 break;
3329 }
3330 }
3331
3332 return rc;
3333 }
3334
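/* Unbuffered flash parts can only be programmed a full page at a time,
 * so each pass of the loop below reads back the whole target page,
 * merges in the caller's data, erases the page, then rewrites it one
 * word at a time.
 */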
3335 /* offset and length are dword aligned */
3336 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3337 u8 *buf)
3338 {
3339 int ret = 0;
3340 u32 pagesize = tp->nvram_pagesize;
3341 u32 pagemask = pagesize - 1;
3342 u32 nvram_cmd;
3343 u8 *tmp;
3344
3345 tmp = kmalloc(pagesize, GFP_KERNEL);
3346 if (tmp == NULL)
3347 return -ENOMEM;
3348
3349 while (len) {
3350 int j;
3351 u32 phy_addr, page_off, size;
3352
3353 phy_addr = offset & ~pagemask;
3354
3355 for (j = 0; j < pagesize; j += 4) {
3356 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3357 (__be32 *) (tmp + j));
3358 if (ret)
3359 break;
3360 }
3361 if (ret)
3362 break;
3363
3364 page_off = offset & pagemask;
3365 size = pagesize;
3366 if (len < size)
3367 size = len;
3368
3369 len -= size;
3370
3371 memcpy(tmp + page_off, buf, size);
3372
3373 offset = offset + (pagesize - page_off);
3374
3375 tg3_enable_nvram_access(tp);
3376
3377 /*
3378 * Before we can erase the flash page, we need
3379 * to issue a special "write enable" command.
3380 */
3381 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3382
3383 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3384 break;
3385
3386 /* Erase the target page */
3387 tw32(NVRAM_ADDR, phy_addr);
3388
3389 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3390 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3391
3392 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3393 break;
3394
3395 /* Issue another write enable to start the write. */
3396 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3397
3398 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3399 break;
3400
3401 for (j = 0; j < pagesize; j += 4) {
3402 __be32 data;
3403
3404 data = *((__be32 *) (tmp + j));
3405
3406 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3407
3408 tw32(NVRAM_ADDR, phy_addr + j);
3409
3410 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3411 NVRAM_CMD_WR;
3412
3413 if (j == 0)
3414 nvram_cmd |= NVRAM_CMD_FIRST;
3415 else if (j == (pagesize - 4))
3416 nvram_cmd |= NVRAM_CMD_LAST;
3417
3418 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3419 if (ret)
3420 break;
3421 }
3422 if (ret)
3423 break;
3424 }
3425
3426 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3427 tg3_nvram_exec_cmd(tp, nvram_cmd);
3428
3429 kfree(tmp);
3430
3431 return ret;
3432 }
3433
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3436 u8 *buf)
3437 {
3438 int i, ret = 0;
3439
3440 for (i = 0; i < len; i += 4, offset += 4) {
3441 u32 page_off, phy_addr, nvram_cmd;
3442 __be32 data;
3443
3444 memcpy(&data, buf + i, 4);
3445 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3446
3447 page_off = offset % tp->nvram_pagesize;
3448
3449 phy_addr = tg3_nvram_phys_addr(tp, offset);
3450
3451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3452
3453 if (page_off == 0 || i == 0)
3454 nvram_cmd |= NVRAM_CMD_FIRST;
3455 if (page_off == (tp->nvram_pagesize - 4))
3456 nvram_cmd |= NVRAM_CMD_LAST;
3457
3458 if (i == (len - 4))
3459 nvram_cmd |= NVRAM_CMD_LAST;
3460
3461 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3462 !tg3_flag(tp, FLASH) ||
3463 !tg3_flag(tp, 57765_PLUS))
3464 tw32(NVRAM_ADDR, phy_addr);
3465
3466 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3467 !tg3_flag(tp, 5755_PLUS) &&
3468 (tp->nvram_jedecnum == JEDEC_ST) &&
3469 (nvram_cmd & NVRAM_CMD_FIRST)) {
3470 u32 cmd;
3471
3472 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473 ret = tg3_nvram_exec_cmd(tp, cmd);
3474 if (ret)
3475 break;
3476 }
3477 if (!tg3_flag(tp, FLASH)) {
3478 /* We always do complete word writes to eeprom. */
3479 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3480 }
3481
3482 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3483 if (ret)
3484 break;
3485 }
3486 return ret;
3487 }
3488
3489 /* offset and length are dword aligned */
3490 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3491 {
3492 int ret;
3493
3494 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3495 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3496 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3497 udelay(40);
3498 }
3499
3500 if (!tg3_flag(tp, NVRAM)) {
3501 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3502 } else {
3503 u32 grc_mode;
3504
3505 ret = tg3_nvram_lock(tp);
3506 if (ret)
3507 return ret;
3508
3509 tg3_enable_nvram_access(tp);
3510 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3511 tw32(NVRAM_WRITE1, 0x406);
3512
3513 grc_mode = tr32(GRC_MODE);
3514 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3515
3516 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3517 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3518 buf);
3519 } else {
3520 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3521 buf);
3522 }
3523
3524 grc_mode = tr32(GRC_MODE);
3525 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3526
3527 tg3_disable_nvram_access(tp);
3528 tg3_nvram_unlock(tp);
3529 }
3530
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3533 udelay(40);
3534 }
3535
3536 return ret;
3537 }
3538
3539 #define RX_CPU_SCRATCH_BASE 0x30000
3540 #define RX_CPU_SCRATCH_SIZE 0x04000
3541 #define TX_CPU_SCRATCH_BASE 0x34000
3542 #define TX_CPU_SCRATCH_SIZE 0x04000
3543
3544 /* tp->lock is held. */
3545 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3546 {
3547 int i;
3548 const int iters = 10000;
3549
3550 for (i = 0; i < iters; i++) {
3551 tw32(cpu_base + CPU_STATE, 0xffffffff);
3552 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3553 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3554 break;
3555 if (pci_channel_offline(tp->pdev))
3556 return -EBUSY;
3557 }
3558
3559 return (i == iters) ? -EBUSY : 0;
3560 }
3561
3562 /* tp->lock is held. */
3563 static int tg3_rxcpu_pause(struct tg3 *tp)
3564 {
3565 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3566
3567 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3568 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3569 udelay(10);
3570
3571 return rc;
3572 }
3573
3574 /* tp->lock is held. */
3575 static int tg3_txcpu_pause(struct tg3 *tp)
3576 {
3577 return tg3_pause_cpu(tp, TX_CPU_BASE);
3578 }
3579
3580 /* tp->lock is held. */
3581 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 tw32(cpu_base + CPU_STATE, 0xffffffff);
3584 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3585 }
3586
3587 /* tp->lock is held. */
3588 static void tg3_rxcpu_resume(struct tg3 *tp)
3589 {
3590 tg3_resume_cpu(tp, RX_CPU_BASE);
3591 }
3592
3593 /* tp->lock is held. */
3594 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596 int rc;
3597
3598 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3599
3600 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3601 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3602
3603 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3604 return 0;
3605 }
3606 if (cpu_base == RX_CPU_BASE) {
3607 rc = tg3_rxcpu_pause(tp);
3608 } else {
3609 /*
3610 * There is only an Rx CPU for the 5750 derivative in the
3611 * BCM4785.
3612 */
3613 if (tg3_flag(tp, IS_SSB_CORE))
3614 return 0;
3615
3616 rc = tg3_txcpu_pause(tp);
3617 }
3618
3619 if (rc) {
3620 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3621 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3622 return -ENODEV;
3623 }
3624
3625 /* Clear firmware's nvram arbitration. */
3626 if (tg3_flag(tp, NVRAM))
3627 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3628 return 0;
3629 }
3630
3631 static int tg3_fw_data_len(struct tg3 *tp,
3632 const struct tg3_firmware_hdr *fw_hdr)
3633 {
3634 int fw_len;
3635
/* Non-fragmented firmware has one firmware header followed by a
 * contiguous chunk of data to be written. The length field in that
 * header is not the length of the data to be written but the complete
 * length of the bss. The data length is determined from
 * tp->fw->size minus headers.
 *
 * Fragmented firmware has a main header followed by multiple
 * fragments. Each fragment is identical to non-fragmented firmware:
 * a firmware header followed by a contiguous chunk of data. In
 * the main header, the length field is unused and set to 0xffffffff.
 * In each fragment header the length is the entire size of that
 * fragment, i.e. fragment data + header length. The data length is
 * therefore the length field in the header minus TG3_FW_HDR_LEN.
 */
3650 if (tp->fw_len == 0xffffffff)
3651 fw_len = be32_to_cpu(fw_hdr->len);
3652 else
3653 fw_len = tp->fw->size;
3654
3655 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3656 }
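
/* Example, assuming the usual 12-byte header (TG3_FW_HDR_LEN: version,
 * base_addr, len): a non-fragmented blob with tp->fw->size = 0x1000
 * yields (0x1000 - 12) / 4 = 1021 data words to write. For fragmented
 * images the same computation is applied per fragment, using the
 * fragment header's length field instead.
 */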
3657
3658 /* tp->lock is held. */
3659 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3660 u32 cpu_scratch_base, int cpu_scratch_size,
3661 const struct tg3_firmware_hdr *fw_hdr)
3662 {
3663 int err, i;
3664 void (*write_op)(struct tg3 *, u32, u32);
3665 int total_len = tp->fw->size;
3666
3667 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
"%s: Trying to load TX cpu firmware on a 5705-plus chip, which has no TX cpu\n",
__func__);
3671 return -EINVAL;
3672 }
3673
3674 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3675 write_op = tg3_write_mem;
3676 else
3677 write_op = tg3_write_indirect_reg32;
3678
3679 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3680 /* It is possible that bootcode is still loading at this point.
3681 * Get the nvram lock first before halting the cpu.
3682 */
3683 int lock_err = tg3_nvram_lock(tp);
3684 err = tg3_halt_cpu(tp, cpu_base);
3685 if (!lock_err)
3686 tg3_nvram_unlock(tp);
3687 if (err)
3688 goto out;
3689
3690 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3691 write_op(tp, cpu_scratch_base + i, 0);
3692 tw32(cpu_base + CPU_STATE, 0xffffffff);
3693 tw32(cpu_base + CPU_MODE,
3694 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3695 } else {
3696 /* Subtract additional main header for fragmented firmware and
3697 * advance to the first fragment
3698 */
3699 total_len -= TG3_FW_HDR_LEN;
3700 fw_hdr++;
3701 }
3702
3703 do {
3704 u32 *fw_data = (u32 *)(fw_hdr + 1);
3705 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3706 write_op(tp, cpu_scratch_base +
3707 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3708 (i * sizeof(u32)),
3709 be32_to_cpu(fw_data[i]));
3710
3711 total_len -= be32_to_cpu(fw_hdr->len);
3712
3713 /* Advance to next fragment */
3714 fw_hdr = (struct tg3_firmware_hdr *)
3715 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3716 } while (total_len > 0);
3717
3718 err = 0;
3719
3720 out:
3721 return err;
3722 }
3723
3724 /* tp->lock is held. */
3725 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3726 {
3727 int i;
3728 const int iters = 5;
3729
3730 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731 tw32_f(cpu_base + CPU_PC, pc);
3732
3733 for (i = 0; i < iters; i++) {
3734 if (tr32(cpu_base + CPU_PC) == pc)
3735 break;
3736 tw32(cpu_base + CPU_STATE, 0xffffffff);
3737 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3738 tw32_f(cpu_base + CPU_PC, pc);
3739 udelay(1000);
3740 }
3741
3742 return (i == iters) ? -EBUSY : 0;
3743 }
3744
3745 /* tp->lock is held. */
3746 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3747 {
3748 const struct tg3_firmware_hdr *fw_hdr;
3749 int err;
3750
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752
/* Firmware blob starts with version numbers, followed by
 * start address and length. We are setting complete length.
 * length = end_address_of_bss - start_address_of_text.
 * Remainder is the blob to be loaded contiguously
 * from start address.
 */
3758
3759 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3760 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3761 fw_hdr);
3762 if (err)
3763 return err;
3764
3765 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3766 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3767 fw_hdr);
3768 if (err)
3769 return err;
3770
3771 /* Now startup only the RX cpu. */
3772 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3773 be32_to_cpu(fw_hdr->base_addr));
3774 if (err) {
3775 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3776 "should be %08x\n", __func__,
3777 tr32(RX_CPU_BASE + CPU_PC),
3778 be32_to_cpu(fw_hdr->base_addr));
3779 return -ENODEV;
3780 }
3781
3782 tg3_rxcpu_resume(tp);
3783
3784 return 0;
3785 }
3786
3787 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3788 {
3789 const int iters = 1000;
3790 int i;
3791 u32 val;
3792
3793 /* Wait for boot code to complete initialization and enter service
3794 * loop. It is then safe to download service patches
3795 */
3796 for (i = 0; i < iters; i++) {
3797 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3798 break;
3799
3800 udelay(10);
3801 }
3802
3803 if (i == iters) {
3804 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3805 return -EBUSY;
3806 }
3807
3808 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3809 if (val & 0xff) {
3810 netdev_warn(tp->dev,
3811 "Other patches exist. Not downloading EEE patch\n");
3812 return -EEXIST;
3813 }
3814
3815 return 0;
3816 }
3817
3818 /* tp->lock is held. */
3819 static void tg3_load_57766_firmware(struct tg3 *tp)
3820 {
3821 struct tg3_firmware_hdr *fw_hdr;
3822
3823 if (!tg3_flag(tp, NO_NVRAM))
3824 return;
3825
3826 if (tg3_validate_rxcpu_state(tp))
3827 return;
3828
3829 if (!tp->fw)
3830 return;
3831
3832 /* This firmware blob has a different format than older firmware
3833 * releases as given below. The main difference is we have fragmented
3834 * data to be written to non-contiguous locations.
3835 *
3836 * In the beginning we have a firmware header identical to other
3837 * firmware which consists of version, base addr and length. The length
3838 * here is unused and set to 0xffffffff.
3839 *
 * This is followed by a series of firmware fragments, each of which
 * is laid out like the older firmware: a firmware header followed by
 * the data for that fragment. The version field of the individual
 * fragment header is unused.
3844 */
3845
3846 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3847 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3848 return;
3849
3850 if (tg3_rxcpu_pause(tp))
3851 return;
3852
3853 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3854 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3855
3856 tg3_rxcpu_resume(tp);
3857 }
3858
3859 /* tp->lock is held. */
3860 static int tg3_load_tso_firmware(struct tg3 *tp)
3861 {
3862 const struct tg3_firmware_hdr *fw_hdr;
3863 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3864 int err;
3865
3866 if (!tg3_flag(tp, FW_TSO))
3867 return 0;
3868
3869 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870
/* Firmware blob starts with version numbers, followed by
 * start address and length. We are setting complete length.
 * length = end_address_of_bss - start_address_of_text.
 * Remainder is the blob to be loaded contiguously
 * from start address.
 */
3876
3877 cpu_scratch_size = tp->fw_len;
3878
3879 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3880 cpu_base = RX_CPU_BASE;
3881 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3882 } else {
3883 cpu_base = TX_CPU_BASE;
3884 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3885 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3886 }
3887
3888 err = tg3_load_firmware_cpu(tp, cpu_base,
3889 cpu_scratch_base, cpu_scratch_size,
3890 fw_hdr);
3891 if (err)
3892 return err;
3893
3894 /* Now startup the cpu. */
3895 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3896 be32_to_cpu(fw_hdr->base_addr));
3897 if (err) {
3898 netdev_err(tp->dev,
3899 "%s fails to set CPU PC, is %08x should be %08x\n",
3900 __func__, tr32(cpu_base + CPU_PC),
3901 be32_to_cpu(fw_hdr->base_addr));
3902 return -ENODEV;
3903 }
3904
3905 tg3_resume_cpu(tp, cpu_base);
3906 return 0;
3907 }
3908
3909
3910 /* tp->lock is held. */
3911 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3912 {
3913 u32 addr_high, addr_low;
3914 int i;
3915
3916 addr_high = ((tp->dev->dev_addr[0] << 8) |
3917 tp->dev->dev_addr[1]);
3918 addr_low = ((tp->dev->dev_addr[2] << 24) |
3919 (tp->dev->dev_addr[3] << 16) |
3920 (tp->dev->dev_addr[4] << 8) |
3921 (tp->dev->dev_addr[5] << 0));
3922 for (i = 0; i < 4; i++) {
3923 if (i == 1 && skip_mac_1)
3924 continue;
3925 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3926 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3927 }
3928
3929 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3930 tg3_asic_rev(tp) == ASIC_REV_5704) {
3931 for (i = 0; i < 12; i++) {
3932 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3933 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3934 }
3935 }
3936
3937 addr_high = (tp->dev->dev_addr[0] +
3938 tp->dev->dev_addr[1] +
3939 tp->dev->dev_addr[2] +
3940 tp->dev->dev_addr[3] +
3941 tp->dev->dev_addr[4] +
3942 tp->dev->dev_addr[5]) &
3943 TX_BACKOFF_SEED_MASK;
3944 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3945 }
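
/* Example of the packing above for a (hypothetical) station address
 * 00:10:18:aa:bb:cc:
 *
 *	addr_high = 0x00000010   (bytes 0-1)
 *	addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * The same pair is written to all four MAC_ADDR_* slots (and to the
 * twelve MAC_EXTADDR_* slots on 5703/5704) so every perfect-match slot
 * carries the station address.
 */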
3946
3947 static void tg3_enable_register_access(struct tg3 *tp)
3948 {
3949 /*
3950 * Make sure register accesses (indirect or otherwise) will function
3951 * correctly.
3952 */
3953 pci_write_config_dword(tp->pdev,
3954 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3955 }
3956
3957 static int tg3_power_up(struct tg3 *tp)
3958 {
3959 int err;
3960
3961 tg3_enable_register_access(tp);
3962
3963 err = pci_set_power_state(tp->pdev, PCI_D0);
3964 if (!err) {
3965 /* Switch out of Vaux if it is a NIC */
3966 tg3_pwrsrc_switch_to_vmain(tp);
3967 } else {
3968 netdev_err(tp->dev, "Transition to D0 failed\n");
3969 }
3970
3971 return err;
3972 }
3973
3974 static int tg3_setup_phy(struct tg3 *, bool);
3975
3976 static int tg3_power_down_prepare(struct tg3 *tp)
3977 {
3978 u32 misc_host_ctrl;
3979 bool device_should_wake, do_low_power;
3980
3981 tg3_enable_register_access(tp);
3982
3983 /* Restore the CLKREQ setting. */
3984 if (tg3_flag(tp, CLKREQ_BUG))
3985 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3986 PCI_EXP_LNKCTL_CLKREQ_EN);
3987
3988 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3989 tw32(TG3PCI_MISC_HOST_CTRL,
3990 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3991
3992 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3993 tg3_flag(tp, WOL_ENABLE);
3994
3995 if (tg3_flag(tp, USE_PHYLIB)) {
3996 do_low_power = false;
3997 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3998 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3999 struct phy_device *phydev;
4000 u32 phyid, advertising;
4001
4002 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4003
4004 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4005
4006 tp->link_config.speed = phydev->speed;
4007 tp->link_config.duplex = phydev->duplex;
4008 tp->link_config.autoneg = phydev->autoneg;
4009 tp->link_config.advertising = phydev->advertising;
4010
4011 advertising = ADVERTISED_TP |
4012 ADVERTISED_Pause |
4013 ADVERTISED_Autoneg |
4014 ADVERTISED_10baseT_Half;
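			/* Start from the lowest-power link mode (10 Mb/s
			 * half duplex); the WOL/ASF cases below widen the
			 * advertisement as needed.
			 */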
4015
4016 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4017 if (tg3_flag(tp, WOL_SPEED_100MB))
4018 advertising |=
4019 ADVERTISED_100baseT_Half |
4020 ADVERTISED_100baseT_Full |
4021 ADVERTISED_10baseT_Full;
4022 else
4023 advertising |= ADVERTISED_10baseT_Full;
4024 }
4025
4026 phydev->advertising = advertising;
4027
4028 phy_start_aneg(phydev);
4029
4030 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4031 if (phyid != PHY_ID_BCMAC131) {
4032 phyid &= PHY_BCM_OUI_MASK;
4033 if (phyid == PHY_BCM_OUI_1 ||
4034 phyid == PHY_BCM_OUI_2 ||
4035 phyid == PHY_BCM_OUI_3)
4036 do_low_power = true;
4037 }
4038 }
4039 } else {
4040 do_low_power = true;
4041
4042 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4043 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4044
4045 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4046 tg3_setup_phy(tp, false);
4047 }
4048
4049 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4050 u32 val;
4051
4052 val = tr32(GRC_VCPU_EXT_CTRL);
4053 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4054 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4055 int i;
4056 u32 val;
4057
4058 for (i = 0; i < 200; i++) {
4059 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4060 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4061 break;
4062 msleep(1);
4063 }
4064 }
4065 if (tg3_flag(tp, WOL_CAP))
4066 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4067 WOL_DRV_STATE_SHUTDOWN |
4068 WOL_DRV_WOL |
4069 WOL_SET_MAGIC_PKT);
4070
4071 if (device_should_wake) {
4072 u32 mac_mode;
4073
4074 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4075 if (do_low_power &&
4076 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4077 tg3_phy_auxctl_write(tp,
4078 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4079 MII_TG3_AUXCTL_PCTL_WOL_EN |
4080 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4081 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4082 udelay(40);
4083 }
4084
4085 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4086 mac_mode = MAC_MODE_PORT_MODE_GMII;
4087 else if (tp->phy_flags &
4088 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4089 if (tp->link_config.active_speed == SPEED_1000)
4090 mac_mode = MAC_MODE_PORT_MODE_GMII;
4091 else
4092 mac_mode = MAC_MODE_PORT_MODE_MII;
4093 } else
4094 mac_mode = MAC_MODE_PORT_MODE_MII;
4095
4096 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4097 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4098 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4099 SPEED_100 : SPEED_10;
4100 if (tg3_5700_link_polarity(tp, speed))
4101 mac_mode |= MAC_MODE_LINK_POLARITY;
4102 else
4103 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4104 }
4105 } else {
4106 mac_mode = MAC_MODE_PORT_MODE_TBI;
4107 }
4108
4109 if (!tg3_flag(tp, 5750_PLUS))
4110 tw32(MAC_LED_CTRL, tp->led_ctrl);
4111
4112 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4113 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4114 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4115 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4116
4117 if (tg3_flag(tp, ENABLE_APE))
4118 mac_mode |= MAC_MODE_APE_TX_EN |
4119 MAC_MODE_APE_RX_EN |
4120 MAC_MODE_TDE_ENABLE;
4121
4122 tw32_f(MAC_MODE, mac_mode);
4123 udelay(100);
4124
4125 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4126 udelay(10);
4127 }
4128
4129 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4130 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4131 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4132 u32 base_val;
4133
4134 base_val = tp->pci_clock_ctrl;
4135 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4136 CLOCK_CTRL_TXCLK_DISABLE);
4137
4138 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4139 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4140 } else if (tg3_flag(tp, 5780_CLASS) ||
4141 tg3_flag(tp, CPMU_PRESENT) ||
4142 tg3_asic_rev(tp) == ASIC_REV_5906) {
4143 /* do nothing */
4144 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4145 u32 newbits1, newbits2;
4146
4147 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4148 tg3_asic_rev(tp) == ASIC_REV_5701) {
4149 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4150 CLOCK_CTRL_TXCLK_DISABLE |
4151 CLOCK_CTRL_ALTCLK);
4152 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4153 } else if (tg3_flag(tp, 5705_PLUS)) {
4154 newbits1 = CLOCK_CTRL_625_CORE;
4155 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4156 } else {
4157 newbits1 = CLOCK_CTRL_ALTCLK;
4158 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4159 }
4160
4161 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4162 40);
4163
4164 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4165 40);
4166
4167 if (!tg3_flag(tp, 5705_PLUS)) {
4168 u32 newbits3;
4169
4170 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4171 tg3_asic_rev(tp) == ASIC_REV_5701) {
4172 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4173 CLOCK_CTRL_TXCLK_DISABLE |
4174 CLOCK_CTRL_44MHZ_CORE);
4175 } else {
4176 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4177 }
4178
4179 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4180 tp->pci_clock_ctrl | newbits3, 40);
4181 }
4182 }
4183
4184 	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4185 tg3_power_down_phy(tp, do_low_power);
4186
4187 tg3_frob_aux_power(tp, true);
4188
4189 /* Workaround for unstable PLL clock */
4190 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4191 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4192 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4193 u32 val = tr32(0x7d00);
4194
4195 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4196 tw32(0x7d00, val);
4197 if (!tg3_flag(tp, ENABLE_ASF)) {
4198 int err;
4199
4200 err = tg3_nvram_lock(tp);
4201 tg3_halt_cpu(tp, RX_CPU_BASE);
4202 if (!err)
4203 tg3_nvram_unlock(tp);
4204 }
4205 }
4206
4207 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4208
4209 return 0;
4210 }
4211
4212 static void tg3_power_down(struct tg3 *tp)
4213 {
4214 tg3_power_down_prepare(tp);
4215
4216 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4217 pci_set_power_state(tp->pdev, PCI_D3hot);
4218 }
4219
4220 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4221 {
4222 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4223 case MII_TG3_AUX_STAT_10HALF:
4224 *speed = SPEED_10;
4225 *duplex = DUPLEX_HALF;
4226 break;
4227
4228 case MII_TG3_AUX_STAT_10FULL:
4229 *speed = SPEED_10;
4230 *duplex = DUPLEX_FULL;
4231 break;
4232
4233 case MII_TG3_AUX_STAT_100HALF:
4234 *speed = SPEED_100;
4235 *duplex = DUPLEX_HALF;
4236 break;
4237
4238 case MII_TG3_AUX_STAT_100FULL:
4239 *speed = SPEED_100;
4240 *duplex = DUPLEX_FULL;
4241 break;
4242
4243 case MII_TG3_AUX_STAT_1000HALF:
4244 *speed = SPEED_1000;
4245 *duplex = DUPLEX_HALF;
4246 break;
4247
4248 case MII_TG3_AUX_STAT_1000FULL:
4249 *speed = SPEED_1000;
4250 *duplex = DUPLEX_FULL;
4251 break;
4252
4253 default:
4254 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4255 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4256 SPEED_10;
4257 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4258 DUPLEX_HALF;
4259 break;
4260 }
4261 *speed = SPEED_UNKNOWN;
4262 *duplex = DUPLEX_UNKNOWN;
4263 break;
4264 }
4265 }
4266
4267 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4268 {
4269 int err = 0;
4270 u32 val, new_adv;
4271
4272 new_adv = ADVERTISE_CSMA;
4273 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4274 new_adv |= mii_advertise_flowctrl(flowctrl);
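	/* Worked example: advertise = ADVERTISED_10baseT_Full |
	 * ADVERTISED_100baseT_Full with flowctrl = FLOW_CTRL_TX |
	 * FLOW_CTRL_RX yields ADVERTISE_CSMA | ADVERTISE_10FULL |
	 * ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP.
	 */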
4275
4276 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4277 if (err)
4278 goto done;
4279
4280 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4281 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4282
4283 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4284 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4285 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4286
4287 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4288 if (err)
4289 goto done;
4290 }
4291
4292 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4293 goto done;
4294
4295 tw32(TG3_CPMU_EEE_MODE,
4296 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4297
4298 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4299 if (!err) {
4300 u32 err2;
4301
4302 val = 0;
4303 /* Advertise 100-BaseTX EEE ability */
4304 if (advertise & ADVERTISED_100baseT_Full)
4305 val |= MDIO_AN_EEE_ADV_100TX;
4306 /* Advertise 1000-BaseT EEE ability */
4307 if (advertise & ADVERTISED_1000baseT_Full)
4308 val |= MDIO_AN_EEE_ADV_1000T;
4309 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4310 if (err)
4311 val = 0;
4312
4313 switch (tg3_asic_rev(tp)) {
4314 case ASIC_REV_5717:
4315 case ASIC_REV_57765:
4316 case ASIC_REV_57766:
4317 case ASIC_REV_5719:
4318 			/* If we advertised any EEE abilities above... */
4319 if (val)
4320 val = MII_TG3_DSP_TAP26_ALNOKO |
4321 MII_TG3_DSP_TAP26_RMRXSTO |
4322 MII_TG3_DSP_TAP26_OPCSINPT;
4323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4324 /* Fall through */
4325 case ASIC_REV_5720:
4326 case ASIC_REV_5762:
4327 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4328 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4329 MII_TG3_DSP_CH34TP2_HIBW01);
4330 }
4331
4332 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4333 if (!err)
4334 err = err2;
4335 }
4336
4337 done:
4338 return err;
4339 }
4340
4341 static void tg3_phy_copper_begin(struct tg3 *tp)
4342 {
4343 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4344 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4345 u32 adv, fc;
4346
4347 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4348 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4349 adv = ADVERTISED_10baseT_Half |
4350 ADVERTISED_10baseT_Full;
4351 if (tg3_flag(tp, WOL_SPEED_100MB))
4352 adv |= ADVERTISED_100baseT_Half |
4353 ADVERTISED_100baseT_Full;
4354 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4355 adv |= ADVERTISED_1000baseT_Half |
4356 ADVERTISED_1000baseT_Full;
4357
4358 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4359 } else {
4360 adv = tp->link_config.advertising;
4361 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4362 adv &= ~(ADVERTISED_1000baseT_Half |
4363 ADVERTISED_1000baseT_Full);
4364
4365 fc = tp->link_config.flowctrl;
4366 }
4367
4368 tg3_phy_autoneg_cfg(tp, adv, fc);
4369
4370 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4371 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4372 /* Normally during power down we want to autonegotiate
4373 * the lowest possible speed for WOL. However, to avoid
4374 * link flap, we leave it untouched.
4375 */
4376 return;
4377 }
4378
4379 tg3_writephy(tp, MII_BMCR,
4380 BMCR_ANENABLE | BMCR_ANRESTART);
4381 } else {
4382 int i;
4383 u32 bmcr, orig_bmcr;
4384
4385 tp->link_config.active_speed = tp->link_config.speed;
4386 tp->link_config.active_duplex = tp->link_config.duplex;
4387
4388 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4389 /* With autoneg disabled, 5715 only links up when the
4390 * advertisement register has the configured speed
4391 * enabled.
4392 */
4393 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4394 }
4395
4396 bmcr = 0;
4397 switch (tp->link_config.speed) {
4398 default:
4399 case SPEED_10:
4400 break;
4401
4402 case SPEED_100:
4403 bmcr |= BMCR_SPEED100;
4404 break;
4405
4406 case SPEED_1000:
4407 bmcr |= BMCR_SPEED1000;
4408 break;
4409 }
4410
4411 if (tp->link_config.duplex == DUPLEX_FULL)
4412 bmcr |= BMCR_FULLDPLX;
4413
4414 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4415 (bmcr != orig_bmcr)) {
4416 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4417 for (i = 0; i < 1500; i++) {
4418 u32 tmp;
4419
4420 udelay(10);
4421 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4422 tg3_readphy(tp, MII_BMSR, &tmp))
4423 continue;
4424 if (!(tmp & BMSR_LSTATUS)) {
4425 udelay(40);
4426 break;
4427 }
4428 }
4429 tg3_writephy(tp, MII_BMCR, bmcr);
4430 udelay(40);
4431 }
4432 }
4433 }
4434
4435 static int tg3_phy_pull_config(struct tg3 *tp)
4436 {
4437 int err;
4438 u32 val;
4439
4440 err = tg3_readphy(tp, MII_BMCR, &val);
4441 if (err)
4442 goto done;
4443
4444 if (!(val & BMCR_ANENABLE)) {
4445 tp->link_config.autoneg = AUTONEG_DISABLE;
4446 tp->link_config.advertising = 0;
4447 tg3_flag_clear(tp, PAUSE_AUTONEG);
4448
4449 err = -EIO;
4450
4451 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4452 case 0:
4453 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4454 goto done;
4455
4456 tp->link_config.speed = SPEED_10;
4457 break;
4458 case BMCR_SPEED100:
4459 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4460 goto done;
4461
4462 tp->link_config.speed = SPEED_100;
4463 break;
4464 case BMCR_SPEED1000:
4465 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4466 tp->link_config.speed = SPEED_1000;
4467 break;
4468 }
4469 /* Fall through */
4470 default:
4471 goto done;
4472 }
4473
4474 if (val & BMCR_FULLDPLX)
4475 tp->link_config.duplex = DUPLEX_FULL;
4476 else
4477 tp->link_config.duplex = DUPLEX_HALF;
4478
4479 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4480
4481 err = 0;
4482 goto done;
4483 }
4484
4485 tp->link_config.autoneg = AUTONEG_ENABLE;
4486 tp->link_config.advertising = ADVERTISED_Autoneg;
4487 tg3_flag_set(tp, PAUSE_AUTONEG);
4488
4489 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4490 u32 adv;
4491
4492 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4493 if (err)
4494 goto done;
4495
4496 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4497 tp->link_config.advertising |= adv | ADVERTISED_TP;
4498
4499 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4500 } else {
4501 tp->link_config.advertising |= ADVERTISED_FIBRE;
4502 }
4503
4504 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4505 u32 adv;
4506
4507 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4508 err = tg3_readphy(tp, MII_CTRL1000, &val);
4509 if (err)
4510 goto done;
4511
4512 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4513 } else {
4514 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4515 if (err)
4516 goto done;
4517
4518 adv = tg3_decode_flowctrl_1000X(val);
4519 tp->link_config.flowctrl = adv;
4520
4521 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4522 adv = mii_adv_to_ethtool_adv_x(val);
4523 }
4524
4525 tp->link_config.advertising |= adv;
4526 }
4527
4528 done:
4529 return err;
4530 }
4531
4532 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4533 {
4534 int err;
4535
4536 	/* Turn off tap power management and
4537 	 * set the extended packet length bit. */
4538 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4539
4540 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4541 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4542 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4543 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4544 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4545
4546 udelay(40);
4547
4548 return err;
4549 }
4550
4551 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4552 {
4553 u32 val;
4554 u32 tgtadv = 0;
4555 u32 advertising = tp->link_config.advertising;
4556
4557 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4558 return true;
4559
4560 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4561 return false;
4562
4563 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4564
4566 if (advertising & ADVERTISED_100baseT_Full)
4567 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4568 if (advertising & ADVERTISED_1000baseT_Full)
4569 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4570
4571 if (val != tgtadv)
4572 return false;
4573
4574 return true;
4575 }
4576
4577 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4578 {
4579 u32 advmsk, tgtadv, advertising;
4580
4581 advertising = tp->link_config.advertising;
4582 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4583
4584 advmsk = ADVERTISE_ALL;
4585 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4586 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4587 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4588 }
4589
4590 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4591 return false;
4592
4593 if ((*lcladv & advmsk) != tgtadv)
4594 return false;
4595
4596 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4597 u32 tg3_ctrl;
4598
4599 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4600
4601 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4602 return false;
4603
4604 if (tgtadv &&
4605 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4606 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4607 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4608 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4609 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4610 } else {
4611 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4612 }
4613
4614 if (tg3_ctrl != tgtadv)
4615 return false;
4616 }
4617
4618 return true;
4619 }
4620
4621 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4622 {
4623 u32 lpeth = 0;
4624
4625 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4626 u32 val;
4627
4628 if (tg3_readphy(tp, MII_STAT1000, &val))
4629 return false;
4630
4631 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4632 }
4633
4634 if (tg3_readphy(tp, MII_LPA, rmtadv))
4635 return false;
4636
4637 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4638 tp->link_config.rmt_adv = lpeth;
4639
4640 return true;
4641 }
4642
4643 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4644 {
4645 if (curr_link_up != tp->link_up) {
4646 if (curr_link_up) {
4647 netif_carrier_on(tp->dev);
4648 } else {
4649 netif_carrier_off(tp->dev);
4650 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4651 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4652 }
4653
4654 tg3_link_report(tp);
4655 return true;
4656 }
4657
4658 return false;
4659 }
4660
4661 static void tg3_clear_mac_status(struct tg3 *tp)
4662 {
4663 tw32(MAC_EVENT, 0);
4664
4665 tw32_f(MAC_STATUS,
4666 MAC_STATUS_SYNC_CHANGED |
4667 MAC_STATUS_CFG_CHANGED |
4668 MAC_STATUS_MI_COMPLETION |
4669 MAC_STATUS_LNKSTATE_CHANGED);
4670 udelay(40);
4671 }
4672
4673 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4674 {
4675 bool current_link_up;
4676 u32 bmsr, val;
4677 u32 lcl_adv, rmt_adv;
4678 u16 current_speed;
4679 u8 current_duplex;
4680 int i, err;
4681
4682 tg3_clear_mac_status(tp);
4683
4684 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4685 tw32_f(MAC_MI_MODE,
4686 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4687 udelay(80);
4688 }
4689
4690 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4691
4692 /* Some third-party PHYs need to be reset on link going
4693 * down.
4694 */
4695 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4696 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4697 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4698 tp->link_up) {
4699 tg3_readphy(tp, MII_BMSR, &bmsr);
4700 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4701 !(bmsr & BMSR_LSTATUS))
4702 force_reset = true;
4703 }
4704 if (force_reset)
4705 tg3_phy_reset(tp);
4706
4707 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4708 tg3_readphy(tp, MII_BMSR, &bmsr);
4709 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4710 !tg3_flag(tp, INIT_COMPLETE))
4711 bmsr = 0;
4712
4713 if (!(bmsr & BMSR_LSTATUS)) {
4714 err = tg3_init_5401phy_dsp(tp);
4715 if (err)
4716 return err;
4717
4718 tg3_readphy(tp, MII_BMSR, &bmsr);
4719 for (i = 0; i < 1000; i++) {
4720 udelay(10);
4721 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4722 (bmsr & BMSR_LSTATUS)) {
4723 udelay(40);
4724 break;
4725 }
4726 }
4727
4728 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4729 TG3_PHY_REV_BCM5401_B0 &&
4730 !(bmsr & BMSR_LSTATUS) &&
4731 tp->link_config.active_speed == SPEED_1000) {
4732 err = tg3_phy_reset(tp);
4733 if (!err)
4734 err = tg3_init_5401phy_dsp(tp);
4735 if (err)
4736 return err;
4737 }
4738 }
4739 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4740 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4741 /* 5701 {A0,B0} CRC bug workaround */
4742 tg3_writephy(tp, 0x15, 0x0a75);
4743 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4744 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4745 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4746 }
4747
4748 /* Clear pending interrupts... */
4749 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4750 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4751
4752 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4753 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4754 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4755 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4756
4757 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4758 tg3_asic_rev(tp) == ASIC_REV_5701) {
4759 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4760 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4761 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4762 else
4763 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4764 }
4765
4766 current_link_up = false;
4767 current_speed = SPEED_UNKNOWN;
4768 current_duplex = DUPLEX_UNKNOWN;
4769 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4770 tp->link_config.rmt_adv = 0;
4771
4772 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4773 err = tg3_phy_auxctl_read(tp,
4774 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4775 &val);
4776 if (!err && !(val & (1 << 10))) {
4777 tg3_phy_auxctl_write(tp,
4778 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4779 val | (1 << 10));
4780 goto relink;
4781 }
4782 }
4783
4784 bmsr = 0;
4785 for (i = 0; i < 100; i++) {
4786 tg3_readphy(tp, MII_BMSR, &bmsr);
4787 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788 (bmsr & BMSR_LSTATUS))
4789 break;
4790 udelay(40);
4791 }
4792
4793 if (bmsr & BMSR_LSTATUS) {
4794 u32 aux_stat, bmcr;
4795
4796 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4797 for (i = 0; i < 2000; i++) {
4798 udelay(10);
4799 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4800 aux_stat)
4801 break;
4802 }
4803
4804 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4805 &current_speed,
4806 &current_duplex);
4807
4808 bmcr = 0;
4809 for (i = 0; i < 200; i++) {
4810 tg3_readphy(tp, MII_BMCR, &bmcr);
4811 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4812 continue;
4813 if (bmcr && bmcr != 0x7fff)
4814 break;
4815 udelay(10);
4816 }
4817
4818 lcl_adv = 0;
4819 rmt_adv = 0;
4820
4821 tp->link_config.active_speed = current_speed;
4822 tp->link_config.active_duplex = current_duplex;
4823
4824 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4825 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4826
4827 if ((bmcr & BMCR_ANENABLE) &&
4828 eee_config_ok &&
4829 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4830 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4831 current_link_up = true;
4832
4833 /* EEE settings changes take effect only after a phy
4834 * reset. If we have skipped a reset due to Link Flap
4835 * Avoidance being enabled, do it now.
4836 */
4837 if (!eee_config_ok &&
4838 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4839 !force_reset)
4840 tg3_phy_reset(tp);
4841 } else {
4842 if (!(bmcr & BMCR_ANENABLE) &&
4843 tp->link_config.speed == current_speed &&
4844 tp->link_config.duplex == current_duplex) {
4845 current_link_up = true;
4846 }
4847 }
4848
4849 if (current_link_up &&
4850 tp->link_config.active_duplex == DUPLEX_FULL) {
4851 u32 reg, bit;
4852
4853 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4854 reg = MII_TG3_FET_GEN_STAT;
4855 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4856 } else {
4857 reg = MII_TG3_EXT_STAT;
4858 bit = MII_TG3_EXT_STAT_MDIX;
4859 }
4860
4861 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4862 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4863
4864 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4865 }
4866 }
4867
4868 relink:
4869 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4870 tg3_phy_copper_begin(tp);
4871
4872 if (tg3_flag(tp, ROBOSWITCH)) {
4873 current_link_up = true;
4874 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4875 current_speed = SPEED_1000;
4876 current_duplex = DUPLEX_FULL;
4877 tp->link_config.active_speed = current_speed;
4878 tp->link_config.active_duplex = current_duplex;
4879 }
4880
4881 tg3_readphy(tp, MII_BMSR, &bmsr);
4882 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4883 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4884 current_link_up = true;
4885 }
4886
4887 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4888 if (current_link_up) {
4889 if (tp->link_config.active_speed == SPEED_100 ||
4890 tp->link_config.active_speed == SPEED_10)
4891 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4892 else
4893 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4894 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4895 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4896 else
4897 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4898
4899 /* In order for the 5750 core in BCM4785 chip to work properly
4900 	 * in RGMII mode, the LED Control Register must be set up.
4901 */
4902 if (tg3_flag(tp, RGMII_MODE)) {
4903 u32 led_ctrl = tr32(MAC_LED_CTRL);
4904 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4905
4906 if (tp->link_config.active_speed == SPEED_10)
4907 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4908 else if (tp->link_config.active_speed == SPEED_100)
4909 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4910 LED_CTRL_100MBPS_ON);
4911 else if (tp->link_config.active_speed == SPEED_1000)
4912 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4913 LED_CTRL_1000MBPS_ON);
4914
4915 tw32(MAC_LED_CTRL, led_ctrl);
4916 udelay(40);
4917 }
4918
4919 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4920 if (tp->link_config.active_duplex == DUPLEX_HALF)
4921 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4922
4923 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4924 if (current_link_up &&
4925 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4926 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4927 else
4928 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4929 }
4930
4931 /* ??? Without this setting Netgear GA302T PHY does not
4932 * ??? send/receive packets...
4933 */
4934 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4935 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4936 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4937 tw32_f(MAC_MI_MODE, tp->mi_mode);
4938 udelay(80);
4939 }
4940
4941 tw32_f(MAC_MODE, tp->mac_mode);
4942 udelay(40);
4943
4944 tg3_phy_eee_adjust(tp, current_link_up);
4945
4946 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4947 /* Polled via timer. */
4948 tw32_f(MAC_EVENT, 0);
4949 } else {
4950 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4951 }
4952 udelay(40);
4953
4954 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4955 current_link_up &&
4956 tp->link_config.active_speed == SPEED_1000 &&
4957 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4958 udelay(120);
4959 tw32_f(MAC_STATUS,
4960 (MAC_STATUS_SYNC_CHANGED |
4961 MAC_STATUS_CFG_CHANGED));
4962 udelay(40);
4963 tg3_write_mem(tp,
4964 NIC_SRAM_FIRMWARE_MBOX,
4965 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4966 }
4967
4968 /* Prevent send BD corruption. */
4969 if (tg3_flag(tp, CLKREQ_BUG)) {
4970 if (tp->link_config.active_speed == SPEED_100 ||
4971 tp->link_config.active_speed == SPEED_10)
4972 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4973 PCI_EXP_LNKCTL_CLKREQ_EN);
4974 else
4975 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4976 PCI_EXP_LNKCTL_CLKREQ_EN);
4977 }
4978
4979 tg3_test_and_report_link_chg(tp, current_link_up);
4980
4981 return 0;
4982 }
4983
4984 struct tg3_fiber_aneginfo {
4985 int state;
4986 #define ANEG_STATE_UNKNOWN 0
4987 #define ANEG_STATE_AN_ENABLE 1
4988 #define ANEG_STATE_RESTART_INIT 2
4989 #define ANEG_STATE_RESTART 3
4990 #define ANEG_STATE_DISABLE_LINK_OK 4
4991 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4992 #define ANEG_STATE_ABILITY_DETECT 6
4993 #define ANEG_STATE_ACK_DETECT_INIT 7
4994 #define ANEG_STATE_ACK_DETECT 8
4995 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4996 #define ANEG_STATE_COMPLETE_ACK 10
4997 #define ANEG_STATE_IDLE_DETECT_INIT 11
4998 #define ANEG_STATE_IDLE_DETECT 12
4999 #define ANEG_STATE_LINK_OK 13
5000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5001 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5002
5003 u32 flags;
5004 #define MR_AN_ENABLE 0x00000001
5005 #define MR_RESTART_AN 0x00000002
5006 #define MR_AN_COMPLETE 0x00000004
5007 #define MR_PAGE_RX 0x00000008
5008 #define MR_NP_LOADED 0x00000010
5009 #define MR_TOGGLE_TX 0x00000020
5010 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5011 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5012 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5013 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5016 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5017 #define MR_TOGGLE_RX 0x00002000
5018 #define MR_NP_RX 0x00004000
5019
5020 #define MR_LINK_OK 0x80000000
5021
5022 unsigned long link_time, cur_time;
5023
5024 u32 ability_match_cfg;
5025 int ability_match_count;
5026
5027 char ability_match, idle_match, ack_match;
5028
5029 u32 txconfig, rxconfig;
5030 #define ANEG_CFG_NP 0x00000080
5031 #define ANEG_CFG_ACK 0x00000040
5032 #define ANEG_CFG_RF2 0x00000020
5033 #define ANEG_CFG_RF1 0x00000010
5034 #define ANEG_CFG_PS2 0x00000001
5035 #define ANEG_CFG_PS1 0x00008000
5036 #define ANEG_CFG_HD 0x00004000
5037 #define ANEG_CFG_FD 0x00002000
5038 #define ANEG_CFG_INVAL 0x00001f06
5039
5040 };
5041 #define ANEG_OK 0
5042 #define ANEG_DONE 1
5043 #define ANEG_TIMER_ENAB 2
5044 #define ANEG_FAILED -1
5045
5046 #define ANEG_STATE_SETTLE_TIME 10000
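/* Measured in state machine ticks; tg3_fiber_aneg_smachine() is driven
 * about once per microsecond from fiber_autoneg(), so this works out to
 * roughly a 10 ms settle time.
 */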
5047
5048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5049 struct tg3_fiber_aneginfo *ap)
5050 {
5051 u16 flowctrl;
5052 unsigned long delta;
5053 u32 rx_cfg_reg;
5054 int ret;
5055
5056 if (ap->state == ANEG_STATE_UNKNOWN) {
5057 ap->rxconfig = 0;
5058 ap->link_time = 0;
5059 ap->cur_time = 0;
5060 ap->ability_match_cfg = 0;
5061 ap->ability_match_count = 0;
5062 ap->ability_match = 0;
5063 ap->idle_match = 0;
5064 ap->ack_match = 0;
5065 }
5066 ap->cur_time++;
5067
5068 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5069 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5070
5071 if (rx_cfg_reg != ap->ability_match_cfg) {
5072 ap->ability_match_cfg = rx_cfg_reg;
5073 ap->ability_match = 0;
5074 ap->ability_match_count = 0;
5075 } else {
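			/* Same config word as last time; insist on seeing
			 * it twice in a row before declaring an ability
			 * match.
			 */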
5076 if (++ap->ability_match_count > 1) {
5077 ap->ability_match = 1;
5078 ap->ability_match_cfg = rx_cfg_reg;
5079 }
5080 }
5081 if (rx_cfg_reg & ANEG_CFG_ACK)
5082 ap->ack_match = 1;
5083 else
5084 ap->ack_match = 0;
5085
5086 ap->idle_match = 0;
5087 } else {
5088 ap->idle_match = 1;
5089 ap->ability_match_cfg = 0;
5090 ap->ability_match_count = 0;
5091 ap->ability_match = 0;
5092 ap->ack_match = 0;
5093
5094 rx_cfg_reg = 0;
5095 }
5096
5097 ap->rxconfig = rx_cfg_reg;
5098 ret = ANEG_OK;
5099
5100 switch (ap->state) {
5101 case ANEG_STATE_UNKNOWN:
5102 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5103 ap->state = ANEG_STATE_AN_ENABLE;
5104
5105 /* fallthru */
5106 case ANEG_STATE_AN_ENABLE:
5107 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5108 if (ap->flags & MR_AN_ENABLE) {
5109 ap->link_time = 0;
5110 ap->cur_time = 0;
5111 ap->ability_match_cfg = 0;
5112 ap->ability_match_count = 0;
5113 ap->ability_match = 0;
5114 ap->idle_match = 0;
5115 ap->ack_match = 0;
5116
5117 ap->state = ANEG_STATE_RESTART_INIT;
5118 } else {
5119 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5120 }
5121 break;
5122
5123 case ANEG_STATE_RESTART_INIT:
5124 ap->link_time = ap->cur_time;
5125 ap->flags &= ~(MR_NP_LOADED);
5126 ap->txconfig = 0;
5127 tw32(MAC_TX_AUTO_NEG, 0);
5128 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5129 tw32_f(MAC_MODE, tp->mac_mode);
5130 udelay(40);
5131
5132 ret = ANEG_TIMER_ENAB;
5133 ap->state = ANEG_STATE_RESTART;
5134
5135 /* fallthru */
5136 case ANEG_STATE_RESTART:
5137 delta = ap->cur_time - ap->link_time;
5138 if (delta > ANEG_STATE_SETTLE_TIME)
5139 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5140 else
5141 ret = ANEG_TIMER_ENAB;
5142 break;
5143
5144 case ANEG_STATE_DISABLE_LINK_OK:
5145 ret = ANEG_DONE;
5146 break;
5147
5148 case ANEG_STATE_ABILITY_DETECT_INIT:
5149 ap->flags &= ~(MR_TOGGLE_TX);
5150 ap->txconfig = ANEG_CFG_FD;
5151 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5152 if (flowctrl & ADVERTISE_1000XPAUSE)
5153 ap->txconfig |= ANEG_CFG_PS1;
5154 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5155 ap->txconfig |= ANEG_CFG_PS2;
5156 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5157 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5158 tw32_f(MAC_MODE, tp->mac_mode);
5159 udelay(40);
5160
5161 ap->state = ANEG_STATE_ABILITY_DETECT;
5162 break;
5163
5164 case ANEG_STATE_ABILITY_DETECT:
5165 if (ap->ability_match != 0 && ap->rxconfig != 0)
5166 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5167 break;
5168
5169 case ANEG_STATE_ACK_DETECT_INIT:
5170 ap->txconfig |= ANEG_CFG_ACK;
5171 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5172 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5173 tw32_f(MAC_MODE, tp->mac_mode);
5174 udelay(40);
5175
5176 ap->state = ANEG_STATE_ACK_DETECT;
5177
5178 /* fallthru */
5179 case ANEG_STATE_ACK_DETECT:
5180 if (ap->ack_match != 0) {
5181 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5182 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5183 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5184 } else {
5185 ap->state = ANEG_STATE_AN_ENABLE;
5186 }
5187 } else if (ap->ability_match != 0 &&
5188 ap->rxconfig == 0) {
5189 ap->state = ANEG_STATE_AN_ENABLE;
5190 }
5191 break;
5192
5193 case ANEG_STATE_COMPLETE_ACK_INIT:
5194 if (ap->rxconfig & ANEG_CFG_INVAL) {
5195 ret = ANEG_FAILED;
5196 break;
5197 }
5198 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5199 MR_LP_ADV_HALF_DUPLEX |
5200 MR_LP_ADV_SYM_PAUSE |
5201 MR_LP_ADV_ASYM_PAUSE |
5202 MR_LP_ADV_REMOTE_FAULT1 |
5203 MR_LP_ADV_REMOTE_FAULT2 |
5204 MR_LP_ADV_NEXT_PAGE |
5205 MR_TOGGLE_RX |
5206 MR_NP_RX);
5207 if (ap->rxconfig & ANEG_CFG_FD)
5208 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5209 if (ap->rxconfig & ANEG_CFG_HD)
5210 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5211 if (ap->rxconfig & ANEG_CFG_PS1)
5212 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5213 if (ap->rxconfig & ANEG_CFG_PS2)
5214 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5215 if (ap->rxconfig & ANEG_CFG_RF1)
5216 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5217 if (ap->rxconfig & ANEG_CFG_RF2)
5218 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5219 if (ap->rxconfig & ANEG_CFG_NP)
5220 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5221
5222 ap->link_time = ap->cur_time;
5223
5224 ap->flags ^= (MR_TOGGLE_TX);
5225 if (ap->rxconfig & 0x0008)
5226 ap->flags |= MR_TOGGLE_RX;
5227 if (ap->rxconfig & ANEG_CFG_NP)
5228 ap->flags |= MR_NP_RX;
5229 ap->flags |= MR_PAGE_RX;
5230
5231 ap->state = ANEG_STATE_COMPLETE_ACK;
5232 ret = ANEG_TIMER_ENAB;
5233 break;
5234
5235 case ANEG_STATE_COMPLETE_ACK:
5236 if (ap->ability_match != 0 &&
5237 ap->rxconfig == 0) {
5238 ap->state = ANEG_STATE_AN_ENABLE;
5239 break;
5240 }
5241 delta = ap->cur_time - ap->link_time;
5242 if (delta > ANEG_STATE_SETTLE_TIME) {
5243 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5244 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5245 } else {
5246 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5247 !(ap->flags & MR_NP_RX)) {
5248 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5249 } else {
5250 ret = ANEG_FAILED;
5251 }
5252 }
5253 }
5254 break;
5255
5256 case ANEG_STATE_IDLE_DETECT_INIT:
5257 ap->link_time = ap->cur_time;
5258 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5259 tw32_f(MAC_MODE, tp->mac_mode);
5260 udelay(40);
5261
5262 ap->state = ANEG_STATE_IDLE_DETECT;
5263 ret = ANEG_TIMER_ENAB;
5264 break;
5265
5266 case ANEG_STATE_IDLE_DETECT:
5267 if (ap->ability_match != 0 &&
5268 ap->rxconfig == 0) {
5269 ap->state = ANEG_STATE_AN_ENABLE;
5270 break;
5271 }
5272 delta = ap->cur_time - ap->link_time;
5273 if (delta > ANEG_STATE_SETTLE_TIME) {
5274 /* XXX another gem from the Broadcom driver :( */
5275 ap->state = ANEG_STATE_LINK_OK;
5276 }
5277 break;
5278
5279 case ANEG_STATE_LINK_OK:
5280 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5281 ret = ANEG_DONE;
5282 break;
5283
5284 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5285 /* ??? unimplemented */
5286 break;
5287
5288 case ANEG_STATE_NEXT_PAGE_WAIT:
5289 /* ??? unimplemented */
5290 break;
5291
5292 default:
5293 ret = ANEG_FAILED;
5294 break;
5295 }
5296
5297 return ret;
5298 }
5299
5300 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5301 {
5302 int res = 0;
5303 struct tg3_fiber_aneginfo aninfo;
5304 int status = ANEG_FAILED;
5305 unsigned int tick;
5306 u32 tmp;
5307
5308 tw32_f(MAC_TX_AUTO_NEG, 0);
5309
5310 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5311 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5312 udelay(40);
5313
5314 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5315 udelay(40);
5316
5317 memset(&aninfo, 0, sizeof(aninfo));
5318 aninfo.flags |= MR_AN_ENABLE;
5319 aninfo.state = ANEG_STATE_UNKNOWN;
5320 aninfo.cur_time = 0;
5321 tick = 0;
5322 while (++tick < 195000) {
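		/* Each pass costs ~1 us, bounding the whole autoneg
		 * attempt at roughly 195 ms.
		 */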
5323 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5324 if (status == ANEG_DONE || status == ANEG_FAILED)
5325 break;
5326
5327 udelay(1);
5328 }
5329
5330 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331 tw32_f(MAC_MODE, tp->mac_mode);
5332 udelay(40);
5333
5334 *txflags = aninfo.txconfig;
5335 *rxflags = aninfo.flags;
5336
5337 if (status == ANEG_DONE &&
5338 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5339 MR_LP_ADV_FULL_DUPLEX)))
5340 res = 1;
5341
5342 return res;
5343 }
5344
5345 static void tg3_init_bcm8002(struct tg3 *tp)
5346 {
5347 u32 mac_status = tr32(MAC_STATUS);
5348 int i;
5349
5350 	/* Reset when initializing the first time or when we have a link. */
5351 if (tg3_flag(tp, INIT_COMPLETE) &&
5352 !(mac_status & MAC_STATUS_PCS_SYNCED))
5353 return;
5354
5355 /* Set PLL lock range. */
5356 tg3_writephy(tp, 0x16, 0x8007);
5357
5358 /* SW reset */
5359 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5360
5361 /* Wait for reset to complete. */
5362 /* XXX schedule_timeout() ... */
5363 for (i = 0; i < 500; i++)
5364 udelay(10);
5365
5366 /* Config mode; select PMA/Ch 1 regs. */
5367 tg3_writephy(tp, 0x10, 0x8411);
5368
5369 /* Enable auto-lock and comdet, select txclk for tx. */
5370 tg3_writephy(tp, 0x11, 0x0a10);
5371
5372 tg3_writephy(tp, 0x18, 0x00a0);
5373 tg3_writephy(tp, 0x16, 0x41ff);
5374
5375 /* Assert and deassert POR. */
5376 tg3_writephy(tp, 0x13, 0x0400);
5377 udelay(40);
5378 tg3_writephy(tp, 0x13, 0x0000);
5379
5380 tg3_writephy(tp, 0x11, 0x0a50);
5381 udelay(40);
5382 tg3_writephy(tp, 0x11, 0x0a10);
5383
5384 /* Wait for signal to stabilize */
5385 /* XXX schedule_timeout() ... */
5386 for (i = 0; i < 15000; i++)
5387 udelay(10);
5388
5389 /* Deselect the channel register so we can read the PHYID
5390 * later.
5391 */
5392 tg3_writephy(tp, 0x10, 0x8011);
5393 }
5394
5395 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5396 {
5397 u16 flowctrl;
5398 bool current_link_up;
5399 u32 sg_dig_ctrl, sg_dig_status;
5400 u32 serdes_cfg, expected_sg_dig_ctrl;
5401 int workaround, port_a;
5402
5403 serdes_cfg = 0;
5404 expected_sg_dig_ctrl = 0;
5405 workaround = 0;
5406 port_a = 1;
5407 current_link_up = false;
5408
5409 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5410 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5411 workaround = 1;
5412 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5413 port_a = 0;
5414
5415 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5416 /* preserve bits 20-23 for voltage regulator */
5417 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5418 }
5419
5420 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5421
5422 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5423 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5424 if (workaround) {
5425 u32 val = serdes_cfg;
5426
5427 if (port_a)
5428 val |= 0xc010000;
5429 else
5430 val |= 0x4010000;
5431 tw32_f(MAC_SERDES_CFG, val);
5432 }
5433
5434 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5435 }
5436 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5437 tg3_setup_flow_control(tp, 0, 0);
5438 current_link_up = true;
5439 }
5440 goto out;
5441 }
5442
5443 /* Want auto-negotiation. */
5444 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5445
5446 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5447 if (flowctrl & ADVERTISE_1000XPAUSE)
5448 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5449 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5450 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5451
5452 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5453 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5454 tp->serdes_counter &&
5455 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5456 MAC_STATUS_RCVD_CFG)) ==
5457 MAC_STATUS_PCS_SYNCED)) {
5458 tp->serdes_counter--;
5459 current_link_up = true;
5460 goto out;
5461 }
5462 restart_autoneg:
5463 if (workaround)
5464 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5465 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5466 udelay(5);
5467 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5468
5469 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5471 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5472 MAC_STATUS_SIGNAL_DET)) {
5473 sg_dig_status = tr32(SG_DIG_STATUS);
5474 mac_status = tr32(MAC_STATUS);
5475
5476 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5477 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5478 u32 local_adv = 0, remote_adv = 0;
5479
5480 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5481 local_adv |= ADVERTISE_1000XPAUSE;
5482 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5483 local_adv |= ADVERTISE_1000XPSE_ASYM;
5484
5485 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5486 remote_adv |= LPA_1000XPAUSE;
5487 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5488 remote_adv |= LPA_1000XPAUSE_ASYM;
5489
5490 tp->link_config.rmt_adv =
5491 mii_adv_to_ethtool_adv_x(remote_adv);
5492
5493 tg3_setup_flow_control(tp, local_adv, remote_adv);
5494 current_link_up = true;
5495 tp->serdes_counter = 0;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5498 if (tp->serdes_counter)
5499 tp->serdes_counter--;
5500 else {
5501 if (workaround) {
5502 u32 val = serdes_cfg;
5503
5504 if (port_a)
5505 val |= 0xc010000;
5506 else
5507 val |= 0x4010000;
5508
5509 tw32_f(MAC_SERDES_CFG, val);
5510 }
5511
5512 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5513 udelay(40);
5514
5515 				/* Link parallel detection - link is up only
5516 				 * if we have PCS_SYNC and are not receiving
5517 				 * config code words. */
5518 mac_status = tr32(MAC_STATUS);
5519 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5520 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5521 tg3_setup_flow_control(tp, 0, 0);
5522 current_link_up = true;
5523 tp->phy_flags |=
5524 TG3_PHYFLG_PARALLEL_DETECT;
5525 tp->serdes_counter =
5526 SERDES_PARALLEL_DET_TIMEOUT;
5527 } else
5528 goto restart_autoneg;
5529 }
5530 }
5531 } else {
5532 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5533 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5534 }
5535
5536 out:
5537 return current_link_up;
5538 }
5539
5540 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5541 {
5542 bool current_link_up = false;
5543
5544 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5545 goto out;
5546
5547 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5548 u32 txflags, rxflags;
5549 int i;
5550
5551 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5552 u32 local_adv = 0, remote_adv = 0;
5553
5554 if (txflags & ANEG_CFG_PS1)
5555 local_adv |= ADVERTISE_1000XPAUSE;
5556 if (txflags & ANEG_CFG_PS2)
5557 local_adv |= ADVERTISE_1000XPSE_ASYM;
5558
5559 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5560 remote_adv |= LPA_1000XPAUSE;
5561 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5562 remote_adv |= LPA_1000XPAUSE_ASYM;
5563
5564 tp->link_config.rmt_adv =
5565 mii_adv_to_ethtool_adv_x(remote_adv);
5566
5567 tg3_setup_flow_control(tp, local_adv, remote_adv);
5568
5569 current_link_up = true;
5570 }
5571 for (i = 0; i < 30; i++) {
5572 udelay(20);
5573 tw32_f(MAC_STATUS,
5574 (MAC_STATUS_SYNC_CHANGED |
5575 MAC_STATUS_CFG_CHANGED));
5576 udelay(40);
5577 if ((tr32(MAC_STATUS) &
5578 (MAC_STATUS_SYNC_CHANGED |
5579 MAC_STATUS_CFG_CHANGED)) == 0)
5580 break;
5581 }
5582
5583 mac_status = tr32(MAC_STATUS);
5584 if (!current_link_up &&
5585 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5586 !(mac_status & MAC_STATUS_RCVD_CFG))
5587 current_link_up = true;
5588 } else {
5589 tg3_setup_flow_control(tp, 0, 0);
5590
5591 /* Forcing 1000FD link up. */
5592 current_link_up = true;
5593
5594 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5595 udelay(40);
5596
5597 tw32_f(MAC_MODE, tp->mac_mode);
5598 udelay(40);
5599 }
5600
5601 out:
5602 return current_link_up;
5603 }
5604
5605 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5606 {
5607 u32 orig_pause_cfg;
5608 u16 orig_active_speed;
5609 u8 orig_active_duplex;
5610 u32 mac_status;
5611 bool current_link_up;
5612 int i;
5613
5614 orig_pause_cfg = tp->link_config.active_flowctrl;
5615 orig_active_speed = tp->link_config.active_speed;
5616 orig_active_duplex = tp->link_config.active_duplex;
5617
5618 if (!tg3_flag(tp, HW_AUTONEG) &&
5619 tp->link_up &&
5620 tg3_flag(tp, INIT_COMPLETE)) {
5621 mac_status = tr32(MAC_STATUS);
5622 mac_status &= (MAC_STATUS_PCS_SYNCED |
5623 MAC_STATUS_SIGNAL_DET |
5624 MAC_STATUS_CFG_CHANGED |
5625 MAC_STATUS_RCVD_CFG);
5626 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5627 MAC_STATUS_SIGNAL_DET)) {
5628 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5629 MAC_STATUS_CFG_CHANGED));
5630 return 0;
5631 }
5632 }
5633
5634 tw32_f(MAC_TX_AUTO_NEG, 0);
5635
5636 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5637 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5638 tw32_f(MAC_MODE, tp->mac_mode);
5639 udelay(40);
5640
5641 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5642 tg3_init_bcm8002(tp);
5643
5644 /* Enable link change event even when serdes polling. */
5645 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5646 udelay(40);
5647
5648 current_link_up = false;
5649 tp->link_config.rmt_adv = 0;
5650 mac_status = tr32(MAC_STATUS);
5651
5652 if (tg3_flag(tp, HW_AUTONEG))
5653 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5654 else
5655 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5656
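	/* Mark the status block updated and clear the latched link-change
	 * flag, preserving the remaining status bits.
	 */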
5657 tp->napi[0].hw_status->status =
5658 (SD_STATUS_UPDATED |
5659 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5660
5661 for (i = 0; i < 100; i++) {
5662 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5663 MAC_STATUS_CFG_CHANGED));
5664 udelay(5);
5665 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED |
5667 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5668 break;
5669 }
5670
5671 mac_status = tr32(MAC_STATUS);
5672 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5673 current_link_up = false;
5674 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5675 tp->serdes_counter == 0) {
5676 tw32_f(MAC_MODE, (tp->mac_mode |
5677 MAC_MODE_SEND_CONFIGS));
5678 udelay(1);
5679 tw32_f(MAC_MODE, tp->mac_mode);
5680 }
5681 }
5682
5683 if (current_link_up) {
5684 tp->link_config.active_speed = SPEED_1000;
5685 tp->link_config.active_duplex = DUPLEX_FULL;
5686 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5687 LED_CTRL_LNKLED_OVERRIDE |
5688 LED_CTRL_1000MBPS_ON));
5689 } else {
5690 tp->link_config.active_speed = SPEED_UNKNOWN;
5691 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5692 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5693 LED_CTRL_LNKLED_OVERRIDE |
5694 LED_CTRL_TRAFFIC_OVERRIDE));
5695 }
5696
5697 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5698 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5699 if (orig_pause_cfg != now_pause_cfg ||
5700 orig_active_speed != tp->link_config.active_speed ||
5701 orig_active_duplex != tp->link_config.active_duplex)
5702 tg3_link_report(tp);
5703 }
5704
5705 return 0;
5706 }
5707
5708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5709 {
5710 int err = 0;
5711 u32 bmsr, bmcr;
5712 u16 current_speed = SPEED_UNKNOWN;
5713 u8 current_duplex = DUPLEX_UNKNOWN;
5714 bool current_link_up = false;
5715 u32 local_adv, remote_adv, sgsr;
5716
5717 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5718 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5719 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5720 (sgsr & SERDES_TG3_SGMII_MODE)) {
5721
5722 if (force_reset)
5723 tg3_phy_reset(tp);
5724
5725 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5726
5727 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5729 } else {
5730 current_link_up = true;
5731 if (sgsr & SERDES_TG3_SPEED_1000) {
5732 current_speed = SPEED_1000;
5733 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5734 } else if (sgsr & SERDES_TG3_SPEED_100) {
5735 current_speed = SPEED_100;
5736 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5737 } else {
5738 current_speed = SPEED_10;
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5740 }
5741
5742 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5743 current_duplex = DUPLEX_FULL;
5744 else
5745 current_duplex = DUPLEX_HALF;
5746 }
5747
5748 tw32_f(MAC_MODE, tp->mac_mode);
5749 udelay(40);
5750
5751 tg3_clear_mac_status(tp);
5752
5753 goto fiber_setup_done;
5754 }
5755
5756 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5757 tw32_f(MAC_MODE, tp->mac_mode);
5758 udelay(40);
5759
5760 tg3_clear_mac_status(tp);
5761
5762 if (force_reset)
5763 tg3_phy_reset(tp);
5764
5765 tp->link_config.rmt_adv = 0;
5766
5767 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5768 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5769 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5770 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5771 bmsr |= BMSR_LSTATUS;
5772 else
5773 bmsr &= ~BMSR_LSTATUS;
5774 }
5775
5776 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5777
5778 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5779 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5780 /* do nothing, just check for link up at the end */
5781 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5782 u32 adv, newadv;
5783
5784 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5785 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5786 ADVERTISE_1000XPAUSE |
5787 ADVERTISE_1000XPSE_ASYM |
5788 ADVERTISE_SLCT);
5789
5790 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5791 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5792
5793 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5794 tg3_writephy(tp, MII_ADVERTISE, newadv);
5795 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5796 tg3_writephy(tp, MII_BMCR, bmcr);
5797
5798 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5799 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5800 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5801
5802 return err;
5803 }
5804 } else {
5805 u32 new_bmcr;
5806
5807 bmcr &= ~BMCR_SPEED1000;
5808 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5809
5810 if (tp->link_config.duplex == DUPLEX_FULL)
5811 new_bmcr |= BMCR_FULLDPLX;
5812
5813 if (new_bmcr != bmcr) {
5814 /* BMCR_SPEED1000 is a reserved bit that needs
5815 * to be set on write.
5816 */
5817 new_bmcr |= BMCR_SPEED1000;
5818
5819 /* Force a linkdown */
5820 if (tp->link_up) {
5821 u32 adv;
5822
5823 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5824 adv &= ~(ADVERTISE_1000XFULL |
5825 ADVERTISE_1000XHALF |
5826 ADVERTISE_SLCT);
5827 tg3_writephy(tp, MII_ADVERTISE, adv);
5828 tg3_writephy(tp, MII_BMCR, bmcr |
5829 BMCR_ANRESTART |
5830 BMCR_ANENABLE);
5831 udelay(10);
5832 tg3_carrier_off(tp);
5833 }
5834 tg3_writephy(tp, MII_BMCR, new_bmcr);
5835 bmcr = new_bmcr;
5836 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5837 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5838 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5839 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5840 bmsr |= BMSR_LSTATUS;
5841 else
5842 bmsr &= ~BMSR_LSTATUS;
5843 }
5844 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5845 }
5846 }
5847
5848 if (bmsr & BMSR_LSTATUS) {
5849 current_speed = SPEED_1000;
5850 current_link_up = true;
5851 if (bmcr & BMCR_FULLDPLX)
5852 current_duplex = DUPLEX_FULL;
5853 else
5854 current_duplex = DUPLEX_HALF;
5855
5856 local_adv = 0;
5857 remote_adv = 0;
5858
5859 if (bmcr & BMCR_ANENABLE) {
5860 u32 common;
5861
5862 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5863 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5864 common = local_adv & remote_adv;
5865 if (common & (ADVERTISE_1000XHALF |
5866 ADVERTISE_1000XFULL)) {
5867 if (common & ADVERTISE_1000XFULL)
5868 current_duplex = DUPLEX_FULL;
5869 else
5870 current_duplex = DUPLEX_HALF;
5871
5872 tp->link_config.rmt_adv =
5873 mii_adv_to_ethtool_adv_x(remote_adv);
5874 } else if (!tg3_flag(tp, 5780_CLASS)) {
5875 /* Link is up via parallel detect */
5876 } else {
5877 current_link_up = false;
5878 }
5879 }
5880 }
5881
5882 fiber_setup_done:
5883 if (current_link_up && current_duplex == DUPLEX_FULL)
5884 tg3_setup_flow_control(tp, local_adv, remote_adv);
5885
5886 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5887 if (tp->link_config.active_duplex == DUPLEX_HALF)
5888 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5889
5890 tw32_f(MAC_MODE, tp->mac_mode);
5891 udelay(40);
5892
5893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894
5895 tp->link_config.active_speed = current_speed;
5896 tp->link_config.active_duplex = current_duplex;
5897
5898 tg3_test_and_report_link_chg(tp, current_link_up);
5899 return err;
5900 }
5901
5902 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5903 {
5904 if (tp->serdes_counter) {
5905 /* Give autoneg time to complete. */
5906 tp->serdes_counter--;
5907 return;
5908 }
5909
5910 if (!tp->link_up &&
5911 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5912 u32 bmcr;
5913
5914 tg3_readphy(tp, MII_BMCR, &bmcr);
5915 if (bmcr & BMCR_ANENABLE) {
5916 u32 phy1, phy2;
5917
5918 /* Select shadow register 0x1f */
5919 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5920 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5921
5922 /* Select expansion interrupt status register */
5923 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5924 MII_TG3_DSP_EXP1_INT_STAT);
5925 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5926 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5927
5928 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5929 /* We have signal detect and not receiving
5930 * config code words, link is up by parallel
5931 * detection.
5932 */
5933
5934 bmcr &= ~BMCR_ANENABLE;
5935 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5936 tg3_writephy(tp, MII_BMCR, bmcr);
5937 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5938 }
5939 }
5940 } else if (tp->link_up &&
5941 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5942 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5943 u32 phy2;
5944
5945 /* Select expansion interrupt status register */
5946 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5947 MII_TG3_DSP_EXP1_INT_STAT);
5948 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5949 if (phy2 & 0x20) {
5950 u32 bmcr;
5951
5952 /* Config code words received, turn on autoneg. */
5953 tg3_readphy(tp, MII_BMCR, &bmcr);
5954 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5955
5956 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5957
5958 }
5959 }
5960 }
5961
5962 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5963 {
5964 u32 val;
5965 int err;
5966
5967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5968 err = tg3_setup_fiber_phy(tp, force_reset);
5969 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5970 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5971 else
5972 err = tg3_setup_copper_phy(tp, force_reset);
5973
5974 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5975 u32 scale;
5976
5977 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5978 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5979 scale = 65;
5980 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5981 scale = 6;
5982 else
5983 scale = 12;
5984
5985 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5986 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5987 tw32(GRC_MISC_CFG, val);
5988 }
5989
5990 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5991 (6 << TX_LENGTHS_IPG_SHIFT);
5992 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5993 tg3_asic_rev(tp) == ASIC_REV_5762)
5994 val |= tr32(MAC_TX_LENGTHS) &
5995 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5996 TX_LENGTHS_CNT_DWN_VAL_MSK);
5997
5998 if (tp->link_config.active_speed == SPEED_1000 &&
5999 tp->link_config.active_duplex == DUPLEX_HALF)
6000 tw32(MAC_TX_LENGTHS, val |
6001 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6002 else
6003 tw32(MAC_TX_LENGTHS, val |
6004 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6005
6006 if (!tg3_flag(tp, 5705_PLUS)) {
6007 if (tp->link_up) {
6008 tw32(HOSTCC_STAT_COAL_TICKS,
6009 tp->coal.stats_block_coalesce_usecs);
6010 } else {
6011 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6012 }
6013 }
6014
6015 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6016 val = tr32(PCIE_PWR_MGMT_THRESH);
6017 if (!tp->link_up)
6018 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6019 tp->pwrmgmt_thresh;
6020 else
6021 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6022 tw32(PCIE_PWR_MGMT_THRESH, val);
6023 }
6024
6025 return err;
6026 }
6027
6028 /* tp->lock must be held */
6029 static u64 tg3_refclk_read(struct tg3 *tp)
6030 {
6031 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6032 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6033 }
6034
6035 /* tp->lock must be held */
6036 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6037 {
6038 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6039 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6040 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6041 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6042 }
6043
6044 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6045 static inline void tg3_full_unlock(struct tg3 *tp);
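/* ethtool get_ts_info handler: software timestamping is always
 * available; hardware timestamping and a PHC index are reported only
 * on PTP-capable devices.
 */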
6046 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6047 {
6048 struct tg3 *tp = netdev_priv(dev);
6049
6050 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6051 SOF_TIMESTAMPING_RX_SOFTWARE |
6052 SOF_TIMESTAMPING_SOFTWARE;
6053
6054 if (tg3_flag(tp, PTP_CAPABLE)) {
6055 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6056 SOF_TIMESTAMPING_RX_HARDWARE |
6057 SOF_TIMESTAMPING_RAW_HARDWARE;
6058 }
6059
6060 if (tp->ptp_clock)
6061 info->phc_index = ptp_clock_index(tp->ptp_clock);
6062 else
6063 info->phc_index = -1;
6064
6065 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6066
6067 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6068 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6069 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6070 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6071 return 0;
6072 }
6073
6074 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6075 {
6076 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6077 bool neg_adj = false;
6078 u32 correction = 0;
6079
6080 if (ppb < 0) {
6081 neg_adj = true;
6082 ppb = -ppb;
6083 }
6084
6085 /* Frequency adjustment is performed using hardware with a 24 bit
6086 * accumulator and a programmable correction value. On each clock cycle, the
6087 * correction value gets added to the accumulator and when it
6088 * overflows, the time counter is incremented/decremented.
6089 *
6090 * So conversion from ppb to correction value is
6091 * ppb * (1 << 24) / 1000000000
6092 */
6093 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6094 TG3_EAV_REF_CLK_CORRECT_MASK;
6095
6096 tg3_full_lock(tp, 0);
6097
6098 if (correction)
6099 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6100 TG3_EAV_REF_CLK_CORRECT_EN |
6101 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6102 else
6103 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6104
6105 tg3_full_unlock(tp);
6106
6107 return 0;
6108 }
6109
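/* Step the PHC by accumulating the delta in a software offset; the
 * hardware reference clock itself keeps running unmodified.
 */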
6110 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6111 {
6112 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6113
6114 tg3_full_lock(tp, 0);
6115 tp->ptp_adjust += delta;
6116 tg3_full_unlock(tp);
6117
6118 return 0;
6119 }
6120
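/* Read the hardware reference clock, apply the software offset, and
 * convert the nanosecond count to a timespec.
 */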
6121 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6122 {
6123 u64 ns;
6124 u32 remainder;
6125 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6126
6127 tg3_full_lock(tp, 0);
6128 ns = tg3_refclk_read(tp);
6129 ns += tp->ptp_adjust;
6130 tg3_full_unlock(tp);
6131
6132 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6133 ts->tv_nsec = remainder;
6134
6135 return 0;
6136 }
6137
6138 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6139 const struct timespec *ts)
6140 {
6141 u64 ns;
6142 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6143
6144 ns = timespec_to_ns(ts);
6145
6146 tg3_full_lock(tp, 0);
6147 tg3_refclk_write(tp, ns);
6148 tp->ptp_adjust = 0;
6149 tg3_full_unlock(tp);
6150
6151 return 0;
6152 }
6153
6154 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6155 struct ptp_clock_request *rq, int on)
6156 {
6157 return -EOPNOTSUPP;
6158 }
6159
6160 static const struct ptp_clock_info tg3_ptp_caps = {
6161 .owner = THIS_MODULE,
6162 .name = "tg3 clock",
6163 .max_adj = 250000000,
6164 .n_alarm = 0,
6165 .n_ext_ts = 0,
6166 .n_per_out = 0,
6167 .pps = 0,
6168 .adjfreq = tg3_ptp_adjfreq,
6169 .adjtime = tg3_ptp_adjtime,
6170 .gettime = tg3_ptp_gettime,
6171 .settime = tg3_ptp_settime,
6172 .enable = tg3_ptp_enable,
6173 };
6174
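/* Convert a raw hardware clock sample into an skb hardware timestamp,
 * applying the software PHC offset.
 */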
6175 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6176 struct skb_shared_hwtstamps *timestamp)
6177 {
6178 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6179 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6180 tp->ptp_adjust);
6181 }
6182
6183 /* tp->lock must be held */
6184 static void tg3_ptp_init(struct tg3 *tp)
6185 {
6186 if (!tg3_flag(tp, PTP_CAPABLE))
6187 return;
6188
6189 /* Initialize the hardware clock to the system time. */
6190 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6191 tp->ptp_adjust = 0;
6192 tp->ptp_info = tg3_ptp_caps;
6193 }
6194
6195 /* tp->lock must be held */
6196 static void tg3_ptp_resume(struct tg3 *tp)
6197 {
6198 if (!tg3_flag(tp, PTP_CAPABLE))
6199 return;
6200
6201 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6202 tp->ptp_adjust = 0;
6203 }
6204
6205 static void tg3_ptp_fini(struct tg3 *tp)
6206 {
6207 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6208 return;
6209
6210 ptp_clock_unregister(tp->ptp_clock);
6211 tp->ptp_clock = NULL;
6212 tp->ptp_adjust = 0;
6213 }
6214
6215 static inline int tg3_irq_sync(struct tg3 *tp)
6216 {
6217 return tp->irq_sync;
6218 }
6219
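/* Copy 'len' bytes of registers starting at 'off' into the dump
 * buffer, placed at the offset matching their register addresses.
 */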
6220 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6221 {
6222 int i;
6223
6224 dst = (u32 *)((u8 *)dst + off);
6225 for (i = 0; i < len; i += sizeof(u32))
6226 *dst++ = tr32(off + i);
6227 }
6228
6229 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6230 {
6231 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6232 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6233 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6234 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6235 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6236 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6237 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6238 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6239 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6240 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6241 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6242 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6243 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6244 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6245 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6246 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6247 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6248 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6249 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6250
6251 if (tg3_flag(tp, SUPPORT_MSIX))
6252 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6253
6254 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6255 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6256 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6257 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6258 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6259 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6260 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6261 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6262
6263 if (!tg3_flag(tp, 5705_PLUS)) {
6264 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6265 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6266 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6267 }
6268
6269 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6270 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6271 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6272 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6273 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6274
6275 if (tg3_flag(tp, NVRAM))
6276 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6277 }
6278
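/* Log a debugging snapshot: the register block (legacy or PCIe),
 * each vector's host status block, and the NAPI ring indices.
 */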
6279 static void tg3_dump_state(struct tg3 *tp)
6280 {
6281 int i;
6282 u32 *regs;
6283
6284 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6285 if (!regs)
6286 return;
6287
6288 if (tg3_flag(tp, PCI_EXPRESS)) {
6289 /* Read up to but not including private PCI registers */
6290 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6291 regs[i / sizeof(u32)] = tr32(i);
6292 } else
6293 tg3_dump_legacy_regs(tp, regs);
6294
6295 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6296 if (!regs[i + 0] && !regs[i + 1] &&
6297 !regs[i + 2] && !regs[i + 3])
6298 continue;
6299
6300 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6301 i * 4,
6302 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6303 }
6304
6305 kfree(regs);
6306
6307 for (i = 0; i < tp->irq_cnt; i++) {
6308 struct tg3_napi *tnapi = &tp->napi[i];
6309
6310 /* SW status block */
6311 netdev_err(tp->dev,
6312 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6313 i,
6314 tnapi->hw_status->status,
6315 tnapi->hw_status->status_tag,
6316 tnapi->hw_status->rx_jumbo_consumer,
6317 tnapi->hw_status->rx_consumer,
6318 tnapi->hw_status->rx_mini_consumer,
6319 tnapi->hw_status->idx[0].rx_producer,
6320 tnapi->hw_status->idx[0].tx_consumer);
6321
6322 netdev_err(tp->dev,
6323 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6324 i,
6325 tnapi->last_tag, tnapi->last_irq_tag,
6326 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6327 tnapi->rx_rcb_ptr,
6328 tnapi->prodring.rx_std_prod_idx,
6329 tnapi->prodring.rx_std_cons_idx,
6330 tnapi->prodring.rx_jmb_prod_idx,
6331 tnapi->prodring.rx_jmb_cons_idx);
6332 }
6333 }
6334
6335 /* This is called whenever we suspect that the system chipset is re-
6336 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6337 * is bogus tx completions. We try to recover by setting the
6338 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6339 * in the workqueue.
6340 */
6341 static void tg3_tx_recover(struct tg3 *tp)
6342 {
6343 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6344 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6345
6346 netdev_warn(tp->dev,
6347 "The system may be re-ordering memory-mapped I/O "
6348 "cycles to the network device, attempting to recover. "
6349 "Please report the problem to the driver maintainer "
6350 "and include system chipset information.\n");
6351
6352 spin_lock(&tp->lock);
6353 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6354 spin_unlock(&tp->lock);
6355 }
6356
6357 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6358 {
6359 /* Tell compiler to fetch tx indices from memory. */
6360 barrier();
6361 return tnapi->tx_pending -
6362 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6363 }
6364
6365 /* Tigon3 never reports partial packet sends. So we do not
6366 * need special logic to handle SKBs that have not had all
6367 * of their frags sent yet, like SunGEM does.
6368 */
6369 static void tg3_tx(struct tg3_napi *tnapi)
6370 {
6371 struct tg3 *tp = tnapi->tp;
6372 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6373 u32 sw_idx = tnapi->tx_cons;
6374 struct netdev_queue *txq;
6375 int index = tnapi - tp->napi;
6376 unsigned int pkts_compl = 0, bytes_compl = 0;
6377
6378 if (tg3_flag(tp, ENABLE_TSS))
6379 index--;
6380
6381 txq = netdev_get_tx_queue(tp->dev, index);
6382
6383 while (sw_idx != hw_idx) {
6384 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6385 struct sk_buff *skb = ri->skb;
6386 int i, tx_bug = 0;
6387
6388 if (unlikely(skb == NULL)) {
6389 tg3_tx_recover(tp);
6390 return;
6391 }
6392
6393 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6394 struct skb_shared_hwtstamps timestamp;
6395 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6396 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6397
6398 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6399
6400 skb_tstamp_tx(skb, &timestamp);
6401 }
6402
6403 pci_unmap_single(tp->pdev,
6404 dma_unmap_addr(ri, mapping),
6405 skb_headlen(skb),
6406 PCI_DMA_TODEVICE);
6407
6408 ri->skb = NULL;
6409
6410 while (ri->fragmented) {
6411 ri->fragmented = false;
6412 sw_idx = NEXT_TX(sw_idx);
6413 ri = &tnapi->tx_buffers[sw_idx];
6414 }
6415
6416 sw_idx = NEXT_TX(sw_idx);
6417
6418 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6419 ri = &tnapi->tx_buffers[sw_idx];
6420 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6421 tx_bug = 1;
6422
6423 pci_unmap_page(tp->pdev,
6424 dma_unmap_addr(ri, mapping),
6425 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6426 PCI_DMA_TODEVICE);
6427
6428 while (ri->fragmented) {
6429 ri->fragmented = false;
6430 sw_idx = NEXT_TX(sw_idx);
6431 ri = &tnapi->tx_buffers[sw_idx];
6432 }
6433
6434 sw_idx = NEXT_TX(sw_idx);
6435 }
6436
6437 pkts_compl++;
6438 bytes_compl += skb->len;
6439
6440 dev_kfree_skb(skb);
6441
6442 if (unlikely(tx_bug)) {
6443 tg3_tx_recover(tp);
6444 return;
6445 }
6446 }
6447
6448 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6449
6450 tnapi->tx_cons = sw_idx;
6451
6452 /* Need to make the tx_cons update visible to tg3_start_xmit()
6453 * before checking for netif_queue_stopped(). Without the
6454 * memory barrier, there is a small possibility that tg3_start_xmit()
6455 * will miss it and cause the queue to be stopped forever.
6456 */
6457 smp_mb();
6458
6459 if (unlikely(netif_tx_queue_stopped(txq) &&
6460 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6461 __netif_tx_lock(txq, smp_processor_id());
6462 if (netif_tx_queue_stopped(txq) &&
6463 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6464 netif_tx_wake_queue(txq);
6465 __netif_tx_unlock(txq);
6466 }
6467 }
6468
6469 static void tg3_frag_free(bool is_frag, void *data)
6470 {
6471 if (is_frag)
6472 put_page(virt_to_head_page(data));
6473 else
6474 kfree(data);
6475 }
6476
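/* Unmap and free one RX buffer. The size is recomputed exactly as in
 * tg3_alloc_rx_data so that page-frag and kmalloc buffers are freed
 * the same way they were allocated.
 */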
6477 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6478 {
6479 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6480 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6481
6482 if (!ri->data)
6483 return;
6484
6485 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6486 map_sz, PCI_DMA_FROMDEVICE);
6487 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6488 ri->data = NULL;
6489 }
6490
6491
6492 /* Returns size of the rx data buffer allocated or < 0 on error.
6493 *
6494 * We only need to fill in the address because the other members
6495 * of the RX descriptor are invariant, see tg3_init_rings.
6496 *
6497 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6498 * posting buffers we only dirty the first cache line of the RX
6499 * descriptor (containing the address). Whereas for the RX status
6500 * buffers the cpu only reads the last cacheline of the RX descriptor
6501 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6502 */
6503 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6504 u32 opaque_key, u32 dest_idx_unmasked,
6505 unsigned int *frag_size)
6506 {
6507 struct tg3_rx_buffer_desc *desc;
6508 struct ring_info *map;
6509 u8 *data;
6510 dma_addr_t mapping;
6511 int skb_size, data_size, dest_idx;
6512
6513 switch (opaque_key) {
6514 case RXD_OPAQUE_RING_STD:
6515 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6516 desc = &tpr->rx_std[dest_idx];
6517 map = &tpr->rx_std_buffers[dest_idx];
6518 data_size = tp->rx_pkt_map_sz;
6519 break;
6520
6521 case RXD_OPAQUE_RING_JUMBO:
6522 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6523 desc = &tpr->rx_jmb[dest_idx].std;
6524 map = &tpr->rx_jmb_buffers[dest_idx];
6525 data_size = TG3_RX_JMB_MAP_SZ;
6526 break;
6527
6528 default:
6529 return -EINVAL;
6530 }
6531
6532 /* Do not overwrite any of the map or rp information
6533 * until we are sure we can commit to a new buffer.
6534 *
6535 * Callers depend upon this behavior and assume that
6536 * we leave everything unchanged if we fail.
6537 */
6538 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6539 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6540 if (skb_size <= PAGE_SIZE) {
6541 data = netdev_alloc_frag(skb_size);
6542 *frag_size = skb_size;
6543 } else {
6544 data = kmalloc(skb_size, GFP_ATOMIC);
6545 *frag_size = 0;
6546 }
6547 if (!data)
6548 return -ENOMEM;
6549
6550 mapping = pci_map_single(tp->pdev,
6551 data + TG3_RX_OFFSET(tp),
6552 data_size,
6553 PCI_DMA_FROMDEVICE);
6554 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6555 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6556 return -EIO;
6557 }
6558
6559 map->data = data;
6560 dma_unmap_addr_set(map, mapping, mapping);
6561
6562 desc->addr_hi = ((u64)mapping >> 32);
6563 desc->addr_lo = ((u64)mapping & 0xffffffff);
6564
6565 return data_size;
6566 }
6567
6568 /* We only need to move over in the address because the other
6569 * members of the RX descriptor are invariant. See notes above
6570 * tg3_alloc_rx_data for full details.
6571 */
6572 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6573 struct tg3_rx_prodring_set *dpr,
6574 u32 opaque_key, int src_idx,
6575 u32 dest_idx_unmasked)
6576 {
6577 struct tg3 *tp = tnapi->tp;
6578 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6579 struct ring_info *src_map, *dest_map;
6580 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6581 int dest_idx;
6582
6583 switch (opaque_key) {
6584 case RXD_OPAQUE_RING_STD:
6585 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6586 dest_desc = &dpr->rx_std[dest_idx];
6587 dest_map = &dpr->rx_std_buffers[dest_idx];
6588 src_desc = &spr->rx_std[src_idx];
6589 src_map = &spr->rx_std_buffers[src_idx];
6590 break;
6591
6592 case RXD_OPAQUE_RING_JUMBO:
6593 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6594 dest_desc = &dpr->rx_jmb[dest_idx].std;
6595 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6596 src_desc = &spr->rx_jmb[src_idx].std;
6597 src_map = &spr->rx_jmb_buffers[src_idx];
6598 break;
6599
6600 default:
6601 return;
6602 }
6603
6604 dest_map->data = src_map->data;
6605 dma_unmap_addr_set(dest_map, mapping,
6606 dma_unmap_addr(src_map, mapping));
6607 dest_desc->addr_hi = src_desc->addr_hi;
6608 dest_desc->addr_lo = src_desc->addr_lo;
6609
6610 /* Ensure that the update to the skb happens after the physical
6611 * addresses have been transferred to the new BD location.
6612 */
6613 smp_wmb();
6614
6615 src_map->data = NULL;
6616 }
6617
6618 /* The RX ring scheme is composed of multiple rings which post fresh
6619 * buffers to the chip, and one special ring the chip uses to report
6620 * status back to the host.
6621 *
6622 * The special ring reports the status of received packets to the
6623 * host. The chip does not write into the original descriptor the
6624 * RX buffer was obtained from. The chip simply takes the original
6625 * descriptor as provided by the host, updates the status and length
6626 * field, then writes this into the next status ring entry.
6627 *
6628 * Each ring the host uses to post buffers to the chip is described
6629 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6630 * it is first placed into the on-chip ram. When the packet's length
6631 * is known, it walks down the TG3_BDINFO entries to select the ring.
6632 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6633 * whose MAXLEN covers the new packet's length is chosen.
6634 *
6635 * The "separate ring for rx status" scheme may sound queer, but it makes
6636 * sense from a cache coherency perspective. If only the host writes
6637 * to the buffer post rings, and only the chip writes to the rx status
6638 * rings, then cache lines never move beyond shared-modified state.
6639 * If both the host and chip were to write into the same ring, cache line
6640 * eviction could occur since both entities want it in an exclusive state.
6641 */
6642 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6643 {
6644 struct tg3 *tp = tnapi->tp;
6645 u32 work_mask, rx_std_posted = 0;
6646 u32 std_prod_idx, jmb_prod_idx;
6647 u32 sw_idx = tnapi->rx_rcb_ptr;
6648 u16 hw_idx;
6649 int received;
6650 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6651
6652 hw_idx = *(tnapi->rx_rcb_prod_idx);
6653 /*
6654 * We need to order the read of hw_idx and the read of
6655 * the opaque cookie.
6656 */
6657 rmb();
6658 work_mask = 0;
6659 received = 0;
6660 std_prod_idx = tpr->rx_std_prod_idx;
6661 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6662 while (sw_idx != hw_idx && budget > 0) {
6663 struct ring_info *ri;
6664 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6665 unsigned int len;
6666 struct sk_buff *skb;
6667 dma_addr_t dma_addr;
6668 u32 opaque_key, desc_idx, *post_ptr;
6669 u8 *data;
6670 u64 tstamp = 0;
6671
6672 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6673 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6674 if (opaque_key == RXD_OPAQUE_RING_STD) {
6675 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6676 dma_addr = dma_unmap_addr(ri, mapping);
6677 data = ri->data;
6678 post_ptr = &std_prod_idx;
6679 rx_std_posted++;
6680 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6681 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6682 dma_addr = dma_unmap_addr(ri, mapping);
6683 data = ri->data;
6684 post_ptr = &jmb_prod_idx;
6685 } else
6686 goto next_pkt_nopost;
6687
6688 work_mask |= opaque_key;
6689
6690 if (desc->err_vlan & RXD_ERR_MASK) {
6691 drop_it:
6692 tg3_recycle_rx(tnapi, tpr, opaque_key,
6693 desc_idx, *post_ptr);
6694 drop_it_no_recycle:
6695 /* Other statistics are tracked by the card. */
6696 tp->rx_dropped++;
6697 goto next_pkt;
6698 }
6699
6700 prefetch(data + TG3_RX_OFFSET(tp));
6701 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6702 ETH_FCS_LEN;
6703
6704 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6705 RXD_FLAG_PTPSTAT_PTPV1 ||
6706 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6707 RXD_FLAG_PTPSTAT_PTPV2) {
6708 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6709 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6710 }
6711
6712 if (len > TG3_RX_COPY_THRESH(tp)) {
6713 int skb_size;
6714 unsigned int frag_size;
6715
6716 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6717 *post_ptr, &frag_size);
6718 if (skb_size < 0)
6719 goto drop_it;
6720
6721 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6722 PCI_DMA_FROMDEVICE);
6723
6724 /* Ensure that the update to the data happens
6725 * after the usage of the old DMA mapping.
6726 */
6727 smp_wmb();
6728
6729 ri->data = NULL;
6730
6731 skb = build_skb(data, frag_size);
6732 if (!skb) {
6733 tg3_frag_free(frag_size != 0, data);
6734 goto drop_it_no_recycle;
6735 }
6736 skb_reserve(skb, TG3_RX_OFFSET(tp));
6737 } else {
6738 tg3_recycle_rx(tnapi, tpr, opaque_key,
6739 desc_idx, *post_ptr);
6740
6741 skb = netdev_alloc_skb(tp->dev,
6742 len + TG3_RAW_IP_ALIGN);
6743 if (skb == NULL)
6744 goto drop_it_no_recycle;
6745
6746 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6747 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6748 memcpy(skb->data,
6749 data + TG3_RX_OFFSET(tp),
6750 len);
6751 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6752 }
6753
6754 skb_put(skb, len);
6755 if (tstamp)
6756 tg3_hwclock_to_timestamp(tp, tstamp,
6757 skb_hwtstamps(skb));
6758
6759 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6760 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6761 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6762 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6763 skb->ip_summed = CHECKSUM_UNNECESSARY;
6764 else
6765 skb_checksum_none_assert(skb);
6766
6767 skb->protocol = eth_type_trans(skb, tp->dev);
6768
6769 if (len > (tp->dev->mtu + ETH_HLEN) &&
6770 skb->protocol != htons(ETH_P_8021Q)) {
6771 dev_kfree_skb(skb);
6772 goto drop_it_no_recycle;
6773 }
6774
6775 if (desc->type_flags & RXD_FLAG_VLAN &&
6776 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6777 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6778 desc->err_vlan & RXD_VLAN_MASK);
6779
6780 napi_gro_receive(&tnapi->napi, skb);
6781
6782 received++;
6783 budget--;
6784
6785 next_pkt:
6786 (*post_ptr)++;
6787
6788 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6789 tpr->rx_std_prod_idx = std_prod_idx &
6790 tp->rx_std_ring_mask;
6791 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6792 tpr->rx_std_prod_idx);
6793 work_mask &= ~RXD_OPAQUE_RING_STD;
6794 rx_std_posted = 0;
6795 }
6796 next_pkt_nopost:
6797 sw_idx++;
6798 sw_idx &= tp->rx_ret_ring_mask;
6799
6800 /* Refresh hw_idx to see if there is new work */
6801 if (sw_idx == hw_idx) {
6802 hw_idx = *(tnapi->rx_rcb_prod_idx);
6803 rmb();
6804 }
6805 }
6806
6807 /* ACK the status ring. */
6808 tnapi->rx_rcb_ptr = sw_idx;
6809 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6810
6811 /* Refill RX ring(s). */
6812 if (!tg3_flag(tp, ENABLE_RSS)) {
6813 /* Sync BD data before updating mailbox */
6814 wmb();
6815
6816 if (work_mask & RXD_OPAQUE_RING_STD) {
6817 tpr->rx_std_prod_idx = std_prod_idx &
6818 tp->rx_std_ring_mask;
6819 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6820 tpr->rx_std_prod_idx);
6821 }
6822 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6823 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6824 tp->rx_jmb_ring_mask;
6825 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6826 tpr->rx_jmb_prod_idx);
6827 }
6828 mmiowb();
6829 } else if (work_mask) {
6830 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6831 * updated before the producer indices can be updated.
6832 */
6833 smp_wmb();
6834
6835 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6836 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6837
6838 if (tnapi != &tp->napi[1]) {
6839 tp->rx_refill = true;
6840 napi_schedule(&tp->napi[1].napi);
6841 }
6842 }
6843
6844 return received;
6845 }
6846
6847 static void tg3_poll_link(struct tg3 *tp)
6848 {
6849 /* handle link change and other phy events */
6850 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6851 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6852
6853 if (sblk->status & SD_STATUS_LINK_CHG) {
6854 sblk->status = SD_STATUS_UPDATED |
6855 (sblk->status & ~SD_STATUS_LINK_CHG);
6856 spin_lock(&tp->lock);
6857 if (tg3_flag(tp, USE_PHYLIB)) {
6858 tw32_f(MAC_STATUS,
6859 (MAC_STATUS_SYNC_CHANGED |
6860 MAC_STATUS_CFG_CHANGED |
6861 MAC_STATUS_MI_COMPLETION |
6862 MAC_STATUS_LNKSTATE_CHANGED));
6863 udelay(40);
6864 } else
6865 tg3_setup_phy(tp, false);
6866 spin_unlock(&tp->lock);
6867 }
6868 }
6869 }
6870
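/* Transfer freshly posted standard and jumbo RX buffers from a
 * per-vector producer ring to the ring the hardware consumes from,
 * stopping early with -ENOSPC if a destination slot is still busy.
 */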
6871 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6872 struct tg3_rx_prodring_set *dpr,
6873 struct tg3_rx_prodring_set *spr)
6874 {
6875 u32 si, di, cpycnt, src_prod_idx;
6876 int i, err = 0;
6877
6878 while (1) {
6879 src_prod_idx = spr->rx_std_prod_idx;
6880
6881 /* Make sure updates to the rx_std_buffers[] entries and the
6882 * standard producer index are seen in the correct order.
6883 */
6884 smp_rmb();
6885
6886 if (spr->rx_std_cons_idx == src_prod_idx)
6887 break;
6888
6889 if (spr->rx_std_cons_idx < src_prod_idx)
6890 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6891 else
6892 cpycnt = tp->rx_std_ring_mask + 1 -
6893 spr->rx_std_cons_idx;
6894
6895 cpycnt = min(cpycnt,
6896 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6897
6898 si = spr->rx_std_cons_idx;
6899 di = dpr->rx_std_prod_idx;
6900
6901 for (i = di; i < di + cpycnt; i++) {
6902 if (dpr->rx_std_buffers[i].data) {
6903 cpycnt = i - di;
6904 err = -ENOSPC;
6905 break;
6906 }
6907 }
6908
6909 if (!cpycnt)
6910 break;
6911
6912 /* Ensure that updates to the rx_std_buffers ring and the
6913 * shadowed hardware producer ring from tg3_recycle_skb() are
6914 * ordered correctly WRT the skb check above.
6915 */
6916 smp_rmb();
6917
6918 memcpy(&dpr->rx_std_buffers[di],
6919 &spr->rx_std_buffers[si],
6920 cpycnt * sizeof(struct ring_info));
6921
6922 for (i = 0; i < cpycnt; i++, di++, si++) {
6923 struct tg3_rx_buffer_desc *sbd, *dbd;
6924 sbd = &spr->rx_std[si];
6925 dbd = &dpr->rx_std[di];
6926 dbd->addr_hi = sbd->addr_hi;
6927 dbd->addr_lo = sbd->addr_lo;
6928 }
6929
6930 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6931 tp->rx_std_ring_mask;
6932 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6933 tp->rx_std_ring_mask;
6934 }
6935
6936 while (1) {
6937 src_prod_idx = spr->rx_jmb_prod_idx;
6938
6939 /* Make sure updates to the rx_jmb_buffers[] entries and
6940 * the jumbo producer index are seen in the correct order.
6941 */
6942 smp_rmb();
6943
6944 if (spr->rx_jmb_cons_idx == src_prod_idx)
6945 break;
6946
6947 if (spr->rx_jmb_cons_idx < src_prod_idx)
6948 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6949 else
6950 cpycnt = tp->rx_jmb_ring_mask + 1 -
6951 spr->rx_jmb_cons_idx;
6952
6953 cpycnt = min(cpycnt,
6954 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6955
6956 si = spr->rx_jmb_cons_idx;
6957 di = dpr->rx_jmb_prod_idx;
6958
6959 for (i = di; i < di + cpycnt; i++) {
6960 if (dpr->rx_jmb_buffers[i].data) {
6961 cpycnt = i - di;
6962 err = -ENOSPC;
6963 break;
6964 }
6965 }
6966
6967 if (!cpycnt)
6968 break;
6969
6970 /* Ensure that updates to the rx_jmb_buffers ring and the
6971 * shadowed hardware producer ring from tg3_recycle_skb() are
6972 * ordered correctly WRT the skb check above.
6973 */
6974 smp_rmb();
6975
6976 memcpy(&dpr->rx_jmb_buffers[di],
6977 &spr->rx_jmb_buffers[si],
6978 cpycnt * sizeof(struct ring_info));
6979
6980 for (i = 0; i < cpycnt; i++, di++, si++) {
6981 struct tg3_rx_buffer_desc *sbd, *dbd;
6982 sbd = &spr->rx_jmb[si].std;
6983 dbd = &dpr->rx_jmb[di].std;
6984 dbd->addr_hi = sbd->addr_hi;
6985 dbd->addr_lo = sbd->addr_lo;
6986 }
6987
6988 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6989 tp->rx_jmb_ring_mask;
6990 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6991 tp->rx_jmb_ring_mask;
6992 }
6993
6994 return err;
6995 }
6996
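/* Core NAPI work: reap TX completions first, then receive packets
 * within the remaining budget. With RSS, vector 1 also collects the
 * other vectors' RX buffers and refills the hardware producer rings.
 */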
6997 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6998 {
6999 struct tg3 *tp = tnapi->tp;
7000
7001 /* run TX completion thread */
7002 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7003 tg3_tx(tnapi);
7004 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7005 return work_done;
7006 }
7007
7008 if (!tnapi->rx_rcb_prod_idx)
7009 return work_done;
7010
7011 /* run RX thread, within the bounds set by NAPI.
7012 * All RX "locking" is done by ensuring outside
7013 * code synchronizes with tg3->napi.poll()
7014 */
7015 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7016 work_done += tg3_rx(tnapi, budget - work_done);
7017
7018 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7019 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7020 int i, err = 0;
7021 u32 std_prod_idx = dpr->rx_std_prod_idx;
7022 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7023
7024 tp->rx_refill = false;
7025 for (i = 1; i <= tp->rxq_cnt; i++)
7026 err |= tg3_rx_prodring_xfer(tp, dpr,
7027 &tp->napi[i].prodring);
7028
7029 wmb();
7030
7031 if (std_prod_idx != dpr->rx_std_prod_idx)
7032 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7033 dpr->rx_std_prod_idx);
7034
7035 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7036 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7037 dpr->rx_jmb_prod_idx);
7038
7039 mmiowb();
7040
7041 if (err)
7042 tw32_f(HOSTCC_MODE, tp->coal_now);
7043 }
7044
7045 return work_done;
7046 }
7047
7048 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7049 {
7050 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7051 schedule_work(&tp->reset_task);
7052 }
7053
7054 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7055 {
7056 cancel_work_sync(&tp->reset_task);
7057 tg3_flag_clear(tp, RESET_TASK_PENDING);
7058 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7059 }
7060
7061 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7062 {
7063 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7064 struct tg3 *tp = tnapi->tp;
7065 int work_done = 0;
7066 struct tg3_hw_status *sblk = tnapi->hw_status;
7067
7068 while (1) {
7069 work_done = tg3_poll_work(tnapi, work_done, budget);
7070
7071 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7072 goto tx_recovery;
7073
7074 if (unlikely(work_done >= budget))
7075 break;
7076
7077 /* tnapi->last_tag is used when we re-enable interrupts below
7078 * to tell the hw how much work has been processed,
7079 * so we must read it before checking for more work.
7080 */
7081 tnapi->last_tag = sblk->status_tag;
7082 tnapi->last_irq_tag = tnapi->last_tag;
7083 rmb();
7084
7085 /* check for RX/TX work to do */
7086 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7087 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7088
7089 /* This test here is not race free, but will reduce
7090 * the number of interrupts by looping again.
7091 */
7092 if (tnapi == &tp->napi[1] && tp->rx_refill)
7093 continue;
7094
7095 napi_complete(napi);
7096 /* Reenable interrupts. */
7097 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7098
7099 /* This test here is synchronized by napi_schedule()
7100 * and napi_complete() to close the race condition.
7101 */
7102 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7103 tw32(HOSTCC_MODE, tp->coalesce_mode |
7104 HOSTCC_MODE_ENABLE |
7105 tnapi->coal_now);
7106 }
7107 mmiowb();
7108 break;
7109 }
7110 }
7111
7112 return work_done;
7113
7114 tx_recovery:
7115 /* work_done is guaranteed to be less than budget. */
7116 napi_complete(napi);
7117 tg3_reset_task_schedule(tp);
7118 return work_done;
7119 }
7120
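/* Examine the flow attention, MSI, and DMA status registers when the
 * status block reports an error, and schedule a chip reset only if
 * one of them indicates a real problem.
 */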
7121 static void tg3_process_error(struct tg3 *tp)
7122 {
7123 u32 val;
7124 bool real_error = false;
7125
7126 if (tg3_flag(tp, ERROR_PROCESSED))
7127 return;
7128
7129 /* Check Flow Attention register */
7130 val = tr32(HOSTCC_FLOW_ATTN);
7131 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7132 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7133 real_error = true;
7134 }
7135
7136 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7137 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7138 real_error = true;
7139 }
7140
7141 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7142 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7143 real_error = true;
7144 }
7145
7146 if (!real_error)
7147 return;
7148
7149 tg3_dump_state(tp);
7150
7151 tg3_flag_set(tp, ERROR_PROCESSED);
7152 tg3_reset_task_schedule(tp);
7153 }
7154
7155 static int tg3_poll(struct napi_struct *napi, int budget)
7156 {
7157 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7158 struct tg3 *tp = tnapi->tp;
7159 int work_done = 0;
7160 struct tg3_hw_status *sblk = tnapi->hw_status;
7161
7162 while (1) {
7163 if (sblk->status & SD_STATUS_ERROR)
7164 tg3_process_error(tp);
7165
7166 tg3_poll_link(tp);
7167
7168 work_done = tg3_poll_work(tnapi, work_done, budget);
7169
7170 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7171 goto tx_recovery;
7172
7173 if (unlikely(work_done >= budget))
7174 break;
7175
7176 if (tg3_flag(tp, TAGGED_STATUS)) {
7177 /* tnapi->last_tag is used in tg3_int_reenable() below
7178 * to tell the hw how much work has been processed,
7179 * so we must read it before checking for more work.
7180 */
7181 tnapi->last_tag = sblk->status_tag;
7182 tnapi->last_irq_tag = tnapi->last_tag;
7183 rmb();
7184 } else
7185 sblk->status &= ~SD_STATUS_UPDATED;
7186
7187 if (likely(!tg3_has_work(tnapi))) {
7188 napi_complete(napi);
7189 tg3_int_reenable(tnapi);
7190 break;
7191 }
7192 }
7193
7194 return work_done;
7195
7196 tx_recovery:
7197 /* work_done is guaranteed to be less than budget. */
7198 napi_complete(napi);
7199 tg3_reset_task_schedule(tp);
7200 return work_done;
7201 }
7202
7203 static void tg3_napi_disable(struct tg3 *tp)
7204 {
7205 int i;
7206
7207 for (i = tp->irq_cnt - 1; i >= 0; i--)
7208 napi_disable(&tp->napi[i].napi);
7209 }
7210
7211 static void tg3_napi_enable(struct tg3 *tp)
7212 {
7213 int i;
7214
7215 for (i = 0; i < tp->irq_cnt; i++)
7216 napi_enable(&tp->napi[i].napi);
7217 }
7218
7219 static void tg3_napi_init(struct tg3 *tp)
7220 {
7221 int i;
7222
7223 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7224 for (i = 1; i < tp->irq_cnt; i++)
7225 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7226 }
7227
7228 static void tg3_napi_fini(struct tg3 *tp)
7229 {
7230 int i;
7231
7232 for (i = 0; i < tp->irq_cnt; i++)
7233 netif_napi_del(&tp->napi[i].napi);
7234 }
7235
7236 static inline void tg3_netif_stop(struct tg3 *tp)
7237 {
7238 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7239 tg3_napi_disable(tp);
7240 netif_carrier_off(tp->dev);
7241 netif_tx_disable(tp->dev);
7242 }
7243
7244 /* tp->lock must be held */
7245 static inline void tg3_netif_start(struct tg3 *tp)
7246 {
7247 tg3_ptp_resume(tp);
7248
7249 /* NOTE: unconditional netif_tx_wake_all_queues is only
7250 * appropriate so long as all callers are assured to
7251 * have free tx slots (such as after tg3_init_hw)
7252 */
7253 netif_tx_wake_all_queues(tp->dev);
7254
7255 if (tp->link_up)
7256 netif_carrier_on(tp->dev);
7257
7258 tg3_napi_enable(tp);
7259 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7260 tg3_enable_ints(tp);
7261 }
7262
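/* Mark the interrupt handlers as quiesced, then wait for any handler
 * still running on another CPU to finish.
 */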
7263 static void tg3_irq_quiesce(struct tg3 *tp)
7264 {
7265 int i;
7266
7267 BUG_ON(tp->irq_sync);
7268
7269 tp->irq_sync = 1;
7270 smp_mb();
7271
7272 for (i = 0; i < tp->irq_cnt; i++)
7273 synchronize_irq(tp->napi[i].irq_vec);
7274 }
7275
7276 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7277 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7278 * with as well. Most of the time, this is not necessary except when
7279 * shutting down the device.
7280 */
7281 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7282 {
7283 spin_lock_bh(&tp->lock);
7284 if (irq_sync)
7285 tg3_irq_quiesce(tp);
7286 }
7287
7288 static inline void tg3_full_unlock(struct tg3 *tp)
7289 {
7290 spin_unlock_bh(&tp->lock);
7291 }
7292
7293 /* One-shot MSI handler - the chip automatically disables the interrupt
7294 * after sending the MSI, so the driver doesn't have to do it.
7295 */
7296 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7297 {
7298 struct tg3_napi *tnapi = dev_id;
7299 struct tg3 *tp = tnapi->tp;
7300
7301 prefetch(tnapi->hw_status);
7302 if (tnapi->rx_rcb)
7303 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7304
7305 if (likely(!tg3_irq_sync(tp)))
7306 napi_schedule(&tnapi->napi);
7307
7308 return IRQ_HANDLED;
7309 }
7310
7311 /* MSI ISR - No need to check for interrupt sharing and no need to
7312 * flush status block and interrupt mailbox. PCI ordering rules
7313 * guarantee that MSI will arrive after the status block.
7314 */
7315 static irqreturn_t tg3_msi(int irq, void *dev_id)
7316 {
7317 struct tg3_napi *tnapi = dev_id;
7318 struct tg3 *tp = tnapi->tp;
7319
7320 prefetch(tnapi->hw_status);
7321 if (tnapi->rx_rcb)
7322 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7323 /*
7324 * Writing any value to intr-mbox-0 clears PCI INTA# and
7325 * chip-internal interrupt pending events.
7326 * Writing non-zero to intr-mbox-0 additionally tells the
7327 * NIC to stop sending us irqs, engaging "in-intr-handler"
7328 * event coalescing.
7329 */
7330 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7331 if (likely(!tg3_irq_sync(tp)))
7332 napi_schedule(&tnapi->napi);
7333
7334 return IRQ_RETVAL(1);
7335 }
7336
7337 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7338 {
7339 struct tg3_napi *tnapi = dev_id;
7340 struct tg3 *tp = tnapi->tp;
7341 struct tg3_hw_status *sblk = tnapi->hw_status;
7342 unsigned int handled = 1;
7343
7344 /* In INTx mode, it is possible for the interrupt to arrive at
7345 * the CPU before the status block posted prior to the interrupt is
7346 * visible. Reading the PCI State register will confirm whether the
7347 * interrupt is ours and will flush the status block.
7348 */
7349 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7350 if (tg3_flag(tp, CHIP_RESETTING) ||
7351 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7352 handled = 0;
7353 goto out;
7354 }
7355 }
7356
7357 /*
7358 * Writing any value to intr-mbox-0 clears PCI INTA# and
7359 * chip-internal interrupt pending events.
7360 * Writing non-zero to intr-mbox-0 additionally tells the
7361 * NIC to stop sending us irqs, engaging "in-intr-handler"
7362 * event coalescing.
7363 *
7364 * Flush the mailbox to de-assert the IRQ immediately to prevent
7365 * spurious interrupts. The flush impacts performance but
7366 * excessive spurious interrupts can be worse in some cases.
7367 */
7368 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7369 if (tg3_irq_sync(tp))
7370 goto out;
7371 sblk->status &= ~SD_STATUS_UPDATED;
7372 if (likely(tg3_has_work(tnapi))) {
7373 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7374 napi_schedule(&tnapi->napi);
7375 } else {
7376 /* No work, shared interrupt perhaps? re-enable
7377 * interrupts, and flush that PCI write
7378 */
7379 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7380 0x00000000);
7381 }
7382 out:
7383 return IRQ_RETVAL(handled);
7384 }
7385
7386 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7387 {
7388 struct tg3_napi *tnapi = dev_id;
7389 struct tg3 *tp = tnapi->tp;
7390 struct tg3_hw_status *sblk = tnapi->hw_status;
7391 unsigned int handled = 1;
7392
7393 /* In INTx mode, it is possible for the interrupt to arrive at
7394 * the CPU before the status block posted prior to the interrupt is
7395 * visible. Reading the PCI State register will confirm whether the
7396 * interrupt is ours and will flush the status block.
7397 */
7398 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7399 if (tg3_flag(tp, CHIP_RESETTING) ||
7400 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7401 handled = 0;
7402 goto out;
7403 }
7404 }
7405
7406 /*
7407 * Writing any value to intr-mbox-0 clears PCI INTA# and
7408 * chip-internal interrupt pending events.
7409 * Writing non-zero to intr-mbox-0 additionally tells the
7410 * NIC to stop sending us irqs, engaging "in-intr-handler"
7411 * event coalescing.
7412 *
7413 * Flush the mailbox to de-assert the IRQ immediately to prevent
7414 * spurious interrupts. The flush impacts performance but
7415 * excessive spurious interrupts can be worse in some cases.
7416 */
7417 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7418
7419 /*
7420 * In a shared interrupt configuration, sometimes other devices'
7421 * interrupts will scream. We record the current status tag here
7422 * so that the above check can report that the screaming interrupts
7423 * are unhandled. Eventually they will be silenced.
7424 */
7425 tnapi->last_irq_tag = sblk->status_tag;
7426
7427 if (tg3_irq_sync(tp))
7428 goto out;
7429
7430 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7431
7432 napi_schedule(&tnapi->napi);
7433
7434 out:
7435 return IRQ_RETVAL(handled);
7436 }
7437
7438 /* ISR for interrupt test */
7439 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7440 {
7441 struct tg3_napi *tnapi = dev_id;
7442 struct tg3 *tp = tnapi->tp;
7443 struct tg3_hw_status *sblk = tnapi->hw_status;
7444
7445 if ((sblk->status & SD_STATUS_UPDATED) ||
7446 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7447 tg3_disable_ints(tp);
7448 return IRQ_RETVAL(1);
7449 }
7450 return IRQ_RETVAL(0);
7451 }
7452
7453 #ifdef CONFIG_NET_POLL_CONTROLLER
7454 static void tg3_poll_controller(struct net_device *dev)
7455 {
7456 int i;
7457 struct tg3 *tp = netdev_priv(dev);
7458
7459 if (tg3_irq_sync(tp))
7460 return;
7461
7462 for (i = 0; i < tp->irq_cnt; i++)
7463 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7464 }
7465 #endif
7466
7467 static void tg3_tx_timeout(struct net_device *dev)
7468 {
7469 struct tg3 *tp = netdev_priv(dev);
7470
7471 if (netif_msg_tx_err(tp)) {
7472 netdev_err(dev, "transmit timed out, resetting\n");
7473 tg3_dump_state(tp);
7474 }
7475
7476 tg3_reset_task_schedule(tp);
7477 }
7478
7479 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7480 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7481 {
7482 u32 base = (u32) mapping & 0xffffffff;
7483
7484 return base + len + 8 < base;
7485 }
7486
7487 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7488 * of any 4GB boundaries: 4G, 8G, etc
7489 */
7490 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7491 u32 len, u32 mss)
7492 {
7493 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7494 u32 base = (u32) mapping & 0xffffffff;
7495
7496 return ((base + len + (mss & 0x3fff)) < base);
7497 }
7498 return 0;
7499 }
7500
7501 /* Test for DMA addresses > 40-bit */
7502 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7503 int len)
7504 {
7505 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7506 if (tg3_flag(tp, 40BIT_DMA_BUG))
7507 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7508 return 0;
7509 #else
7510 return 0;
7511 #endif
7512 }
7513
7514 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7515 dma_addr_t mapping, u32 len, u32 flags,
7516 u32 mss, u32 vlan)
7517 {
7518 txbd->addr_hi = ((u64) mapping >> 32);
7519 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7520 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7521 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7522 }
7523
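/* Fill one or more TX BDs for a mapped buffer, splitting it when it
 * exceeds tp->dma_limit. Returns true if the mapping would trip one
 * of the hardware DMA bugs and the workaround path must be taken.
 */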
7524 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7525 dma_addr_t map, u32 len, u32 flags,
7526 u32 mss, u32 vlan)
7527 {
7528 struct tg3 *tp = tnapi->tp;
7529 bool hwbug = false;
7530
7531 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7532 hwbug = true;
7533
7534 if (tg3_4g_overflow_test(map, len))
7535 hwbug = true;
7536
7537 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7538 hwbug = true;
7539
7540 if (tg3_40bit_overflow_test(tp, map, len))
7541 hwbug = true;
7542
7543 if (tp->dma_limit) {
7544 u32 prvidx = *entry;
7545 u32 tmp_flag = flags & ~TXD_FLAG_END;
7546 while (len > tp->dma_limit && *budget) {
7547 u32 frag_len = tp->dma_limit;
7548 len -= tp->dma_limit;
7549
7550 /* Avoid the 8-byte DMA problem */
7551 if (len <= 8) {
7552 len += tp->dma_limit / 2;
7553 frag_len = tp->dma_limit / 2;
7554 }
7555
7556 tnapi->tx_buffers[*entry].fragmented = true;
7557
7558 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7559 frag_len, tmp_flag, mss, vlan);
7560 *budget -= 1;
7561 prvidx = *entry;
7562 *entry = NEXT_TX(*entry);
7563
7564 map += frag_len;
7565 }
7566
7567 if (len) {
7568 if (*budget) {
7569 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7570 len, flags, mss, vlan);
7571 *budget -= 1;
7572 *entry = NEXT_TX(*entry);
7573 } else {
7574 hwbug = true;
7575 tnapi->tx_buffers[prvidx].fragmented = false;
7576 }
7577 }
7578 } else {
7579 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7580 len, flags, mss, vlan);
7581 *entry = NEXT_TX(*entry);
7582 }
7583
7584 return hwbug;
7585 }
7586
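/* Undo the DMA mappings of a queued skb: the linear head, any BDs
 * that were split for the DMA limit, and page fragments 0..last.
 */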
7587 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7588 {
7589 int i;
7590 struct sk_buff *skb;
7591 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7592
7593 skb = txb->skb;
7594 txb->skb = NULL;
7595
7596 pci_unmap_single(tnapi->tp->pdev,
7597 dma_unmap_addr(txb, mapping),
7598 skb_headlen(skb),
7599 PCI_DMA_TODEVICE);
7600
7601 while (txb->fragmented) {
7602 txb->fragmented = false;
7603 entry = NEXT_TX(entry);
7604 txb = &tnapi->tx_buffers[entry];
7605 }
7606
7607 for (i = 0; i <= last; i++) {
7608 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7609
7610 entry = NEXT_TX(entry);
7611 txb = &tnapi->tx_buffers[entry];
7612
7613 pci_unmap_page(tnapi->tp->pdev,
7614 dma_unmap_addr(txb, mapping),
7615 skb_frag_size(frag), PCI_DMA_TODEVICE);
7616
7617 while (txb->fragmented) {
7618 txb->fragmented = false;
7619 entry = NEXT_TX(entry);
7620 txb = &tnapi->tx_buffers[entry];
7621 }
7622 }
7623 }
7624
7625 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7626 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7627 struct sk_buff **pskb,
7628 u32 *entry, u32 *budget,
7629 u32 base_flags, u32 mss, u32 vlan)
7630 {
7631 struct tg3 *tp = tnapi->tp;
7632 struct sk_buff *new_skb, *skb = *pskb;
7633 dma_addr_t new_addr = 0;
7634 int ret = 0;
7635
7636 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7637 new_skb = skb_copy(skb, GFP_ATOMIC);
7638 else {
7639 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7640
7641 new_skb = skb_copy_expand(skb,
7642 skb_headroom(skb) + more_headroom,
7643 skb_tailroom(skb), GFP_ATOMIC);
7644 }
7645
7646 if (!new_skb) {
7647 ret = -1;
7648 } else {
7649 /* New SKB is guaranteed to be linear. */
7650 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7651 PCI_DMA_TODEVICE);
7652 /* Make sure the mapping succeeded */
7653 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7654 dev_kfree_skb(new_skb);
7655 ret = -1;
7656 } else {
7657 u32 save_entry = *entry;
7658
7659 base_flags |= TXD_FLAG_END;
7660
7661 tnapi->tx_buffers[*entry].skb = new_skb;
7662 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7663 mapping, new_addr);
7664
7665 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7666 new_skb->len, base_flags,
7667 mss, vlan)) {
7668 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7669 dev_kfree_skb(new_skb);
7670 ret = -1;
7671 }
7672 }
7673 }
7674
7675 dev_kfree_skb(skb);
7676 *pskb = new_skb;
7677 return ret;
7678 }
7679
7680 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7681
7682 /* Use GSO to work around a rare TSO bug that may be triggered when the
7683 * TSO header is greater than 80 bytes.
7684 */
7685 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7686 {
7687 struct sk_buff *segs, *nskb;
7688 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7689
7690 /* Estimate the number of fragments in the worst case */
7691 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7692 netif_stop_queue(tp->dev);
7693
7694 /* netif_tx_stop_queue() must be done before checking
7695 * the tx index in tg3_tx_avail() below, because in
7696 * tg3_tx(), we update tx index before checking for
7697 * netif_tx_queue_stopped().
7698 */
7699 smp_mb();
7700 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7701 return NETDEV_TX_BUSY;
7702
7703 netif_wake_queue(tp->dev);
7704 }
7705
7706 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7707 if (IS_ERR(segs))
7708 goto tg3_tso_bug_end;
7709
7710 do {
7711 nskb = segs;
7712 segs = segs->next;
7713 nskb->next = NULL;
7714 tg3_start_xmit(nskb, tp->dev);
7715 } while (segs);
7716
7717 tg3_tso_bug_end:
7718 dev_kfree_skb(skb);
7719
7720 return NETDEV_TX_OK;
7721 }
7722
7723 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7724 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7725 */
7726 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7727 {
7728 struct tg3 *tp = netdev_priv(dev);
7729 u32 len, entry, base_flags, mss, vlan = 0;
7730 u32 budget;
7731 int i = -1, would_hit_hwbug;
7732 dma_addr_t mapping;
7733 struct tg3_napi *tnapi;
7734 struct netdev_queue *txq;
7735 unsigned int last;
7736
7737 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7738 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7739 if (tg3_flag(tp, ENABLE_TSS))
7740 tnapi++;
7741
7742 budget = tg3_tx_avail(tnapi);
7743
7744 /* We are running in BH disabled context with netif_tx_lock
7745 * and TX reclaim runs via tp->napi.poll inside of a software
7746 * interrupt. Furthermore, IRQ processing runs lockless so we have
7747 * no IRQ context deadlocks to worry about either. Rejoice!
7748 */
7749 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7750 if (!netif_tx_queue_stopped(txq)) {
7751 netif_tx_stop_queue(txq);
7752
7753 /* This is a hard error, log it. */
7754 netdev_err(dev,
7755 "BUG! Tx Ring full when queue awake!\n");
7756 }
7757 return NETDEV_TX_BUSY;
7758 }
7759
7760 entry = tnapi->tx_prod;
7761 base_flags = 0;
7762 if (skb->ip_summed == CHECKSUM_PARTIAL)
7763 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7764
7765 mss = skb_shinfo(skb)->gso_size;
7766 if (mss) {
7767 struct iphdr *iph;
7768 u32 tcp_opt_len, hdr_len;
7769
7770 if (skb_header_cloned(skb) &&
7771 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7772 goto drop;
7773
7774 iph = ip_hdr(skb);
7775 tcp_opt_len = tcp_optlen(skb);
7776
7777 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7778
7779 if (!skb_is_gso_v6(skb)) {
7780 iph->check = 0;
7781 iph->tot_len = htons(mss + hdr_len);
7782 }
7783
7784 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7785 tg3_flag(tp, TSO_BUG))
7786 return tg3_tso_bug(tp, skb);
7787
7788 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7789 TXD_FLAG_CPU_POST_DMA);
7790
7791 if (tg3_flag(tp, HW_TSO_1) ||
7792 tg3_flag(tp, HW_TSO_2) ||
7793 tg3_flag(tp, HW_TSO_3)) {
7794 tcp_hdr(skb)->check = 0;
7795 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7796 } else
7797 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7798 iph->daddr, 0,
7799 IPPROTO_TCP,
7800 0);
7801
7802 if (tg3_flag(tp, HW_TSO_3)) {
7803 mss |= (hdr_len & 0xc) << 12;
7804 if (hdr_len & 0x10)
7805 base_flags |= 0x00000010;
7806 base_flags |= (hdr_len & 0x3e0) << 5;
7807 } else if (tg3_flag(tp, HW_TSO_2))
7808 mss |= hdr_len << 9;
7809 else if (tg3_flag(tp, HW_TSO_1) ||
7810 tg3_asic_rev(tp) == ASIC_REV_5705) {
7811 if (tcp_opt_len || iph->ihl > 5) {
7812 int tsflags;
7813
7814 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7815 mss |= (tsflags << 11);
7816 }
7817 } else {
7818 if (tcp_opt_len || iph->ihl > 5) {
7819 int tsflags;
7820
7821 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7822 base_flags |= tsflags << 12;
7823 }
7824 }
7825 }
7826
7827 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7828 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7829 base_flags |= TXD_FLAG_JMB_PKT;
7830
7831 if (vlan_tx_tag_present(skb)) {
7832 base_flags |= TXD_FLAG_VLAN;
7833 vlan = vlan_tx_tag_get(skb);
7834 }
7835
7836 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7837 tg3_flag(tp, TX_TSTAMP_EN)) {
7838 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7839 base_flags |= TXD_FLAG_HWTSTAMP;
7840 }
7841
7842 len = skb_headlen(skb);
7843
7844 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7845 if (pci_dma_mapping_error(tp->pdev, mapping))
7846 goto drop;
7847
7848
7849 tnapi->tx_buffers[entry].skb = skb;
7850 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7851
7852 would_hit_hwbug = 0;
7853
7854 if (tg3_flag(tp, 5701_DMA_BUG))
7855 would_hit_hwbug = 1;
7856
7857 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7858 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7859 mss, vlan)) {
7860 would_hit_hwbug = 1;
7861 } else if (skb_shinfo(skb)->nr_frags > 0) {
7862 u32 tmp_mss = mss;
7863
7864 if (!tg3_flag(tp, HW_TSO_1) &&
7865 !tg3_flag(tp, HW_TSO_2) &&
7866 !tg3_flag(tp, HW_TSO_3))
7867 tmp_mss = 0;
7868
7869 /* Now loop through additional data
7870 * fragments, and queue them.
7871 */
7872 last = skb_shinfo(skb)->nr_frags - 1;
7873 for (i = 0; i <= last; i++) {
7874 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7875
7876 len = skb_frag_size(frag);
7877 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7878 len, DMA_TO_DEVICE);
7879
7880 tnapi->tx_buffers[entry].skb = NULL;
7881 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7882 mapping);
7883 if (dma_mapping_error(&tp->pdev->dev, mapping))
7884 goto dma_error;
7885
7886 if (!budget ||
7887 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7888 len, base_flags |
7889 ((i == last) ? TXD_FLAG_END : 0),
7890 tmp_mss, vlan)) {
7891 would_hit_hwbug = 1;
7892 break;
7893 }
7894 }
7895 }
7896
7897 if (would_hit_hwbug) {
7898 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7899
7900 /* If the workaround fails due to memory/mapping
7901 * failure, silently drop this packet.
7902 */
7903 entry = tnapi->tx_prod;
7904 budget = tg3_tx_avail(tnapi);
7905 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7906 base_flags, mss, vlan))
7907 goto drop_nofree;
7908 }
7909
7910 skb_tx_timestamp(skb);
7911 netdev_tx_sent_queue(txq, skb->len);
7912
7913 /* Sync BD data before updating mailbox */
7914 wmb();
7915
7916 /* Packets are ready, update Tx producer idx local and on card. */
7917 tw32_tx_mbox(tnapi->prodmbox, entry);
7918
7919 tnapi->tx_prod = entry;
7920 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7921 netif_tx_stop_queue(txq);
7922
7923 /* netif_tx_stop_queue() must be done before checking
7924 * the tx index in tg3_tx_avail() below, because in
7925 * tg3_tx(), we update tx index before checking for
7926 * netif_tx_queue_stopped().
7927 */
7928 smp_mb();
7929 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7930 netif_tx_wake_queue(txq);
7931 }
7932
7933 mmiowb();
7934 return NETDEV_TX_OK;
7935
7936 dma_error:
7937 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7938 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7939 drop:
7940 dev_kfree_skb(skb);
7941 drop_nofree:
7942 tp->tx_dropped++;
7943 return NETDEV_TX_OK;
7944 }
7945
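/* Enable or disable internal MAC loopback, selecting the MII or GMII
 * port mode to match the attached PHY.
 */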
7946 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7947 {
7948 if (enable) {
7949 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7950 MAC_MODE_PORT_MODE_MASK);
7951
7952 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7953
7954 if (!tg3_flag(tp, 5705_PLUS))
7955 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7956
7957 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7958 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7959 else
7960 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7961 } else {
7962 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7963
7964 if (tg3_flag(tp, 5705_PLUS) ||
7965 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7966 tg3_asic_rev(tp) == ASIC_REV_5700)
7967 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7968 }
7969
7970 tw32(MAC_MODE, tp->mac_mode);
7971 udelay(40);
7972 }
7973
7974 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7975 {
7976 u32 val, bmcr, mac_mode, ptest = 0;
7977
7978 tg3_phy_toggle_apd(tp, false);
7979 tg3_phy_toggle_automdix(tp, false);
7980
7981 if (extlpbk && tg3_phy_set_extloopbk(tp))
7982 return -EIO;
7983
7984 bmcr = BMCR_FULLDPLX;
7985 switch (speed) {
7986 case SPEED_10:
7987 break;
7988 case SPEED_100:
7989 bmcr |= BMCR_SPEED100;
7990 break;
7991 case SPEED_1000:
7992 default:
7993 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7994 speed = SPEED_100;
7995 bmcr |= BMCR_SPEED100;
7996 } else {
7997 speed = SPEED_1000;
7998 bmcr |= BMCR_SPEED1000;
7999 }
8000 }
8001
8002 if (extlpbk) {
8003 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8004 tg3_readphy(tp, MII_CTRL1000, &val);
8005 val |= CTL1000_AS_MASTER |
8006 CTL1000_ENABLE_MASTER;
8007 tg3_writephy(tp, MII_CTRL1000, val);
8008 } else {
8009 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8010 MII_TG3_FET_PTEST_TRIM_2;
8011 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8012 }
8013 } else
8014 bmcr |= BMCR_LOOPBACK;
8015
8016 tg3_writephy(tp, MII_BMCR, bmcr);
8017
8018 /* The write needs to be flushed for the FETs */
8019 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8020 tg3_readphy(tp, MII_BMCR, &bmcr);
8021
8022 udelay(40);
8023
8024 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8025 tg3_asic_rev(tp) == ASIC_REV_5785) {
8026 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8027 MII_TG3_FET_PTEST_FRC_TX_LINK |
8028 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8029
8030 /* The write needs to be flushed for the AC131 */
8031 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8032 }
8033
8034 /* Reset to prevent losing 1st rx packet intermittently */
8035 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8036 tg3_flag(tp, 5780_CLASS)) {
8037 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8038 udelay(10);
8039 tw32_f(MAC_RX_MODE, tp->rx_mode);
8040 }
8041
8042 mac_mode = tp->mac_mode &
8043 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8044 if (speed == SPEED_1000)
8045 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8046 else
8047 mac_mode |= MAC_MODE_PORT_MODE_MII;
8048
8049 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8050 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8051
8052 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8053 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8054 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8055 mac_mode |= MAC_MODE_LINK_POLARITY;
8056
8057 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8058 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8059 }
8060
8061 tw32(MAC_MODE, mac_mode);
8062 udelay(40);
8063
8064 return 0;
8065 }
8066
8067 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8068 {
8069 struct tg3 *tp = netdev_priv(dev);
8070
8071 if (features & NETIF_F_LOOPBACK) {
8072 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8073 return;
8074
8075 spin_lock_bh(&tp->lock);
8076 tg3_mac_loopback(tp, true);
8077 netif_carrier_on(tp->dev);
8078 spin_unlock_bh(&tp->lock);
8079 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8080 } else {
8081 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8082 return;
8083
8084 spin_lock_bh(&tp->lock);
8085 tg3_mac_loopback(tp, false);
8086 /* Force link status check */
8087 tg3_setup_phy(tp, true);
8088 spin_unlock_bh(&tp->lock);
8089 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8090 }
8091 }
8092
8093 static netdev_features_t tg3_fix_features(struct net_device *dev,
8094 netdev_features_t features)
8095 {
8096 struct tg3 *tp = netdev_priv(dev);
8097
8098 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8099 features &= ~NETIF_F_ALL_TSO;
8100
8101 return features;
8102 }
8103
8104 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8105 {
8106 netdev_features_t changed = dev->features ^ features;
8107
8108 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8109 tg3_set_loopback(dev, features);
8110
8111 return 0;
8112 }
8113
8114 static void tg3_rx_prodring_free(struct tg3 *tp,
8115 struct tg3_rx_prodring_set *tpr)
8116 {
8117 int i;
8118
8119 if (tpr != &tp->napi[0].prodring) {
8120 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8121 i = (i + 1) & tp->rx_std_ring_mask)
8122 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8123 tp->rx_pkt_map_sz);
8124
8125 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8126 for (i = tpr->rx_jmb_cons_idx;
8127 i != tpr->rx_jmb_prod_idx;
8128 i = (i + 1) & tp->rx_jmb_ring_mask) {
8129 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8130 TG3_RX_JMB_MAP_SZ);
8131 }
8132 }
8133
8134 return;
8135 }
8136
8137 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8138 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8139 tp->rx_pkt_map_sz);
8140
8141 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8142 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8143 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8144 TG3_RX_JMB_MAP_SZ);
8145 }
8146 }
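
/* The cons -> prod walk above relies on the ring sizes being powers of
 * two, so "(i + 1) & mask" wraps around for free: with a 512-entry
 * standard ring, rx_std_ring_mask == 511 and (511 + 1) & 511 == 0.
 */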
8147
8148 /* Initialize rx rings for packet processing.
8149 *
8150 * The chip has been shut down and the driver detached from
8151 * the network stack, so no interrupts or new tx packets will
8152 * end up in the driver. tp->{tx,}lock are held and thus
8153 * we may not sleep.
8154 */
8155 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8156 struct tg3_rx_prodring_set *tpr)
8157 {
8158 u32 i, rx_pkt_dma_sz;
8159
8160 tpr->rx_std_cons_idx = 0;
8161 tpr->rx_std_prod_idx = 0;
8162 tpr->rx_jmb_cons_idx = 0;
8163 tpr->rx_jmb_prod_idx = 0;
8164
8165 if (tpr != &tp->napi[0].prodring) {
8166 memset(&tpr->rx_std_buffers[0], 0,
8167 TG3_RX_STD_BUFF_RING_SIZE(tp));
8168 if (tpr->rx_jmb_buffers)
8169 memset(&tpr->rx_jmb_buffers[0], 0,
8170 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8171 goto done;
8172 }
8173
8174 /* Zero out all descriptors. */
8175 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8176
8177 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8178 if (tg3_flag(tp, 5780_CLASS) &&
8179 tp->dev->mtu > ETH_DATA_LEN)
8180 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8181 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8182
8183 /* Initialize invariants of the rings; we only set this
8184 * stuff once. This works because the card does not
8185 * write into the rx buffer posting rings.
8186 */
8187 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8188 struct tg3_rx_buffer_desc *rxd;
8189
8190 rxd = &tpr->rx_std[i];
8191 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8192 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8193 rxd->opaque = (RXD_OPAQUE_RING_STD |
8194 (i << RXD_OPAQUE_INDEX_SHIFT));
8195 }
8196
8197 /* Now allocate fresh SKBs for each rx ring. */
8198 for (i = 0; i < tp->rx_pending; i++) {
8199 unsigned int frag_size;
8200
8201 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8202 &frag_size) < 0) {
8203 netdev_warn(tp->dev,
8204 "Using a smaller RX standard ring. Only "
8205 "%d out of %d buffers were allocated "
8206 "successfully\n", i, tp->rx_pending);
8207 if (i == 0)
8208 goto initfail;
8209 tp->rx_pending = i;
8210 break;
8211 }
8212 }
8213
8214 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8215 goto done;
8216
8217 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8218
8219 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8220 goto done;
8221
8222 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8223 struct tg3_rx_buffer_desc *rxd;
8224
8225 rxd = &tpr->rx_jmb[i].std;
8226 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8227 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8228 RXD_FLAG_JUMBO;
8229 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8230 (i << RXD_OPAQUE_INDEX_SHIFT));
8231 }
8232
8233 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8234 unsigned int frag_size;
8235
8236 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8237 &frag_size) < 0) {
8238 netdev_warn(tp->dev,
8239 "Using a smaller RX jumbo ring. Only %d "
8240 "out of %d buffers were allocated "
8241 "successfully\n", i, tp->rx_jumbo_pending);
8242 if (i == 0)
8243 goto initfail;
8244 tp->rx_jumbo_pending = i;
8245 break;
8246 }
8247 }
8248
8249 done:
8250 return 0;
8251
8252 initfail:
8253 tg3_rx_prodring_free(tp, tpr);
8254 return -ENOMEM;
8255 }
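
/* Example of the opaque cookie initialized above: standard ring entry
 * 5 carries RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT).  The
 * chip echoes this field back in the matching rx return descriptor, so
 * the completion path can recover both the source ring and the buffer
 * index without extra bookkeeping.
 */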
8256
8257 static void tg3_rx_prodring_fini(struct tg3 *tp,
8258 struct tg3_rx_prodring_set *tpr)
8259 {
8260 kfree(tpr->rx_std_buffers);
8261 tpr->rx_std_buffers = NULL;
8262 kfree(tpr->rx_jmb_buffers);
8263 tpr->rx_jmb_buffers = NULL;
8264 if (tpr->rx_std) {
8265 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8266 tpr->rx_std, tpr->rx_std_mapping);
8267 tpr->rx_std = NULL;
8268 }
8269 if (tpr->rx_jmb) {
8270 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8271 tpr->rx_jmb, tpr->rx_jmb_mapping);
8272 tpr->rx_jmb = NULL;
8273 }
8274 }
8275
8276 static int tg3_rx_prodring_init(struct tg3 *tp,
8277 struct tg3_rx_prodring_set *tpr)
8278 {
8279 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8280 GFP_KERNEL);
8281 if (!tpr->rx_std_buffers)
8282 return -ENOMEM;
8283
8284 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8285 TG3_RX_STD_RING_BYTES(tp),
8286 &tpr->rx_std_mapping,
8287 GFP_KERNEL);
8288 if (!tpr->rx_std)
8289 goto err_out;
8290
8291 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8292 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8293 GFP_KERNEL);
8294 if (!tpr->rx_jmb_buffers)
8295 goto err_out;
8296
8297 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8298 TG3_RX_JMB_RING_BYTES(tp),
8299 &tpr->rx_jmb_mapping,
8300 GFP_KERNEL);
8301 if (!tpr->rx_jmb)
8302 goto err_out;
8303 }
8304
8305 return 0;
8306
8307 err_out:
8308 tg3_rx_prodring_fini(tp, tpr);
8309 return -ENOMEM;
8310 }
8311
8312 /* Free up pending packets in all rx/tx rings.
8313 *
8314 * The chip has been shut down and the driver detached from
8315 * the network stack, so no interrupts or new tx packets will
8316 * end up in the driver. tp->{tx,}lock is not held and we are not
8317 * in an interrupt context and thus may sleep.
8318 */
8319 static void tg3_free_rings(struct tg3 *tp)
8320 {
8321 int i, j;
8322
8323 for (j = 0; j < tp->irq_cnt; j++) {
8324 struct tg3_napi *tnapi = &tp->napi[j];
8325
8326 tg3_rx_prodring_free(tp, &tnapi->prodring);
8327
8328 if (!tnapi->tx_buffers)
8329 continue;
8330
8331 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8332 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8333
8334 if (!skb)
8335 continue;
8336
8337 tg3_tx_skb_unmap(tnapi, i,
8338 skb_shinfo(skb)->nr_frags - 1);
8339
8340 dev_kfree_skb_any(skb);
8341 }
8342 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8343 }
8344 }
8345
8346 /* Initialize tx/rx rings for packet processing.
8347 *
8348 * The chip has been shut down and the driver detached from
8349 * the network stack, so no interrupts or new tx packets will
8350 * end up in the driver. tp->{tx,}lock are held and thus
8351 * we may not sleep.
8352 */
8353 static int tg3_init_rings(struct tg3 *tp)
8354 {
8355 int i;
8356
8357 /* Free up all the SKBs. */
8358 tg3_free_rings(tp);
8359
8360 for (i = 0; i < tp->irq_cnt; i++) {
8361 struct tg3_napi *tnapi = &tp->napi[i];
8362
8363 tnapi->last_tag = 0;
8364 tnapi->last_irq_tag = 0;
8365 tnapi->hw_status->status = 0;
8366 tnapi->hw_status->status_tag = 0;
8367 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8368
8369 tnapi->tx_prod = 0;
8370 tnapi->tx_cons = 0;
8371 if (tnapi->tx_ring)
8372 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8373
8374 tnapi->rx_rcb_ptr = 0;
8375 if (tnapi->rx_rcb)
8376 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8377
8378 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8379 tg3_free_rings(tp);
8380 return -ENOMEM;
8381 }
8382 }
8383
8384 return 0;
8385 }
8386
8387 static void tg3_mem_tx_release(struct tg3 *tp)
8388 {
8389 int i;
8390
8391 for (i = 0; i < tp->irq_max; i++) {
8392 struct tg3_napi *tnapi = &tp->napi[i];
8393
8394 if (tnapi->tx_ring) {
8395 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8396 tnapi->tx_ring, tnapi->tx_desc_mapping);
8397 tnapi->tx_ring = NULL;
8398 }
8399
8400 kfree(tnapi->tx_buffers);
8401 tnapi->tx_buffers = NULL;
8402 }
8403 }
8404
8405 static int tg3_mem_tx_acquire(struct tg3 *tp)
8406 {
8407 int i;
8408 struct tg3_napi *tnapi = &tp->napi[0];
8409
8410 /* If multivector TSS is enabled, vector 0 does not handle
8411 * tx interrupts. Don't allocate any resources for it.
8412 */
8413 if (tg3_flag(tp, ENABLE_TSS))
8414 tnapi++;
8415
8416 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8417 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8418 TG3_TX_RING_SIZE, GFP_KERNEL);
8419 if (!tnapi->tx_buffers)
8420 goto err_out;
8421
8422 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8423 TG3_TX_RING_BYTES,
8424 &tnapi->tx_desc_mapping,
8425 GFP_KERNEL);
8426 if (!tnapi->tx_ring)
8427 goto err_out;
8428 }
8429
8430 return 0;
8431
8432 err_out:
8433 tg3_mem_tx_release(tp);
8434 return -ENOMEM;
8435 }
8436
8437 static void tg3_mem_rx_release(struct tg3 *tp)
8438 {
8439 int i;
8440
8441 for (i = 0; i < tp->irq_max; i++) {
8442 struct tg3_napi *tnapi = &tp->napi[i];
8443
8444 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8445
8446 if (!tnapi->rx_rcb)
8447 continue;
8448
8449 dma_free_coherent(&tp->pdev->dev,
8450 TG3_RX_RCB_RING_BYTES(tp),
8451 tnapi->rx_rcb,
8452 tnapi->rx_rcb_mapping);
8453 tnapi->rx_rcb = NULL;
8454 }
8455 }
8456
8457 static int tg3_mem_rx_acquire(struct tg3 *tp)
8458 {
8459 unsigned int i, limit;
8460
8461 limit = tp->rxq_cnt;
8462
8463 /* If RSS is enabled, we need a (dummy) producer ring
8464 * set on vector zero. This is the true hw prodring.
8465 */
8466 if (tg3_flag(tp, ENABLE_RSS))
8467 limit++;
8468
8469 for (i = 0; i < limit; i++) {
8470 struct tg3_napi *tnapi = &tp->napi[i];
8471
8472 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8473 goto err_out;
8474
8475 /* If multivector RSS is enabled, vector 0
8476 * does not handle rx or tx interrupts.
8477 * Don't allocate any resources for it.
8478 */
8479 if (!i && tg3_flag(tp, ENABLE_RSS))
8480 continue;
8481
8482 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8483 TG3_RX_RCB_RING_BYTES(tp),
8484 &tnapi->rx_rcb_mapping,
8485 GFP_KERNEL | __GFP_ZERO);
8486 if (!tnapi->rx_rcb)
8487 goto err_out;
8488 }
8489
8490 return 0;
8491
8492 err_out:
8493 tg3_mem_rx_release(tp);
8494 return -ENOMEM;
8495 }
8496
8497 /*
8498 * Must not be invoked with interrupt sources disabled and
8499 * the hardware shut down.
8500 */
8501 static void tg3_free_consistent(struct tg3 *tp)
8502 {
8503 int i;
8504
8505 for (i = 0; i < tp->irq_cnt; i++) {
8506 struct tg3_napi *tnapi = &tp->napi[i];
8507
8508 if (tnapi->hw_status) {
8509 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8510 tnapi->hw_status,
8511 tnapi->status_mapping);
8512 tnapi->hw_status = NULL;
8513 }
8514 }
8515
8516 tg3_mem_rx_release(tp);
8517 tg3_mem_tx_release(tp);
8518
8519 if (tp->hw_stats) {
8520 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8521 tp->hw_stats, tp->stats_mapping);
8522 tp->hw_stats = NULL;
8523 }
8524 }
8525
8526 /*
8527 * Must not be invoked with interrupt sources disabled and
8528 * the hardware shut down. Can sleep.
8529 */
8530 static int tg3_alloc_consistent(struct tg3 *tp)
8531 {
8532 int i;
8533
8534 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8535 sizeof(struct tg3_hw_stats),
8536 &tp->stats_mapping,
8537 GFP_KERNEL | __GFP_ZERO);
8538 if (!tp->hw_stats)
8539 goto err_out;
8540
8541 for (i = 0; i < tp->irq_cnt; i++) {
8542 struct tg3_napi *tnapi = &tp->napi[i];
8543 struct tg3_hw_status *sblk;
8544
8545 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8546 TG3_HW_STATUS_SIZE,
8547 &tnapi->status_mapping,
8548 GFP_KERNEL | __GFP_ZERO);
8549 if (!tnapi->hw_status)
8550 goto err_out;
8551
8552 sblk = tnapi->hw_status;
8553
8554 if (tg3_flag(tp, ENABLE_RSS)) {
8555 u16 *prodptr = NULL;
8556
8557 /*
8558 * When RSS is enabled, the status block format changes
8559 * slightly. The "rx_jumbo_consumer", "reserved",
8560 * and "rx_mini_consumer" members get mapped to the
8561 * other three rx return ring producer indexes.
8562 */
8563 switch (i) {
8564 case 1:
8565 prodptr = &sblk->idx[0].rx_producer;
8566 break;
8567 case 2:
8568 prodptr = &sblk->rx_jumbo_consumer;
8569 break;
8570 case 3:
8571 prodptr = &sblk->reserved;
8572 break;
8573 case 4:
8574 prodptr = &sblk->rx_mini_consumer;
8575 break;
8576 }
8577 tnapi->rx_rcb_prod_idx = prodptr;
8578 } else {
8579 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8580 }
8581 }
8582
8583 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8584 goto err_out;
8585
8586 return 0;
8587
8588 err_out:
8589 tg3_free_consistent(tp);
8590 return -ENOMEM;
8591 }
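
/* Resulting vector -> return-ring producer mapping when multivector
 * RSS is active (vector 0 handles no rx ring of its own):
 *
 *   vector 1 -> sblk->idx[0].rx_producer
 *   vector 2 -> sblk->rx_jumbo_consumer  (repurposed)
 *   vector 3 -> sblk->reserved           (repurposed)
 *   vector 4 -> sblk->rx_mini_consumer   (repurposed)
 */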
8592
8593 #define MAX_WAIT_CNT 1000
8594
8595 /* To stop a block, clear the enable bit and poll till it
8596 * clears. tp->lock is held.
8597 */
8598 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8599 {
8600 unsigned int i;
8601 u32 val;
8602
8603 if (tg3_flag(tp, 5705_PLUS)) {
8604 switch (ofs) {
8605 case RCVLSC_MODE:
8606 case DMAC_MODE:
8607 case MBFREE_MODE:
8608 case BUFMGR_MODE:
8609 case MEMARB_MODE:
8610 /* We can't enable/disable these bits of the
8611 * 5705/5750, just say success.
8612 */
8613 return 0;
8614
8615 default:
8616 break;
8617 }
8618 }
8619
8620 val = tr32(ofs);
8621 val &= ~enable_bit;
8622 tw32_f(ofs, val);
8623
8624 for (i = 0; i < MAX_WAIT_CNT; i++) {
8625 if (pci_channel_offline(tp->pdev)) {
8626 dev_err(&tp->pdev->dev,
8627 "tg3_stop_block device offline, "
8628 "ofs=%lx enable_bit=%x\n",
8629 ofs, enable_bit);
8630 return -ENODEV;
8631 }
8632
8633 udelay(100);
8634 val = tr32(ofs);
8635 if ((val & enable_bit) == 0)
8636 break;
8637 }
8638
8639 if (i == MAX_WAIT_CNT && !silent) {
8640 dev_err(&tp->pdev->dev,
8641 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8642 ofs, enable_bit);
8643 return -ENODEV;
8644 }
8645
8646 return 0;
8647 }
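
/* Worst-case wait above: MAX_WAIT_CNT (1000) polls of udelay(100),
 * i.e. about 1000 * 100 us = 100 ms per block before -ENODEV.
 */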
8648
8649 /* tp->lock is held. */
8650 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8651 {
8652 int i, err;
8653
8654 tg3_disable_ints(tp);
8655
8656 if (pci_channel_offline(tp->pdev)) {
8657 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8658 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8659 err = -ENODEV;
8660 goto err_no_dev;
8661 }
8662
8663 tp->rx_mode &= ~RX_MODE_ENABLE;
8664 tw32_f(MAC_RX_MODE, tp->rx_mode);
8665 udelay(10);
8666
8667 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8668 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8669 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8670 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8671 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8672 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8673
8674 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8675 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8676 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8677 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8678 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8679 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8680 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8681
8682 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8683 tw32_f(MAC_MODE, tp->mac_mode);
8684 udelay(40);
8685
8686 tp->tx_mode &= ~TX_MODE_ENABLE;
8687 tw32_f(MAC_TX_MODE, tp->tx_mode);
8688
8689 for (i = 0; i < MAX_WAIT_CNT; i++) {
8690 udelay(100);
8691 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8692 break;
8693 }
8694 if (i >= MAX_WAIT_CNT) {
8695 dev_err(&tp->pdev->dev,
8696 "%s timed out, TX_MODE_ENABLE will not clear "
8697 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8698 err |= -ENODEV;
8699 }
8700
8701 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8702 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8703 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8704
8705 tw32(FTQ_RESET, 0xffffffff);
8706 tw32(FTQ_RESET, 0x00000000);
8707
8708 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8709 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8710
8711 err_no_dev:
8712 for (i = 0; i < tp->irq_cnt; i++) {
8713 struct tg3_napi *tnapi = &tp->napi[i];
8714 if (tnapi->hw_status)
8715 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8716 }
8717
8718 return err;
8719 }
8720
8721 /* Save PCI command register before chip reset */
8722 static void tg3_save_pci_state(struct tg3 *tp)
8723 {
8724 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8725 }
8726
8727 /* Restore PCI state after chip reset */
8728 static void tg3_restore_pci_state(struct tg3 *tp)
8729 {
8730 u32 val;
8731
8732 /* Re-enable indirect register accesses. */
8733 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8734 tp->misc_host_ctrl);
8735
8736 /* Set MAX PCI retry to zero. */
8737 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8738 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8739 tg3_flag(tp, PCIX_MODE))
8740 val |= PCISTATE_RETRY_SAME_DMA;
8741 /* Allow reads and writes to the APE register and memory space. */
8742 if (tg3_flag(tp, ENABLE_APE))
8743 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8744 PCISTATE_ALLOW_APE_SHMEM_WR |
8745 PCISTATE_ALLOW_APE_PSPACE_WR;
8746 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8747
8748 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8749
8750 if (!tg3_flag(tp, PCI_EXPRESS)) {
8751 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8752 tp->pci_cacheline_sz);
8753 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8754 tp->pci_lat_timer);
8755 }
8756
8757 /* Make sure PCI-X relaxed ordering bit is clear. */
8758 if (tg3_flag(tp, PCIX_MODE)) {
8759 u16 pcix_cmd;
8760
8761 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8762 &pcix_cmd);
8763 pcix_cmd &= ~PCI_X_CMD_ERO;
8764 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8765 pcix_cmd);
8766 }
8767
8768 if (tg3_flag(tp, 5780_CLASS)) {
8769
8770 /* Chip reset on 5780 will reset MSI enable bit,
8771 * so need to restore it.
8772 */
8773 if (tg3_flag(tp, USING_MSI)) {
8774 u16 ctrl;
8775
8776 pci_read_config_word(tp->pdev,
8777 tp->msi_cap + PCI_MSI_FLAGS,
8778 &ctrl);
8779 pci_write_config_word(tp->pdev,
8780 tp->msi_cap + PCI_MSI_FLAGS,
8781 ctrl | PCI_MSI_FLAGS_ENABLE);
8782 val = tr32(MSGINT_MODE);
8783 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8784 }
8785 }
8786 }
8787
8788 /* tp->lock is held. */
8789 static int tg3_chip_reset(struct tg3 *tp)
8790 {
8791 u32 val;
8792 void (*write_op)(struct tg3 *, u32, u32);
8793 int i, err;
8794
8795 tg3_nvram_lock(tp);
8796
8797 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8798
8799 /* No matching tg3_nvram_unlock() after this because
8800 * chip reset below will undo the nvram lock.
8801 */
8802 tp->nvram_lock_cnt = 0;
8803
8804 /* GRC_MISC_CFG core clock reset will clear the memory
8805 * enable bit in PCI register 4 and the MSI enable bit
8806 * on some chips, so we save relevant registers here.
8807 */
8808 tg3_save_pci_state(tp);
8809
8810 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8811 tg3_flag(tp, 5755_PLUS))
8812 tw32(GRC_FASTBOOT_PC, 0);
8813
8814 /*
8815 * We must avoid the readl() that normally takes place.
8816 * It locks machines, causes machine checks, and other
8817 * fun things. So, temporarily disable the 5701
8818 * hardware workaround, while we do the reset.
8819 */
8820 write_op = tp->write32;
8821 if (write_op == tg3_write_flush_reg32)
8822 tp->write32 = tg3_write32;
8823
8824 /* Prevent the irq handler from reading or writing PCI registers
8825 * during chip reset when the memory enable bit in the PCI command
8826 * register may be cleared. The chip does not generate interrupts
8827 * at this time, but the irq handler may still be called due to irq
8828 * sharing or irqpoll.
8829 */
8830 tg3_flag_set(tp, CHIP_RESETTING);
8831 for (i = 0; i < tp->irq_cnt; i++) {
8832 struct tg3_napi *tnapi = &tp->napi[i];
8833 if (tnapi->hw_status) {
8834 tnapi->hw_status->status = 0;
8835 tnapi->hw_status->status_tag = 0;
8836 }
8837 tnapi->last_tag = 0;
8838 tnapi->last_irq_tag = 0;
8839 }
8840 smp_mb();
8841
8842 for (i = 0; i < tp->irq_cnt; i++)
8843 synchronize_irq(tp->napi[i].irq_vec);
8844
8845 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8846 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8847 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8848 }
8849
8850 /* do the reset */
8851 val = GRC_MISC_CFG_CORECLK_RESET;
8852
8853 if (tg3_flag(tp, PCI_EXPRESS)) {
8854 /* Force PCIe 1.0a mode */
8855 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8856 !tg3_flag(tp, 57765_PLUS) &&
8857 tr32(TG3_PCIE_PHY_TSTCTL) ==
8858 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8859 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8860
8861 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8862 tw32(GRC_MISC_CFG, (1 << 29));
8863 val |= (1 << 29);
8864 }
8865 }
8866
8867 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8868 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8869 tw32(GRC_VCPU_EXT_CTRL,
8870 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8871 }
8872
8873 /* Manage gphy power for all CPMU-absent PCIe devices. */
8874 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8875 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8876
8877 tw32(GRC_MISC_CFG, val);
8878
8879 /* restore 5701 hardware bug workaround write method */
8880 tp->write32 = write_op;
8881
8882 /* Unfortunately, we have to delay before the PCI read back.
8883 * Some 575X chips will not even respond to a PCI cfg access
8884 * when the reset command is given to the chip.
8885 *
8886 * How do these hardware designers expect things to work
8887 * properly if the PCI write is posted for a long period
8888 * of time? It is always necessary to have some method by
8889 * which a register read back can occur to push the write
8890 * out which does the reset.
8891 *
8892 * For most tg3 variants the trick below was working.
8893 * Ho hum...
8894 */
8895 udelay(120);
8896
8897 /* Flush PCI posted writes. The normal MMIO registers
8898 * are inaccessible at this time so this is the only
8899 * way to do this reliably (actually, this is no longer
8900 * the case, see above). I tried to use indirect
8901 * register read/write but this upset some 5701 variants.
8902 */
8903 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8904
8905 udelay(120);
8906
8907 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8908 u16 val16;
8909
8910 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8911 int j;
8912 u32 cfg_val;
8913
8914 /* Wait for link training to complete. */
8915 for (j = 0; j < 5000; j++)
8916 udelay(100);
8917
8918 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8919 pci_write_config_dword(tp->pdev, 0xc4,
8920 cfg_val | (1 << 15));
8921 }
8922
8923 /* Clear the "no snoop" and "relaxed ordering" bits. */
8924 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8925 /*
8926 * Older PCIe devices only support the 128 byte
8927 * MPS setting. Enforce the restriction.
8928 */
8929 if (!tg3_flag(tp, CPMU_PRESENT))
8930 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8931 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8932
8933 /* Clear error status */
8934 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8935 PCI_EXP_DEVSTA_CED |
8936 PCI_EXP_DEVSTA_NFED |
8937 PCI_EXP_DEVSTA_FED |
8938 PCI_EXP_DEVSTA_URD);
8939 }
8940
8941 tg3_restore_pci_state(tp);
8942
8943 tg3_flag_clear(tp, CHIP_RESETTING);
8944 tg3_flag_clear(tp, ERROR_PROCESSED);
8945
8946 val = 0;
8947 if (tg3_flag(tp, 5780_CLASS))
8948 val = tr32(MEMARB_MODE);
8949 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8950
8951 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8952 tg3_stop_fw(tp);
8953 tw32(0x5000, 0x400);
8954 }
8955
8956 if (tg3_flag(tp, IS_SSB_CORE)) {
8957 /*
8958 * BCM4785: In order to avoid repercussions from using
8959 * potentially defective internal ROM, stop the Rx RISC CPU,
8960 * which is not required on this device.
8961 */
8962 tg3_stop_fw(tp);
8963 tg3_halt_cpu(tp, RX_CPU_BASE);
8964 }
8965
8966 err = tg3_poll_fw(tp);
8967 if (err)
8968 return err;
8969
8970 tw32(GRC_MODE, tp->grc_mode);
8971
8972 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8973 val = tr32(0xc4);
8974
8975 tw32(0xc4, val | (1 << 15));
8976 }
8977
8978 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8979 tg3_asic_rev(tp) == ASIC_REV_5705) {
8980 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8981 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8982 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8983 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8984 }
8985
8986 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8987 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8988 val = tp->mac_mode;
8989 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8990 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8991 val = tp->mac_mode;
8992 } else
8993 val = 0;
8994
8995 tw32_f(MAC_MODE, val);
8996 udelay(40);
8997
8998 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8999
9000 tg3_mdio_start(tp);
9001
9002 if (tg3_flag(tp, PCI_EXPRESS) &&
9003 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9004 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9005 !tg3_flag(tp, 57765_PLUS)) {
9006 val = tr32(0x7c00);
9007
9008 tw32(0x7c00, val | (1 << 25));
9009 }
9010
9011 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9012 val = tr32(TG3_CPMU_CLCK_ORIDE);
9013 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9014 }
9015
9016 /* Reprobe ASF enable state. */
9017 tg3_flag_clear(tp, ENABLE_ASF);
9018 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9019 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9020
9021 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9022 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9023 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9024 u32 nic_cfg;
9025
9026 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9027 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9028 tg3_flag_set(tp, ENABLE_ASF);
9029 tp->last_event_jiffies = jiffies;
9030 if (tg3_flag(tp, 5750_PLUS))
9031 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9032
9033 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9034 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9035 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9036 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9037 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9038 }
9039 }
9040
9041 return 0;
9042 }
9043
9044 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9045 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9046
9047 /* tp->lock is held. */
9048 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9049 {
9050 int err;
9051
9052 tg3_stop_fw(tp);
9053
9054 tg3_write_sig_pre_reset(tp, kind);
9055
9056 tg3_abort_hw(tp, silent);
9057 err = tg3_chip_reset(tp);
9058
9059 __tg3_set_mac_addr(tp, false);
9060
9061 tg3_write_sig_legacy(tp, kind);
9062 tg3_write_sig_post_reset(tp, kind);
9063
9064 if (tp->hw_stats) {
9065 /* Save the stats across chip resets... */
9066 tg3_get_nstats(tp, &tp->net_stats_prev);
9067 tg3_get_estats(tp, &tp->estats_prev);
9068
9069 /* And make sure the next sample is new data */
9070 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9071 }
9072
9073 if (err)
9074 return err;
9075
9076 return 0;
9077 }
9078
9079 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9080 {
9081 struct tg3 *tp = netdev_priv(dev);
9082 struct sockaddr *addr = p;
9083 int err = 0;
9084 bool skip_mac_1 = false;
9085
9086 if (!is_valid_ether_addr(addr->sa_data))
9087 return -EADDRNOTAVAIL;
9088
9089 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9090
9091 if (!netif_running(dev))
9092 return 0;
9093
9094 if (tg3_flag(tp, ENABLE_ASF)) {
9095 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9096
9097 addr0_high = tr32(MAC_ADDR_0_HIGH);
9098 addr0_low = tr32(MAC_ADDR_0_LOW);
9099 addr1_high = tr32(MAC_ADDR_1_HIGH);
9100 addr1_low = tr32(MAC_ADDR_1_LOW);
9101
9102 /* Skip MAC addr 1 if ASF is using it. */
9103 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9104 !(addr1_high == 0 && addr1_low == 0))
9105 skip_mac_1 = true;
9106 }
9107 spin_lock_bh(&tp->lock);
9108 __tg3_set_mac_addr(tp, skip_mac_1);
9109 spin_unlock_bh(&tp->lock);
9110
9111 return err;
9112 }
9113
9114 /* tp->lock is held. */
9115 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9116 dma_addr_t mapping, u32 maxlen_flags,
9117 u32 nic_addr)
9118 {
9119 tg3_write_mem(tp,
9120 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9121 ((u64) mapping >> 32));
9122 tg3_write_mem(tp,
9123 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9124 ((u64) mapping & 0xffffffff));
9125 tg3_write_mem(tp,
9126 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9127 maxlen_flags);
9128
9129 if (!tg3_flag(tp, 5705_PLUS))
9130 tg3_write_mem(tp,
9131 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9132 nic_addr);
9133 }
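
/* Example of the 64-bit split above: a DMA address of
 * 0x0000000123456789 is written as HOST_ADDR high word 0x00000001 and
 * low word 0x23456789; with a 32-bit dma_addr_t the high word is 0.
 */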
9134
9136 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9137 {
9138 int i = 0;
9139
9140 if (!tg3_flag(tp, ENABLE_TSS)) {
9141 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9142 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9143 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9144 } else {
9145 tw32(HOSTCC_TXCOL_TICKS, 0);
9146 tw32(HOSTCC_TXMAX_FRAMES, 0);
9147 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9148
9149 for (; i < tp->txq_cnt; i++) {
9150 u32 reg;
9151
9152 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9153 tw32(reg, ec->tx_coalesce_usecs);
9154 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9155 tw32(reg, ec->tx_max_coalesced_frames);
9156 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9157 tw32(reg, ec->tx_max_coalesced_frames_irq);
9158 }
9159 }
9160
9161 for (; i < tp->irq_max - 1; i++) {
9162 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9163 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9164 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9165 }
9166 }
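
/* Per-queue coalescing registers are laid out at a 0x18-byte stride,
 * so TSS transmit queue i (counting from 0) is programmed at
 * HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18 and friends; the 24-byte stride
 * leaves room for six u32 registers per vector block.
 */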
9167
9168 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9169 {
9170 int i = 0;
9171 u32 limit = tp->rxq_cnt;
9172
9173 if (!tg3_flag(tp, ENABLE_RSS)) {
9174 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9175 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9176 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9177 limit--;
9178 } else {
9179 tw32(HOSTCC_RXCOL_TICKS, 0);
9180 tw32(HOSTCC_RXMAX_FRAMES, 0);
9181 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9182 }
9183
9184 for (; i < limit; i++) {
9185 u32 reg;
9186
9187 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9188 tw32(reg, ec->rx_coalesce_usecs);
9189 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9190 tw32(reg, ec->rx_max_coalesced_frames);
9191 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9192 tw32(reg, ec->rx_max_coalesced_frames_irq);
9193 }
9194
9195 for (; i < tp->irq_max - 1; i++) {
9196 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9197 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9198 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9199 }
9200 }
9201
9202 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9203 {
9204 tg3_coal_tx_init(tp, ec);
9205 tg3_coal_rx_init(tp, ec);
9206
9207 if (!tg3_flag(tp, 5705_PLUS)) {
9208 u32 val = ec->stats_block_coalesce_usecs;
9209
9210 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9211 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9212
9213 if (!tp->link_up)
9214 val = 0;
9215
9216 tw32(HOSTCC_STAT_COAL_TICKS, val);
9217 }
9218 }
9219
9220 /* tp->lock is held. */
9221 static void tg3_rings_reset(struct tg3 *tp)
9222 {
9223 int i;
9224 u32 stblk, txrcb, rxrcb, limit;
9225 struct tg3_napi *tnapi = &tp->napi[0];
9226
9227 /* Disable all transmit rings but the first. */
9228 if (!tg3_flag(tp, 5705_PLUS))
9229 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9230 else if (tg3_flag(tp, 5717_PLUS))
9231 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9232 else if (tg3_flag(tp, 57765_CLASS) ||
9233 tg3_asic_rev(tp) == ASIC_REV_5762)
9234 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9235 else
9236 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9237
9238 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9239 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9240 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9241 BDINFO_FLAGS_DISABLED);
9242
9244 /* Disable all receive return rings but the first. */
9245 if (tg3_flag(tp, 5717_PLUS))
9246 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9247 else if (!tg3_flag(tp, 5705_PLUS))
9248 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9249 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9250 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9251 tg3_flag(tp, 57765_CLASS))
9252 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9253 else
9254 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9255
9256 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9257 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9258 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9259 BDINFO_FLAGS_DISABLED);
9260
9261 /* Disable interrupts */
9262 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9263 tp->napi[0].chk_msi_cnt = 0;
9264 tp->napi[0].last_rx_cons = 0;
9265 tp->napi[0].last_tx_cons = 0;
9266
9267 /* Zero mailbox registers. */
9268 if (tg3_flag(tp, SUPPORT_MSIX)) {
9269 for (i = 1; i < tp->irq_max; i++) {
9270 tp->napi[i].tx_prod = 0;
9271 tp->napi[i].tx_cons = 0;
9272 if (tg3_flag(tp, ENABLE_TSS))
9273 tw32_mailbox(tp->napi[i].prodmbox, 0);
9274 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9275 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9276 tp->napi[i].chk_msi_cnt = 0;
9277 tp->napi[i].last_rx_cons = 0;
9278 tp->napi[i].last_tx_cons = 0;
9279 }
9280 if (!tg3_flag(tp, ENABLE_TSS))
9281 tw32_mailbox(tp->napi[0].prodmbox, 0);
9282 } else {
9283 tp->napi[0].tx_prod = 0;
9284 tp->napi[0].tx_cons = 0;
9285 tw32_mailbox(tp->napi[0].prodmbox, 0);
9286 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9287 }
9288
9289 /* Make sure the NIC-based send BD rings are disabled. */
9290 if (!tg3_flag(tp, 5705_PLUS)) {
9291 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9292 for (i = 0; i < 16; i++)
9293 tw32_tx_mbox(mbox + i * 8, 0);
9294 }
9295
9296 txrcb = NIC_SRAM_SEND_RCB;
9297 rxrcb = NIC_SRAM_RCV_RET_RCB;
9298
9299 /* Clear status block in ram. */
9300 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9301
9302 /* Set status block DMA address */
9303 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9304 ((u64) tnapi->status_mapping >> 32));
9305 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9306 ((u64) tnapi->status_mapping & 0xffffffff));
9307
9308 if (tnapi->tx_ring) {
9309 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9310 (TG3_TX_RING_SIZE <<
9311 BDINFO_FLAGS_MAXLEN_SHIFT),
9312 NIC_SRAM_TX_BUFFER_DESC);
9313 txrcb += TG3_BDINFO_SIZE;
9314 }
9315
9316 if (tnapi->rx_rcb) {
9317 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9318 (tp->rx_ret_ring_mask + 1) <<
9319 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9320 rxrcb += TG3_BDINFO_SIZE;
9321 }
9322
9323 stblk = HOSTCC_STATBLCK_RING1;
9324
9325 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9326 u64 mapping = (u64)tnapi->status_mapping;
9327 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9328 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9329
9330 /* Clear status block in ram. */
9331 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9332
9333 if (tnapi->tx_ring) {
9334 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9335 (TG3_TX_RING_SIZE <<
9336 BDINFO_FLAGS_MAXLEN_SHIFT),
9337 NIC_SRAM_TX_BUFFER_DESC);
9338 txrcb += TG3_BDINFO_SIZE;
9339 }
9340
9341 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9342 ((tp->rx_ret_ring_mask + 1) <<
9343 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9344
9345 stblk += 8;
9346 rxrcb += TG3_BDINFO_SIZE;
9347 }
9348 }
9349
9350 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9351 {
9352 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9353
9354 if (!tg3_flag(tp, 5750_PLUS) ||
9355 tg3_flag(tp, 5780_CLASS) ||
9356 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9357 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9358 tg3_flag(tp, 57765_PLUS))
9359 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9360 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9361 tg3_asic_rev(tp) == ASIC_REV_5787)
9362 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9363 else
9364 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9365
9366 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9367 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9368
9369 val = min(nic_rep_thresh, host_rep_thresh);
9370 tw32(RCVBDI_STD_THRESH, val);
9371
9372 if (tg3_flag(tp, 57765_PLUS))
9373 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9374
9375 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9376 return;
9377
9378 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9379
9380 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9381
9382 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9383 tw32(RCVBDI_JUMBO_THRESH, val);
9384
9385 if (tg3_flag(tp, 57765_PLUS))
9386 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9387 }
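
/* Worked example for the standard ring threshold above: with
 * tp->rx_pending == 200 the host threshold is max(200 / 8, 1) == 25,
 * and RCVBDI_STD_THRESH gets the smaller of that and the NIC-side
 * limit min(bdcache_maxcnt / 2, tp->rx_std_max_post).
 */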
9388
9389 static inline u32 calc_crc(unsigned char *buf, int len)
9390 {
9391 u32 reg;
9392 u32 tmp;
9393 int j, k;
9394
9395 reg = 0xffffffff;
9396
9397 for (j = 0; j < len; j++) {
9398 reg ^= buf[j];
9399
9400 for (k = 0; k < 8; k++) {
9401 tmp = reg & 0x01;
9402
9403 reg >>= 1;
9404
9405 if (tmp)
9406 reg ^= 0xedb88320;
9407 }
9408 }
9409
9410 return ~reg;
9411 }
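
/* calc_crc() is the standard bit-reflected CRC-32 used by Ethernet:
 * polynomial 0xedb88320, initial value 0xffffffff, final inversion,
 * computed one bit at a time.  It should be equivalent to
 * ~ether_crc_le(len, buf) from <linux/crc32.h>; the open-coded copy is
 * presumably kept to avoid pulling in the crc32 library for a small
 * multicast hash.
 */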
9412
9413 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9414 {
9415 /* accept or reject all multicast frames */
9416 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9417 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9418 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9419 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9420 }
9421
9422 static void __tg3_set_rx_mode(struct net_device *dev)
9423 {
9424 struct tg3 *tp = netdev_priv(dev);
9425 u32 rx_mode;
9426
9427 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9428 RX_MODE_KEEP_VLAN_TAG);
9429
9430 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9431 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9432 * flag clear.
9433 */
9434 if (!tg3_flag(tp, ENABLE_ASF))
9435 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9436 #endif
9437
9438 if (dev->flags & IFF_PROMISC) {
9439 /* Promiscuous mode. */
9440 rx_mode |= RX_MODE_PROMISC;
9441 } else if (dev->flags & IFF_ALLMULTI) {
9442 /* Accept all multicast. */
9443 tg3_set_multi(tp, 1);
9444 } else if (netdev_mc_empty(dev)) {
9445 /* Reject all multicast. */
9446 tg3_set_multi(tp, 0);
9447 } else {
9448 /* Accept one or more multicast(s). */
9449 struct netdev_hw_addr *ha;
9450 u32 mc_filter[4] = { 0, };
9451 u32 regidx;
9452 u32 bit;
9453 u32 crc;
9454
9455 netdev_for_each_mc_addr(ha, dev) {
9456 crc = calc_crc(ha->addr, ETH_ALEN);
9457 bit = ~crc & 0x7f;
9458 regidx = (bit & 0x60) >> 5;
9459 bit &= 0x1f;
9460 mc_filter[regidx] |= (1 << bit);
9461 }
9462
9463 tw32(MAC_HASH_REG_0, mc_filter[0]);
9464 tw32(MAC_HASH_REG_1, mc_filter[1]);
9465 tw32(MAC_HASH_REG_2, mc_filter[2]);
9466 tw32(MAC_HASH_REG_3, mc_filter[3]);
9467 }
9468
9469 if (rx_mode != tp->rx_mode) {
9470 tp->rx_mode = rx_mode;
9471 tw32_f(MAC_RX_MODE, rx_mode);
9472 udelay(10);
9473 }
9474 }
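
/* Hash example for the filter above: if ~crc & 0x7f == 0x6b, then
 * regidx = (0x6b & 0x60) >> 5 == 3 and bit = 0x6b & 0x1f == 11, so the
 * address sets bit 11 of MAC_HASH_REG_3.  The seven hash bits select
 * one of 128 filter bits spread across the four 32-bit hash registers.
 */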
9475
9476 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9477 {
9478 int i;
9479
9480 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9481 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9482 }
9483
9484 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9485 {
9486 int i;
9487
9488 if (!tg3_flag(tp, SUPPORT_MSIX))
9489 return;
9490
9491 if (tp->rxq_cnt == 1) {
9492 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9493 return;
9494 }
9495
9496 /* Validate table against current IRQ count */
9497 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9498 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9499 break;
9500 }
9501
9502 if (i != TG3_RSS_INDIR_TBL_SIZE)
9503 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9504 }
9505
9506 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9507 {
9508 int i = 0;
9509 u32 reg = MAC_RSS_INDIR_TBL_0;
9510
9511 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9512 u32 val = tp->rss_ind_tbl[i];
9513 i++;
9514 for (; i % 8; i++) {
9515 val <<= 4;
9516 val |= tp->rss_ind_tbl[i];
9517 }
9518 tw32(reg, val);
9519 reg += 4;
9520 }
9521 }
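
/* Packing example: eight 4-bit table entries per 32-bit register, the
 * first entry in the most significant nibble, so a slice of
 * {1, 2, 3, 0, 1, 2, 3, 0} is written as 0x12301230.  With the usual
 * 128-entry table this fills 16 registers starting at
 * MAC_RSS_INDIR_TBL_0.
 */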
9522
9523 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9524 {
9525 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9526 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9527 else
9528 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9529 }
9530
9531 /* tp->lock is held. */
9532 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9533 {
9534 u32 val, rdmac_mode;
9535 int i, err, limit;
9536 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9537
9538 tg3_disable_ints(tp);
9539
9540 tg3_stop_fw(tp);
9541
9542 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9543
9544 if (tg3_flag(tp, INIT_COMPLETE))
9545 tg3_abort_hw(tp, true);
9546
9547 /* Enable MAC control of LPI */
9548 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9549 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9550 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9551 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9552 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9553
9554 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9555
9556 tw32_f(TG3_CPMU_EEE_CTRL,
9557 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9558
9559 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9560 TG3_CPMU_EEEMD_LPI_IN_TX |
9561 TG3_CPMU_EEEMD_LPI_IN_RX |
9562 TG3_CPMU_EEEMD_EEE_ENABLE;
9563
9564 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9565 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9566
9567 if (tg3_flag(tp, ENABLE_APE))
9568 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9569
9570 tw32_f(TG3_CPMU_EEE_MODE, val);
9571
9572 tw32_f(TG3_CPMU_EEE_DBTMR1,
9573 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9574 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9575
9576 tw32_f(TG3_CPMU_EEE_DBTMR2,
9577 TG3_CPMU_DBTMR2_APE_TX_2047US |
9578 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9579 }
9580
9581 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9582 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9583 tg3_phy_pull_config(tp);
9584 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9585 }
9586
9587 if (reset_phy)
9588 tg3_phy_reset(tp);
9589
9590 err = tg3_chip_reset(tp);
9591 if (err)
9592 return err;
9593
9594 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9595
9596 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9597 val = tr32(TG3_CPMU_CTRL);
9598 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9599 tw32(TG3_CPMU_CTRL, val);
9600
9601 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9602 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9603 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9604 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9605
9606 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9607 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9608 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9609 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9610
9611 val = tr32(TG3_CPMU_HST_ACC);
9612 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9613 val |= CPMU_HST_ACC_MACCLK_6_25;
9614 tw32(TG3_CPMU_HST_ACC, val);
9615 }
9616
9617 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9618 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9619 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9620 PCIE_PWR_MGMT_L1_THRESH_4MS;
9621 tw32(PCIE_PWR_MGMT_THRESH, val);
9622
9623 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9624 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9625
9626 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9627
9628 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9629 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9630 }
9631
9632 if (tg3_flag(tp, L1PLLPD_EN)) {
9633 u32 grc_mode = tr32(GRC_MODE);
9634
9635 /* Access the lower 1K of PL PCIE block registers. */
9636 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9637 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9638
9639 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9640 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9641 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9642
9643 tw32(GRC_MODE, grc_mode);
9644 }
9645
9646 if (tg3_flag(tp, 57765_CLASS)) {
9647 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9648 u32 grc_mode = tr32(GRC_MODE);
9649
9650 /* Access the lower 1K of PL PCIE block registers. */
9651 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9652 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9653
9654 val = tr32(TG3_PCIE_TLDLPL_PORT +
9655 TG3_PCIE_PL_LO_PHYCTL5);
9656 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9657 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9658
9659 tw32(GRC_MODE, grc_mode);
9660 }
9661
9662 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9663 u32 grc_mode;
9664
9665 /* Fix transmit hangs */
9666 val = tr32(TG3_CPMU_PADRNG_CTL);
9667 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9668 tw32(TG3_CPMU_PADRNG_CTL, val);
9669
9670 grc_mode = tr32(GRC_MODE);
9671
9672 /* Access the lower 1K of DL PCIE block registers. */
9673 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9674 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9675
9676 val = tr32(TG3_PCIE_TLDLPL_PORT +
9677 TG3_PCIE_DL_LO_FTSMAX);
9678 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9679 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9680 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9681
9682 tw32(GRC_MODE, grc_mode);
9683 }
9684
9685 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9686 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9687 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9688 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9689 }
9690
9691 /* This works around an issue with Athlon chipsets on
9692 * B3 tigon3 silicon. This bit has no effect on any
9693 * other revision. But do not set this on PCI Express
9694 * chips and don't even touch the clocks if the CPMU is present.
9695 */
9696 if (!tg3_flag(tp, CPMU_PRESENT)) {
9697 if (!tg3_flag(tp, PCI_EXPRESS))
9698 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9699 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9700 }
9701
9702 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9703 tg3_flag(tp, PCIX_MODE)) {
9704 val = tr32(TG3PCI_PCISTATE);
9705 val |= PCISTATE_RETRY_SAME_DMA;
9706 tw32(TG3PCI_PCISTATE, val);
9707 }
9708
9709 if (tg3_flag(tp, ENABLE_APE)) {
9710 /* Allow reads and writes to the
9711 * APE register and memory space.
9712 */
9713 val = tr32(TG3PCI_PCISTATE);
9714 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9715 PCISTATE_ALLOW_APE_SHMEM_WR |
9716 PCISTATE_ALLOW_APE_PSPACE_WR;
9717 tw32(TG3PCI_PCISTATE, val);
9718 }
9719
9720 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9721 /* Enable some hw fixes. */
9722 val = tr32(TG3PCI_MSI_DATA);
9723 val |= (1 << 26) | (1 << 28) | (1 << 29);
9724 tw32(TG3PCI_MSI_DATA, val);
9725 }
9726
9727 /* Descriptor ring init may make accesses to the
9728 * NIC SRAM area to setup the TX descriptors, so we
9729 * can only do this after the hardware has been
9730 * successfully reset.
9731 */
9732 err = tg3_init_rings(tp);
9733 if (err)
9734 return err;
9735
9736 if (tg3_flag(tp, 57765_PLUS)) {
9737 val = tr32(TG3PCI_DMA_RW_CTRL) &
9738 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9739 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9740 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9741 if (!tg3_flag(tp, 57765_CLASS) &&
9742 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9743 tg3_asic_rev(tp) != ASIC_REV_5762)
9744 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9745 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9746 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9747 tg3_asic_rev(tp) != ASIC_REV_5761) {
9748 /* This value is determined during the probe-time DMA
9749 * engine test, tg3_test_dma.
9750 */
9751 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9752 }
9753
9754 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9755 GRC_MODE_4X_NIC_SEND_RINGS |
9756 GRC_MODE_NO_TX_PHDR_CSUM |
9757 GRC_MODE_NO_RX_PHDR_CSUM);
9758 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9759
9760 /* Pseudo-header checksum is done by hardware logic and not
9761 * the offload processors, so make the chip do the pseudo-
9762 * header checksums on receive. For transmit it is more
9763 * convenient to do the pseudo-header checksum in software
9764 * as Linux does that on transmit for us in all cases.
9765 */
9766 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9767
9768 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9769 if (tp->rxptpctl)
9770 tw32(TG3_RX_PTP_CTL,
9771 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9772
9773 if (tg3_flag(tp, PTP_CAPABLE))
9774 val |= GRC_MODE_TIME_SYNC_ENABLE;
9775
9776 tw32(GRC_MODE, tp->grc_mode | val);
9777
9778 /* Set up the timer prescaler register. Clock is always 66 MHz. */
9779 val = tr32(GRC_MISC_CFG);
9780 val &= ~0xff;
9781 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9782 tw32(GRC_MISC_CFG, val);
9783
9784 /* Initialize MBUF/DESC pool. */
9785 if (tg3_flag(tp, 5750_PLUS)) {
9786 /* Do nothing. */
9787 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9788 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9789 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9790 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9791 else
9792 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9793 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9794 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9795 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9796 int fw_len;
9797
9798 fw_len = tp->fw_len;
9799 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9800 tw32(BUFMGR_MB_POOL_ADDR,
9801 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9802 tw32(BUFMGR_MB_POOL_SIZE,
9803 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9804 }
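
/* The round-up above is the usual align-to-128 idiom:
 * (fw_len + 127) & ~127, e.g. fw_len == 0x1234 rounds to 0x1280, so
 * the mbuf pool begins on a 128-byte boundary past the firmware image.
 */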
9805
9806 if (tp->dev->mtu <= ETH_DATA_LEN) {
9807 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9808 tp->bufmgr_config.mbuf_read_dma_low_water);
9809 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9810 tp->bufmgr_config.mbuf_mac_rx_low_water);
9811 tw32(BUFMGR_MB_HIGH_WATER,
9812 tp->bufmgr_config.mbuf_high_water);
9813 } else {
9814 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9815 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9816 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9817 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9818 tw32(BUFMGR_MB_HIGH_WATER,
9819 tp->bufmgr_config.mbuf_high_water_jumbo);
9820 }
9821 tw32(BUFMGR_DMA_LOW_WATER,
9822 tp->bufmgr_config.dma_low_water);
9823 tw32(BUFMGR_DMA_HIGH_WATER,
9824 tp->bufmgr_config.dma_high_water);
9825
9826 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9827 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9828 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9829 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9830 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9831 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9832 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9833 tw32(BUFMGR_MODE, val);
9834 for (i = 0; i < 2000; i++) {
9835 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9836 break;
9837 udelay(10);
9838 }
9839 if (i >= 2000) {
9840 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9841 return -ENODEV;
9842 }
9843
9844 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9845 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9846
9847 tg3_setup_rxbd_thresholds(tp);
9848
9849 /* Initialize TG3_BDINFO's at:
9850 * RCVDBDI_STD_BD: standard eth size rx ring
9851 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9852 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9853 *
9854 * like so:
9855 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9856 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9857 * ring attribute flags
9858 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9859 *
9860 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9861 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9862 *
9863 * The size of each ring is fixed in the firmware, but the location is
9864 * configurable.
9865 */
9866 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9867 ((u64) tpr->rx_std_mapping >> 32));
9868 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9869 ((u64) tpr->rx_std_mapping & 0xffffffff));
9870 if (!tg3_flag(tp, 5717_PLUS))
9871 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9872 NIC_SRAM_RX_BUFFER_DESC);
9873
9874 /* Disable the mini ring */
9875 if (!tg3_flag(tp, 5705_PLUS))
9876 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9877 BDINFO_FLAGS_DISABLED);
9878
9879 /* Program the jumbo buffer descriptor ring control
9880 * blocks on those devices that have them.
9881 */
9882 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9883 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9884
9885 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9886 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9887 ((u64) tpr->rx_jmb_mapping >> 32));
9888 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9889 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9890 val = TG3_RX_JMB_RING_SIZE(tp) <<
9891 BDINFO_FLAGS_MAXLEN_SHIFT;
9892 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9893 val | BDINFO_FLAGS_USE_EXT_RECV);
9894 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9895 tg3_flag(tp, 57765_CLASS) ||
9896 tg3_asic_rev(tp) == ASIC_REV_5762)
9897 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9898 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9899 } else {
9900 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9901 BDINFO_FLAGS_DISABLED);
9902 }
9903
9904 if (tg3_flag(tp, 57765_PLUS)) {
9905 val = TG3_RX_STD_RING_SIZE(tp);
9906 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9907 val |= (TG3_RX_STD_DMA_SZ << 2);
9908 } else
9909 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9910 } else
9911 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9912
9913 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9914
9915 tpr->rx_std_prod_idx = tp->rx_pending;
9916 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9917
9918 tpr->rx_jmb_prod_idx =
9919 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9920 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9921
9922 tg3_rings_reset(tp);
9923
9924 /* Initialize MAC address and backoff seed. */
9925 __tg3_set_mac_addr(tp, false);
9926
9927 /* MTU + ethernet header + FCS + optional VLAN tag */
9928 tw32(MAC_RX_MTU_SIZE,
9929 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9930
9931 /* The slot time is changed by tg3_setup_phy if we
9932 * run at gigabit with half duplex.
9933 */
9934 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9935 (6 << TX_LENGTHS_IPG_SHIFT) |
9936 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9937
9938 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9939 tg3_asic_rev(tp) == ASIC_REV_5762)
9940 val |= tr32(MAC_TX_LENGTHS) &
9941 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9942 TX_LENGTHS_CNT_DWN_VAL_MSK);
9943
9944 tw32(MAC_TX_LENGTHS, val);
9945
9946 /* Receive rules. */
9947 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9948 tw32(RCVLPC_CONFIG, 0x0181);
9949
9950	/* Calculate the RDMAC_MODE setting early; we need it to determine
9951	 * the RCVLPC_STATE_ENABLE mask.
9952 */
9953 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9954 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9955 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9956 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9957 RDMAC_MODE_LNGREAD_ENAB);
9958
9959 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9960 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9961
9962 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9963 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9964 tg3_asic_rev(tp) == ASIC_REV_57780)
9965 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9966 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9967 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9968
9969 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9970 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9971 if (tg3_flag(tp, TSO_CAPABLE) &&
9972 tg3_asic_rev(tp) == ASIC_REV_5705) {
9973 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9974 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9975 !tg3_flag(tp, IS_5788)) {
9976 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9977 }
9978 }
9979
9980 if (tg3_flag(tp, PCI_EXPRESS))
9981 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9982
9983 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9984 tp->dma_limit = 0;
9985 if (tp->dev->mtu <= ETH_DATA_LEN) {
9986 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9987 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9988 }
9989 }
9990
9991 if (tg3_flag(tp, HW_TSO_1) ||
9992 tg3_flag(tp, HW_TSO_2) ||
9993 tg3_flag(tp, HW_TSO_3))
9994 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9995
9996 if (tg3_flag(tp, 57765_PLUS) ||
9997 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9998 tg3_asic_rev(tp) == ASIC_REV_57780)
9999 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10000
10001 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10002 tg3_asic_rev(tp) == ASIC_REV_5762)
10003 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10004
10005 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10006 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10007 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10008 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10009 tg3_flag(tp, 57765_PLUS)) {
10010 u32 tgtreg;
10011
10012 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10013 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10014 else
10015 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10016
10017 val = tr32(tgtreg);
10018 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10019 tg3_asic_rev(tp) == ASIC_REV_5762) {
10020 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10021 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10022 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10023 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10024 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10025 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10026 }
10027 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10028 }
10029
10030 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10031 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10032 tg3_asic_rev(tp) == ASIC_REV_5762) {
10033 u32 tgtreg;
10034
10035 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10036 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10037 else
10038 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10039
10040 val = tr32(tgtreg);
10041 tw32(tgtreg, val |
10042 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10043 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10044 }
10045
10046 /* Receive/send statistics. */
10047 if (tg3_flag(tp, 5750_PLUS)) {
10048 val = tr32(RCVLPC_STATS_ENABLE);
10049 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10050 tw32(RCVLPC_STATS_ENABLE, val);
10051 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10052 tg3_flag(tp, TSO_CAPABLE)) {
10053 val = tr32(RCVLPC_STATS_ENABLE);
10054 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10055 tw32(RCVLPC_STATS_ENABLE, val);
10056 } else {
10057 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10058 }
10059 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10060 tw32(SNDDATAI_STATSENAB, 0xffffff);
10061 tw32(SNDDATAI_STATSCTRL,
10062 (SNDDATAI_SCTRL_ENABLE |
10063 SNDDATAI_SCTRL_FASTUPD));
10064
10065 /* Setup host coalescing engine. */
10066 tw32(HOSTCC_MODE, 0);
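	/* Wait up to 20 ms for the coalescing engine to go idle after
	 * clearing the enable bit, before reprogramming it.
	 */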
10067 for (i = 0; i < 2000; i++) {
10068 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10069 break;
10070 udelay(10);
10071 }
10072
10073 __tg3_set_coalesce(tp, &tp->coal);
10074
10075 if (!tg3_flag(tp, 5705_PLUS)) {
10076 /* Status/statistics block address. See tg3_timer,
10077 * the tg3_periodic_fetch_stats call there, and
10078 * tg3_get_stats to see how this works for 5705/5750 chips.
10079 */
10080 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10081 ((u64) tp->stats_mapping >> 32));
10082 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10083 ((u64) tp->stats_mapping & 0xffffffff));
10084 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10085
10086 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10087
10088 /* Clear statistics and status block memory areas */
10089 for (i = NIC_SRAM_STATS_BLK;
10090 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10091 i += sizeof(u32)) {
10092 tg3_write_mem(tp, i, 0);
10093 udelay(40);
10094 }
10095 }
10096
10097 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10098
10099 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10100 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10101 if (!tg3_flag(tp, 5705_PLUS))
10102 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10103
10104 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10105 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10106 /* reset to prevent losing 1st rx packet intermittently */
10107 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10108 udelay(10);
10109 }
10110
10111 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10112 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10113 MAC_MODE_FHDE_ENABLE;
10114 if (tg3_flag(tp, ENABLE_APE))
10115 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10116 if (!tg3_flag(tp, 5705_PLUS) &&
10117 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10118 tg3_asic_rev(tp) != ASIC_REV_5700)
10119 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10120 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10121 udelay(40);
10122
10123 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10124 * If TG3_FLAG_IS_NIC is zero, we should read the
10125 * register to preserve the GPIO settings for LOMs. The GPIOs,
10126 * whether used as inputs or outputs, are set by boot code after
10127 * reset.
10128 */
10129 if (!tg3_flag(tp, IS_NIC)) {
10130 u32 gpio_mask;
10131
10132 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10133 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10134 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10135
10136 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10137 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10138 GRC_LCLCTRL_GPIO_OUTPUT3;
10139
10140 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10141 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10142
10143 tp->grc_local_ctrl &= ~gpio_mask;
10144 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10145
10146 /* GPIO1 must be driven high for eeprom write protect */
10147 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10148 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10149 GRC_LCLCTRL_GPIO_OUTPUT1);
10150 }
10151 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10152 udelay(100);
10153
10154 if (tg3_flag(tp, USING_MSIX)) {
10155 val = tr32(MSGINT_MODE);
10156 val |= MSGINT_MODE_ENABLE;
10157 if (tp->irq_cnt > 1)
10158 val |= MSGINT_MODE_MULTIVEC_EN;
10159 if (!tg3_flag(tp, 1SHOT_MSI))
10160 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10161 tw32(MSGINT_MODE, val);
10162 }
10163
10164 if (!tg3_flag(tp, 5705_PLUS)) {
10165 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10166 udelay(40);
10167 }
10168
10169 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10170 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10171 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10172 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10173 WDMAC_MODE_LNGREAD_ENAB);
10174
10175 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10176 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10177 if (tg3_flag(tp, TSO_CAPABLE) &&
10178 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10179 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10180 /* nothing */
10181 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10182 !tg3_flag(tp, IS_5788)) {
10183 val |= WDMAC_MODE_RX_ACCEL;
10184 }
10185 }
10186
10187 /* Enable host coalescing bug fix */
10188 if (tg3_flag(tp, 5755_PLUS))
10189 val |= WDMAC_MODE_STATUS_TAG_FIX;
10190
10191 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10192 val |= WDMAC_MODE_BURST_ALL_DATA;
10193
10194 tw32_f(WDMAC_MODE, val);
10195 udelay(40);
10196
10197 if (tg3_flag(tp, PCIX_MODE)) {
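		/* Cap the PCI-X maximum memory read byte count at 2K on the
		 * 5703 and 5704; the 5704 also needs its maximum outstanding
		 * split transactions cleared.
		 */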
10198 u16 pcix_cmd;
10199
10200 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10201 &pcix_cmd);
10202 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10203 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10204 pcix_cmd |= PCI_X_CMD_READ_2K;
10205 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10206 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10207 pcix_cmd |= PCI_X_CMD_READ_2K;
10208 }
10209 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10210 pcix_cmd);
10211 }
10212
10213 tw32_f(RDMAC_MODE, rdmac_mode);
10214 udelay(40);
10215
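	/* Work around a 5719/5720 read-DMA engine bug: if any RDMA channel
	 * length register exceeds the maximum MTU, set the chip-specific
	 * workaround bit and flag it. tg3_periodic_fetch_stats() clears
	 * the bit again once enough TX packets have flowed through.
	 */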
10216 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10217 tg3_asic_rev(tp) == ASIC_REV_5720) {
10218 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10219 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10220 break;
10221 }
10222 if (i < TG3_NUM_RDMA_CHANNELS) {
10223 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10224 val |= tg3_lso_rd_dma_workaround_bit(tp);
10225 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10226 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10227 }
10228 }
10229
10230 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10231 if (!tg3_flag(tp, 5705_PLUS))
10232 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10233
10234 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10235 tw32(SNDDATAC_MODE,
10236 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10237 else
10238 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10239
10240 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10241 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10242 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10243 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10244 val |= RCVDBDI_MODE_LRG_RING_SZ;
10245 tw32(RCVDBDI_MODE, val);
10246 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10247 if (tg3_flag(tp, HW_TSO_1) ||
10248 tg3_flag(tp, HW_TSO_2) ||
10249 tg3_flag(tp, HW_TSO_3))
10250 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10251 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10252 if (tg3_flag(tp, ENABLE_TSS))
10253 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10254 tw32(SNDBDI_MODE, val);
10255 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10256
10257 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10258 err = tg3_load_5701_a0_firmware_fix(tp);
10259 if (err)
10260 return err;
10261 }
10262
10263 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10264	/* Ignore any errors from the firmware download. If the download
10265	 * fails, the device will operate with EEE disabled.
10266 */
10267 tg3_load_57766_firmware(tp);
10268 }
10269
10270 if (tg3_flag(tp, TSO_CAPABLE)) {
10271 err = tg3_load_tso_firmware(tp);
10272 if (err)
10273 return err;
10274 }
10275
10276 tp->tx_mode = TX_MODE_ENABLE;
10277
10278 if (tg3_flag(tp, 5755_PLUS) ||
10279 tg3_asic_rev(tp) == ASIC_REV_5906)
10280 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10281
10282 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10283 tg3_asic_rev(tp) == ASIC_REV_5762) {
10284 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10285 tp->tx_mode &= ~val;
10286 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10287 }
10288
10289 tw32_f(MAC_TX_MODE, tp->tx_mode);
10290 udelay(100);
10291
10292 if (tg3_flag(tp, ENABLE_RSS)) {
10293 tg3_rss_write_indir_tbl(tp);
10294
10295 /* Setup the "secret" hash key. */
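		/* 40 bytes of key material, spread across ten 32-bit registers. */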
10296 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10297 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10298 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10299 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10300 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10301 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10302 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10303 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10304 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10305 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10306 }
10307
10308 tp->rx_mode = RX_MODE_ENABLE;
10309 if (tg3_flag(tp, 5755_PLUS))
10310 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10311
10312 if (tg3_flag(tp, ENABLE_RSS))
10313 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10314 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10315 RX_MODE_RSS_IPV6_HASH_EN |
10316 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10317 RX_MODE_RSS_IPV4_HASH_EN |
10318 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10319
10320 tw32_f(MAC_RX_MODE, tp->rx_mode);
10321 udelay(10);
10322
10323 tw32(MAC_LED_CTRL, tp->led_ctrl);
10324
10325 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10326 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10327 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10328 udelay(10);
10329 }
10330 tw32_f(MAC_RX_MODE, tp->rx_mode);
10331 udelay(10);
10332
10333 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10334 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10335 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10336 /* Set drive transmission level to 1.2V */
10337 /* only if the signal pre-emphasis bit is not set */
10338 val = tr32(MAC_SERDES_CFG);
10339 val &= 0xfffff000;
10340 val |= 0x880;
10341 tw32(MAC_SERDES_CFG, val);
10342 }
10343 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10344 tw32(MAC_SERDES_CFG, 0x616000);
10345 }
10346
10347 /* Prevent chip from dropping frames when flow control
10348 * is enabled.
10349 */
10350 if (tg3_flag(tp, 57765_CLASS))
10351 val = 1;
10352 else
10353 val = 2;
10354 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10355
10356 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10357 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10358 /* Use hardware link auto-negotiation */
10359 tg3_flag_set(tp, HW_AUTONEG);
10360 }
10361
10362 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10363 tg3_asic_rev(tp) == ASIC_REV_5714) {
10364 u32 tmp;
10365
10366 tmp = tr32(SERDES_RX_CTRL);
10367 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10368 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10369 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10370 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10371 }
10372
10373 if (!tg3_flag(tp, USE_PHYLIB)) {
10374 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10375 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10376
10377 err = tg3_setup_phy(tp, false);
10378 if (err)
10379 return err;
10380
10381 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10382 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10383 u32 tmp;
10384
10385 /* Clear CRC stats. */
10386 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10387 tg3_writephy(tp, MII_TG3_TEST1,
10388 tmp | MII_TG3_TEST1_CRC_EN);
10389 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10390 }
10391 }
10392 }
10393
10394 __tg3_set_rx_mode(tp->dev);
10395
10396 /* Initialize receive rules. */
10397 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10398 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10399 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10400 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10401
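	/* The MAC has 16 receive rule slots (only 8 usable on 5705+ parts
	 * outside the 5780 class), and the ASF firmware claims the last
	 * four when enabled. Clear every rule above our limit.
	 */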
10402 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10403 limit = 8;
10404 else
10405 limit = 16;
10406 if (tg3_flag(tp, ENABLE_ASF))
10407 limit -= 4;
10408 switch (limit) {
10409 case 16:
10410 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10411 case 15:
10412 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10413 case 14:
10414 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10415 case 13:
10416 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10417 case 12:
10418 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10419 case 11:
10420 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10421 case 10:
10422 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10423 case 9:
10424 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10425 case 8:
10426 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10427 case 7:
10428 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10429 case 6:
10430 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10431 case 5:
10432 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10433 case 4:
10434 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10435 case 3:
10436 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10437 case 2:
10438 case 1:
10439
10440 default:
10441 break;
10442 }
10443
10444 if (tg3_flag(tp, ENABLE_APE))
10445 /* Write our heartbeat update interval to APE. */
10446 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10447 APE_HOST_HEARTBEAT_INT_DISABLE);
10448
10449 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10450
10451 return 0;
10452 }
10453
10454 /* Called at device open time to get the chip ready for
10455 * packet processing. Invoked with tp->lock held.
10456 */
10457 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10458 {
10459 /* Chip may have been just powered on. If so, the boot code may still
10460 * be running initialization. Wait for it to finish to avoid races in
10461 * accessing the hardware.
10462 */
10463 tg3_enable_register_access(tp);
10464 tg3_poll_fw(tp);
10465
10466 tg3_switch_clocks(tp);
10467
10468 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10469
10470 return tg3_reset_hw(tp, reset_phy);
10471 }
10472
10473 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10474 {
10475 int i;
10476
10477 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10478 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10479
10480 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10481 off += len;
10482
10483 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10484 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10485 memset(ocir, 0, TG3_OCIR_LEN);
10486 }
10487 }
10488
10489 /* sysfs attributes for hwmon */
10490 static ssize_t tg3_show_temp(struct device *dev,
10491 struct device_attribute *devattr, char *buf)
10492 {
10493 struct pci_dev *pdev = to_pci_dev(dev);
10494 struct net_device *netdev = pci_get_drvdata(pdev);
10495 struct tg3 *tp = netdev_priv(netdev);
10496 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10497 u32 temperature;
10498
10499 spin_lock_bh(&tp->lock);
10500 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10501 sizeof(temperature));
10502 spin_unlock_bh(&tp->lock);
10503	return sprintf(buf, "%u\n", temperature * 1000); /* hwmon expects millidegrees C */
10504 }
10505
10506
10507 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10508 TG3_TEMP_SENSOR_OFFSET);
10509 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10510 TG3_TEMP_CAUTION_OFFSET);
10511 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10512 TG3_TEMP_MAX_OFFSET);
10513
10514 static struct attribute *tg3_attributes[] = {
10515 &sensor_dev_attr_temp1_input.dev_attr.attr,
10516 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10517 &sensor_dev_attr_temp1_max.dev_attr.attr,
10518 NULL
10519 };
10520
10521 static const struct attribute_group tg3_group = {
10522 .attrs = tg3_attributes,
10523 };
10524
10525 static void tg3_hwmon_close(struct tg3 *tp)
10526 {
10527 if (tp->hwmon_dev) {
10528 hwmon_device_unregister(tp->hwmon_dev);
10529 tp->hwmon_dev = NULL;
10530 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10531 }
10532 }
10533
10534 static void tg3_hwmon_open(struct tg3 *tp)
10535 {
10536 int i, err;
10537 u32 size = 0;
10538 struct pci_dev *pdev = tp->pdev;
10539 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10540
10541 tg3_sd_scan_scratchpad(tp, ocirs);
10542
10543 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10544 if (!ocirs[i].src_data_length)
10545 continue;
10546
10547 size += ocirs[i].src_hdr_length;
10548 size += ocirs[i].src_data_length;
10549 }
10550
10551 if (!size)
10552 return;
10553
10554 /* Register hwmon sysfs hooks */
10555 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10556 if (err) {
10557 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10558 return;
10559 }
10560
10561 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10562 if (IS_ERR(tp->hwmon_dev)) {
10563 tp->hwmon_dev = NULL;
10564 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10565 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10566 }
10567 }
10568
10569
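/* Fold a 32-bit hardware counter into a 64-bit software accumulator,
 * using unsigned wrap of the low word to detect when to carry into
 * the high word.
 */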
10570 #define TG3_STAT_ADD32(PSTAT, REG) \
10571 do { u32 __val = tr32(REG); \
10572 (PSTAT)->low += __val; \
10573 if ((PSTAT)->low < __val) \
10574 (PSTAT)->high += 1; \
10575 } while (0)
10576
10577 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10578 {
10579 struct tg3_hw_stats *sp = tp->hw_stats;
10580
10581 if (!tp->link_up)
10582 return;
10583
10584 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10585 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10586 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10587 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10588 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10589 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10590 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10591 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10592 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10593 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10594 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10595 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10596 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10597 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10598 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10599 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10600 u32 val;
10601
10602 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10603 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10604 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10605 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10606 }
10607
10608 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10609 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10610 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10611 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10612 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10613 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10614 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10615 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10616 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10617 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10618 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10619 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10620 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10621 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10622
10623 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10624 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10625 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10626 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10627 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10628 } else {
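		/* These chips do not report RCVLPC_IN_DISCARDS_CNT reliably;
		 * approximate discards by counting mbuf low-watermark
		 * attention events, at most one per poll.
		 */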
10629 u32 val = tr32(HOSTCC_FLOW_ATTN);
10630 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10631 if (val) {
10632 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10633 sp->rx_discards.low += val;
10634 if (sp->rx_discards.low < val)
10635 sp->rx_discards.high += 1;
10636 }
10637 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10638 }
10639 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10640 }
10641
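/* Some chips can drop an MSI. If a vector has work pending but its
 * producer/consumer indices have not moved for two consecutive polls,
 * assume the interrupt was lost and invoke the handler by hand.
 */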
10642 static void tg3_chk_missed_msi(struct tg3 *tp)
10643 {
10644 u32 i;
10645
10646 for (i = 0; i < tp->irq_cnt; i++) {
10647 struct tg3_napi *tnapi = &tp->napi[i];
10648
10649 if (tg3_has_work(tnapi)) {
10650 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10651 tnapi->last_tx_cons == tnapi->tx_cons) {
10652 if (tnapi->chk_msi_cnt < 1) {
10653 tnapi->chk_msi_cnt++;
10654 return;
10655 }
10656 tg3_msi(0, tnapi);
10657 }
10658 }
10659 tnapi->chk_msi_cnt = 0;
10660 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10661 tnapi->last_tx_cons = tnapi->tx_cons;
10662 }
10663 }
10664
10665 static void tg3_timer(unsigned long __opaque)
10666 {
10667 struct tg3 *tp = (struct tg3 *) __opaque;
10668
10669 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10670 goto restart_timer;
10671
10672 spin_lock(&tp->lock);
10673
10674 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10675 tg3_flag(tp, 57765_CLASS))
10676 tg3_chk_missed_msi(tp);
10677
10678 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10679 /* BCM4785: Flush posted writes from GbE to host memory. */
10680 tr32(HOSTCC_MODE);
10681 }
10682
10683 if (!tg3_flag(tp, TAGGED_STATUS)) {
10684		/* All of this garbage is because, when using non-tagged
10685		 * IRQ status, the mailbox/status_block protocol the chip
10686		 * uses with the cpu is race prone.
10687 */
10688 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10689 tw32(GRC_LOCAL_CTRL,
10690 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10691 } else {
10692 tw32(HOSTCC_MODE, tp->coalesce_mode |
10693 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10694 }
10695
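		/* A disabled write DMA engine here means the chip has likely
		 * hung; schedule a full reset from process context.
		 */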
10696 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10697 spin_unlock(&tp->lock);
10698 tg3_reset_task_schedule(tp);
10699 goto restart_timer;
10700 }
10701 }
10702
10703 /* This part only runs once per second. */
10704 if (!--tp->timer_counter) {
10705 if (tg3_flag(tp, 5705_PLUS))
10706 tg3_periodic_fetch_stats(tp);
10707
10708 if (tp->setlpicnt && !--tp->setlpicnt)
10709 tg3_phy_eee_enable(tp);
10710
10711 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10712 u32 mac_stat;
10713 int phy_event;
10714
10715 mac_stat = tr32(MAC_STATUS);
10716
10717 phy_event = 0;
10718 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10719 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10720 phy_event = 1;
10721 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10722 phy_event = 1;
10723
10724 if (phy_event)
10725 tg3_setup_phy(tp, false);
10726 } else if (tg3_flag(tp, POLL_SERDES)) {
10727 u32 mac_stat = tr32(MAC_STATUS);
10728 int need_setup = 0;
10729
10730 if (tp->link_up &&
10731 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10732 need_setup = 1;
10733 }
10734 if (!tp->link_up &&
10735 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10736 MAC_STATUS_SIGNAL_DET))) {
10737 need_setup = 1;
10738 }
10739 if (need_setup) {
10740 if (!tp->serdes_counter) {
10741 tw32_f(MAC_MODE,
10742 (tp->mac_mode &
10743 ~MAC_MODE_PORT_MODE_MASK));
10744 udelay(40);
10745 tw32_f(MAC_MODE, tp->mac_mode);
10746 udelay(40);
10747 }
10748 tg3_setup_phy(tp, false);
10749 }
10750 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10751 tg3_flag(tp, 5780_CLASS)) {
10752 tg3_serdes_parallel_detect(tp);
10753 }
10754
10755 tp->timer_counter = tp->timer_multiplier;
10756 }
10757
10758 /* Heartbeat is only sent once every 2 seconds.
10759 *
10760 * The heartbeat is to tell the ASF firmware that the host
10761 * driver is still alive. In the event that the OS crashes,
10762 * ASF needs to reset the hardware to free up the FIFO space
10763 * that may be filled with rx packets destined for the host.
10764 * If the FIFO is full, ASF will no longer function properly.
10765 *
10766 * Unintended resets have been reported on real time kernels
10767	 * where the timer doesn't run on time. Netpoll will have the
10768	 * same problem.
10769 *
10770 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10771 * to check the ring condition when the heartbeat is expiring
10772 * before doing the reset. This will prevent most unintended
10773 * resets.
10774 */
10775 if (!--tp->asf_counter) {
10776 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10777 tg3_wait_for_event_ack(tp);
10778
10779 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10780 FWCMD_NICDRV_ALIVE3);
10781 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10782 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10783 TG3_FW_UPDATE_TIMEOUT_SEC);
10784
10785 tg3_generate_fw_event(tp);
10786 }
10787 tp->asf_counter = tp->asf_multiplier;
10788 }
10789
10790 spin_unlock(&tp->lock);
10791
10792 restart_timer:
10793 tp->timer.expires = jiffies + tp->timer_offset;
10794 add_timer(&tp->timer);
10795 }
10796
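/* Chips with tagged status that don't need the missed-MSI check can
 * get by with a 1 second timer; everything else polls at HZ/10.
 */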
10797 static void tg3_timer_init(struct tg3 *tp)
10798 {
10799 if (tg3_flag(tp, TAGGED_STATUS) &&
10800 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10801 !tg3_flag(tp, 57765_CLASS))
10802 tp->timer_offset = HZ;
10803 else
10804 tp->timer_offset = HZ / 10;
10805
10806 BUG_ON(tp->timer_offset > HZ);
10807
10808 tp->timer_multiplier = (HZ / tp->timer_offset);
10809 tp->asf_multiplier = (HZ / tp->timer_offset) *
10810 TG3_FW_UPDATE_FREQ_SEC;
10811
10812 init_timer(&tp->timer);
10813 tp->timer.data = (unsigned long) tp;
10814 tp->timer.function = tg3_timer;
10815 }
10816
10817 static void tg3_timer_start(struct tg3 *tp)
10818 {
10819 tp->asf_counter = tp->asf_multiplier;
10820 tp->timer_counter = tp->timer_multiplier;
10821
10822 tp->timer.expires = jiffies + tp->timer_offset;
10823 add_timer(&tp->timer);
10824 }
10825
10826 static void tg3_timer_stop(struct tg3 *tp)
10827 {
10828 del_timer_sync(&tp->timer);
10829 }
10830
10831 /* Restart hardware after configuration changes, self-test, etc.
10832 * Invoked with tp->lock held.
10833 */
10834 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10835 __releases(tp->lock)
10836 __acquires(tp->lock)
10837 {
10838 int err;
10839
10840 err = tg3_init_hw(tp, reset_phy);
10841 if (err) {
10842 netdev_err(tp->dev,
10843 "Failed to re-initialize device, aborting\n");
10844 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10845 tg3_full_unlock(tp);
10846 tg3_timer_stop(tp);
10847 tp->irq_sync = 0;
10848 tg3_napi_enable(tp);
10849 dev_close(tp->dev);
10850 tg3_full_lock(tp, 0);
10851 }
10852 return err;
10853 }
10854
10855 static void tg3_reset_task(struct work_struct *work)
10856 {
10857 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10858 int err;
10859
10860 tg3_full_lock(tp, 0);
10861
10862 if (!netif_running(tp->dev)) {
10863 tg3_flag_clear(tp, RESET_TASK_PENDING);
10864 tg3_full_unlock(tp);
10865 return;
10866 }
10867
10868 tg3_full_unlock(tp);
10869
10870 tg3_phy_stop(tp);
10871
10872 tg3_netif_stop(tp);
10873
10874 tg3_full_lock(tp, 1);
10875
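	/* If a TX timeout brought us here, suspect mailbox write reordering
	 * by the host bridge and fall back to the flushing mailbox write
	 * routines before reinitializing the chip.
	 */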
10876 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10877 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10878 tp->write32_rx_mbox = tg3_write_flush_reg32;
10879 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10880 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10881 }
10882
10883 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10884 err = tg3_init_hw(tp, true);
10885 if (err)
10886 goto out;
10887
10888 tg3_netif_start(tp);
10889
10890 out:
10891 tg3_full_unlock(tp);
10892
10893 if (!err)
10894 tg3_phy_start(tp);
10895
10896 tg3_flag_clear(tp, RESET_TASK_PENDING);
10897 }
10898
10899 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10900 {
10901 irq_handler_t fn;
10902 unsigned long flags;
10903 char *name;
10904 struct tg3_napi *tnapi = &tp->napi[irq_num];
10905
10906 if (tp->irq_cnt == 1)
10907 name = tp->dev->name;
10908 else {
10909 name = &tnapi->irq_lbl[0];
10910 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10911 name[IFNAMSIZ-1] = 0;
10912 }
10913
10914 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10915 fn = tg3_msi;
10916 if (tg3_flag(tp, 1SHOT_MSI))
10917 fn = tg3_msi_1shot;
10918 flags = 0;
10919 } else {
10920 fn = tg3_interrupt;
10921 if (tg3_flag(tp, TAGGED_STATUS))
10922 fn = tg3_interrupt_tagged;
10923 flags = IRQF_SHARED;
10924 }
10925
10926 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10927 }
10928
10929 static int tg3_test_interrupt(struct tg3 *tp)
10930 {
10931 struct tg3_napi *tnapi = &tp->napi[0];
10932 struct net_device *dev = tp->dev;
10933 int err, i, intr_ok = 0;
10934 u32 val;
10935
10936 if (!netif_running(dev))
10937 return -ENODEV;
10938
10939 tg3_disable_ints(tp);
10940
10941 free_irq(tnapi->irq_vec, tnapi);
10942
10943 /*
10944	 * Turn off MSI one-shot mode. Otherwise this test has no
10945	 * way to observe whether the interrupt was delivered.
10946 */
10947 if (tg3_flag(tp, 57765_PLUS)) {
10948 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10949 tw32(MSGINT_MODE, val);
10950 }
10951
10952 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10953 IRQF_SHARED, dev->name, tnapi);
10954 if (err)
10955 return err;
10956
10957 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10958 tg3_enable_ints(tp);
10959
10960 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10961 tnapi->coal_now);
10962
10963 for (i = 0; i < 5; i++) {
10964 u32 int_mbox, misc_host_ctrl;
10965
10966 int_mbox = tr32_mailbox(tnapi->int_mbox);
10967 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10968
10969 if ((int_mbox != 0) ||
10970 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10971 intr_ok = 1;
10972 break;
10973 }
10974
10975 if (tg3_flag(tp, 57765_PLUS) &&
10976 tnapi->hw_status->status_tag != tnapi->last_tag)
10977 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10978
10979 msleep(10);
10980 }
10981
10982 tg3_disable_ints(tp);
10983
10984 free_irq(tnapi->irq_vec, tnapi);
10985
10986 err = tg3_request_irq(tp, 0);
10987
10988 if (err)
10989 return err;
10990
10991 if (intr_ok) {
10992 /* Reenable MSI one shot mode. */
10993 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10994 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10995 tw32(MSGINT_MODE, val);
10996 }
10997 return 0;
10998 }
10999
11000 return -EIO;
11001 }
11002
11003 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
11004  * is successfully restored.
11005 */
11006 static int tg3_test_msi(struct tg3 *tp)
11007 {
11008 int err;
11009 u16 pci_cmd;
11010
11011 if (!tg3_flag(tp, USING_MSI))
11012 return 0;
11013
11014 /* Turn off SERR reporting in case MSI terminates with Master
11015 * Abort.
11016 */
11017 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11018 pci_write_config_word(tp->pdev, PCI_COMMAND,
11019 pci_cmd & ~PCI_COMMAND_SERR);
11020
11021 err = tg3_test_interrupt(tp);
11022
11023 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11024
11025 if (!err)
11026 return 0;
11027
11028 /* other failures */
11029 if (err != -EIO)
11030 return err;
11031
11032 /* MSI test failed, go back to INTx mode */
11033 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11034 "to INTx mode. Please report this failure to the PCI "
11035 "maintainer and include system chipset information\n");
11036
11037 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11038
11039 pci_disable_msi(tp->pdev);
11040
11041 tg3_flag_clear(tp, USING_MSI);
11042 tp->napi[0].irq_vec = tp->pdev->irq;
11043
11044 err = tg3_request_irq(tp, 0);
11045 if (err)
11046 return err;
11047
11048 /* Need to reset the chip because the MSI cycle may have terminated
11049 * with Master Abort.
11050 */
11051 tg3_full_lock(tp, 1);
11052
11053 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11054 err = tg3_init_hw(tp, true);
11055
11056 tg3_full_unlock(tp);
11057
11058 if (err)
11059 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11060
11061 return err;
11062 }
11063
11064 static int tg3_request_firmware(struct tg3 *tp)
11065 {
11066 const struct tg3_firmware_hdr *fw_hdr;
11067
11068 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11069 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11070 tp->fw_needed);
11071 return -ENOENT;
11072 }
11073
11074 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11075
11076 /* Firmware blob starts with version numbers, followed by
11077 * start address and _full_ length including BSS sections
11078	 * (which must be longer than the actual data, of course).
11079 */
11080
11081 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11082 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11083 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11084 tp->fw_len, tp->fw_needed);
11085 release_firmware(tp->fw);
11086 tp->fw = NULL;
11087 return -EINVAL;
11088 }
11089
11090 /* We no longer need firmware; we have it. */
11091 tp->fw_needed = NULL;
11092 return 0;
11093 }
11094
11095 static u32 tg3_irq_count(struct tg3 *tp)
11096 {
11097 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11098
11099 if (irq_cnt > 1) {
11100 /* We want as many rx rings enabled as there are cpus.
11101 * In multiqueue MSI-X mode, the first MSI-X vector
11102		 * only deals with link interrupts, etc., so we add
11103 * one to the number of vectors we are requesting.
11104 */
11105 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11106 }
11107
11108 return irq_cnt;
11109 }
11110
11111 static bool tg3_enable_msix(struct tg3 *tp)
11112 {
11113 int i, rc;
11114 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11115
11116 tp->txq_cnt = tp->txq_req;
11117 tp->rxq_cnt = tp->rxq_req;
11118 if (!tp->rxq_cnt)
11119 tp->rxq_cnt = netif_get_num_default_rss_queues();
11120 if (tp->rxq_cnt > tp->rxq_max)
11121 tp->rxq_cnt = tp->rxq_max;
11122
11123 /* Disable multiple TX rings by default. Simple round-robin hardware
11124 * scheduling of the TX rings can cause starvation of rings with
11125 * small packets when other rings have TSO or jumbo packets.
11126 */
11127 if (!tp->txq_req)
11128 tp->txq_cnt = 1;
11129
11130 tp->irq_cnt = tg3_irq_count(tp);
11131
11132 for (i = 0; i < tp->irq_max; i++) {
11133 msix_ent[i].entry = i;
11134 msix_ent[i].vector = 0;
11135 }
11136
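	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or the number of vectors actually available; in the
	 * last case, retry with the reduced count.
	 */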
11137 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11138 if (rc < 0) {
11139 return false;
11140 } else if (rc != 0) {
11141 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11142 return false;
11143 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11144 tp->irq_cnt, rc);
11145 tp->irq_cnt = rc;
11146 tp->rxq_cnt = max(rc - 1, 1);
11147 if (tp->txq_cnt)
11148 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11149 }
11150
11151 for (i = 0; i < tp->irq_max; i++)
11152 tp->napi[i].irq_vec = msix_ent[i].vector;
11153
11154 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11155 pci_disable_msix(tp->pdev);
11156 return false;
11157 }
11158
11159 if (tp->irq_cnt == 1)
11160 return true;
11161
11162 tg3_flag_set(tp, ENABLE_RSS);
11163
11164 if (tp->txq_cnt > 1)
11165 tg3_flag_set(tp, ENABLE_TSS);
11166
11167 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11168
11169 return true;
11170 }
11171
11172 static void tg3_ints_init(struct tg3 *tp)
11173 {
11174 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11175 !tg3_flag(tp, TAGGED_STATUS)) {
11176 /* All MSI supporting chips should support tagged
11177 * status. Assert that this is the case.
11178 */
11179 netdev_warn(tp->dev,
11180 "MSI without TAGGED_STATUS? Not using MSI\n");
11181 goto defcfg;
11182 }
11183
11184 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11185 tg3_flag_set(tp, USING_MSIX);
11186 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11187 tg3_flag_set(tp, USING_MSI);
11188
11189 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11190 u32 msi_mode = tr32(MSGINT_MODE);
11191 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11192 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11193 if (!tg3_flag(tp, 1SHOT_MSI))
11194 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11195 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11196 }
11197 defcfg:
11198 if (!tg3_flag(tp, USING_MSIX)) {
11199 tp->irq_cnt = 1;
11200 tp->napi[0].irq_vec = tp->pdev->irq;
11201 }
11202
11203 if (tp->irq_cnt == 1) {
11204 tp->txq_cnt = 1;
11205 tp->rxq_cnt = 1;
11206 netif_set_real_num_tx_queues(tp->dev, 1);
11207 netif_set_real_num_rx_queues(tp->dev, 1);
11208 }
11209 }
11210
11211 static void tg3_ints_fini(struct tg3 *tp)
11212 {
11213 if (tg3_flag(tp, USING_MSIX))
11214 pci_disable_msix(tp->pdev);
11215 else if (tg3_flag(tp, USING_MSI))
11216 pci_disable_msi(tp->pdev);
11217 tg3_flag_clear(tp, USING_MSI);
11218 tg3_flag_clear(tp, USING_MSIX);
11219 tg3_flag_clear(tp, ENABLE_RSS);
11220 tg3_flag_clear(tp, ENABLE_TSS);
11221 }
11222
11223 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11224 bool init)
11225 {
11226 struct net_device *dev = tp->dev;
11227 int i, err;
11228
11229 /*
11230 * Setup interrupts first so we know how
11231 * many NAPI resources to allocate
11232 */
11233 tg3_ints_init(tp);
11234
11235 tg3_rss_check_indir_tbl(tp);
11236
11237 /* The placement of this call is tied
11238 * to the setup and use of Host TX descriptors.
11239 */
11240 err = tg3_alloc_consistent(tp);
11241 if (err)
11242 goto err_out1;
11243
11244 tg3_napi_init(tp);
11245
11246 tg3_napi_enable(tp);
11247
11248 for (i = 0; i < tp->irq_cnt; i++) {
11249 struct tg3_napi *tnapi = &tp->napi[i];
11250 err = tg3_request_irq(tp, i);
11251 if (err) {
11252 for (i--; i >= 0; i--) {
11253 tnapi = &tp->napi[i];
11254 free_irq(tnapi->irq_vec, tnapi);
11255 }
11256 goto err_out2;
11257 }
11258 }
11259
11260 tg3_full_lock(tp, 0);
11261
11262 err = tg3_init_hw(tp, reset_phy);
11263 if (err) {
11264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11265 tg3_free_rings(tp);
11266 }
11267
11268 tg3_full_unlock(tp);
11269
11270 if (err)
11271 goto err_out3;
11272
11273 if (test_irq && tg3_flag(tp, USING_MSI)) {
11274 err = tg3_test_msi(tp);
11275
11276 if (err) {
11277 tg3_full_lock(tp, 0);
11278 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11279 tg3_free_rings(tp);
11280 tg3_full_unlock(tp);
11281
11282 goto err_out2;
11283 }
11284
11285 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11286 u32 val = tr32(PCIE_TRANSACTION_CFG);
11287
11288 tw32(PCIE_TRANSACTION_CFG,
11289 val | PCIE_TRANS_CFG_1SHOT_MSI);
11290 }
11291 }
11292
11293 tg3_phy_start(tp);
11294
11295 tg3_hwmon_open(tp);
11296
11297 tg3_full_lock(tp, 0);
11298
11299 tg3_timer_start(tp);
11300 tg3_flag_set(tp, INIT_COMPLETE);
11301 tg3_enable_ints(tp);
11302
11303 if (init)
11304 tg3_ptp_init(tp);
11305 else
11306 tg3_ptp_resume(tp);
11307
11308
11309 tg3_full_unlock(tp);
11310
11311 netif_tx_start_all_queues(dev);
11312
11313 /*
11314	 * Reset the loopback feature if it was turned on while the device
11315	 * was down, to make sure that it's installed properly now.
11316 */
11317 if (dev->features & NETIF_F_LOOPBACK)
11318 tg3_set_loopback(dev, dev->features);
11319
11320 return 0;
11321
11322 err_out3:
11323 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11324 struct tg3_napi *tnapi = &tp->napi[i];
11325 free_irq(tnapi->irq_vec, tnapi);
11326 }
11327
11328 err_out2:
11329 tg3_napi_disable(tp);
11330 tg3_napi_fini(tp);
11331 tg3_free_consistent(tp);
11332
11333 err_out1:
11334 tg3_ints_fini(tp);
11335
11336 return err;
11337 }
11338
11339 static void tg3_stop(struct tg3 *tp)
11340 {
11341 int i;
11342
11343 tg3_reset_task_cancel(tp);
11344 tg3_netif_stop(tp);
11345
11346 tg3_timer_stop(tp);
11347
11348 tg3_hwmon_close(tp);
11349
11350 tg3_phy_stop(tp);
11351
11352 tg3_full_lock(tp, 1);
11353
11354 tg3_disable_ints(tp);
11355
11356 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11357 tg3_free_rings(tp);
11358 tg3_flag_clear(tp, INIT_COMPLETE);
11359
11360 tg3_full_unlock(tp);
11361
11362 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11363 struct tg3_napi *tnapi = &tp->napi[i];
11364 free_irq(tnapi->irq_vec, tnapi);
11365 }
11366
11367 tg3_ints_fini(tp);
11368
11369 tg3_napi_fini(tp);
11370
11371 tg3_free_consistent(tp);
11372 }
11373
11374 static int tg3_open(struct net_device *dev)
11375 {
11376 struct tg3 *tp = netdev_priv(dev);
11377 int err;
11378
11379 if (tp->fw_needed) {
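		/* Firmware is only mandatory for the 5701 A0 fixup; the 57766
		 * EEE patch and the TSO firmware are optional, and their
		 * features are simply disabled if the load fails.
		 */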
11380 err = tg3_request_firmware(tp);
11381 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11382 if (err) {
11383 netdev_warn(tp->dev, "EEE capability disabled\n");
11384 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11385 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11386 netdev_warn(tp->dev, "EEE capability restored\n");
11387 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11388 }
11389 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11390 if (err)
11391 return err;
11392 } else if (err) {
11393 netdev_warn(tp->dev, "TSO capability disabled\n");
11394 tg3_flag_clear(tp, TSO_CAPABLE);
11395 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11396 netdev_notice(tp->dev, "TSO capability restored\n");
11397 tg3_flag_set(tp, TSO_CAPABLE);
11398 }
11399 }
11400
11401 tg3_carrier_off(tp);
11402
11403 err = tg3_power_up(tp);
11404 if (err)
11405 return err;
11406
11407 tg3_full_lock(tp, 0);
11408
11409 tg3_disable_ints(tp);
11410 tg3_flag_clear(tp, INIT_COMPLETE);
11411
11412 tg3_full_unlock(tp);
11413
11414 err = tg3_start(tp,
11415 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11416 true, true);
11417 if (err) {
11418 tg3_frob_aux_power(tp, false);
11419 pci_set_power_state(tp->pdev, PCI_D3hot);
11420 }
11421
11422 if (tg3_flag(tp, PTP_CAPABLE)) {
11423 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11424 &tp->pdev->dev);
11425 if (IS_ERR(tp->ptp_clock))
11426 tp->ptp_clock = NULL;
11427 }
11428
11429 return err;
11430 }
11431
11432 static int tg3_close(struct net_device *dev)
11433 {
11434 struct tg3 *tp = netdev_priv(dev);
11435
11436 tg3_ptp_fini(tp);
11437
11438 tg3_stop(tp);
11439
11440 /* Clear stats across close / open calls */
11441 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11442 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11443
11444 tg3_power_down(tp);
11445
11446 tg3_carrier_off(tp);
11447
11448 return 0;
11449 }
11450
11451 static inline u64 get_stat64(tg3_stat64_t *val)
11452 {
11453 return ((u64)val->high << 32) | ((u64)val->low);
11454 }
11455
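/* On 5700/5701 copper devices, CRC errors are counted in the PHY rather
 * than taken from the MAC statistics block: enable the counter via
 * MII_TG3_TEST1, read it back, and accumulate it in tp->phy_crc_errors.
 */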
11456 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11457 {
11458 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11459
11460 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11461 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11462 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11463 u32 val;
11464
11465 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11466 tg3_writephy(tp, MII_TG3_TEST1,
11467 val | MII_TG3_TEST1_CRC_EN);
11468 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11469 } else
11470 val = 0;
11471
11472 tp->phy_crc_errors += val;
11473
11474 return tp->phy_crc_errors;
11475 }
11476
11477 return get_stat64(&hw_stats->rx_fcs_errors);
11478 }
11479
11480 #define ESTAT_ADD(member) \
11481 estats->member = old_estats->member + \
11482 get_stat64(&hw_stats->member)
11483
11484 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11485 {
11486 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11487 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11488
11489 ESTAT_ADD(rx_octets);
11490 ESTAT_ADD(rx_fragments);
11491 ESTAT_ADD(rx_ucast_packets);
11492 ESTAT_ADD(rx_mcast_packets);
11493 ESTAT_ADD(rx_bcast_packets);
11494 ESTAT_ADD(rx_fcs_errors);
11495 ESTAT_ADD(rx_align_errors);
11496 ESTAT_ADD(rx_xon_pause_rcvd);
11497 ESTAT_ADD(rx_xoff_pause_rcvd);
11498 ESTAT_ADD(rx_mac_ctrl_rcvd);
11499 ESTAT_ADD(rx_xoff_entered);
11500 ESTAT_ADD(rx_frame_too_long_errors);
11501 ESTAT_ADD(rx_jabbers);
11502 ESTAT_ADD(rx_undersize_packets);
11503 ESTAT_ADD(rx_in_length_errors);
11504 ESTAT_ADD(rx_out_length_errors);
11505 ESTAT_ADD(rx_64_or_less_octet_packets);
11506 ESTAT_ADD(rx_65_to_127_octet_packets);
11507 ESTAT_ADD(rx_128_to_255_octet_packets);
11508 ESTAT_ADD(rx_256_to_511_octet_packets);
11509 ESTAT_ADD(rx_512_to_1023_octet_packets);
11510 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11511 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11512 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11513 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11514 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11515
11516 ESTAT_ADD(tx_octets);
11517 ESTAT_ADD(tx_collisions);
11518 ESTAT_ADD(tx_xon_sent);
11519 ESTAT_ADD(tx_xoff_sent);
11520 ESTAT_ADD(tx_flow_control);
11521 ESTAT_ADD(tx_mac_errors);
11522 ESTAT_ADD(tx_single_collisions);
11523 ESTAT_ADD(tx_mult_collisions);
11524 ESTAT_ADD(tx_deferred);
11525 ESTAT_ADD(tx_excessive_collisions);
11526 ESTAT_ADD(tx_late_collisions);
11527 ESTAT_ADD(tx_collide_2times);
11528 ESTAT_ADD(tx_collide_3times);
11529 ESTAT_ADD(tx_collide_4times);
11530 ESTAT_ADD(tx_collide_5times);
11531 ESTAT_ADD(tx_collide_6times);
11532 ESTAT_ADD(tx_collide_7times);
11533 ESTAT_ADD(tx_collide_8times);
11534 ESTAT_ADD(tx_collide_9times);
11535 ESTAT_ADD(tx_collide_10times);
11536 ESTAT_ADD(tx_collide_11times);
11537 ESTAT_ADD(tx_collide_12times);
11538 ESTAT_ADD(tx_collide_13times);
11539 ESTAT_ADD(tx_collide_14times);
11540 ESTAT_ADD(tx_collide_15times);
11541 ESTAT_ADD(tx_ucast_packets);
11542 ESTAT_ADD(tx_mcast_packets);
11543 ESTAT_ADD(tx_bcast_packets);
11544 ESTAT_ADD(tx_carrier_sense_errors);
11545 ESTAT_ADD(tx_discards);
11546 ESTAT_ADD(tx_errors);
11547
11548 ESTAT_ADD(dma_writeq_full);
11549 ESTAT_ADD(dma_write_prioq_full);
11550 ESTAT_ADD(rxbds_empty);
11551 ESTAT_ADD(rx_discards);
11552 ESTAT_ADD(rx_errors);
11553 ESTAT_ADD(rx_threshold_hit);
11554
11555 ESTAT_ADD(dma_readq_full);
11556 ESTAT_ADD(dma_read_prioq_full);
11557 ESTAT_ADD(tx_comp_queue_full);
11558
11559 ESTAT_ADD(ring_set_send_prod_index);
11560 ESTAT_ADD(ring_status_update);
11561 ESTAT_ADD(nic_irqs);
11562 ESTAT_ADD(nic_avoided_irqs);
11563 ESTAT_ADD(nic_tx_threshold_hit);
11564
11565 ESTAT_ADD(mbuf_lwm_thresh_hit);
11566 }
11567
11568 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11569 {
11570 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11571 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11572
11573 stats->rx_packets = old_stats->rx_packets +
11574 get_stat64(&hw_stats->rx_ucast_packets) +
11575 get_stat64(&hw_stats->rx_mcast_packets) +
11576 get_stat64(&hw_stats->rx_bcast_packets);
11577
11578 stats->tx_packets = old_stats->tx_packets +
11579 get_stat64(&hw_stats->tx_ucast_packets) +
11580 get_stat64(&hw_stats->tx_mcast_packets) +
11581 get_stat64(&hw_stats->tx_bcast_packets);
11582
11583 stats->rx_bytes = old_stats->rx_bytes +
11584 get_stat64(&hw_stats->rx_octets);
11585 stats->tx_bytes = old_stats->tx_bytes +
11586 get_stat64(&hw_stats->tx_octets);
11587
11588 stats->rx_errors = old_stats->rx_errors +
11589 get_stat64(&hw_stats->rx_errors);
11590 stats->tx_errors = old_stats->tx_errors +
11591 get_stat64(&hw_stats->tx_errors) +
11592 get_stat64(&hw_stats->tx_mac_errors) +
11593 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11594 get_stat64(&hw_stats->tx_discards);
11595
11596 stats->multicast = old_stats->multicast +
11597 get_stat64(&hw_stats->rx_mcast_packets);
11598 stats->collisions = old_stats->collisions +
11599 get_stat64(&hw_stats->tx_collisions);
11600
11601 stats->rx_length_errors = old_stats->rx_length_errors +
11602 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11603 get_stat64(&hw_stats->rx_undersize_packets);
11604
11605 stats->rx_over_errors = old_stats->rx_over_errors +
11606 get_stat64(&hw_stats->rxbds_empty);
11607 stats->rx_frame_errors = old_stats->rx_frame_errors +
11608 get_stat64(&hw_stats->rx_align_errors);
11609 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11610 get_stat64(&hw_stats->tx_discards);
11611 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11612 get_stat64(&hw_stats->tx_carrier_sense_errors);
11613
11614 stats->rx_crc_errors = old_stats->rx_crc_errors +
11615 tg3_calc_crc_errors(tp);
11616
11617 stats->rx_missed_errors = old_stats->rx_missed_errors +
11618 get_stat64(&hw_stats->rx_discards);
11619
11620 stats->rx_dropped = tp->rx_dropped;
11621 stats->tx_dropped = tp->tx_dropped;
11622 }
11623
11624 static int tg3_get_regs_len(struct net_device *dev)
11625 {
11626 return TG3_REG_BLK_SIZE;
11627 }
11628
11629 static void tg3_get_regs(struct net_device *dev,
11630 struct ethtool_regs *regs, void *_p)
11631 {
11632 struct tg3 *tp = netdev_priv(dev);
11633
11634 regs->version = 0;
11635
11636 memset(_p, 0, TG3_REG_BLK_SIZE);
11637
11638 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11639 return;
11640
11641 tg3_full_lock(tp, 0);
11642
11643 tg3_dump_legacy_regs(tp, (u32 *)_p);
11644
11645 tg3_full_unlock(tp);
11646 }
11647
11648 static int tg3_get_eeprom_len(struct net_device *dev)
11649 {
11650 struct tg3 *tp = netdev_priv(dev);
11651
11652 return tp->nvram_size;
11653 }
11654
11655 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11656 {
11657 struct tg3 *tp = netdev_priv(dev);
11658 int ret;
11659 u8 *pd;
11660 u32 i, offset, len, b_offset, b_count;
11661 __be32 val;
11662
11663 if (tg3_flag(tp, NO_NVRAM))
11664 return -EINVAL;
11665
11666 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11667 return -EAGAIN;
11668
11669 offset = eeprom->offset;
11670 len = eeprom->len;
11671 eeprom->len = 0;
11672
11673 eeprom->magic = TG3_EEPROM_MAGIC;
11674
11675 if (offset & 3) {
11676 /* adjustments to start on required 4 byte boundary */
11677 b_offset = offset & 3;
11678 b_count = 4 - b_offset;
11679 if (b_count > len) {
11680 /* i.e. offset=1 len=2 */
11681 b_count = len;
11682 }
11683 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11684 if (ret)
11685 return ret;
11686 memcpy(data, ((char *)&val) + b_offset, b_count);
11687 len -= b_count;
11688 offset += b_count;
11689 eeprom->len += b_count;
11690 }
11691
11692 /* read bytes up to the last 4 byte boundary */
11693 pd = &data[eeprom->len];
11694 for (i = 0; i < (len - (len & 3)); i += 4) {
11695 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11696 if (ret) {
11697 eeprom->len += i;
11698 return ret;
11699 }
11700 memcpy(pd + i, &val, 4);
11701 }
11702 eeprom->len += i;
11703
11704 if (len & 3) {
11705 /* read last bytes not ending on 4 byte boundary */
11706 pd = &data[eeprom->len];
11707 b_count = len & 3;
11708 b_offset = offset + len - b_count;
11709 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11710 if (ret)
11711 return ret;
11712 memcpy(pd, &val, b_count);
11713 eeprom->len += b_count;
11714 }
11715 return 0;
11716 }
11717
11718 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11719 {
11720 struct tg3 *tp = netdev_priv(dev);
11721 int ret;
11722 u32 offset, len, b_offset, odd_len;
11723 u8 *buf;
11724 __be32 start, end;
11725
11726 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11727 return -EAGAIN;
11728
11729 if (tg3_flag(tp, NO_NVRAM) ||
11730 eeprom->magic != TG3_EEPROM_MAGIC)
11731 return -EINVAL;
11732
11733 offset = eeprom->offset;
11734 len = eeprom->len;
11735
11736 if ((b_offset = (offset & 3))) {
11737 /* adjustments to start on required 4 byte boundary */
11738 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11739 if (ret)
11740 return ret;
11741 len += b_offset;
11742 offset &= ~3;
11743 if (len < 4)
11744 len = 4;
11745 }
11746
11747 odd_len = 0;
11748 if (len & 3) {
11749 /* adjustments to end on required 4 byte boundary */
11750 odd_len = 1;
11751 len = (len + 3) & ~3;
11752 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11753 if (ret)
11754 return ret;
11755 }
11756
11757 buf = data;
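	/* Unaligned or odd-length writes go through a bounce buffer that
	 * merges the caller's data with the NVRAM words preserved above.
	 */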
11758 if (b_offset || odd_len) {
11759 buf = kmalloc(len, GFP_KERNEL);
11760 if (!buf)
11761 return -ENOMEM;
11762 if (b_offset)
11763 memcpy(buf, &start, 4);
11764 if (odd_len)
11765 memcpy(buf+len-4, &end, 4);
11766 memcpy(buf + b_offset, data, eeprom->len);
11767 }
11768
11769 ret = tg3_nvram_write_block(tp, offset, len, buf);
11770
11771 if (buf != data)
11772 kfree(buf);
11773
11774 return ret;
11775 }
11776
11777 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11778 {
11779 struct tg3 *tp = netdev_priv(dev);
11780
11781 if (tg3_flag(tp, USE_PHYLIB)) {
11782 struct phy_device *phydev;
11783 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11784 return -EAGAIN;
11785 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11786 return phy_ethtool_gset(phydev, cmd);
11787 }
11788
11789 cmd->supported = (SUPPORTED_Autoneg);
11790
11791 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11792 cmd->supported |= (SUPPORTED_1000baseT_Half |
11793 SUPPORTED_1000baseT_Full);
11794
11795 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11796 cmd->supported |= (SUPPORTED_100baseT_Half |
11797 SUPPORTED_100baseT_Full |
11798 SUPPORTED_10baseT_Half |
11799 SUPPORTED_10baseT_Full |
11800 SUPPORTED_TP);
11801 cmd->port = PORT_TP;
11802 } else {
11803 cmd->supported |= SUPPORTED_FIBRE;
11804 cmd->port = PORT_FIBRE;
11805 }
11806
11807 cmd->advertising = tp->link_config.advertising;
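	/* Translate the local flow-control mode into 802.3 pause
	 * advertisement bits: RX+TX pause advertises Pause alone,
	 * RX-only advertises Pause|Asym_Pause, and TX-only advertises
	 * Asym_Pause by itself.
	 */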
11808 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11809 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11810 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11811 cmd->advertising |= ADVERTISED_Pause;
11812 } else {
11813 cmd->advertising |= ADVERTISED_Pause |
11814 ADVERTISED_Asym_Pause;
11815 }
11816 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11817 cmd->advertising |= ADVERTISED_Asym_Pause;
11818 }
11819 }
11820 if (netif_running(dev) && tp->link_up) {
11821 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11822 cmd->duplex = tp->link_config.active_duplex;
11823 cmd->lp_advertising = tp->link_config.rmt_adv;
11824 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11825 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11826 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11827 else
11828 cmd->eth_tp_mdix = ETH_TP_MDI;
11829 }
11830 } else {
11831 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11832 cmd->duplex = DUPLEX_UNKNOWN;
11833 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11834 }
11835 cmd->phy_address = tp->phy_addr;
11836 cmd->transceiver = XCVR_INTERNAL;
11837 cmd->autoneg = tp->link_config.autoneg;
11838 cmd->maxtxpkt = 0;
11839 cmd->maxrxpkt = 0;
11840 return 0;
11841 }
11842
11843 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11844 {
11845 struct tg3 *tp = netdev_priv(dev);
11846 u32 speed = ethtool_cmd_speed(cmd);
11847
11848 if (tg3_flag(tp, USE_PHYLIB)) {
11849 struct phy_device *phydev;
11850 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11851 return -EAGAIN;
11852 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11853 return phy_ethtool_sset(phydev, cmd);
11854 }
11855
11856 if (cmd->autoneg != AUTONEG_ENABLE &&
11857 cmd->autoneg != AUTONEG_DISABLE)
11858 return -EINVAL;
11859
11860 if (cmd->autoneg == AUTONEG_DISABLE &&
11861 cmd->duplex != DUPLEX_FULL &&
11862 cmd->duplex != DUPLEX_HALF)
11863 return -EINVAL;
11864
11865 if (cmd->autoneg == AUTONEG_ENABLE) {
11866 u32 mask = ADVERTISED_Autoneg |
11867 ADVERTISED_Pause |
11868 ADVERTISED_Asym_Pause;
11869
11870 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11871 mask |= ADVERTISED_1000baseT_Half |
11872 ADVERTISED_1000baseT_Full;
11873
11874 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11875 mask |= ADVERTISED_100baseT_Half |
11876 ADVERTISED_100baseT_Full |
11877 ADVERTISED_10baseT_Half |
11878 ADVERTISED_10baseT_Full |
11879 ADVERTISED_TP;
11880 else
11881 mask |= ADVERTISED_FIBRE;
11882
11883 if (cmd->advertising & ~mask)
11884 return -EINVAL;
11885
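		/* Keep only the speed/duplex bits in the stored
		 * advertisement; the pause, TP and FIBRE bits are
		 * reconstructed from other state when the settings are
		 * reported back.
		 */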
11886 mask &= (ADVERTISED_1000baseT_Half |
11887 ADVERTISED_1000baseT_Full |
11888 ADVERTISED_100baseT_Half |
11889 ADVERTISED_100baseT_Full |
11890 ADVERTISED_10baseT_Half |
11891 ADVERTISED_10baseT_Full);
11892
11893 cmd->advertising &= mask;
11894 } else {
11895 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11896 if (speed != SPEED_1000)
11897 return -EINVAL;
11898
11899 if (cmd->duplex != DUPLEX_FULL)
11900 return -EINVAL;
11901 } else {
11902 if (speed != SPEED_100 &&
11903 speed != SPEED_10)
11904 return -EINVAL;
11905 }
11906 }
11907
11908 tg3_full_lock(tp, 0);
11909
11910 tp->link_config.autoneg = cmd->autoneg;
11911 if (cmd->autoneg == AUTONEG_ENABLE) {
11912 tp->link_config.advertising = (cmd->advertising |
11913 ADVERTISED_Autoneg);
11914 tp->link_config.speed = SPEED_UNKNOWN;
11915 tp->link_config.duplex = DUPLEX_UNKNOWN;
11916 } else {
11917 tp->link_config.advertising = 0;
11918 tp->link_config.speed = speed;
11919 tp->link_config.duplex = cmd->duplex;
11920 }
11921
11922 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11923
11924 tg3_warn_mgmt_link_flap(tp);
11925
11926 if (netif_running(dev))
11927 tg3_setup_phy(tp, true);
11928
11929 tg3_full_unlock(tp);
11930
11931 return 0;
11932 }
11933
11934 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11935 {
11936 struct tg3 *tp = netdev_priv(dev);
11937
11938 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11939 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11940 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11941 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11942 }
11943
11944 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11945 {
11946 struct tg3 *tp = netdev_priv(dev);
11947
11948 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11949 wol->supported = WAKE_MAGIC;
11950 else
11951 wol->supported = 0;
11952 wol->wolopts = 0;
11953 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11954 wol->wolopts = WAKE_MAGIC;
11955 memset(&wol->sopass, 0, sizeof(wol->sopass));
11956 }
11957
11958 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11959 {
11960 struct tg3 *tp = netdev_priv(dev);
11961 struct device *dp = &tp->pdev->dev;
11962
11963 if (wol->wolopts & ~WAKE_MAGIC)
11964 return -EINVAL;
11965 if ((wol->wolopts & WAKE_MAGIC) &&
11966 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11967 return -EINVAL;
11968
11969 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11970
11971 spin_lock_bh(&tp->lock);
11972 if (device_may_wakeup(dp))
11973 tg3_flag_set(tp, WOL_ENABLE);
11974 else
11975 tg3_flag_clear(tp, WOL_ENABLE);
11976 spin_unlock_bh(&tp->lock);
11977
11978 return 0;
11979 }
11980
11981 static u32 tg3_get_msglevel(struct net_device *dev)
11982 {
11983 struct tg3 *tp = netdev_priv(dev);
11984 return tp->msg_enable;
11985 }
11986
11987 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11988 {
11989 struct tg3 *tp = netdev_priv(dev);
11990 tp->msg_enable = value;
11991 }
11992
11993 static int tg3_nway_reset(struct net_device *dev)
11994 {
11995 struct tg3 *tp = netdev_priv(dev);
11996 int r;
11997
11998 if (!netif_running(dev))
11999 return -EAGAIN;
12000
12001 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12002 return -EINVAL;
12003
12004 tg3_warn_mgmt_link_flap(tp);
12005
12006 if (tg3_flag(tp, USE_PHYLIB)) {
12007 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12008 return -EAGAIN;
12009 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12010 } else {
12011 u32 bmcr;
12012
12013 spin_lock_bh(&tp->lock);
12014 r = -EINVAL;
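		/* The back-to-back BMCR reads below look deliberate: the
		 * first read appears to act as a dummy read, flushing any
		 * stale latched value before the value that is actually
		 * tested.
		 */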
12015 tg3_readphy(tp, MII_BMCR, &bmcr);
12016 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12017 ((bmcr & BMCR_ANENABLE) ||
12018 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12019 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12020 BMCR_ANENABLE);
12021 r = 0;
12022 }
12023 spin_unlock_bh(&tp->lock);
12024 }
12025
12026 return r;
12027 }
12028
12029 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12030 {
12031 struct tg3 *tp = netdev_priv(dev);
12032
12033 ering->rx_max_pending = tp->rx_std_ring_mask;
12034 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12035 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12036 else
12037 ering->rx_jumbo_max_pending = 0;
12038
12039 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12040
12041 ering->rx_pending = tp->rx_pending;
12042 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12043 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12044 else
12045 ering->rx_jumbo_pending = 0;
12046
12047 ering->tx_pending = tp->napi[0].tx_pending;
12048 }
12049
12050 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12051 {
12052 struct tg3 *tp = netdev_priv(dev);
12053 int i, irq_sync = 0, err = 0;
12054
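	/* Reject sizes beyond the hardware ring masks, and require the
	 * tx ring to hold more than MAX_SKB_FRAGS descriptors (three
	 * times that with the TSO workaround) so a maximally fragmented
	 * skb can always be queued.
	 */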
12055 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12056 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12057 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12058 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12059 (tg3_flag(tp, TSO_BUG) &&
12060 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12061 return -EINVAL;
12062
12063 if (netif_running(dev)) {
12064 tg3_phy_stop(tp);
12065 tg3_netif_stop(tp);
12066 irq_sync = 1;
12067 }
12068
12069 tg3_full_lock(tp, irq_sync);
12070
12071 tp->rx_pending = ering->rx_pending;
12072
12073 if (tg3_flag(tp, MAX_RXPEND_64) &&
12074 tp->rx_pending > 63)
12075 tp->rx_pending = 63;
12076 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12077
12078 for (i = 0; i < tp->irq_max; i++)
12079 tp->napi[i].tx_pending = ering->tx_pending;
12080
12081 if (netif_running(dev)) {
12082 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12083 err = tg3_restart_hw(tp, false);
12084 if (!err)
12085 tg3_netif_start(tp);
12086 }
12087
12088 tg3_full_unlock(tp);
12089
12090 if (irq_sync && !err)
12091 tg3_phy_start(tp);
12092
12093 return err;
12094 }
12095
12096 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12097 {
12098 struct tg3 *tp = netdev_priv(dev);
12099
12100 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12101
12102 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12103 epause->rx_pause = 1;
12104 else
12105 epause->rx_pause = 0;
12106
12107 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12108 epause->tx_pause = 1;
12109 else
12110 epause->tx_pause = 0;
12111 }
12112
12113 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12114 {
12115 struct tg3 *tp = netdev_priv(dev);
12116 int err = 0;
12117
12118 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12119 tg3_warn_mgmt_link_flap(tp);
12120
12121 if (tg3_flag(tp, USE_PHYLIB)) {
12122 u32 newadv;
12123 struct phy_device *phydev;
12124
12125 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12126
12127 if (!(phydev->supported & SUPPORTED_Pause) ||
12128 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12129 (epause->rx_pause != epause->tx_pause)))
12130 return -EINVAL;
12131
12132 tp->link_config.flowctrl = 0;
12133 if (epause->rx_pause) {
12134 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12135
12136 if (epause->tx_pause) {
12137 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12138 newadv = ADVERTISED_Pause;
12139 } else
12140 newadv = ADVERTISED_Pause |
12141 ADVERTISED_Asym_Pause;
12142 } else if (epause->tx_pause) {
12143 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12144 newadv = ADVERTISED_Asym_Pause;
12145 } else
12146 newadv = 0;
12147
12148 if (epause->autoneg)
12149 tg3_flag_set(tp, PAUSE_AUTONEG);
12150 else
12151 tg3_flag_clear(tp, PAUSE_AUTONEG);
12152
12153 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12154 u32 oldadv = phydev->advertising &
12155 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12156 if (oldadv != newadv) {
12157 phydev->advertising &=
12158 ~(ADVERTISED_Pause |
12159 ADVERTISED_Asym_Pause);
12160 phydev->advertising |= newadv;
12161 if (phydev->autoneg) {
12162 /*
12163 * Always renegotiate the link to
12164 * inform our link partner of our
12165 * flow control settings, even if the
12166 * flow control is forced. Let
12167 * tg3_adjust_link() do the final
12168 * flow control setup.
12169 */
12170 return phy_start_aneg(phydev);
12171 }
12172 }
12173
12174 if (!epause->autoneg)
12175 tg3_setup_flow_control(tp, 0, 0);
12176 } else {
12177 tp->link_config.advertising &=
12178 ~(ADVERTISED_Pause |
12179 ADVERTISED_Asym_Pause);
12180 tp->link_config.advertising |= newadv;
12181 }
12182 } else {
12183 int irq_sync = 0;
12184
12185 if (netif_running(dev)) {
12186 tg3_netif_stop(tp);
12187 irq_sync = 1;
12188 }
12189
12190 tg3_full_lock(tp, irq_sync);
12191
12192 if (epause->autoneg)
12193 tg3_flag_set(tp, PAUSE_AUTONEG);
12194 else
12195 tg3_flag_clear(tp, PAUSE_AUTONEG);
12196 if (epause->rx_pause)
12197 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12198 else
12199 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12200 if (epause->tx_pause)
12201 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12202 else
12203 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12204
12205 if (netif_running(dev)) {
12206 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12207 err = tg3_restart_hw(tp, false);
12208 if (!err)
12209 tg3_netif_start(tp);
12210 }
12211
12212 tg3_full_unlock(tp);
12213 }
12214
12215 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12216
12217 return err;
12218 }
12219
12220 static int tg3_get_sset_count(struct net_device *dev, int sset)
12221 {
12222 switch (sset) {
12223 case ETH_SS_TEST:
12224 return TG3_NUM_TEST;
12225 case ETH_SS_STATS:
12226 return TG3_NUM_STATS;
12227 default:
12228 return -EOPNOTSUPP;
12229 }
12230 }
12231
12232 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12233 u32 *rules __always_unused)
12234 {
12235 struct tg3 *tp = netdev_priv(dev);
12236
12237 if (!tg3_flag(tp, SUPPORT_MSIX))
12238 return -EOPNOTSUPP;
12239
12240 switch (info->cmd) {
12241 case ETHTOOL_GRXRINGS:
12242 if (netif_running(tp->dev))
12243 info->data = tp->rxq_cnt;
12244 else {
12245 info->data = num_online_cpus();
12246 if (info->data > TG3_RSS_MAX_NUM_QS)
12247 info->data = TG3_RSS_MAX_NUM_QS;
12248 }
12249
12250 /* The first interrupt vector only
12251 * handles link interrupts.
12252 */
12253 info->data -= 1;
12254 return 0;
12255
12256 default:
12257 return -EOPNOTSUPP;
12258 }
12259 }
12260
12261 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12262 {
12263 u32 size = 0;
12264 struct tg3 *tp = netdev_priv(dev);
12265
12266 if (tg3_flag(tp, SUPPORT_MSIX))
12267 size = TG3_RSS_INDIR_TBL_SIZE;
12268
12269 return size;
12270 }
12271
12272 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12273 {
12274 struct tg3 *tp = netdev_priv(dev);
12275 int i;
12276
12277 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12278 indir[i] = tp->rss_ind_tbl[i];
12279
12280 return 0;
12281 }
12282
12283 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12284 {
12285 struct tg3 *tp = netdev_priv(dev);
12286 size_t i;
12287
12288 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12289 tp->rss_ind_tbl[i] = indir[i];
12290
12291 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12292 return 0;
12293
12294 /* It is legal to write the indirection
12295 * table while the device is running.
12296 */
12297 tg3_full_lock(tp, 0);
12298 tg3_rss_write_indir_tbl(tp);
12299 tg3_full_unlock(tp);
12300
12301 return 0;
12302 }
12303
12304 static void tg3_get_channels(struct net_device *dev,
12305 struct ethtool_channels *channel)
12306 {
12307 struct tg3 *tp = netdev_priv(dev);
12308 u32 deflt_qs = netif_get_num_default_rss_queues();
12309
12310 channel->max_rx = tp->rxq_max;
12311 channel->max_tx = tp->txq_max;
12312
12313 if (netif_running(dev)) {
12314 channel->rx_count = tp->rxq_cnt;
12315 channel->tx_count = tp->txq_cnt;
12316 } else {
12317 if (tp->rxq_req)
12318 channel->rx_count = tp->rxq_req;
12319 else
12320 channel->rx_count = min(deflt_qs, tp->rxq_max);
12321
12322 if (tp->txq_req)
12323 channel->tx_count = tp->txq_req;
12324 else
12325 channel->tx_count = min(deflt_qs, tp->txq_max);
12326 }
12327 }
12328
12329 static int tg3_set_channels(struct net_device *dev,
12330 struct ethtool_channels *channel)
12331 {
12332 struct tg3 *tp = netdev_priv(dev);
12333
12334 if (!tg3_flag(tp, SUPPORT_MSIX))
12335 return -EOPNOTSUPP;
12336
12337 if (channel->rx_count > tp->rxq_max ||
12338 channel->tx_count > tp->txq_max)
12339 return -EINVAL;
12340
12341 tp->rxq_req = channel->rx_count;
12342 tp->txq_req = channel->tx_count;
12343
12344 if (!netif_running(dev))
12345 return 0;
12346
12347 tg3_stop(tp);
12348
12349 tg3_carrier_off(tp);
12350
12351 tg3_start(tp, true, false, false);
12352
12353 return 0;
12354 }
12355
12356 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12357 {
12358 switch (stringset) {
12359 case ETH_SS_STATS:
12360 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12361 break;
12362 case ETH_SS_TEST:
12363 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12364 break;
12365 default:
12366 		WARN_ON(1);	/* TODO: convert this to a WARN() with a message */
12367 break;
12368 }
12369 }
12370
12371 static int tg3_set_phys_id(struct net_device *dev,
12372 enum ethtool_phys_id_state state)
12373 {
12374 struct tg3 *tp = netdev_priv(dev);
12375
12376 if (!netif_running(tp->dev))
12377 return -EAGAIN;
12378
12379 switch (state) {
12380 case ETHTOOL_ID_ACTIVE:
12381 return 1; /* cycle on/off once per second */
12382
12383 case ETHTOOL_ID_ON:
12384 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12385 LED_CTRL_1000MBPS_ON |
12386 LED_CTRL_100MBPS_ON |
12387 LED_CTRL_10MBPS_ON |
12388 LED_CTRL_TRAFFIC_OVERRIDE |
12389 LED_CTRL_TRAFFIC_BLINK |
12390 LED_CTRL_TRAFFIC_LED);
12391 break;
12392
12393 case ETHTOOL_ID_OFF:
12394 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12395 LED_CTRL_TRAFFIC_OVERRIDE);
12396 break;
12397
12398 case ETHTOOL_ID_INACTIVE:
12399 tw32(MAC_LED_CTRL, tp->led_ctrl);
12400 break;
12401 }
12402
12403 return 0;
12404 }
12405
12406 static void tg3_get_ethtool_stats(struct net_device *dev,
12407 struct ethtool_stats *estats, u64 *tmp_stats)
12408 {
12409 struct tg3 *tp = netdev_priv(dev);
12410
12411 if (tp->hw_stats)
12412 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12413 else
12414 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12415 }
12416
12417 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12418 {
12419 int i;
12420 __be32 *buf;
12421 u32 offset = 0, len = 0;
12422 u32 magic, val;
12423
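	/* Locate the VPD data: on devices with EEPROM-format NVRAM,
	 * search the NVM directory for an extended-VPD entry and fall
	 * back to the fixed legacy offset if none is found; otherwise
	 * the block is fetched below through the PCI VPD capability.
	 */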
12424 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12425 return NULL;
12426
12427 if (magic == TG3_EEPROM_MAGIC) {
12428 for (offset = TG3_NVM_DIR_START;
12429 offset < TG3_NVM_DIR_END;
12430 offset += TG3_NVM_DIRENT_SIZE) {
12431 if (tg3_nvram_read(tp, offset, &val))
12432 return NULL;
12433
12434 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12435 TG3_NVM_DIRTYPE_EXTVPD)
12436 break;
12437 }
12438
12439 if (offset != TG3_NVM_DIR_END) {
12440 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12441 if (tg3_nvram_read(tp, offset + 4, &offset))
12442 return NULL;
12443
12444 offset = tg3_nvram_logical_addr(tp, offset);
12445 }
12446 }
12447
12448 if (!offset || !len) {
12449 offset = TG3_NVM_VPD_OFF;
12450 len = TG3_NVM_VPD_LEN;
12451 }
12452
12453 buf = kmalloc(len, GFP_KERNEL);
12454 if (buf == NULL)
12455 return NULL;
12456
12457 if (magic == TG3_EEPROM_MAGIC) {
12458 for (i = 0; i < len; i += 4) {
12459 /* The data is in little-endian format in NVRAM.
12460 * Use the big-endian read routines to preserve
12461 * the byte order as it exists in NVRAM.
12462 */
12463 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12464 goto error;
12465 }
12466 } else {
12467 u8 *ptr;
12468 ssize_t cnt;
12469 unsigned int pos = 0;
12470
12471 ptr = (u8 *)&buf[0];
12472 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12473 cnt = pci_read_vpd(tp->pdev, pos,
12474 len - pos, ptr);
12475 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12476 cnt = 0;
12477 else if (cnt < 0)
12478 goto error;
12479 }
12480 if (pos != len)
12481 goto error;
12482 }
12483
12484 *vpdlen = len;
12485
12486 return buf;
12487
12488 error:
12489 kfree(buf);
12490 return NULL;
12491 }
12492
12493 #define NVRAM_TEST_SIZE 0x100
12494 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12495 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12496 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12497 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12498 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12499 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12500 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12501 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12502
12503 static int tg3_test_nvram(struct tg3 *tp)
12504 {
12505 u32 csum, magic, len;
12506 __be32 *buf;
12507 int i, j, k, err = 0, size;
12508
12509 if (tg3_flag(tp, NO_NVRAM))
12510 return 0;
12511
12512 if (tg3_nvram_read(tp, 0, &magic) != 0)
12513 return -EIO;
12514
12515 if (magic == TG3_EEPROM_MAGIC)
12516 size = NVRAM_TEST_SIZE;
12517 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12518 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12519 TG3_EEPROM_SB_FORMAT_1) {
12520 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12521 case TG3_EEPROM_SB_REVISION_0:
12522 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12523 break;
12524 case TG3_EEPROM_SB_REVISION_2:
12525 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12526 break;
12527 case TG3_EEPROM_SB_REVISION_3:
12528 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12529 break;
12530 case TG3_EEPROM_SB_REVISION_4:
12531 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12532 break;
12533 case TG3_EEPROM_SB_REVISION_5:
12534 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12535 break;
12536 case TG3_EEPROM_SB_REVISION_6:
12537 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12538 break;
12539 default:
12540 return -EIO;
12541 }
12542 } else
12543 return 0;
12544 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12545 size = NVRAM_SELFBOOT_HW_SIZE;
12546 else
12547 return -EIO;
12548
12549 buf = kmalloc(size, GFP_KERNEL);
12550 if (buf == NULL)
12551 return -ENOMEM;
12552
12553 err = -EIO;
12554 for (i = 0, j = 0; i < size; i += 4, j++) {
12555 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12556 if (err)
12557 break;
12558 }
12559 if (i < size)
12560 goto out;
12561
12562 /* Selfboot format */
12563 magic = be32_to_cpu(buf[0]);
12564 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12565 TG3_EEPROM_MAGIC_FW) {
12566 u8 *buf8 = (u8 *) buf, csum8 = 0;
12567
12568 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12569 TG3_EEPROM_SB_REVISION_2) {
12570 /* For rev 2, the csum doesn't include the MBA. */
12571 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12572 csum8 += buf8[i];
12573 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12574 csum8 += buf8[i];
12575 } else {
12576 for (i = 0; i < size; i++)
12577 csum8 += buf8[i];
12578 }
12579
12580 if (csum8 == 0) {
12581 err = 0;
12582 goto out;
12583 }
12584
12585 err = -EIO;
12586 goto out;
12587 }
12588
12589 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12590 TG3_EEPROM_MAGIC_HW) {
12591 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12592 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12593 u8 *buf8 = (u8 *) buf;
12594
12595 /* Separate the parity bits and the data bytes. */
12596 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12597 if ((i == 0) || (i == 8)) {
12598 int l;
12599 u8 msk;
12600
12601 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12602 parity[k++] = buf8[i] & msk;
12603 i++;
12604 } else if (i == 16) {
12605 int l;
12606 u8 msk;
12607
12608 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12609 parity[k++] = buf8[i] & msk;
12610 i++;
12611
12612 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12613 parity[k++] = buf8[i] & msk;
12614 i++;
12615 }
12616 data[j++] = buf8[i];
12617 }
12618
12619 err = -EIO;
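		/* Each data byte together with its parity bit must have
		 * odd parity: an even-weight byte needs its parity bit
		 * set, an odd-weight byte needs it clear.
		 */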
12620 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12621 u8 hw8 = hweight8(data[i]);
12622
12623 if ((hw8 & 0x1) && parity[i])
12624 goto out;
12625 else if (!(hw8 & 0x1) && !parity[i])
12626 goto out;
12627 }
12628 err = 0;
12629 goto out;
12630 }
12631
12632 err = -EIO;
12633
12634 /* Bootstrap checksum at offset 0x10 */
12635 csum = calc_crc((unsigned char *) buf, 0x10);
12636 if (csum != le32_to_cpu(buf[0x10/4]))
12637 goto out;
12638
12639 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12640 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12641 if (csum != le32_to_cpu(buf[0xfc/4]))
12642 goto out;
12643
12644 kfree(buf);
12645
12646 buf = tg3_vpd_readblock(tp, &len);
12647 if (!buf)
12648 return -ENOMEM;
12649
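	/* Verify the VPD checksum: the bytes from the start of the
	 * buffer up to and including the RV keyword's checksum byte
	 * must sum to zero modulo 256.
	 */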
12650 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12651 if (i > 0) {
12652 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12653 if (j < 0)
12654 goto out;
12655
12656 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12657 goto out;
12658
12659 i += PCI_VPD_LRDT_TAG_SIZE;
12660 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12661 PCI_VPD_RO_KEYWORD_CHKSUM);
12662 if (j > 0) {
12663 u8 csum8 = 0;
12664
12665 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12666
12667 for (i = 0; i <= j; i++)
12668 csum8 += ((u8 *)buf)[i];
12669
12670 if (csum8)
12671 goto out;
12672 }
12673 }
12674
12675 err = 0;
12676
12677 out:
12678 kfree(buf);
12679 return err;
12680 }
12681
12682 #define TG3_SERDES_TIMEOUT_SEC 2
12683 #define TG3_COPPER_TIMEOUT_SEC 6
12684
12685 static int tg3_test_link(struct tg3 *tp)
12686 {
12687 int i, max;
12688
12689 if (!netif_running(tp->dev))
12690 return -ENODEV;
12691
12692 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12693 max = TG3_SERDES_TIMEOUT_SEC;
12694 else
12695 max = TG3_COPPER_TIMEOUT_SEC;
12696
12697 for (i = 0; i < max; i++) {
12698 if (tp->link_up)
12699 return 0;
12700
12701 if (msleep_interruptible(1000))
12702 break;
12703 }
12704
12705 return -EIO;
12706 }
12707
12708 /* Only test the commonly used registers */
12709 static int tg3_test_registers(struct tg3 *tp)
12710 {
12711 int i, is_5705, is_5750;
12712 u32 offset, read_mask, write_mask, val, save_val, read_val;
12713 static struct {
12714 u16 offset;
12715 u16 flags;
12716 #define TG3_FL_5705 0x1
12717 #define TG3_FL_NOT_5705 0x2
12718 #define TG3_FL_NOT_5788 0x4
12719 #define TG3_FL_NOT_5750 0x8
12720 u32 read_mask;
12721 u32 write_mask;
12722 } reg_tbl[] = {
12723 /* MAC Control Registers */
12724 { MAC_MODE, TG3_FL_NOT_5705,
12725 0x00000000, 0x00ef6f8c },
12726 { MAC_MODE, TG3_FL_5705,
12727 0x00000000, 0x01ef6b8c },
12728 { MAC_STATUS, TG3_FL_NOT_5705,
12729 0x03800107, 0x00000000 },
12730 { MAC_STATUS, TG3_FL_5705,
12731 0x03800100, 0x00000000 },
12732 { MAC_ADDR_0_HIGH, 0x0000,
12733 0x00000000, 0x0000ffff },
12734 { MAC_ADDR_0_LOW, 0x0000,
12735 0x00000000, 0xffffffff },
12736 { MAC_RX_MTU_SIZE, 0x0000,
12737 0x00000000, 0x0000ffff },
12738 { MAC_TX_MODE, 0x0000,
12739 0x00000000, 0x00000070 },
12740 { MAC_TX_LENGTHS, 0x0000,
12741 0x00000000, 0x00003fff },
12742 { MAC_RX_MODE, TG3_FL_NOT_5705,
12743 0x00000000, 0x000007fc },
12744 { MAC_RX_MODE, TG3_FL_5705,
12745 0x00000000, 0x000007dc },
12746 { MAC_HASH_REG_0, 0x0000,
12747 0x00000000, 0xffffffff },
12748 { MAC_HASH_REG_1, 0x0000,
12749 0x00000000, 0xffffffff },
12750 { MAC_HASH_REG_2, 0x0000,
12751 0x00000000, 0xffffffff },
12752 { MAC_HASH_REG_3, 0x0000,
12753 0x00000000, 0xffffffff },
12754
12755 /* Receive Data and Receive BD Initiator Control Registers. */
12756 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12757 0x00000000, 0xffffffff },
12758 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12759 0x00000000, 0xffffffff },
12760 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12761 0x00000000, 0x00000003 },
12762 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12763 0x00000000, 0xffffffff },
12764 { RCVDBDI_STD_BD+0, 0x0000,
12765 0x00000000, 0xffffffff },
12766 { RCVDBDI_STD_BD+4, 0x0000,
12767 0x00000000, 0xffffffff },
12768 { RCVDBDI_STD_BD+8, 0x0000,
12769 0x00000000, 0xffff0002 },
12770 { RCVDBDI_STD_BD+0xc, 0x0000,
12771 0x00000000, 0xffffffff },
12772
12773 /* Receive BD Initiator Control Registers. */
12774 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12775 0x00000000, 0xffffffff },
12776 { RCVBDI_STD_THRESH, TG3_FL_5705,
12777 0x00000000, 0x000003ff },
12778 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12779 0x00000000, 0xffffffff },
12780
12781 /* Host Coalescing Control Registers. */
12782 { HOSTCC_MODE, TG3_FL_NOT_5705,
12783 0x00000000, 0x00000004 },
12784 { HOSTCC_MODE, TG3_FL_5705,
12785 0x00000000, 0x000000f6 },
12786 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12787 0x00000000, 0xffffffff },
12788 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12789 0x00000000, 0x000003ff },
12790 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12791 0x00000000, 0xffffffff },
12792 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12793 0x00000000, 0x000003ff },
12794 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12795 0x00000000, 0xffffffff },
12796 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12797 0x00000000, 0x000000ff },
12798 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12799 0x00000000, 0xffffffff },
12800 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12801 0x00000000, 0x000000ff },
12802 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12803 0x00000000, 0xffffffff },
12804 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12805 0x00000000, 0xffffffff },
12806 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12807 0x00000000, 0xffffffff },
12808 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12809 0x00000000, 0x000000ff },
12810 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12811 0x00000000, 0xffffffff },
12812 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12813 0x00000000, 0x000000ff },
12814 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12815 0x00000000, 0xffffffff },
12816 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12817 0x00000000, 0xffffffff },
12818 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12819 0x00000000, 0xffffffff },
12820 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12821 0x00000000, 0xffffffff },
12822 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12823 0x00000000, 0xffffffff },
12824 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12825 0xffffffff, 0x00000000 },
12826 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12827 0xffffffff, 0x00000000 },
12828
12829 /* Buffer Manager Control Registers. */
12830 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12831 0x00000000, 0x007fff80 },
12832 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12833 0x00000000, 0x007fffff },
12834 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12835 0x00000000, 0x0000003f },
12836 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12837 0x00000000, 0x000001ff },
12838 { BUFMGR_MB_HIGH_WATER, 0x0000,
12839 0x00000000, 0x000001ff },
12840 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12841 0xffffffff, 0x00000000 },
12842 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12843 0xffffffff, 0x00000000 },
12844
12845 /* Mailbox Registers */
12846 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12847 0x00000000, 0x000001ff },
12848 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12849 0x00000000, 0x000001ff },
12850 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12851 0x00000000, 0x000007ff },
12852 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12853 0x00000000, 0x000001ff },
12854
12855 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12856 };
12857
12858 is_5705 = is_5750 = 0;
12859 if (tg3_flag(tp, 5705_PLUS)) {
12860 is_5705 = 1;
12861 if (tg3_flag(tp, 5750_PLUS))
12862 is_5750 = 1;
12863 }
12864
12865 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12866 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12867 continue;
12868
12869 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12870 continue;
12871
12872 if (tg3_flag(tp, IS_5788) &&
12873 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12874 continue;
12875
12876 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12877 continue;
12878
12879 offset = (u32) reg_tbl[i].offset;
12880 read_mask = reg_tbl[i].read_mask;
12881 write_mask = reg_tbl[i].write_mask;
12882
12883 /* Save the original register content */
12884 save_val = tr32(offset);
12885
12886 /* Determine the read-only value. */
12887 read_val = save_val & read_mask;
12888
12889 /* Write zero to the register, then make sure the read-only bits
12890 * are not changed and the read/write bits are all zeros.
12891 */
12892 tw32(offset, 0);
12893
12894 val = tr32(offset);
12895
12896 /* Test the read-only and read/write bits. */
12897 if (((val & read_mask) != read_val) || (val & write_mask))
12898 goto out;
12899
12900 /* Write ones to all the bits defined by RdMask and WrMask, then
12901 * make sure the read-only bits are not changed and the
12902 * read/write bits are all ones.
12903 */
12904 tw32(offset, read_mask | write_mask);
12905
12906 val = tr32(offset);
12907
12908 /* Test the read-only bits. */
12909 if ((val & read_mask) != read_val)
12910 goto out;
12911
12912 /* Test the read/write bits. */
12913 if ((val & write_mask) != write_mask)
12914 goto out;
12915
12916 tw32(offset, save_val);
12917 }
12918
12919 return 0;
12920
12921 out:
12922 if (netif_msg_hw(tp))
12923 netdev_err(tp->dev,
12924 "Register test failed at offset %x\n", offset);
12925 tw32(offset, save_val);
12926 return -EIO;
12927 }
12928
12929 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12930 {
12931 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12932 int i;
12933 u32 j;
12934
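	/* Write each test pattern across the whole region and read
	 * every word back to verify that it sticks.
	 */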
12935 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12936 for (j = 0; j < len; j += 4) {
12937 u32 val;
12938
12939 tg3_write_mem(tp, offset + j, test_pattern[i]);
12940 tg3_read_mem(tp, offset + j, &val);
12941 if (val != test_pattern[i])
12942 return -EIO;
12943 }
12944 }
12945 return 0;
12946 }
12947
12948 static int tg3_test_memory(struct tg3 *tp)
12949 {
12950 static struct mem_entry {
12951 u32 offset;
12952 u32 len;
12953 } mem_tbl_570x[] = {
12954 { 0x00000000, 0x00b50},
12955 { 0x00002000, 0x1c000},
12956 { 0xffffffff, 0x00000}
12957 }, mem_tbl_5705[] = {
12958 { 0x00000100, 0x0000c},
12959 { 0x00000200, 0x00008},
12960 { 0x00004000, 0x00800},
12961 { 0x00006000, 0x01000},
12962 { 0x00008000, 0x02000},
12963 { 0x00010000, 0x0e000},
12964 { 0xffffffff, 0x00000}
12965 }, mem_tbl_5755[] = {
12966 { 0x00000200, 0x00008},
12967 { 0x00004000, 0x00800},
12968 { 0x00006000, 0x00800},
12969 { 0x00008000, 0x02000},
12970 { 0x00010000, 0x0c000},
12971 { 0xffffffff, 0x00000}
12972 }, mem_tbl_5906[] = {
12973 { 0x00000200, 0x00008},
12974 { 0x00004000, 0x00400},
12975 { 0x00006000, 0x00400},
12976 { 0x00008000, 0x01000},
12977 { 0x00010000, 0x01000},
12978 { 0xffffffff, 0x00000}
12979 }, mem_tbl_5717[] = {
12980 { 0x00000200, 0x00008},
12981 { 0x00010000, 0x0a000},
12982 { 0x00020000, 0x13c00},
12983 { 0xffffffff, 0x00000}
12984 }, mem_tbl_57765[] = {
12985 { 0x00000200, 0x00008},
12986 { 0x00004000, 0x00800},
12987 { 0x00006000, 0x09800},
12988 { 0x00010000, 0x0a000},
12989 { 0xffffffff, 0x00000}
12990 };
12991 struct mem_entry *mem_tbl;
12992 int err = 0;
12993 int i;
12994
12995 if (tg3_flag(tp, 5717_PLUS))
12996 mem_tbl = mem_tbl_5717;
12997 else if (tg3_flag(tp, 57765_CLASS) ||
12998 tg3_asic_rev(tp) == ASIC_REV_5762)
12999 mem_tbl = mem_tbl_57765;
13000 else if (tg3_flag(tp, 5755_PLUS))
13001 mem_tbl = mem_tbl_5755;
13002 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13003 mem_tbl = mem_tbl_5906;
13004 else if (tg3_flag(tp, 5705_PLUS))
13005 mem_tbl = mem_tbl_5705;
13006 else
13007 mem_tbl = mem_tbl_570x;
13008
13009 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13010 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13011 if (err)
13012 break;
13013 }
13014
13015 return err;
13016 }
13017
13018 #define TG3_TSO_MSS 500
13019
13020 #define TG3_TSO_IP_HDR_LEN 20
13021 #define TG3_TSO_TCP_HDR_LEN 20
13022 #define TG3_TSO_TCP_OPT_LEN 12
13023
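/* Canned packet header for the TSO loopback test: an IPv4 ethertype
 * (0x0800), a 20-byte IP header (TTL 64, protocol TCP, 10.0.0.1 ->
 * 10.0.0.2) and a 32-byte TCP header (ACK set) whose 12 bytes of
 * options carry two NOPs plus a timestamp option.  The IP total
 * length is patched in at run time, and the TCP checksum is cleared
 * at run time when the hardware computes it.
 */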
13024 static const u8 tg3_tso_header[] = {
13025 0x08, 0x00,
13026 0x45, 0x00, 0x00, 0x00,
13027 0x00, 0x00, 0x40, 0x00,
13028 0x40, 0x06, 0x00, 0x00,
13029 0x0a, 0x00, 0x00, 0x01,
13030 0x0a, 0x00, 0x00, 0x02,
13031 0x0d, 0x00, 0xe0, 0x00,
13032 0x00, 0x00, 0x01, 0x00,
13033 0x00, 0x00, 0x02, 0x00,
13034 0x80, 0x10, 0x10, 0x00,
13035 0x14, 0x09, 0x00, 0x00,
13036 0x01, 0x01, 0x08, 0x0a,
13037 0x11, 0x11, 0x11, 0x11,
13038 0x11, 0x11, 0x11, 0x11,
13039 };
13040
13041 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13042 {
13043 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13044 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13045 u32 budget;
13046 struct sk_buff *skb;
13047 u8 *tx_data, *rx_data;
13048 dma_addr_t map;
13049 int num_pkts, tx_len, rx_len, i, err;
13050 struct tg3_rx_buffer_desc *desc;
13051 struct tg3_napi *tnapi, *rnapi;
13052 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13053
13054 tnapi = &tp->napi[0];
13055 rnapi = &tp->napi[0];
13056 if (tp->irq_cnt > 1) {
13057 if (tg3_flag(tp, ENABLE_RSS))
13058 rnapi = &tp->napi[1];
13059 if (tg3_flag(tp, ENABLE_TSS))
13060 tnapi = &tp->napi[1];
13061 }
13062 coal_now = tnapi->coal_now | rnapi->coal_now;
13063
13064 err = -EIO;
13065
13066 tx_len = pktsz;
13067 skb = netdev_alloc_skb(tp->dev, tx_len);
13068 if (!skb)
13069 return -ENOMEM;
13070
13071 tx_data = skb_put(skb, tx_len);
13072 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13073 	memset(tx_data + ETH_ALEN, 0x0, 8);
13074
13075 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13076
13077 if (tso_loopback) {
13078 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13079
13080 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13081 TG3_TSO_TCP_OPT_LEN;
13082
13083 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13084 sizeof(tg3_tso_header));
13085 mss = TG3_TSO_MSS;
13086
13087 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13088 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13089
13090 /* Set the total length field in the IP header */
13091 iph->tot_len = htons((u16)(mss + hdr_len));
13092
13093 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13094 TXD_FLAG_CPU_POST_DMA);
13095
13096 if (tg3_flag(tp, HW_TSO_1) ||
13097 tg3_flag(tp, HW_TSO_2) ||
13098 tg3_flag(tp, HW_TSO_3)) {
13099 struct tcphdr *th;
13100 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13101 th = (struct tcphdr *)&tx_data[val];
13102 th->check = 0;
13103 } else
13104 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13105
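		/* Fold the header length into the mss and flag fields
		 * using the encoding each TSO generation expects; this
		 * appears to mirror the encoding used on the regular
		 * transmit path.
		 */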
13106 if (tg3_flag(tp, HW_TSO_3)) {
13107 mss |= (hdr_len & 0xc) << 12;
13108 if (hdr_len & 0x10)
13109 base_flags |= 0x00000010;
13110 base_flags |= (hdr_len & 0x3e0) << 5;
13111 } else if (tg3_flag(tp, HW_TSO_2))
13112 mss |= hdr_len << 9;
13113 else if (tg3_flag(tp, HW_TSO_1) ||
13114 tg3_asic_rev(tp) == ASIC_REV_5705) {
13115 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13116 } else {
13117 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13118 }
13119
13120 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13121 } else {
13122 num_pkts = 1;
13123 data_off = ETH_HLEN;
13124
13125 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13126 tx_len > VLAN_ETH_FRAME_LEN)
13127 base_flags |= TXD_FLAG_JMB_PKT;
13128 }
13129
13130 for (i = data_off; i < tx_len; i++)
13131 tx_data[i] = (u8) (i & 0xff);
13132
13133 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13134 if (pci_dma_mapping_error(tp->pdev, map)) {
13135 dev_kfree_skb(skb);
13136 return -EIO;
13137 }
13138
13139 val = tnapi->tx_prod;
13140 tnapi->tx_buffers[val].skb = skb;
13141 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13142
13143 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13144 rnapi->coal_now);
13145
13146 udelay(10);
13147
13148 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13149
13150 budget = tg3_tx_avail(tnapi);
13151 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13152 base_flags | TXD_FLAG_END, mss, 0)) {
13153 tnapi->tx_buffers[val].skb = NULL;
13154 dev_kfree_skb(skb);
13155 return -EIO;
13156 }
13157
13158 tnapi->tx_prod++;
13159
13160 /* Sync BD data before updating mailbox */
13161 wmb();
13162
13163 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13164 tr32_mailbox(tnapi->prodmbox);
13165
13166 udelay(10);
13167
13168 	/* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
13169 for (i = 0; i < 35; i++) {
13170 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13171 coal_now);
13172
13173 udelay(10);
13174
13175 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13176 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13177 if ((tx_idx == tnapi->tx_prod) &&
13178 (rx_idx == (rx_start_idx + num_pkts)))
13179 break;
13180 }
13181
13182 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13183 dev_kfree_skb(skb);
13184
13185 if (tx_idx != tnapi->tx_prod)
13186 goto out;
13187
13188 if (rx_idx != rx_start_idx + num_pkts)
13189 goto out;
13190
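	/* Walk every descriptor the test produced: check the error bits
	 * (tolerating only the harmless odd-nibble MII indication),
	 * validate the length and the ring the buffer came from, then
	 * byte-compare the payload against the pattern sent above.
	 */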
13191 val = data_off;
13192 while (rx_idx != rx_start_idx) {
13193 desc = &rnapi->rx_rcb[rx_start_idx++];
13194 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13195 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13196
13197 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13198 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13199 goto out;
13200
13201 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13202 - ETH_FCS_LEN;
13203
13204 if (!tso_loopback) {
13205 if (rx_len != tx_len)
13206 goto out;
13207
13208 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13209 if (opaque_key != RXD_OPAQUE_RING_STD)
13210 goto out;
13211 } else {
13212 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13213 goto out;
13214 }
13215 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13216 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13217 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13218 goto out;
13219 }
13220
13221 if (opaque_key == RXD_OPAQUE_RING_STD) {
13222 rx_data = tpr->rx_std_buffers[desc_idx].data;
13223 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13224 mapping);
13225 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13226 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13227 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13228 mapping);
13229 } else
13230 goto out;
13231
13232 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13233 PCI_DMA_FROMDEVICE);
13234
13235 rx_data += TG3_RX_OFFSET(tp);
13236 for (i = data_off; i < rx_len; i++, val++) {
13237 if (*(rx_data + i) != (u8) (val & 0xff))
13238 goto out;
13239 }
13240 }
13241
13242 err = 0;
13243
13244 /* tg3_free_rings will unmap and free the rx_data */
13245 out:
13246 return err;
13247 }
13248
13249 #define TG3_STD_LOOPBACK_FAILED 1
13250 #define TG3_JMB_LOOPBACK_FAILED 2
13251 #define TG3_TSO_LOOPBACK_FAILED 4
13252 #define TG3_LOOPBACK_FAILED \
13253 (TG3_STD_LOOPBACK_FAILED | \
13254 TG3_JMB_LOOPBACK_FAILED | \
13255 TG3_TSO_LOOPBACK_FAILED)
13256
13257 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13258 {
13259 int err = -EIO;
13260 u32 eee_cap;
13261 u32 jmb_pkt_sz = 9000;
13262
13263 if (tp->dma_limit)
13264 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13265
13266 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13267 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13268
13269 if (!netif_running(tp->dev)) {
13270 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13271 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13272 if (do_extlpbk)
13273 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13274 goto done;
13275 }
13276
13277 err = tg3_reset_hw(tp, true);
13278 if (err) {
13279 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13280 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13281 if (do_extlpbk)
13282 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13283 goto done;
13284 }
13285
13286 if (tg3_flag(tp, ENABLE_RSS)) {
13287 int i;
13288
13289 /* Reroute all rx packets to the 1st queue */
13290 for (i = MAC_RSS_INDIR_TBL_0;
13291 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13292 tw32(i, 0x0);
13293 }
13294
13295 	/* HW erratum - MAC loopback fails in some cases on 5780.
13296 	 * Normal traffic and PHY loopback are not affected by this
13297 	 * erratum.  Also, the MAC loopback test is deprecated for
13298 	 * all newer ASIC revisions.
13299 	 */
13300 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13301 !tg3_flag(tp, CPMU_PRESENT)) {
13302 tg3_mac_loopback(tp, true);
13303
13304 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13305 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13306
13307 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13308 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13309 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13310
13311 tg3_mac_loopback(tp, false);
13312 }
13313
13314 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13315 !tg3_flag(tp, USE_PHYLIB)) {
13316 int i;
13317
13318 tg3_phy_lpbk_set(tp, 0, false);
13319
13320 /* Wait for link */
13321 for (i = 0; i < 100; i++) {
13322 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13323 break;
13324 mdelay(1);
13325 }
13326
13327 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13328 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13329 if (tg3_flag(tp, TSO_CAPABLE) &&
13330 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13331 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13332 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13333 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13334 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13335
13336 if (do_extlpbk) {
13337 tg3_phy_lpbk_set(tp, 0, true);
13338
13339 /* All link indications report up, but the hardware
13340 * isn't really ready for about 20 msec. Double it
13341 * to be sure.
13342 */
13343 mdelay(40);
13344
13345 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13346 data[TG3_EXT_LOOPB_TEST] |=
13347 TG3_STD_LOOPBACK_FAILED;
13348 if (tg3_flag(tp, TSO_CAPABLE) &&
13349 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13350 data[TG3_EXT_LOOPB_TEST] |=
13351 TG3_TSO_LOOPBACK_FAILED;
13352 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13353 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13354 data[TG3_EXT_LOOPB_TEST] |=
13355 TG3_JMB_LOOPBACK_FAILED;
13356 }
13357
13358 /* Re-enable gphy autopowerdown. */
13359 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13360 tg3_phy_toggle_apd(tp, true);
13361 }
13362
13363 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13364 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13365
13366 done:
13367 tp->phy_flags |= eee_cap;
13368
13369 return err;
13370 }
13371
13372 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13373 u64 *data)
13374 {
13375 struct tg3 *tp = netdev_priv(dev);
13376 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13377
13378 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13379 tg3_power_up(tp)) {
13380 etest->flags |= ETH_TEST_FL_FAILED;
13381 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13382 return;
13383 }
13384
13385 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13386
13387 if (tg3_test_nvram(tp) != 0) {
13388 etest->flags |= ETH_TEST_FL_FAILED;
13389 data[TG3_NVRAM_TEST] = 1;
13390 }
13391 if (!doextlpbk && tg3_test_link(tp)) {
13392 etest->flags |= ETH_TEST_FL_FAILED;
13393 data[TG3_LINK_TEST] = 1;
13394 }
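	/* Offline tests need exclusive use of the hardware: stop the
	 * PHY and the data path, halt the chip and its on-die CPUs,
	 * run the register/memory/loopback tests, then run the
	 * interrupt test and restart the hardware if the interface
	 * was up.
	 */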
13395 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13396 int err, err2 = 0, irq_sync = 0;
13397
13398 if (netif_running(dev)) {
13399 tg3_phy_stop(tp);
13400 tg3_netif_stop(tp);
13401 irq_sync = 1;
13402 }
13403
13404 tg3_full_lock(tp, irq_sync);
13405 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13406 err = tg3_nvram_lock(tp);
13407 tg3_halt_cpu(tp, RX_CPU_BASE);
13408 if (!tg3_flag(tp, 5705_PLUS))
13409 tg3_halt_cpu(tp, TX_CPU_BASE);
13410 if (!err)
13411 tg3_nvram_unlock(tp);
13412
13413 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13414 tg3_phy_reset(tp);
13415
13416 if (tg3_test_registers(tp) != 0) {
13417 etest->flags |= ETH_TEST_FL_FAILED;
13418 data[TG3_REGISTER_TEST] = 1;
13419 }
13420
13421 if (tg3_test_memory(tp) != 0) {
13422 etest->flags |= ETH_TEST_FL_FAILED;
13423 data[TG3_MEMORY_TEST] = 1;
13424 }
13425
13426 if (doextlpbk)
13427 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13428
13429 if (tg3_test_loopback(tp, data, doextlpbk))
13430 etest->flags |= ETH_TEST_FL_FAILED;
13431
13432 tg3_full_unlock(tp);
13433
13434 if (tg3_test_interrupt(tp) != 0) {
13435 etest->flags |= ETH_TEST_FL_FAILED;
13436 data[TG3_INTERRUPT_TEST] = 1;
13437 }
13438
13439 tg3_full_lock(tp, 0);
13440
13441 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13442 if (netif_running(dev)) {
13443 tg3_flag_set(tp, INIT_COMPLETE);
13444 err2 = tg3_restart_hw(tp, true);
13445 if (!err2)
13446 tg3_netif_start(tp);
13447 }
13448
13449 tg3_full_unlock(tp);
13450
13451 if (irq_sync && !err2)
13452 tg3_phy_start(tp);
13453 }
13454 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13455 tg3_power_down(tp);
13456
13457 }
13458
13459 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13460 struct ifreq *ifr, int cmd)
13461 {
13462 struct tg3 *tp = netdev_priv(dev);
13463 struct hwtstamp_config stmpconf;
13464
13465 if (!tg3_flag(tp, PTP_CAPABLE))
13466 return -EINVAL;
13467
13468 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13469 return -EFAULT;
13470
13471 if (stmpconf.flags)
13472 return -EINVAL;
13473
13474 switch (stmpconf.tx_type) {
13475 case HWTSTAMP_TX_ON:
13476 tg3_flag_set(tp, TX_TSTAMP_EN);
13477 break;
13478 case HWTSTAMP_TX_OFF:
13479 tg3_flag_clear(tp, TX_TSTAMP_EN);
13480 break;
13481 default:
13482 return -ERANGE;
13483 }
13484
13485 switch (stmpconf.rx_filter) {
13486 case HWTSTAMP_FILTER_NONE:
13487 tp->rxptpctl = 0;
13488 break;
13489 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13490 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13491 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13492 break;
13493 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13494 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13495 TG3_RX_PTP_CTL_SYNC_EVNT;
13496 break;
13497 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13498 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13499 TG3_RX_PTP_CTL_DELAY_REQ;
13500 break;
13501 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13502 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13503 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13504 break;
13505 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13506 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13507 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13508 break;
13509 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13510 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13511 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13512 break;
13513 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13514 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13515 TG3_RX_PTP_CTL_SYNC_EVNT;
13516 break;
13517 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13518 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13519 TG3_RX_PTP_CTL_SYNC_EVNT;
13520 break;
13521 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13522 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13523 TG3_RX_PTP_CTL_SYNC_EVNT;
13524 break;
13525 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13526 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13527 TG3_RX_PTP_CTL_DELAY_REQ;
13528 break;
13529 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13530 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13531 TG3_RX_PTP_CTL_DELAY_REQ;
13532 break;
13533 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13534 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13535 TG3_RX_PTP_CTL_DELAY_REQ;
13536 break;
13537 default:
13538 return -ERANGE;
13539 }
13540
13541 if (netif_running(dev) && tp->rxptpctl)
13542 tw32(TG3_RX_PTP_CTL,
13543 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13544
13545 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13546 -EFAULT : 0;
13547 }
13548
13549 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13550 {
13551 struct mii_ioctl_data *data = if_mii(ifr);
13552 struct tg3 *tp = netdev_priv(dev);
13553 int err;
13554
13555 if (tg3_flag(tp, USE_PHYLIB)) {
13556 struct phy_device *phydev;
13557 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13558 return -EAGAIN;
13559 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13560 return phy_mii_ioctl(phydev, ifr, cmd);
13561 }
13562
13563 switch (cmd) {
13564 case SIOCGMIIPHY:
13565 data->phy_id = tp->phy_addr;
13566
13567 /* fallthru */
13568 case SIOCGMIIREG: {
13569 u32 mii_regval;
13570
13571 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13572 break; /* We have no PHY */
13573
13574 if (!netif_running(dev))
13575 return -EAGAIN;
13576
13577 spin_lock_bh(&tp->lock);
13578 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13579 data->reg_num & 0x1f, &mii_regval);
13580 spin_unlock_bh(&tp->lock);
13581
13582 data->val_out = mii_regval;
13583
13584 return err;
13585 }
13586
13587 case SIOCSMIIREG:
13588 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13589 break; /* We have no PHY */
13590
13591 if (!netif_running(dev))
13592 return -EAGAIN;
13593
13594 spin_lock_bh(&tp->lock);
13595 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13596 data->reg_num & 0x1f, data->val_in);
13597 spin_unlock_bh(&tp->lock);
13598
13599 return err;
13600
13601 case SIOCSHWTSTAMP:
13602 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13603
13604 default:
13605 /* do nothing */
13606 break;
13607 }
13608 return -EOPNOTSUPP;
13609 }
13610
13611 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13612 {
13613 struct tg3 *tp = netdev_priv(dev);
13614
13615 memcpy(ec, &tp->coal, sizeof(*ec));
13616 return 0;
13617 }
13618
13619 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13620 {
13621 struct tg3 *tp = netdev_priv(dev);
13622 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13623 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13624
13625 if (!tg3_flag(tp, 5705_PLUS)) {
13626 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13627 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13628 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13629 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13630 }
13631
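	/* On 5705-and-newer parts the irq-event and statistics
	 * coalescing limits are left at zero above, so any nonzero
	 * request for those parameters is rejected below.
	 */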
13632 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13633 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13634 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13635 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13636 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13637 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13638 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13639 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13640 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13641 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13642 return -EINVAL;
13643
13644 /* No rx interrupts will be generated if both are zero */
13645 if ((ec->rx_coalesce_usecs == 0) &&
13646 (ec->rx_max_coalesced_frames == 0))
13647 return -EINVAL;
13648
13649 /* No tx interrupts will be generated if both are zero */
13650 if ((ec->tx_coalesce_usecs == 0) &&
13651 (ec->tx_max_coalesced_frames == 0))
13652 return -EINVAL;
13653
13654 /* Only copy relevant parameters, ignore all others. */
13655 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13656 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13657 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13658 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13659 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13660 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13661 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13662 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13663 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13664
13665 if (netif_running(dev)) {
13666 tg3_full_lock(tp, 0);
13667 __tg3_set_coalesce(tp, &tp->coal);
13668 tg3_full_unlock(tp);
13669 }
13670 return 0;
13671 }
13672
13673 static const struct ethtool_ops tg3_ethtool_ops = {
13674 .get_settings = tg3_get_settings,
13675 .set_settings = tg3_set_settings,
13676 .get_drvinfo = tg3_get_drvinfo,
13677 .get_regs_len = tg3_get_regs_len,
13678 .get_regs = tg3_get_regs,
13679 .get_wol = tg3_get_wol,
13680 .set_wol = tg3_set_wol,
13681 .get_msglevel = tg3_get_msglevel,
13682 .set_msglevel = tg3_set_msglevel,
13683 .nway_reset = tg3_nway_reset,
13684 .get_link = ethtool_op_get_link,
13685 .get_eeprom_len = tg3_get_eeprom_len,
13686 .get_eeprom = tg3_get_eeprom,
13687 .set_eeprom = tg3_set_eeprom,
13688 .get_ringparam = tg3_get_ringparam,
13689 .set_ringparam = tg3_set_ringparam,
13690 .get_pauseparam = tg3_get_pauseparam,
13691 .set_pauseparam = tg3_set_pauseparam,
13692 .self_test = tg3_self_test,
13693 .get_strings = tg3_get_strings,
13694 .set_phys_id = tg3_set_phys_id,
13695 .get_ethtool_stats = tg3_get_ethtool_stats,
13696 .get_coalesce = tg3_get_coalesce,
13697 .set_coalesce = tg3_set_coalesce,
13698 .get_sset_count = tg3_get_sset_count,
13699 .get_rxnfc = tg3_get_rxnfc,
13700 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13701 .get_rxfh_indir = tg3_get_rxfh_indir,
13702 .set_rxfh_indir = tg3_set_rxfh_indir,
13703 .get_channels = tg3_get_channels,
13704 .set_channels = tg3_set_channels,
13705 .get_ts_info = tg3_get_ts_info,
13706 };
13707
13708 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13709 struct rtnl_link_stats64 *stats)
13710 {
13711 struct tg3 *tp = netdev_priv(dev);
13712
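	/* If the device has been closed, hw_stats is already gone;
	 * fall back to the last snapshot saved at shutdown.
	 */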
13713 spin_lock_bh(&tp->lock);
13714 if (!tp->hw_stats) {
13715 spin_unlock_bh(&tp->lock);
13716 return &tp->net_stats_prev;
13717 }
13718
13719 tg3_get_nstats(tp, stats);
13720 spin_unlock_bh(&tp->lock);
13721
13722 return stats;
13723 }
13724
13725 static void tg3_set_rx_mode(struct net_device *dev)
13726 {
13727 struct tg3 *tp = netdev_priv(dev);
13728
13729 if (!netif_running(dev))
13730 return;
13731
13732 tg3_full_lock(tp, 0);
13733 __tg3_set_rx_mode(dev);
13734 tg3_full_unlock(tp);
13735 }
13736
13737 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13738 int new_mtu)
13739 {
13740 dev->mtu = new_mtu;
13741
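	/* 5780-class parts cannot use TSO together with jumbo frames,
	 * so TSO capability is toggled along with the MTU; everything
	 * else just switches the jumbo RX ring on or off.
	 */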
13742 if (new_mtu > ETH_DATA_LEN) {
13743 if (tg3_flag(tp, 5780_CLASS)) {
13744 netdev_update_features(dev);
13745 tg3_flag_clear(tp, TSO_CAPABLE);
13746 } else {
13747 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13748 }
13749 } else {
13750 if (tg3_flag(tp, 5780_CLASS)) {
13751 tg3_flag_set(tp, TSO_CAPABLE);
13752 netdev_update_features(dev);
13753 }
13754 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13755 }
13756 }
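/* Usage note (illustrative): this helper runs under tg3_change_mtu(),
 * which is reached via the standard MTU configuration path, e.g.:
 *
 *   ip link set dev eth0 mtu 9000
 *
 * An MTU above ETH_DATA_LEN enables the jumbo RX ring, except on
 * 5780-class parts, where jumbo support is instead traded against TSO.
 */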

13757
13758 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13759 {
13760 struct tg3 *tp = netdev_priv(dev);
13761 int err;
13762 bool reset_phy = false;
13763
13764 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13765 return -EINVAL;
13766
13767 if (!netif_running(dev)) {
13768 /* We'll just catch it later when the
13769 * device is brought up.
13770 */
13771 tg3_set_mtu(dev, tp, new_mtu);
13772 return 0;
13773 }
13774
13775 tg3_phy_stop(tp);
13776
13777 tg3_netif_stop(tp);
13778
13779 tg3_set_mtu(dev, tp, new_mtu);
13780
13781 tg3_full_lock(tp, 1);
13782
13783 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13784
13785 /* Reset the PHY, otherwise the read DMA engine will be left in a
13786 * mode that limits all read requests to 256 bytes.
13787 */
13788 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13789 reset_phy = true;
13790
13791 err = tg3_restart_hw(tp, reset_phy);
13792
13793 if (!err)
13794 tg3_netif_start(tp);
13795
13796 tg3_full_unlock(tp);
13797
13798 if (!err)
13799 tg3_phy_start(tp);
13800
13801 return err;
13802 }
13803
13804 static const struct net_device_ops tg3_netdev_ops = {
13805 .ndo_open = tg3_open,
13806 .ndo_stop = tg3_close,
13807 .ndo_start_xmit = tg3_start_xmit,
13808 .ndo_get_stats64 = tg3_get_stats64,
13809 .ndo_validate_addr = eth_validate_addr,
13810 .ndo_set_rx_mode = tg3_set_rx_mode,
13811 .ndo_set_mac_address = tg3_set_mac_addr,
13812 .ndo_do_ioctl = tg3_ioctl,
13813 .ndo_tx_timeout = tg3_tx_timeout,
13814 .ndo_change_mtu = tg3_change_mtu,
13815 .ndo_fix_features = tg3_fix_features,
13816 .ndo_set_features = tg3_set_features,
13817 #ifdef CONFIG_NET_POLL_CONTROLLER
13818 .ndo_poll_controller = tg3_poll_controller,
13819 #endif
13820 };
13821
13822 static void tg3_get_eeprom_size(struct tg3 *tp)
13823 {
13824 u32 cursize, val, magic;
13825
13826 tp->nvram_size = EEPROM_CHIP_SIZE;
13827
13828 if (tg3_nvram_read(tp, 0, &magic) != 0)
13829 return;
13830
13831 if ((magic != TG3_EEPROM_MAGIC) &&
13832 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13833 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13834 return;
13835
13836 /*
13837 * Size the chip by reading offsets at increasing powers of two.
13838 * When we encounter our validation signature, we know the addressing
13839 * has wrapped around, and thus have our chip size.
13840 */
13841 cursize = 0x10;
13842
13843 while (cursize < tp->nvram_size) {
13844 if (tg3_nvram_read(tp, cursize, &val) != 0)
13845 return;
13846
13847 if (val == magic)
13848 break;
13849
13850 cursize <<= 1;
13851 }
13852
13853 tp->nvram_size = cursize;
13854 }
13855
13856 static void tg3_get_nvram_size(struct tg3 *tp)
13857 {
13858 u32 val;
13859
13860 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13861 return;
13862
13863 /* Selfboot format */
13864 if (val != TG3_EEPROM_MAGIC) {
13865 tg3_get_eeprom_size(tp);
13866 return;
13867 }
13868
13869 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13870 if (val != 0) {
13871 /* This is confusing. We want to operate on the
13872 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13873 * call will read from NVRAM and byteswap the data
13874 * according to the byteswapping settings for all
13875 * other register accesses. This ensures the data we
13876 * want will always reside in the lower 16-bits.
13877 * However, the data in NVRAM is in LE format, which
13878 * means the data from the NVRAM read will always be
13879 * opposite the endianness of the CPU. The 16-bit
13880 * byteswap then brings the data to CPU endianness.
13881 */
13882 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13883 return;
13884 }
13885 }
13886 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13887 }
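/* Worked example (hypothetical values): suppose the word read at 0xf0
 * leaves 0x0002 in its lower 16 bits after the register byteswap.
 * swab16(0x0002) == 0x0200 == 512, so the selfboot image declares a
 * 512 KB part and tp->nvram_size becomes 512 * 1024 bytes.
 */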
13888
13889 static void tg3_get_nvram_info(struct tg3 *tp)
13890 {
13891 u32 nvcfg1;
13892
13893 nvcfg1 = tr32(NVRAM_CFG1);
13894 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13895 tg3_flag_set(tp, FLASH);
13896 } else {
13897 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13898 tw32(NVRAM_CFG1, nvcfg1);
13899 }
13900
13901 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13902 tg3_flag(tp, 5780_CLASS)) {
13903 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13904 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13905 tp->nvram_jedecnum = JEDEC_ATMEL;
13906 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13907 tg3_flag_set(tp, NVRAM_BUFFERED);
13908 break;
13909 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13910 tp->nvram_jedecnum = JEDEC_ATMEL;
13911 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13912 break;
13913 case FLASH_VENDOR_ATMEL_EEPROM:
13914 tp->nvram_jedecnum = JEDEC_ATMEL;
13915 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13916 tg3_flag_set(tp, NVRAM_BUFFERED);
13917 break;
13918 case FLASH_VENDOR_ST:
13919 tp->nvram_jedecnum = JEDEC_ST;
13920 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13921 tg3_flag_set(tp, NVRAM_BUFFERED);
13922 break;
13923 case FLASH_VENDOR_SAIFUN:
13924 tp->nvram_jedecnum = JEDEC_SAIFUN;
13925 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13926 break;
13927 case FLASH_VENDOR_SST_SMALL:
13928 case FLASH_VENDOR_SST_LARGE:
13929 tp->nvram_jedecnum = JEDEC_SST;
13930 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13931 break;
13932 }
13933 } else {
13934 tp->nvram_jedecnum = JEDEC_ATMEL;
13935 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13936 tg3_flag_set(tp, NVRAM_BUFFERED);
13937 }
13938 }
13939
13940 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13941 {
13942 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13943 case FLASH_5752PAGE_SIZE_256:
13944 tp->nvram_pagesize = 256;
13945 break;
13946 case FLASH_5752PAGE_SIZE_512:
13947 tp->nvram_pagesize = 512;
13948 break;
13949 case FLASH_5752PAGE_SIZE_1K:
13950 tp->nvram_pagesize = 1024;
13951 break;
13952 case FLASH_5752PAGE_SIZE_2K:
13953 tp->nvram_pagesize = 2048;
13954 break;
13955 case FLASH_5752PAGE_SIZE_4K:
13956 tp->nvram_pagesize = 4096;
13957 break;
13958 case FLASH_5752PAGE_SIZE_264:
13959 tp->nvram_pagesize = 264;
13960 break;
13961 case FLASH_5752PAGE_SIZE_528:
13962 tp->nvram_pagesize = 528;
13963 break;
13964 }
13965 }
13966
13967 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13968 {
13969 u32 nvcfg1;
13970
13971 nvcfg1 = tr32(NVRAM_CFG1);
13972
13973 /* NVRAM protection for TPM */
13974 if (nvcfg1 & (1 << 27))
13975 tg3_flag_set(tp, PROTECTED_NVRAM);
13976
13977 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13978 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13979 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13980 tp->nvram_jedecnum = JEDEC_ATMEL;
13981 tg3_flag_set(tp, NVRAM_BUFFERED);
13982 break;
13983 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13984 tp->nvram_jedecnum = JEDEC_ATMEL;
13985 tg3_flag_set(tp, NVRAM_BUFFERED);
13986 tg3_flag_set(tp, FLASH);
13987 break;
13988 case FLASH_5752VENDOR_ST_M45PE10:
13989 case FLASH_5752VENDOR_ST_M45PE20:
13990 case FLASH_5752VENDOR_ST_M45PE40:
13991 tp->nvram_jedecnum = JEDEC_ST;
13992 tg3_flag_set(tp, NVRAM_BUFFERED);
13993 tg3_flag_set(tp, FLASH);
13994 break;
13995 }
13996
13997 if (tg3_flag(tp, FLASH)) {
13998 tg3_nvram_get_pagesize(tp, nvcfg1);
13999 } else {
14000 /* For eeprom, set pagesize to maximum eeprom size */
14001 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14002
14003 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14004 tw32(NVRAM_CFG1, nvcfg1);
14005 }
14006 }
14007
14008 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14009 {
14010 u32 nvcfg1, protect = 0;
14011
14012 nvcfg1 = tr32(NVRAM_CFG1);
14013
14014 /* NVRAM protection for TPM */
14015 if (nvcfg1 & (1 << 27)) {
14016 tg3_flag_set(tp, PROTECTED_NVRAM);
14017 protect = 1;
14018 }
14019
14020 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14021 switch (nvcfg1) {
14022 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14023 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14024 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14025 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14026 tp->nvram_jedecnum = JEDEC_ATMEL;
14027 tg3_flag_set(tp, NVRAM_BUFFERED);
14028 tg3_flag_set(tp, FLASH);
14029 tp->nvram_pagesize = 264;
14030 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14031 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14032 tp->nvram_size = (protect ? 0x3e200 :
14033 TG3_NVRAM_SIZE_512KB);
14034 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14035 tp->nvram_size = (protect ? 0x1f200 :
14036 TG3_NVRAM_SIZE_256KB);
14037 else
14038 tp->nvram_size = (protect ? 0x1f200 :
14039 TG3_NVRAM_SIZE_128KB);
14040 break;
14041 case FLASH_5752VENDOR_ST_M45PE10:
14042 case FLASH_5752VENDOR_ST_M45PE20:
14043 case FLASH_5752VENDOR_ST_M45PE40:
14044 tp->nvram_jedecnum = JEDEC_ST;
14045 tg3_flag_set(tp, NVRAM_BUFFERED);
14046 tg3_flag_set(tp, FLASH);
14047 tp->nvram_pagesize = 256;
14048 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14049 tp->nvram_size = (protect ?
14050 TG3_NVRAM_SIZE_64KB :
14051 TG3_NVRAM_SIZE_128KB);
14052 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14053 tp->nvram_size = (protect ?
14054 TG3_NVRAM_SIZE_64KB :
14055 TG3_NVRAM_SIZE_256KB);
14056 else
14057 tp->nvram_size = (protect ?
14058 TG3_NVRAM_SIZE_128KB :
14059 TG3_NVRAM_SIZE_512KB);
14060 break;
14061 }
14062 }
14063
14064 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14065 {
14066 u32 nvcfg1;
14067
14068 nvcfg1 = tr32(NVRAM_CFG1);
14069
14070 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14071 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14072 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14073 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14074 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14075 tp->nvram_jedecnum = JEDEC_ATMEL;
14076 tg3_flag_set(tp, NVRAM_BUFFERED);
14077 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14078
14079 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14080 tw32(NVRAM_CFG1, nvcfg1);
14081 break;
14082 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14083 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14084 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14085 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14086 tp->nvram_jedecnum = JEDEC_ATMEL;
14087 tg3_flag_set(tp, NVRAM_BUFFERED);
14088 tg3_flag_set(tp, FLASH);
14089 tp->nvram_pagesize = 264;
14090 break;
14091 case FLASH_5752VENDOR_ST_M45PE10:
14092 case FLASH_5752VENDOR_ST_M45PE20:
14093 case FLASH_5752VENDOR_ST_M45PE40:
14094 tp->nvram_jedecnum = JEDEC_ST;
14095 tg3_flag_set(tp, NVRAM_BUFFERED);
14096 tg3_flag_set(tp, FLASH);
14097 tp->nvram_pagesize = 256;
14098 break;
14099 }
14100 }
14101
14102 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14103 {
14104 u32 nvcfg1, protect = 0;
14105
14106 nvcfg1 = tr32(NVRAM_CFG1);
14107
14108 /* NVRAM protection for TPM */
14109 if (nvcfg1 & (1 << 27)) {
14110 tg3_flag_set(tp, PROTECTED_NVRAM);
14111 protect = 1;
14112 }
14113
14114 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14115 switch (nvcfg1) {
14116 case FLASH_5761VENDOR_ATMEL_ADB021D:
14117 case FLASH_5761VENDOR_ATMEL_ADB041D:
14118 case FLASH_5761VENDOR_ATMEL_ADB081D:
14119 case FLASH_5761VENDOR_ATMEL_ADB161D:
14120 case FLASH_5761VENDOR_ATMEL_MDB021D:
14121 case FLASH_5761VENDOR_ATMEL_MDB041D:
14122 case FLASH_5761VENDOR_ATMEL_MDB081D:
14123 case FLASH_5761VENDOR_ATMEL_MDB161D:
14124 tp->nvram_jedecnum = JEDEC_ATMEL;
14125 tg3_flag_set(tp, NVRAM_BUFFERED);
14126 tg3_flag_set(tp, FLASH);
14127 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14128 tp->nvram_pagesize = 256;
14129 break;
14130 case FLASH_5761VENDOR_ST_A_M45PE20:
14131 case FLASH_5761VENDOR_ST_A_M45PE40:
14132 case FLASH_5761VENDOR_ST_A_M45PE80:
14133 case FLASH_5761VENDOR_ST_A_M45PE16:
14134 case FLASH_5761VENDOR_ST_M_M45PE20:
14135 case FLASH_5761VENDOR_ST_M_M45PE40:
14136 case FLASH_5761VENDOR_ST_M_M45PE80:
14137 case FLASH_5761VENDOR_ST_M_M45PE16:
14138 tp->nvram_jedecnum = JEDEC_ST;
14139 tg3_flag_set(tp, NVRAM_BUFFERED);
14140 tg3_flag_set(tp, FLASH);
14141 tp->nvram_pagesize = 256;
14142 break;
14143 }
14144
14145 if (protect) {
14146 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14147 } else {
14148 switch (nvcfg1) {
14149 case FLASH_5761VENDOR_ATMEL_ADB161D:
14150 case FLASH_5761VENDOR_ATMEL_MDB161D:
14151 case FLASH_5761VENDOR_ST_A_M45PE16:
14152 case FLASH_5761VENDOR_ST_M_M45PE16:
14153 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14154 break;
14155 case FLASH_5761VENDOR_ATMEL_ADB081D:
14156 case FLASH_5761VENDOR_ATMEL_MDB081D:
14157 case FLASH_5761VENDOR_ST_A_M45PE80:
14158 case FLASH_5761VENDOR_ST_M_M45PE80:
14159 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14160 break;
14161 case FLASH_5761VENDOR_ATMEL_ADB041D:
14162 case FLASH_5761VENDOR_ATMEL_MDB041D:
14163 case FLASH_5761VENDOR_ST_A_M45PE40:
14164 case FLASH_5761VENDOR_ST_M_M45PE40:
14165 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14166 break;
14167 case FLASH_5761VENDOR_ATMEL_ADB021D:
14168 case FLASH_5761VENDOR_ATMEL_MDB021D:
14169 case FLASH_5761VENDOR_ST_A_M45PE20:
14170 case FLASH_5761VENDOR_ST_M_M45PE20:
14171 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14172 break;
14173 }
14174 }
14175 }
14176
14177 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14178 {
14179 tp->nvram_jedecnum = JEDEC_ATMEL;
14180 tg3_flag_set(tp, NVRAM_BUFFERED);
14181 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14182 }
14183
14184 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14185 {
14186 u32 nvcfg1;
14187
14188 nvcfg1 = tr32(NVRAM_CFG1);
14189
14190 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14191 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14192 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14193 tp->nvram_jedecnum = JEDEC_ATMEL;
14194 tg3_flag_set(tp, NVRAM_BUFFERED);
14195 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14196
14197 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14198 tw32(NVRAM_CFG1, nvcfg1);
14199 return;
14200 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14201 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14202 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14203 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14204 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14205 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14206 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14207 tp->nvram_jedecnum = JEDEC_ATMEL;
14208 tg3_flag_set(tp, NVRAM_BUFFERED);
14209 tg3_flag_set(tp, FLASH);
14210
14211 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14212 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14213 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14214 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14215 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14216 break;
14217 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14218 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14219 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14220 break;
14221 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14222 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14223 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14224 break;
14225 }
14226 break;
14227 case FLASH_5752VENDOR_ST_M45PE10:
14228 case FLASH_5752VENDOR_ST_M45PE20:
14229 case FLASH_5752VENDOR_ST_M45PE40:
14230 tp->nvram_jedecnum = JEDEC_ST;
14231 tg3_flag_set(tp, NVRAM_BUFFERED);
14232 tg3_flag_set(tp, FLASH);
14233
14234 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14235 case FLASH_5752VENDOR_ST_M45PE10:
14236 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14237 break;
14238 case FLASH_5752VENDOR_ST_M45PE20:
14239 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14240 break;
14241 case FLASH_5752VENDOR_ST_M45PE40:
14242 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14243 break;
14244 }
14245 break;
14246 default:
14247 tg3_flag_set(tp, NO_NVRAM);
14248 return;
14249 }
14250
14251 tg3_nvram_get_pagesize(tp, nvcfg1);
14252 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14253 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14254 }
14255
14256
14257 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14258 {
14259 u32 nvcfg1;
14260
14261 nvcfg1 = tr32(NVRAM_CFG1);
14262
14263 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14264 case FLASH_5717VENDOR_ATMEL_EEPROM:
14265 case FLASH_5717VENDOR_MICRO_EEPROM:
14266 tp->nvram_jedecnum = JEDEC_ATMEL;
14267 tg3_flag_set(tp, NVRAM_BUFFERED);
14268 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14269
14270 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14271 tw32(NVRAM_CFG1, nvcfg1);
14272 return;
14273 case FLASH_5717VENDOR_ATMEL_MDB011D:
14274 case FLASH_5717VENDOR_ATMEL_ADB011B:
14275 case FLASH_5717VENDOR_ATMEL_ADB011D:
14276 case FLASH_5717VENDOR_ATMEL_MDB021D:
14277 case FLASH_5717VENDOR_ATMEL_ADB021B:
14278 case FLASH_5717VENDOR_ATMEL_ADB021D:
14279 case FLASH_5717VENDOR_ATMEL_45USPT:
14280 tp->nvram_jedecnum = JEDEC_ATMEL;
14281 tg3_flag_set(tp, NVRAM_BUFFERED);
14282 tg3_flag_set(tp, FLASH);
14283
14284 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14285 case FLASH_5717VENDOR_ATMEL_MDB021D:
14286 /* Detect size with tg3_nvram_get_size() */
14287 break;
14288 case FLASH_5717VENDOR_ATMEL_ADB021B:
14289 case FLASH_5717VENDOR_ATMEL_ADB021D:
14290 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14291 break;
14292 default:
14293 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14294 break;
14295 }
14296 break;
14297 case FLASH_5717VENDOR_ST_M_M25PE10:
14298 case FLASH_5717VENDOR_ST_A_M25PE10:
14299 case FLASH_5717VENDOR_ST_M_M45PE10:
14300 case FLASH_5717VENDOR_ST_A_M45PE10:
14301 case FLASH_5717VENDOR_ST_M_M25PE20:
14302 case FLASH_5717VENDOR_ST_A_M25PE20:
14303 case FLASH_5717VENDOR_ST_M_M45PE20:
14304 case FLASH_5717VENDOR_ST_A_M45PE20:
14305 case FLASH_5717VENDOR_ST_25USPT:
14306 case FLASH_5717VENDOR_ST_45USPT:
14307 tp->nvram_jedecnum = JEDEC_ST;
14308 tg3_flag_set(tp, NVRAM_BUFFERED);
14309 tg3_flag_set(tp, FLASH);
14310
14311 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14312 case FLASH_5717VENDOR_ST_M_M25PE20:
14313 case FLASH_5717VENDOR_ST_M_M45PE20:
14314 /* Detect size with tg3_nvram_get_size() */
14315 break;
14316 case FLASH_5717VENDOR_ST_A_M25PE20:
14317 case FLASH_5717VENDOR_ST_A_M45PE20:
14318 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14319 break;
14320 default:
14321 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14322 break;
14323 }
14324 break;
14325 default:
14326 tg3_flag_set(tp, NO_NVRAM);
14327 return;
14328 }
14329
14330 tg3_nvram_get_pagesize(tp, nvcfg1);
14331 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14332 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14333 }
14334
14335 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14336 {
14337 u32 nvcfg1, nvmpinstrp;
14338
14339 nvcfg1 = tr32(NVRAM_CFG1);
14340 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14341
14342 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14343 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14344 tg3_flag_set(tp, NO_NVRAM);
14345 return;
14346 }
14347
14348 switch (nvmpinstrp) {
14349 case FLASH_5762_EEPROM_HD:
14350 nvmpinstrp = FLASH_5720_EEPROM_HD;
14351 break;
14352 case FLASH_5762_EEPROM_LD:
14353 nvmpinstrp = FLASH_5720_EEPROM_LD;
14354 break;
14355 case FLASH_5720VENDOR_M_ST_M45PE20:
14356 /* This pinstrap supports multiple sizes, so force it
14357 * to read the actual size from location 0xf0.
14358 */
14359 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14360 break;
14361 }
14362 }
14363
14364 switch (nvmpinstrp) {
14365 case FLASH_5720_EEPROM_HD:
14366 case FLASH_5720_EEPROM_LD:
14367 tp->nvram_jedecnum = JEDEC_ATMEL;
14368 tg3_flag_set(tp, NVRAM_BUFFERED);
14369
14370 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14371 tw32(NVRAM_CFG1, nvcfg1);
14372 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14373 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14374 else
14375 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14376 return;
14377 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14378 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14379 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14380 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14381 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14382 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14383 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14384 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14385 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14386 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14387 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14388 case FLASH_5720VENDOR_ATMEL_45USPT:
14389 tp->nvram_jedecnum = JEDEC_ATMEL;
14390 tg3_flag_set(tp, NVRAM_BUFFERED);
14391 tg3_flag_set(tp, FLASH);
14392
14393 switch (nvmpinstrp) {
14394 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14395 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14396 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14397 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14398 break;
14399 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14400 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14401 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14402 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14403 break;
14404 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14405 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14406 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14407 break;
14408 default:
14409 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14410 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14411 break;
14412 }
14413 break;
14414 case FLASH_5720VENDOR_M_ST_M25PE10:
14415 case FLASH_5720VENDOR_M_ST_M45PE10:
14416 case FLASH_5720VENDOR_A_ST_M25PE10:
14417 case FLASH_5720VENDOR_A_ST_M45PE10:
14418 case FLASH_5720VENDOR_M_ST_M25PE20:
14419 case FLASH_5720VENDOR_M_ST_M45PE20:
14420 case FLASH_5720VENDOR_A_ST_M25PE20:
14421 case FLASH_5720VENDOR_A_ST_M45PE20:
14422 case FLASH_5720VENDOR_M_ST_M25PE40:
14423 case FLASH_5720VENDOR_M_ST_M45PE40:
14424 case FLASH_5720VENDOR_A_ST_M25PE40:
14425 case FLASH_5720VENDOR_A_ST_M45PE40:
14426 case FLASH_5720VENDOR_M_ST_M25PE80:
14427 case FLASH_5720VENDOR_M_ST_M45PE80:
14428 case FLASH_5720VENDOR_A_ST_M25PE80:
14429 case FLASH_5720VENDOR_A_ST_M45PE80:
14430 case FLASH_5720VENDOR_ST_25USPT:
14431 case FLASH_5720VENDOR_ST_45USPT:
14432 tp->nvram_jedecnum = JEDEC_ST;
14433 tg3_flag_set(tp, NVRAM_BUFFERED);
14434 tg3_flag_set(tp, FLASH);
14435
14436 switch (nvmpinstrp) {
14437 case FLASH_5720VENDOR_M_ST_M25PE20:
14438 case FLASH_5720VENDOR_M_ST_M45PE20:
14439 case FLASH_5720VENDOR_A_ST_M25PE20:
14440 case FLASH_5720VENDOR_A_ST_M45PE20:
14441 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14442 break;
14443 case FLASH_5720VENDOR_M_ST_M25PE40:
14444 case FLASH_5720VENDOR_M_ST_M45PE40:
14445 case FLASH_5720VENDOR_A_ST_M25PE40:
14446 case FLASH_5720VENDOR_A_ST_M45PE40:
14447 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14448 break;
14449 case FLASH_5720VENDOR_M_ST_M25PE80:
14450 case FLASH_5720VENDOR_M_ST_M45PE80:
14451 case FLASH_5720VENDOR_A_ST_M25PE80:
14452 case FLASH_5720VENDOR_A_ST_M45PE80:
14453 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14454 break;
14455 default:
14456 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14457 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14458 break;
14459 }
14460 break;
14461 default:
14462 tg3_flag_set(tp, NO_NVRAM);
14463 return;
14464 }
14465
14466 tg3_nvram_get_pagesize(tp, nvcfg1);
14467 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14468 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14469
14470 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14471 u32 val;
14472
14473 if (tg3_nvram_read(tp, 0, &val))
14474 return;
14475
14476 if (val != TG3_EEPROM_MAGIC &&
14477 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14478 tg3_flag_set(tp, NO_NVRAM);
14479 }
14480 }
14481
14482 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14483 static void tg3_nvram_init(struct tg3 *tp)
14484 {
14485 if (tg3_flag(tp, IS_SSB_CORE)) {
14486 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14487 tg3_flag_clear(tp, NVRAM);
14488 tg3_flag_clear(tp, NVRAM_BUFFERED);
14489 tg3_flag_set(tp, NO_NVRAM);
14490 return;
14491 }
14492
14493 tw32_f(GRC_EEPROM_ADDR,
14494 (EEPROM_ADDR_FSM_RESET |
14495 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14496 EEPROM_ADDR_CLKPERD_SHIFT)));
14497
14498 msleep(1);
14499
14500 /* Enable seeprom accesses. */
14501 tw32_f(GRC_LOCAL_CTRL,
14502 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14503 udelay(100);
14504
14505 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14506 tg3_asic_rev(tp) != ASIC_REV_5701) {
14507 tg3_flag_set(tp, NVRAM);
14508
14509 if (tg3_nvram_lock(tp)) {
14510 netdev_warn(tp->dev,
14511 "Cannot get nvram lock, %s failed\n",
14512 __func__);
14513 return;
14514 }
14515 tg3_enable_nvram_access(tp);
14516
14517 tp->nvram_size = 0;
14518
14519 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14520 tg3_get_5752_nvram_info(tp);
14521 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14522 tg3_get_5755_nvram_info(tp);
14523 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14524 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14525 tg3_asic_rev(tp) == ASIC_REV_5785)
14526 tg3_get_5787_nvram_info(tp);
14527 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14528 tg3_get_5761_nvram_info(tp);
14529 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14530 tg3_get_5906_nvram_info(tp);
14531 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14532 tg3_flag(tp, 57765_CLASS))
14533 tg3_get_57780_nvram_info(tp);
14534 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14535 tg3_asic_rev(tp) == ASIC_REV_5719)
14536 tg3_get_5717_nvram_info(tp);
14537 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14538 tg3_asic_rev(tp) == ASIC_REV_5762)
14539 tg3_get_5720_nvram_info(tp);
14540 else
14541 tg3_get_nvram_info(tp);
14542
14543 if (tp->nvram_size == 0)
14544 tg3_get_nvram_size(tp);
14545
14546 tg3_disable_nvram_access(tp);
14547 tg3_nvram_unlock(tp);
14548
14549 } else {
14550 tg3_flag_clear(tp, NVRAM);
14551 tg3_flag_clear(tp, NVRAM_BUFFERED);
14552
14553 tg3_get_eeprom_size(tp);
14554 }
14555 }
14556
14557 struct subsys_tbl_ent {
14558 u16 subsys_vendor, subsys_devid;
14559 u32 phy_id;
14560 };
14561
14562 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14563 /* Broadcom boards. */
14564 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14565 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14566 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14567 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14568 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14569 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14570 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14571 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14572 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14573 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14574 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14575 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14576 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14577 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14578 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14579 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14580 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14581 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14582 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14583 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14584 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14585 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14586
14587 /* 3com boards. */
14588 { TG3PCI_SUBVENDOR_ID_3COM,
14589 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14590 { TG3PCI_SUBVENDOR_ID_3COM,
14591 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14592 { TG3PCI_SUBVENDOR_ID_3COM,
14593 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14594 { TG3PCI_SUBVENDOR_ID_3COM,
14595 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14596 { TG3PCI_SUBVENDOR_ID_3COM,
14597 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14598
14599 /* DELL boards. */
14600 { TG3PCI_SUBVENDOR_ID_DELL,
14601 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14602 { TG3PCI_SUBVENDOR_ID_DELL,
14603 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14604 { TG3PCI_SUBVENDOR_ID_DELL,
14605 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14606 { TG3PCI_SUBVENDOR_ID_DELL,
14607 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14608
14609 /* Compaq boards. */
14610 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14611 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14612 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14613 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14614 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14615 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14616 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14617 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14618 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14619 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14620
14621 /* IBM boards. */
14622 { TG3PCI_SUBVENDOR_ID_IBM,
14623 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14624 };
14625
14626 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14627 {
14628 int i;
14629
14630 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14631 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14632 tp->pdev->subsystem_vendor) &&
14633 (subsys_id_to_phy_id[i].subsys_devid ==
14634 tp->pdev->subsystem_device))
14635 return &subsys_id_to_phy_id[i];
14636 }
14637 return NULL;
14638 }
14639
14640 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14641 {
14642 u32 val;
14643
14644 tp->phy_id = TG3_PHY_ID_INVALID;
14645 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14646
14647 /* Assume an onboard device with WOL capability by default. */
14648 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14649 tg3_flag_set(tp, WOL_CAP);
14650
14651 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14652 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14653 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14654 tg3_flag_set(tp, IS_NIC);
14655 }
14656 val = tr32(VCPU_CFGSHDW);
14657 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14658 tg3_flag_set(tp, ASPM_WORKAROUND);
14659 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14660 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14661 tg3_flag_set(tp, WOL_ENABLE);
14662 device_set_wakeup_enable(&tp->pdev->dev, true);
14663 }
14664 goto done;
14665 }
14666
14667 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14668 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14669 u32 nic_cfg, led_cfg;
14670 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14671 int eeprom_phy_serdes = 0;
14672
14673 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14674 tp->nic_sram_data_cfg = nic_cfg;
14675
14676 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14677 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14678 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14679 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14680 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14681 (ver > 0) && (ver < 0x100))
14682 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14683
14684 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14685 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14686
14687 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14688 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14689 eeprom_phy_serdes = 1;
14690
14691 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14692 if (nic_phy_id != 0) {
14693 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14694 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14695
14696 eeprom_phy_id = (id1 >> 16) << 10;
14697 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14698 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14699 } else
14700 eeprom_phy_id = 0;
14701
14702 tp->phy_id = eeprom_phy_id;
14703 if (eeprom_phy_serdes) {
14704 if (!tg3_flag(tp, 5705_PLUS))
14705 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14706 else
14707 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14708 }
14709
14710 if (tg3_flag(tp, 5750_PLUS))
14711 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14712 SHASTA_EXT_LED_MODE_MASK);
14713 else
14714 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14715
14716 switch (led_cfg) {
14717 default:
14718 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14719 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14720 break;
14721
14722 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14723 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14724 break;
14725
14726 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14727 tp->led_ctrl = LED_CTRL_MODE_MAC;
14728
14729 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
14730 * as happens with some older 5700/5701 bootcode.
14731 */
14732 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14733 tg3_asic_rev(tp) == ASIC_REV_5701)
14734 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14735
14736 break;
14737
14738 case SHASTA_EXT_LED_SHARED:
14739 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14740 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14741 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14742 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14743 LED_CTRL_MODE_PHY_2);
14744 break;
14745
14746 case SHASTA_EXT_LED_MAC:
14747 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14748 break;
14749
14750 case SHASTA_EXT_LED_COMBO:
14751 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14752 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14753 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14754 LED_CTRL_MODE_PHY_2);
14755 break;
14756
14757 }
14758
14759 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14760 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14761 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14762 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14763
14764 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14765 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14766
14767 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14768 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14769 if ((tp->pdev->subsystem_vendor ==
14770 PCI_VENDOR_ID_ARIMA) &&
14771 (tp->pdev->subsystem_device == 0x205a ||
14772 tp->pdev->subsystem_device == 0x2063))
14773 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14774 } else {
14775 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14776 tg3_flag_set(tp, IS_NIC);
14777 }
14778
14779 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14780 tg3_flag_set(tp, ENABLE_ASF);
14781 if (tg3_flag(tp, 5750_PLUS))
14782 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14783 }
14784
14785 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14786 tg3_flag(tp, 5750_PLUS))
14787 tg3_flag_set(tp, ENABLE_APE);
14788
14789 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14790 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14791 tg3_flag_clear(tp, WOL_CAP);
14792
14793 if (tg3_flag(tp, WOL_CAP) &&
14794 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14795 tg3_flag_set(tp, WOL_ENABLE);
14796 device_set_wakeup_enable(&tp->pdev->dev, true);
14797 }
14798
14799 if (cfg2 & (1 << 17))
14800 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14801
14802 /* SerDes signal pre-emphasis in register 0x590 is set by
14803 * the bootcode if bit 18 is set. */
14804 if (cfg2 & (1 << 18))
14805 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14806
14807 if ((tg3_flag(tp, 57765_PLUS) ||
14808 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14809 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14810 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14811 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14812
14813 if (tg3_flag(tp, PCI_EXPRESS)) {
14814 u32 cfg3;
14815
14816 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14817 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14818 !tg3_flag(tp, 57765_PLUS) &&
14819 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14820 tg3_flag_set(tp, ASPM_WORKAROUND);
14821 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14822 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14823 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14824 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14825 }
14826
14827 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14828 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14829 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14830 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14831 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14832 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14833 }
14834 done:
14835 if (tg3_flag(tp, WOL_CAP))
14836 device_set_wakeup_enable(&tp->pdev->dev,
14837 tg3_flag(tp, WOL_ENABLE));
14838 else
14839 device_set_wakeup_capable(&tp->pdev->dev, false);
14840 }
14841
14842 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14843 {
14844 int i, err;
14845 u32 val2, off = offset * 8;
14846
14847 err = tg3_nvram_lock(tp);
14848 if (err)
14849 return err;
14850
14851 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14852 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14853 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14854 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14855 udelay(10);
14856
14857 for (i = 0; i < 100; i++) {
14858 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14859 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14860 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14861 break;
14862 }
14863 udelay(10);
14864 }
14865
14866 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14867
14868 tg3_nvram_unlock(tp);
14869 if (val2 & APE_OTP_STATUS_CMD_DONE)
14870 return 0;
14871
14872 return -EBUSY;
14873 }
14874
14875 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14876 {
14877 int i;
14878 u32 val;
14879
14880 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14881 tw32(OTP_CTRL, cmd);
14882
14883 /* Wait for up to 1 ms for command to execute. */
14884 for (i = 0; i < 100; i++) {
14885 val = tr32(OTP_STATUS);
14886 if (val & OTP_STATUS_CMD_DONE)
14887 break;
14888 udelay(10);
14889 }
14890
14891 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14892 }
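/* Timing note: the loop above polls OTP_STATUS at 10 us intervals for
 * at most 100 iterations, i.e. the 1 ms budget the comment promises.
 * The final status check decides between 0 and -EBUSY, so a command
 * that completes on the very last poll is still reported as success.
 */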
14893
14894 /* Read the gphy configuration from the OTP region of the chip. The gphy
14895 * configuration is a 32-bit value that straddles the alignment boundary.
14896 * We do two 32-bit reads and then shift and merge the results.
14897 */
14898 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14899 {
14900 u32 bhalf_otp, thalf_otp;
14901
14902 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14903
14904 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14905 return 0;
14906
14907 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14908
14909 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14910 return 0;
14911
14912 thalf_otp = tr32(OTP_READ_DATA);
14913
14914 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14915
14916 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14917 return 0;
14918
14919 bhalf_otp = tr32(OTP_READ_DATA);
14920
14921 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14922 }
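/* Worked example (hypothetical OTP words): if the two reads return
 * thalf_otp = 0x1234abcd and bhalf_otp = 0x5678ef01, the merge
 *
 *   ((0x1234abcd & 0x0000ffff) << 16) | (0x5678ef01 >> 16)
 *
 * yields 0xabcd5678: the low half-word of the first read supplies the
 * top 16 bits and the high half-word of the second read the bottom 16,
 * reassembling the 32-bit gphy config that straddles the boundary.
 */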
14923
14924 static void tg3_phy_init_link_config(struct tg3 *tp)
14925 {
14926 u32 adv = ADVERTISED_Autoneg;
14927
14928 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14929 adv |= ADVERTISED_1000baseT_Half |
14930 ADVERTISED_1000baseT_Full;
14931
14932 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14933 adv |= ADVERTISED_100baseT_Half |
14934 ADVERTISED_100baseT_Full |
14935 ADVERTISED_10baseT_Half |
14936 ADVERTISED_10baseT_Full |
14937 ADVERTISED_TP;
14938 else
14939 adv |= ADVERTISED_FIBRE;
14940
14941 tp->link_config.advertising = adv;
14942 tp->link_config.speed = SPEED_UNKNOWN;
14943 tp->link_config.duplex = DUPLEX_UNKNOWN;
14944 tp->link_config.autoneg = AUTONEG_ENABLE;
14945 tp->link_config.active_speed = SPEED_UNKNOWN;
14946 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14947
14948 tp->old_link = -1;
14949 }
14950
14951 static int tg3_phy_probe(struct tg3 *tp)
14952 {
14953 u32 hw_phy_id_1, hw_phy_id_2;
14954 u32 hw_phy_id, hw_phy_id_masked;
14955 int err;
14956
14957 /* flow control autonegotiation is default behavior */
14958 tg3_flag_set(tp, PAUSE_AUTONEG);
14959 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14960
14961 if (tg3_flag(tp, ENABLE_APE)) {
14962 switch (tp->pci_fn) {
14963 case 0:
14964 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14965 break;
14966 case 1:
14967 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14968 break;
14969 case 2:
14970 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14971 break;
14972 case 3:
14973 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14974 break;
14975 }
14976 }
14977
14978 if (!tg3_flag(tp, ENABLE_ASF) &&
14979 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14980 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14981 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
14982 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
14983
14984 if (tg3_flag(tp, USE_PHYLIB))
14985 return tg3_phy_init(tp);
14986
14987 /* Reading the PHY ID register can conflict with ASF
14988 * firmware access to the PHY hardware.
14989 */
14990 err = 0;
14991 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14992 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14993 } else {
14994 /* Now read the physical PHY_ID from the chip and verify
14995 * that it is sane. If it doesn't look good, we fall back
14996 * to the value found in the eeprom area or, failing
14997 * that, the hard-coded subsystem-ID table.
14998 */
14999 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15000 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15001
15002 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15003 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15004 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15005
15006 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15007 }
15008
15009 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15010 tp->phy_id = hw_phy_id;
15011 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15012 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15013 else
15014 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15015 } else {
15016 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15017 /* Do nothing, phy ID already set up in
15018 * tg3_get_eeprom_hw_cfg().
15019 */
15020 } else {
15021 struct subsys_tbl_ent *p;
15022
15023 /* No eeprom signature? Try the hardcoded
15024 * subsys device table.
15025 */
15026 p = tg3_lookup_by_subsys(tp);
15027 if (p) {
15028 tp->phy_id = p->phy_id;
15029 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15030 /* So far we have seen the IDs 0xbc050cd0,
15031 * 0xbc050f80 and 0xbc050c30 on devices
15032 * connected to a BCM4785, and there are
15033 * probably more. For now, just assume that
15034 * the PHY is supported when it is connected
15035 * to an SSB core.
15036 */
15037 return -ENODEV;
15038 }
15039
15040 if (!tp->phy_id ||
15041 tp->phy_id == TG3_PHY_ID_BCM8002)
15042 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15043 }
15044 }
15045
15046 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15047 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15048 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15049 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15050 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15051 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15052 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15053 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15054 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
15055 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15056
15057 tg3_phy_init_link_config(tp);
15058
15059 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15060 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15061 !tg3_flag(tp, ENABLE_APE) &&
15062 !tg3_flag(tp, ENABLE_ASF)) {
15063 u32 bmsr, dummy;
15064
15065 tg3_readphy(tp, MII_BMSR, &bmsr);
15066 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15067 (bmsr & BMSR_LSTATUS))
15068 goto skip_phy_reset;
15069
15070 err = tg3_phy_reset(tp);
15071 if (err)
15072 return err;
15073
15074 tg3_phy_set_wirespeed(tp);
15075
15076 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15077 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15078 tp->link_config.flowctrl);
15079
15080 tg3_writephy(tp, MII_BMCR,
15081 BMCR_ANENABLE | BMCR_ANRESTART);
15082 }
15083 }
15084
15085 skip_phy_reset:
15086 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15087 err = tg3_init_5401phy_dsp(tp);
15088 if (err)
15089 return err;
15090
15091 err = tg3_init_5401phy_dsp(tp);
15092 }
15093
15094 return err;
15095 }
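/* Worked example (hypothetical register values) for the ID packing in
 * tg3_phy_probe() above: with MII_PHYSID1 = 0x0020 and
 * MII_PHYSID2 = 0x5c32,
 *
 *   (0x0020 & 0xffff) << 10 = 0x00008000
 *   (0x5c32 & 0xfc00) << 16 = 0x5c000000
 *   (0x5c32 & 0x03ff) << 0  = 0x00000032
 *
 * giving hw_phy_id = 0x5c008032, which is then masked with
 * TG3_PHY_ID_MASK before the known-PHY check.
 */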
15096
15097 static void tg3_read_vpd(struct tg3 *tp)
15098 {
15099 u8 *vpd_data;
15100 unsigned int block_end, rosize, len;
15101 u32 vpdlen;
15102 int j, i = 0;
15103
15104 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15105 if (!vpd_data)
15106 goto out_no_vpd;
15107
15108 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15109 if (i < 0)
15110 goto out_not_found;
15111
15112 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15113 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15114 i += PCI_VPD_LRDT_TAG_SIZE;
15115
15116 if (block_end > vpdlen)
15117 goto out_not_found;
15118
15119 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15120 PCI_VPD_RO_KEYWORD_MFR_ID);
15121 if (j > 0) {
15122 len = pci_vpd_info_field_size(&vpd_data[j]);
15123
15124 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15125 if (j + len > block_end || len != 4 ||
15126 memcmp(&vpd_data[j], "1028", 4))
15127 goto partno;
15128
15129 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15130 PCI_VPD_RO_KEYWORD_VENDOR0);
15131 if (j < 0)
15132 goto partno;
15133
15134 len = pci_vpd_info_field_size(&vpd_data[j]);
15135
15136 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15137 if (j + len > block_end)
15138 goto partno;
15139
15140 if (len >= sizeof(tp->fw_ver))
15141 len = sizeof(tp->fw_ver) - 1;
15142 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15143 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15144 &vpd_data[j]);
15145 }
15146
15147 partno:
15148 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15149 PCI_VPD_RO_KEYWORD_PARTNO);
15150 if (i < 0)
15151 goto out_not_found;
15152
15153 len = pci_vpd_info_field_size(&vpd_data[i]);
15154
15155 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15156 if (len > TG3_BPN_SIZE ||
15157 (len + i) > vpdlen)
15158 goto out_not_found;
15159
15160 memcpy(tp->board_part_number, &vpd_data[i], len);
15161
15162 out_not_found:
15163 kfree(vpd_data);
15164 if (tp->board_part_number[0])
15165 return;
15166
15167 out_no_vpd:
15168 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15169 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15170 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15171 strcpy(tp->board_part_number, "BCM5717");
15172 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15173 strcpy(tp->board_part_number, "BCM5718");
15174 else
15175 goto nomatch;
15176 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15177 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15178 strcpy(tp->board_part_number, "BCM57780");
15179 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15180 strcpy(tp->board_part_number, "BCM57760");
15181 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15182 strcpy(tp->board_part_number, "BCM57790");
15183 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15184 strcpy(tp->board_part_number, "BCM57788");
15185 else
15186 goto nomatch;
15187 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15188 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15189 strcpy(tp->board_part_number, "BCM57761");
15190 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15191 strcpy(tp->board_part_number, "BCM57765");
15192 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15193 strcpy(tp->board_part_number, "BCM57781");
15194 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15195 strcpy(tp->board_part_number, "BCM57785");
15196 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15197 strcpy(tp->board_part_number, "BCM57791");
15198 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15199 strcpy(tp->board_part_number, "BCM57795");
15200 else
15201 goto nomatch;
15202 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15203 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15204 strcpy(tp->board_part_number, "BCM57762");
15205 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15206 strcpy(tp->board_part_number, "BCM57766");
15207 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15208 strcpy(tp->board_part_number, "BCM57782");
15209 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15210 strcpy(tp->board_part_number, "BCM57786");
15211 else
15212 goto nomatch;
15213 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15214 strcpy(tp->board_part_number, "BCM95906");
15215 } else {
15216 nomatch:
15217 strcpy(tp->board_part_number, "none");
15218 }
15219 }
15220
15221 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15222 {
15223 u32 val;
15224
15225 if (tg3_nvram_read(tp, offset, &val) ||
15226 (val & 0xfc000000) != 0x0c000000 ||
15227 tg3_nvram_read(tp, offset + 4, &val) ||
15228 val != 0)
15229 return 0;
15230
15231 return 1;
15232 }
15233
15234 static void tg3_read_bc_ver(struct tg3 *tp)
15235 {
15236 u32 val, offset, start, ver_offset;
15237 int i, dst_off;
15238 bool newver = false;
15239
15240 if (tg3_nvram_read(tp, 0xc, &offset) ||
15241 tg3_nvram_read(tp, 0x4, &start))
15242 return;
15243
15244 offset = tg3_nvram_logical_addr(tp, offset);
15245
15246 if (tg3_nvram_read(tp, offset, &val))
15247 return;
15248
15249 if ((val & 0xfc000000) == 0x0c000000) {
15250 if (tg3_nvram_read(tp, offset + 4, &val))
15251 return;
15252
15253 if (val == 0)
15254 newver = true;
15255 }
15256
15257 dst_off = strlen(tp->fw_ver);
15258
15259 if (newver) {
15260 if (TG3_VER_SIZE - dst_off < 16 ||
15261 tg3_nvram_read(tp, offset + 8, &ver_offset))
15262 return;
15263
15264 offset = offset + ver_offset - start;
15265 for (i = 0; i < 16; i += 4) {
15266 __be32 v;
15267 if (tg3_nvram_read_be32(tp, offset + i, &v))
15268 return;
15269
15270 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15271 }
15272 } else {
15273 u32 major, minor;
15274
15275 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15276 return;
15277
15278 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15279 TG3_NVM_BCVER_MAJSFT;
15280 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15281 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15282 "v%d.%02d", major, minor);
15283 }
15284 }
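/* Note on the newver path above: ver_offset is an address within the
 * bootcode image, and start (read from NVRAM word 0x4) is apparently
 * the image's load address, so "offset + ver_offset - start" rebases
 * the version string pointer back into NVRAM before the 16-byte copy.
 */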
15285
15286 static void tg3_read_hwsb_ver(struct tg3 *tp)
15287 {
15288 u32 val, major, minor;
15289
15290 /* Use native endian representation */
15291 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15292 return;
15293
15294 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15295 TG3_NVM_HWSB_CFG1_MAJSFT;
15296 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15297 TG3_NVM_HWSB_CFG1_MINSFT;
15298
15299 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15300 }
15301
15302 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15303 {
15304 u32 offset, major, minor, build;
15305
15306 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15307
15308 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15309 return;
15310
15311 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15312 case TG3_EEPROM_SB_REVISION_0:
15313 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15314 break;
15315 case TG3_EEPROM_SB_REVISION_2:
15316 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15317 break;
15318 case TG3_EEPROM_SB_REVISION_3:
15319 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15320 break;
15321 case TG3_EEPROM_SB_REVISION_4:
15322 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15323 break;
15324 case TG3_EEPROM_SB_REVISION_5:
15325 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15326 break;
15327 case TG3_EEPROM_SB_REVISION_6:
15328 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15329 break;
15330 default:
15331 return;
15332 }
15333
15334 if (tg3_nvram_read(tp, offset, &val))
15335 return;
15336
15337 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15338 TG3_EEPROM_SB_EDH_BLD_SHFT;
15339 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15340 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15341 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15342
15343 if (minor > 99 || build > 26)
15344 return;
15345
15346 offset = strlen(tp->fw_ver);
15347 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15348 " v%d.%02d", major, minor);
15349
15350 if (build > 0) {
15351 offset = strlen(tp->fw_ver);
15352 if (offset < TG3_VER_SIZE - 1)
15353 tp->fw_ver[offset] = 'a' + build - 1;
15354 }
15355 }
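/* Worked example (hypothetical EDH word): with major = 1, minor = 2 and
 * build = 3, the code above appends " v1.02" after the "sb" prefix and
 * then tacks on 'a' + 3 - 1 = 'c', producing "sb v1.02c". Builds above
 * 26 are rejected earlier because they have no single-letter suffix.
 */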
15356
15357 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15358 {
15359 u32 val, offset, start;
15360 int i, vlen;
15361
15362 for (offset = TG3_NVM_DIR_START;
15363 offset < TG3_NVM_DIR_END;
15364 offset += TG3_NVM_DIRENT_SIZE) {
15365 if (tg3_nvram_read(tp, offset, &val))
15366 return;
15367
15368 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15369 break;
15370 }
15371
15372 if (offset == TG3_NVM_DIR_END)
15373 return;
15374
15375 if (!tg3_flag(tp, 5705_PLUS))
15376 start = 0x08000000;
15377 else if (tg3_nvram_read(tp, offset - 4, &start))
15378 return;
15379
15380 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15381 !tg3_fw_img_is_valid(tp, offset) ||
15382 tg3_nvram_read(tp, offset + 8, &val))
15383 return;
15384
15385 offset += val - start;
15386
15387 vlen = strlen(tp->fw_ver);
15388
15389 tp->fw_ver[vlen++] = ',';
15390 tp->fw_ver[vlen++] = ' ';
15391
15392 for (i = 0; i < 4; i++) {
15393 __be32 v;
15394 if (tg3_nvram_read_be32(tp, offset, &v))
15395 return;
15396
15397 offset += sizeof(v);
15398
15399 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15400 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15401 break;
15402 }
15403
15404 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15405 vlen += sizeof(v);
15406 }
15407 }
15408
15409 static void tg3_probe_ncsi(struct tg3 *tp)
15410 {
15411 u32 apedata;
15412
15413 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15414 if (apedata != APE_SEG_SIG_MAGIC)
15415 return;
15416
15417 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15418 if (!(apedata & APE_FW_STATUS_READY))
15419 return;
15420
15421 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15422 tg3_flag_set(tp, APE_HAS_NCSI);
15423 }
15424
15425 static void tg3_read_dash_ver(struct tg3 *tp)
15426 {
15427 int vlen;
15428 u32 apedata;
15429 char *fwtype;
15430
15431 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15432
15433 if (tg3_flag(tp, APE_HAS_NCSI))
15434 fwtype = "NCSI";
15435 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15436 fwtype = "SMASH";
15437 else
15438 fwtype = "DASH";
15439
15440 vlen = strlen(tp->fw_ver);
15441
15442 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15443 fwtype,
15444 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15445 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15446 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15447 (apedata & APE_FW_VERSION_BLDMSK));
15448 }
15449
15450 static void tg3_read_otp_ver(struct tg3 *tp)
15451 {
15452 u32 val, val2;
15453
15454 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15455 return;
15456
15457 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15458 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15459 TG3_OTP_MAGIC0_VALID(val)) {
15460 u64 val64 = (u64) val << 32 | val2;
15461 u32 ver = 0;
15462 int i, vlen;
15463
15464 for (i = 0; i < 7; i++) {
15465 if ((val64 & 0xff) == 0)
15466 break;
15467 ver = val64 & 0xff;
15468 val64 >>= 8;
15469 }
15470 vlen = strlen(tp->fw_ver);
15471 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15472 }
15473 }
15474
15475 static void tg3_read_fw_ver(struct tg3 *tp)
15476 {
15477 u32 val;
15478 bool vpd_vers = false;
15479
15480 if (tp->fw_ver[0] != 0)
15481 vpd_vers = true;
15482
15483 if (tg3_flag(tp, NO_NVRAM)) {
15484 strcat(tp->fw_ver, "sb");
15485 tg3_read_otp_ver(tp);
15486 return;
15487 }
15488
15489 if (tg3_nvram_read(tp, 0, &val))
15490 return;
15491
15492 if (val == TG3_EEPROM_MAGIC)
15493 tg3_read_bc_ver(tp);
15494 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15495 tg3_read_sb_ver(tp, val);
15496 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15497 tg3_read_hwsb_ver(tp);
15498
15499 if (tg3_flag(tp, ENABLE_ASF)) {
15500 if (tg3_flag(tp, ENABLE_APE)) {
15501 tg3_probe_ncsi(tp);
15502 if (!vpd_vers)
15503 tg3_read_dash_ver(tp);
15504 } else if (!vpd_vers) {
15505 tg3_read_mgmtfw_ver(tp);
15506 }
15507 }
15508
15509 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15510 }
15511
15512 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15513 {
15514 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15515 return TG3_RX_RET_MAX_SIZE_5717;
15516 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15517 return TG3_RX_RET_MAX_SIZE_5700;
15518 else
15519 return TG3_RX_RET_MAX_SIZE_5705;
15520 }
15521
15522 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15523 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15524 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15525 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15526 { },
15527 };
15528
15529 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15530 {
15531 struct pci_dev *peer;
15532 unsigned int func, devnr = tp->pdev->devfn & ~7;
15533
15534 for (func = 0; func < 8; func++) {
15535 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15536 if (peer && peer != tp->pdev)
15537 break;
15538 pci_dev_put(peer);
15539 }
15540 /* The 5704 can be configured in single-port mode; set peer to
15541 * tp->pdev in that case.
15542 */
15543 if (!peer) {
15544 peer = tp->pdev;
15545 return peer;
15546 }
15547
15548 /*
15549 * We don't need to keep the refcount elevated; there's no way
15550 * to remove one half of this device without removing the other.
15551 */
15552 pci_dev_put(peer);
15553
15554 return peer;
15555 }
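/* Note: pdev->devfn packs the PCI slot in its upper five bits and the
 * function in its lower three, so "devfn & ~7" is function 0 of the
 * same slot and the loop above simply probes all eight functions,
 * looking for the other port of a dual-port device such as the 5704.
 */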
15556
15557 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15558 {
15559 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15560 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15561 u32 reg;
15562
15563 /* All devices that use the alternate
15564 * ASIC REV location have a CPMU.
15565 */
15566 tg3_flag_set(tp, CPMU_PRESENT);
15567
15568 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15569 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15570 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15575 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15576 reg = TG3PCI_GEN2_PRODID_ASICREV;
15577 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15578 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15579 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15580 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15581 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15582 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15583 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15584 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15585 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15586 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15587 reg = TG3PCI_GEN15_PRODID_ASICREV;
15588 else
15589 reg = TG3PCI_PRODID_ASICREV;
15590
15591 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15592 }
15593
15594 /* Wrong chip ID in 5752 A0. This code can be removed later
15595 * as A0 is not in production.
15596 */
15597 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15598 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15599
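/* The 5717 C0 appears to be functionally equivalent to the
 * 5720 A0, hence the revision remap below.
 */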
15600 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15601 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15602
15603 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15604 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15605 tg3_asic_rev(tp) == ASIC_REV_5720)
15606 tg3_flag_set(tp, 5717_PLUS);
15607
15608 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15609 tg3_asic_rev(tp) == ASIC_REV_57766)
15610 tg3_flag_set(tp, 57765_CLASS);
15611
15612 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15613 tg3_asic_rev(tp) == ASIC_REV_5762)
15614 tg3_flag_set(tp, 57765_PLUS);
15615
15616 /* Intentionally exclude ASIC_REV_5906 */
15617 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15618 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15619 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15620 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15621 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15622 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15623 tg3_flag(tp, 57765_PLUS))
15624 tg3_flag_set(tp, 5755_PLUS);
15625
15626 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15627 tg3_asic_rev(tp) == ASIC_REV_5714)
15628 tg3_flag_set(tp, 5780_CLASS);
15629
15630 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15631 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15632 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15633 tg3_flag(tp, 5755_PLUS) ||
15634 tg3_flag(tp, 5780_CLASS))
15635 tg3_flag_set(tp, 5750_PLUS);
15636
15637 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15638 tg3_flag(tp, 5750_PLUS))
15639 tg3_flag_set(tp, 5705_PLUS);
15640 }
15641
15642 static bool tg3_10_100_only_device(struct tg3 *tp,
15643 const struct pci_device_id *ent)
15644 {
15645 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15646
15647 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15648 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15649 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15650 return true;
15651
15652 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15653 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15654 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15655 return true;
15656 } else {
15657 return true;
15658 }
15659 }
15660
15661 return false;
15662 }
15663
15664 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15665 {
15666 u32 misc_ctrl_reg;
15667 u32 pci_state_reg, grc_misc_cfg;
15668 u32 val;
15669 u16 pci_cmd;
15670 int err;
15671
15672 /* Force memory write invalidate off. If we leave it on,
15673 * then on 5700_BX chips we have to enable a workaround.
15674 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15675 * to match the cacheline size. The Broadcom driver has this
15676 * workaround but turns MWI off all the time, so it never uses
15677 * it. This seems to suggest that the workaround is insufficient.
15678 */
15679 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15680 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15681 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15682
15683 /* Important! -- Make sure register accesses are byteswapped
15684 * correctly. Also, for those chips that require it, make
15685 * sure that indirect register accesses are enabled before
15686 * the first operation.
15687 */
15688 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15689 &misc_ctrl_reg);
15690 tp->misc_host_ctrl |= (misc_ctrl_reg &
15691 MISC_HOST_CTRL_CHIPREV);
15692 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15693 tp->misc_host_ctrl);
15694
15695 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15696
15697 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15698 * we need to disable memory and use config. cycles
15699 * only to access all registers. The 5702/03 chips
15700 * can mistakenly decode the special cycles from the
15701 * ICH chipsets as memory write cycles, causing corruption
15702 * of register and memory space. Only certain ICH bridges
15703 * will drive special cycles with non-zero data during the
15704 * address phase which can fall within the 5703's address
15705 * range. This is not an ICH bug as the PCI spec allows
15706 * non-zero address during special cycles. However, only
15707 * these ICH bridges are known to drive non-zero addresses
15708 * during special cycles.
15709 *
15710 * Since special cycles do not cross PCI bridges, we only
15711 * enable this workaround if the 5703 is on the secondary
15712 * bus of these ICH bridges.
15713 */
15714 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15715 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15716 static struct tg3_dev_id {
15717 u32 vendor;
15718 u32 device;
15719 u32 rev;
15720 } ich_chipsets[] = {
15721 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15722 PCI_ANY_ID },
15723 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15724 PCI_ANY_ID },
15725 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15726 0xa },
15727 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15728 PCI_ANY_ID },
15729 { },
15730 };
15731 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15732 struct pci_dev *bridge = NULL;
15733
15734 while (pci_id->vendor != 0) {
15735 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15736 bridge);
15737 if (!bridge) {
15738 pci_id++;
15739 continue;
15740 }
15741 if (pci_id->rev != PCI_ANY_ID) {
15742 if (bridge->revision > pci_id->rev)
15743 continue;
15744 }
15745 if (bridge->subordinate &&
15746 (bridge->subordinate->number ==
15747 tp->pdev->bus->number)) {
15748 tg3_flag_set(tp, ICH_WORKAROUND);
15749 pci_dev_put(bridge);
15750 break;
15751 }
15752 }
15753 }
15754
15755 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15756 static struct tg3_dev_id {
15757 u32 vendor;
15758 u32 device;
15759 } bridge_chipsets[] = {
15760 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15761 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15762 { },
15763 };
15764 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15765 struct pci_dev *bridge = NULL;
15766
15767 while (pci_id->vendor != 0) {
15768 bridge = pci_get_device(pci_id->vendor,
15769 pci_id->device,
15770 bridge);
15771 if (!bridge) {
15772 pci_id++;
15773 continue;
15774 }
15775 if (bridge->subordinate &&
15776 (bridge->subordinate->number <=
15777 tp->pdev->bus->number) &&
15778 (bridge->subordinate->busn_res.end >=
15779 tp->pdev->bus->number)) {
15780 tg3_flag_set(tp, 5701_DMA_BUG);
15781 pci_dev_put(bridge);
15782 break;
15783 }
15784 }
15785 }
15786
15787 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15788 * DMA addresses > 40-bit. This bridge may have additional
15789 * 57xx devices behind it in some 4-port NIC designs for example.
15790 * Any tg3 device found behind the bridge will also need the 40-bit
15791 * DMA workaround.
15792 */
15793 if (tg3_flag(tp, 5780_CLASS)) {
15794 tg3_flag_set(tp, 40BIT_DMA_BUG);
15795 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15796 } else {
15797 struct pci_dev *bridge = NULL;
15798
15799 do {
15800 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15801 PCI_DEVICE_ID_SERVERWORKS_EPB,
15802 bridge);
15803 if (bridge && bridge->subordinate &&
15804 (bridge->subordinate->number <=
15805 tp->pdev->bus->number) &&
15806 (bridge->subordinate->busn_res.end >=
15807 tp->pdev->bus->number)) {
15808 tg3_flag_set(tp, 40BIT_DMA_BUG);
15809 pci_dev_put(bridge);
15810 break;
15811 }
15812 } while (bridge);
15813 }
15814
15815 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15816 tg3_asic_rev(tp) == ASIC_REV_5714)
15817 tp->pdev_peer = tg3_find_peer(tp);
15818
15819 /* Determine TSO capabilities */
15820 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15821 ; /* Do nothing. HW bug. */
15822 else if (tg3_flag(tp, 57765_PLUS))
15823 tg3_flag_set(tp, HW_TSO_3);
15824 else if (tg3_flag(tp, 5755_PLUS) ||
15825 tg3_asic_rev(tp) == ASIC_REV_5906)
15826 tg3_flag_set(tp, HW_TSO_2);
15827 else if (tg3_flag(tp, 5750_PLUS)) {
15828 tg3_flag_set(tp, HW_TSO_1);
15829 tg3_flag_set(tp, TSO_BUG);
15830 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15831 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15832 tg3_flag_clear(tp, TSO_BUG);
15833 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15834 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15835 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15836 tg3_flag_set(tp, FW_TSO);
15837 tg3_flag_set(tp, TSO_BUG);
15838 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15839 tp->fw_needed = FIRMWARE_TG3TSO5;
15840 else
15841 tp->fw_needed = FIRMWARE_TG3TSO;
15842 }
15843
15844 /* Selectively allow TSO based on operating conditions */
15845 if (tg3_flag(tp, HW_TSO_1) ||
15846 tg3_flag(tp, HW_TSO_2) ||
15847 tg3_flag(tp, HW_TSO_3) ||
15848 tg3_flag(tp, FW_TSO)) {
15849 /* For firmware TSO, assume ASF is disabled.
15850 * We'll disable TSO later if we discover ASF
15851 * is enabled in tg3_get_eeprom_hw_cfg().
15852 */
15853 tg3_flag_set(tp, TSO_CAPABLE);
15854 } else {
15855 tg3_flag_clear(tp, TSO_CAPABLE);
15856 tg3_flag_clear(tp, TSO_BUG);
15857 tp->fw_needed = NULL;
15858 }
15859
15860 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15861 tp->fw_needed = FIRMWARE_TG3;
15862
15863 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15864 tp->fw_needed = FIRMWARE_TG357766;
15865
15866 tp->irq_max = 1;
15867
15868 if (tg3_flag(tp, 5750_PLUS)) {
15869 tg3_flag_set(tp, SUPPORT_MSI);
15870 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15871 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15872 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15873 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15874 tp->pdev_peer == tp->pdev))
15875 tg3_flag_clear(tp, SUPPORT_MSI);
15876
15877 if (tg3_flag(tp, 5755_PLUS) ||
15878 tg3_asic_rev(tp) == ASIC_REV_5906) {
15879 tg3_flag_set(tp, 1SHOT_MSI);
15880 }
15881
15882 if (tg3_flag(tp, 57765_PLUS)) {
15883 tg3_flag_set(tp, SUPPORT_MSIX);
15884 tp->irq_max = TG3_IRQ_MAX_VECS;
15885 }
15886 }
15887
15888 tp->txq_max = 1;
15889 tp->rxq_max = 1;
15890 if (tp->irq_max > 1) {
15891 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15892 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15893
15894 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15895 tg3_asic_rev(tp) == ASIC_REV_5720)
15896 tp->txq_max = tp->irq_max - 1;
15897 }
15898
15899 if (tg3_flag(tp, 5755_PLUS) ||
15900 tg3_asic_rev(tp) == ASIC_REV_5906)
15901 tg3_flag_set(tp, SHORT_DMA_BUG);
15902
15903 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15904 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15905
15906 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15907 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15908 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15909 tg3_asic_rev(tp) == ASIC_REV_5762)
15910 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15911
15912 if (tg3_flag(tp, 57765_PLUS) &&
15913 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15914 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15915
15916 if (!tg3_flag(tp, 5705_PLUS) ||
15917 tg3_flag(tp, 5780_CLASS) ||
15918 tg3_flag(tp, USE_JUMBO_BDFLAG))
15919 tg3_flag_set(tp, JUMBO_CAPABLE);
15920
15921 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15922 &pci_state_reg);
15923
15924 if (pci_is_pcie(tp->pdev)) {
15925 u16 lnkctl;
15926
15927 tg3_flag_set(tp, PCI_EXPRESS);
15928
15929 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15930 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15931 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15932 tg3_flag_clear(tp, HW_TSO_2);
15933 tg3_flag_clear(tp, TSO_CAPABLE);
15934 }
15935 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15936 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15937 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15938 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15939 tg3_flag_set(tp, CLKREQ_BUG);
15940 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15941 tg3_flag_set(tp, L1PLLPD_EN);
15942 }
15943 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15944 /* BCM5785 devices are effectively PCIe devices, and should
15945 * follow PCIe codepaths, but do not have a PCIe capabilities
15946 * section.
15947 */
15948 tg3_flag_set(tp, PCI_EXPRESS);
15949 } else if (!tg3_flag(tp, 5705_PLUS) ||
15950 tg3_flag(tp, 5780_CLASS)) {
15951 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15952 if (!tp->pcix_cap) {
15953 dev_err(&tp->pdev->dev,
15954 "Cannot find PCI-X capability, aborting\n");
15955 return -EIO;
15956 }
15957
15958 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15959 tg3_flag_set(tp, PCIX_MODE);
15960 }
15961
15962 /* If we have an AMD 762 or VIA K8T800 chipset, write
15963 * reordering to the mailbox registers done by the host
15964 * controller can cause major troubles. We read back from
15965 * every mailbox register write to force the writes to be
15966 * posted to the chip in order.
15967 */
15968 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15969 !tg3_flag(tp, PCI_EXPRESS))
15970 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15971
15972 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15973 &tp->pci_cacheline_sz);
15974 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15975 &tp->pci_lat_timer);
15976 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15977 tp->pci_lat_timer < 64) {
15978 tp->pci_lat_timer = 64;
15979 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15980 tp->pci_lat_timer);
15981 }
15982
15983 /* Important! -- It is critical that the PCI-X hw workaround
15984 * situation is decided before the first MMIO register access.
15985 */
15986 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15987 /* 5700 BX chips need to have their TX producer index
15988 * mailboxes written twice to workaround a bug.
15989 */
15990 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15991
15992 /* If we are in PCI-X mode, enable register write workaround.
15993 *
15994 * The workaround is to use indirect register accesses
15995 * for all chip writes not to mailbox registers.
15996 */
15997 if (tg3_flag(tp, PCIX_MODE)) {
15998 u32 pm_reg;
15999
16000 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16001
16002 /* The chip can have its power management PCI config
16003 * space registers clobbered due to this bug.
16004 * So explicitly force the chip into D0 here.
16005 */
16006 pci_read_config_dword(tp->pdev,
16007 tp->pm_cap + PCI_PM_CTRL,
16008 &pm_reg);
16009 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16010 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16011 pci_write_config_dword(tp->pdev,
16012 tp->pm_cap + PCI_PM_CTRL,
16013 pm_reg);
16014
16015 /* Also, force SERR#/PERR# in PCI command. */
16016 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16017 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16018 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16019 }
16020 }
16021
16022 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16023 tg3_flag_set(tp, PCI_HIGH_SPEED);
16024 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16025 tg3_flag_set(tp, PCI_32BIT);
16026
16027 /* Chip-specific fixup from Broadcom driver */
16028 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16029 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16030 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16031 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16032 }
16033
16034 /* Default fast path register access methods */
16035 tp->read32 = tg3_read32;
16036 tp->write32 = tg3_write32;
16037 tp->read32_mbox = tg3_read32;
16038 tp->write32_mbox = tg3_write32;
16039 tp->write32_tx_mbox = tg3_write32;
16040 tp->write32_rx_mbox = tg3_write32;
16041
16042 /* Various workaround register access methods */
16043 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16044 tp->write32 = tg3_write_indirect_reg32;
16045 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16046 (tg3_flag(tp, PCI_EXPRESS) &&
16047 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16048 /*
16049 * Back to back register writes can cause problems on these
16050 * chips, the workaround is to read back all reg writes
16051 * except those to mailbox regs.
16052 *
16053 * See tg3_write_indirect_reg32().
16054 */
16055 tp->write32 = tg3_write_flush_reg32;
16056 }
16057
16058 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16059 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16060 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16061 tp->write32_rx_mbox = tg3_write_flush_reg32;
16062 }
16063
16064 if (tg3_flag(tp, ICH_WORKAROUND)) {
16065 tp->read32 = tg3_read_indirect_reg32;
16066 tp->write32 = tg3_write_indirect_reg32;
16067 tp->read32_mbox = tg3_read_indirect_mbox;
16068 tp->write32_mbox = tg3_write_indirect_mbox;
16069 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16070 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16071
16072 iounmap(tp->regs);
16073 tp->regs = NULL;
16074
16075 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16076 pci_cmd &= ~PCI_COMMAND_MEMORY;
16077 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16078 }
16079 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16080 tp->read32_mbox = tg3_read32_mbox_5906;
16081 tp->write32_mbox = tg3_write32_mbox_5906;
16082 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16083 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16084 }
16085
16086 if (tp->write32 == tg3_write_indirect_reg32 ||
16087 (tg3_flag(tp, PCIX_MODE) &&
16088 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16089 tg3_asic_rev(tp) == ASIC_REV_5701)))
16090 tg3_flag_set(tp, SRAM_USE_CONFIG);
16091
16092 /* The memory arbiter has to be enabled in order for SRAM accesses
16093 * to succeed. Normally on powerup the tg3 chip firmware will make
16094 * sure it is enabled, but other entities such as system netboot
16095 * code might disable it.
16096 */
16097 val = tr32(MEMARB_MODE);
16098 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16099
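/* Default to the function number from PCI config space, capped
 * at 3; the multi-function chips handled below report the true
 * function through chip-specific registers instead.
 */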
16100 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16101 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16102 tg3_flag(tp, 5780_CLASS)) {
16103 if (tg3_flag(tp, PCIX_MODE)) {
16104 pci_read_config_dword(tp->pdev,
16105 tp->pcix_cap + PCI_X_STATUS,
16106 &val);
16107 tp->pci_fn = val & 0x7;
16108 }
16109 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16110 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16111 tg3_asic_rev(tp) == ASIC_REV_5720) {
16112 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16113 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16114 val = tr32(TG3_CPMU_STATUS);
16115
16116 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16117 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16118 else
16119 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16120 TG3_CPMU_STATUS_FSHFT_5719;
16121 }
16122
16123 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16124 tp->write32_tx_mbox = tg3_write_flush_reg32;
16125 tp->write32_rx_mbox = tg3_write_flush_reg32;
16126 }
16127
16128 /* Get eeprom hw config before calling tg3_set_power_state().
16129 * In particular, the TG3_FLAG_IS_NIC flag must be
16130 * determined before calling tg3_set_power_state() so that
16131 * we know whether or not to switch out of Vaux power.
16132 * When the flag is set, it means that GPIO1 is used for eeprom
16133 * write protect and also implies that it is a LOM where GPIOs
16134 * are not used to switch power.
16135 */
16136 tg3_get_eeprom_hw_cfg(tp);
16137
16138 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16139 tg3_flag_clear(tp, TSO_CAPABLE);
16140 tg3_flag_clear(tp, TSO_BUG);
16141 tp->fw_needed = NULL;
16142 }
16143
16144 if (tg3_flag(tp, ENABLE_APE)) {
16145 /* Allow reads and writes to the
16146 * APE register and memory space.
16147 */
16148 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16149 PCISTATE_ALLOW_APE_SHMEM_WR |
16150 PCISTATE_ALLOW_APE_PSPACE_WR;
16151 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16152 pci_state_reg);
16153
16154 tg3_ape_lock_init(tp);
16155 }
16156
16157 /* Set up tp->grc_local_ctrl before calling
16158 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16159 * will bring 5700's external PHY out of reset.
16160 * It is also used as eeprom write protect on LOMs.
16161 */
16162 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16163 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16164 tg3_flag(tp, EEPROM_WRITE_PROT))
16165 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16166 GRC_LCLCTRL_GPIO_OUTPUT1);
16167 /* Unused GPIO3 must be driven as output on 5752 because there
16168 * are no pull-up resistors on unused GPIO pins.
16169 */
16170 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16171 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16172
16173 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16174 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16175 tg3_flag(tp, 57765_CLASS))
16176 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16177
16178 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16179 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16180 /* Turn off the debug UART. */
16181 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16182 if (tg3_flag(tp, IS_NIC))
16183 /* Keep VMain power. */
16184 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16185 GRC_LCLCTRL_GPIO_OUTPUT0;
16186 }
16187
16188 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16189 tp->grc_local_ctrl |=
16190 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16191
16192 /* Switch out of Vaux if it is a NIC */
16193 tg3_pwrsrc_switch_to_vmain(tp);
16194
16195 /* Derive initial jumbo mode from MTU assigned in
16196 * ether_setup() via the alloc_etherdev() call
16197 */
16198 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16199 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16200
16201 /* Determine WakeOnLan speed to use. */
16202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16203 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16204 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16205 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16206 tg3_flag_clear(tp, WOL_SPEED_100MB);
16207 } else {
16208 tg3_flag_set(tp, WOL_SPEED_100MB);
16209 }
16210
16211 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16212 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16213
16214 /* A few boards don't want Ethernet@WireSpeed phy feature */
16215 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16216 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16217 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16218 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16219 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16220 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16221 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16222
16223 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16224 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16225 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16227 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16228
16229 if (tg3_flag(tp, 5705_PLUS) &&
16230 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16231 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16232 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16233 !tg3_flag(tp, 57765_PLUS)) {
16234 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16235 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16236 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16237 tg3_asic_rev(tp) == ASIC_REV_5761) {
16238 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16239 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16240 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16241 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16242 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16243 } else
16244 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16245 }
16246
16247 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16248 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16249 tp->phy_otp = tg3_read_otp_phycfg(tp);
16250 if (tp->phy_otp == 0)
16251 tp->phy_otp = TG3_OTP_DEFAULT;
16252 }
16253
16254 if (tg3_flag(tp, CPMU_PRESENT))
16255 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16256 else
16257 tp->mi_mode = MAC_MI_MODE_BASE;
16258
16259 tp->coalesce_mode = 0;
16260 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16261 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16262 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16263
16264 /* Set these bits to enable statistics workaround. */
16265 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16266 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16267 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16268 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16269 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16270 }
16271
16272 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16273 tg3_asic_rev(tp) == ASIC_REV_57780)
16274 tg3_flag_set(tp, USE_PHYLIB);
16275
16276 err = tg3_mdio_init(tp);
16277 if (err)
16278 return err;
16279
16280 /* Initialize data/descriptor byte/word swapping. */
16281 val = tr32(GRC_MODE);
16282 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16283 tg3_asic_rev(tp) == ASIC_REV_5762)
16284 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16285 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16286 GRC_MODE_B2HRX_ENABLE |
16287 GRC_MODE_HTX2B_ENABLE |
16288 GRC_MODE_HOST_STACKUP);
16289 else
16290 val &= GRC_MODE_HOST_STACKUP;
16291
16292 tw32(GRC_MODE, val | tp->grc_mode);
16293
16294 tg3_switch_clocks(tp);
16295
16296 /* Clear this out for sanity. */
16297 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16298
16299 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16300 tw32(TG3PCI_REG_BASE_ADDR, 0);
16301
16302 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16303 &pci_state_reg);
16304 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16305 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16306 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16307 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16308 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16309 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16310 void __iomem *sram_base;
16311
16312 /* Write some dummy words into the SRAM status block
16313 * area and see if it reads back correctly. If the return
16314 * value is bad, force enable the PCIX workaround.
16315 */
16316 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16317
16318 writel(0x00000000, sram_base);
16319 writel(0x00000000, sram_base + 4);
16320 writel(0xffffffff, sram_base + 4);
16321 if (readl(sram_base) != 0x00000000)
16322 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16323 }
16324 }
16325
16326 udelay(50);
16327 tg3_nvram_init(tp);
16328
16329 /* If the device has an NVRAM, no need to load patch firmware */
16330 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16331 !tg3_flag(tp, NO_NVRAM))
16332 tp->fw_needed = NULL;
16333
16334 grc_misc_cfg = tr32(GRC_MISC_CFG);
16335 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16336
16337 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16338 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16339 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16340 tg3_flag_set(tp, IS_5788);
16341
16342 if (!tg3_flag(tp, IS_5788) &&
16343 tg3_asic_rev(tp) != ASIC_REV_5700)
16344 tg3_flag_set(tp, TAGGED_STATUS);
16345 if (tg3_flag(tp, TAGGED_STATUS)) {
16346 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16347 HOSTCC_MODE_CLRTICK_TXBD);
16348
16349 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16350 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16351 tp->misc_host_ctrl);
16352 }
16353
16354 /* Preserve the APE MAC_MODE bits */
16355 if (tg3_flag(tp, ENABLE_APE))
16356 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16357 else
16358 tp->mac_mode = 0;
16359
16360 if (tg3_10_100_only_device(tp, ent))
16361 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16362
16363 err = tg3_phy_probe(tp);
16364 if (err) {
16365 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16366 /* ... but do not return immediately ... */
16367 tg3_mdio_fini(tp);
16368 }
16369
16370 tg3_read_vpd(tp);
16371 tg3_read_fw_ver(tp);
16372
16373 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16374 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16375 } else {
16376 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16377 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16378 else
16379 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16380 }
16381
16382 /* 5700 {AX,BX} chips have a broken status block link
16383 * change bit implementation, so we must use the
16384 * status register in those cases.
16385 */
16386 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16387 tg3_flag_set(tp, USE_LINKCHG_REG);
16388 else
16389 tg3_flag_clear(tp, USE_LINKCHG_REG);
16390
16391 /* The led_ctrl is set during tg3_phy_probe; here we might
16392 * have to force the link status polling mechanism based
16393 * upon subsystem IDs.
16394 */
16395 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16396 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16397 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16398 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16399 tg3_flag_set(tp, USE_LINKCHG_REG);
16400 }
16401
16402 /* For all SERDES we poll the MAC status register. */
16403 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16404 tg3_flag_set(tp, POLL_SERDES);
16405 else
16406 tg3_flag_clear(tp, POLL_SERDES);
16407
16408 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16409 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16410 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16411 tg3_flag(tp, PCIX_MODE)) {
16412 tp->rx_offset = NET_SKB_PAD;
16413 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
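/* Force a copy of every rx packet so the payload ends up aligned
 * on hosts that lack efficient unaligned access.
 */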
16414 tp->rx_copy_thresh = ~(u16)0;
16415 #endif
16416 }
16417
16418 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16419 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16420 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16421
16422 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16423
16424 /* Increment the rx prod index on the rx std ring by at most
16425 * 8 for these chips to work around hw errata.
16426 */
16427 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16428 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16429 tg3_asic_rev(tp) == ASIC_REV_5755)
16430 tp->rx_std_max_post = 8;
16431
16432 if (tg3_flag(tp, ASPM_WORKAROUND))
16433 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16434 PCIE_PWR_MGMT_L1_THRESH_MSK;
16435
16436 return err;
16437 }
16438
16439 #ifdef CONFIG_SPARC
16440 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16441 {
16442 struct net_device *dev = tp->dev;
16443 struct pci_dev *pdev = tp->pdev;
16444 struct device_node *dp = pci_device_to_OF_node(pdev);
16445 const unsigned char *addr;
16446 int len;
16447
16448 addr = of_get_property(dp, "local-mac-address", &len);
16449 if (addr && len == 6) {
16450 memcpy(dev->dev_addr, addr, 6);
16451 return 0;
16452 }
16453 return -ENODEV;
16454 }
16455
16456 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16457 {
16458 struct net_device *dev = tp->dev;
16459
16460 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16461 return 0;
16462 }
16463 #endif
16464
16465 static int tg3_get_device_address(struct tg3 *tp)
16466 {
16467 struct net_device *dev = tp->dev;
16468 u32 hi, lo, mac_offset;
16469 int addr_ok = 0;
16470 int err;
16471
16472 #ifdef CONFIG_SPARC
16473 if (!tg3_get_macaddr_sparc(tp))
16474 return 0;
16475 #endif
16476
16477 if (tg3_flag(tp, IS_SSB_CORE)) {
16478 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16479 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16480 return 0;
16481 }
16482
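/* 0x7c is the default NVRAM offset of the MAC address; dual-MAC
 * and multi-function parts keep the second port's address at
 * alternate offsets, selected below.
 */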
16483 mac_offset = 0x7c;
16484 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16485 tg3_flag(tp, 5780_CLASS)) {
16486 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16487 mac_offset = 0xcc;
16488 if (tg3_nvram_lock(tp))
16489 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16490 else
16491 tg3_nvram_unlock(tp);
16492 } else if (tg3_flag(tp, 5717_PLUS)) {
16493 if (tp->pci_fn & 1)
16494 mac_offset = 0xcc;
16495 if (tp->pci_fn > 1)
16496 mac_offset += 0x18c;
16497 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16498 mac_offset = 0x10;
16499
16500 /* First try to get it from MAC address mailbox. */
16501 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
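/* 0x484b is ASCII "HK", apparently the signature bootcode leaves
 * in the mailbox when it has stored a valid MAC address there.
 */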
16502 if ((hi >> 16) == 0x484b) {
16503 dev->dev_addr[0] = (hi >> 8) & 0xff;
16504 dev->dev_addr[1] = (hi >> 0) & 0xff;
16505
16506 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16507 dev->dev_addr[2] = (lo >> 24) & 0xff;
16508 dev->dev_addr[3] = (lo >> 16) & 0xff;
16509 dev->dev_addr[4] = (lo >> 8) & 0xff;
16510 dev->dev_addr[5] = (lo >> 0) & 0xff;
16511
16512 /* Some old bootcode may report a 0 MAC address in SRAM */
16513 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16514 }
16515 if (!addr_ok) {
16516 /* Next, try NVRAM. */
16517 if (!tg3_flag(tp, NO_NVRAM) &&
16518 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16519 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16520 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16521 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16522 }
16523 /* Finally just fetch it out of the MAC control regs. */
16524 else {
16525 hi = tr32(MAC_ADDR_0_HIGH);
16526 lo = tr32(MAC_ADDR_0_LOW);
16527
16528 dev->dev_addr[5] = lo & 0xff;
16529 dev->dev_addr[4] = (lo >> 8) & 0xff;
16530 dev->dev_addr[3] = (lo >> 16) & 0xff;
16531 dev->dev_addr[2] = (lo >> 24) & 0xff;
16532 dev->dev_addr[1] = hi & 0xff;
16533 dev->dev_addr[0] = (hi >> 8) & 0xff;
16534 }
16535 }
16536
16537 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16538 #ifdef CONFIG_SPARC
16539 if (!tg3_get_default_macaddr_sparc(tp))
16540 return 0;
16541 #endif
16542 return -EINVAL;
16543 }
16544 return 0;
16545 }
16546
16547 #define BOUNDARY_SINGLE_CACHELINE 1
16548 #define BOUNDARY_MULTI_CACHELINE 2
16549
16550 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16551 {
16552 int cacheline_size;
16553 u8 byte;
16554 int goal;
16555
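/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; a value of
 * zero is treated here as the maximum 1024-byte cacheline.
 */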
16556 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16557 if (byte == 0)
16558 cacheline_size = 1024;
16559 else
16560 cacheline_size = (int) byte * 4;
16561
16562 /* On 5703 and later chips, the boundary bits have no
16563 * effect.
16564 */
16565 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16566 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16567 !tg3_flag(tp, PCI_EXPRESS))
16568 goto out;
16569
16570 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16571 goal = BOUNDARY_MULTI_CACHELINE;
16572 #else
16573 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16574 goal = BOUNDARY_SINGLE_CACHELINE;
16575 #else
16576 goal = 0;
16577 #endif
16578 #endif
16579
16580 if (tg3_flag(tp, 57765_PLUS)) {
16581 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16582 goto out;
16583 }
16584
16585 if (!goal)
16586 goto out;
16587
16588 /* PCI controllers on most RISC systems tend to disconnect
16589 * when a device tries to burst across a cache-line boundary.
16590 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16591 *
16592 * Unfortunately, for PCI-E there are only limited
16593 * write-side controls for this, and thus for reads
16594 * we will still get the disconnects. We'll also waste
16595 * these PCI cycles for both read and write for chips
16596 * other than 5700 and 5701 which do not implement the
16597 * boundary bits.
16598 */
16599 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16600 switch (cacheline_size) {
16601 case 16:
16602 case 32:
16603 case 64:
16604 case 128:
16605 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16606 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16607 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16608 } else {
16609 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16610 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16611 }
16612 break;
16613
16614 case 256:
16615 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16616 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16617 break;
16618
16619 default:
16620 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16621 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16622 break;
16623 }
16624 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16625 switch (cacheline_size) {
16626 case 16:
16627 case 32:
16628 case 64:
16629 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16630 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16631 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16632 break;
16633 }
16634 /* fallthrough */
16635 case 128:
16636 default:
16637 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16638 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16639 break;
16640 }
16641 } else {
16642 switch (cacheline_size) {
16643 case 16:
16644 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16645 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16646 DMA_RWCTRL_WRITE_BNDRY_16);
16647 break;
16648 }
16649 /* fallthrough */
16650 case 32:
16651 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16652 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16653 DMA_RWCTRL_WRITE_BNDRY_32);
16654 break;
16655 }
16656 /* fallthrough */
16657 case 64:
16658 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16659 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16660 DMA_RWCTRL_WRITE_BNDRY_64);
16661 break;
16662 }
16663 /* fallthrough */
16664 case 128:
16665 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16666 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16667 DMA_RWCTRL_WRITE_BNDRY_128);
16668 break;
16669 }
16670 /* fallthrough */
16671 case 256:
16672 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16673 DMA_RWCTRL_WRITE_BNDRY_256);
16674 break;
16675 case 512:
16676 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16677 DMA_RWCTRL_WRITE_BNDRY_512);
16678 break;
16679 case 1024:
16680 default:
16681 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16682 DMA_RWCTRL_WRITE_BNDRY_1024);
16683 break;
16684 }
16685 }
16686
16687 out:
16688 return val;
16689 }
16690
16691 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16692 int size, bool to_device)
16693 {
16694 struct tg3_internal_buffer_desc test_desc;
16695 u32 sram_dma_descs;
16696 int i, ret;
16697
16698 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16699
16700 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16701 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16702 tw32(RDMAC_STATUS, 0);
16703 tw32(WDMAC_STATUS, 0);
16704
16705 tw32(BUFMGR_MODE, 0);
16706 tw32(FTQ_RESET, 0);
16707
16708 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16709 test_desc.addr_lo = buf_dma & 0xffffffff;
16710 test_desc.nic_mbuf = 0x00002100;
16711 test_desc.len = size;
16712
16713 /*
16714 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16715 * the *second* time the tg3 driver was getting loaded after an
16716 * initial scan.
16717 *
16718 * Broadcom tells me:
16719 * ...the DMA engine is connected to the GRC block and a DMA
16720 * reset may affect the GRC block in some unpredictable way...
16721 * The behavior of resets to individual blocks has not been tested.
16722 *
16723 * Broadcom noted the GRC reset will also reset all sub-components.
16724 */
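/* The cqid_sqid values below pick the FTQ completion/send queue
 * pair for each DMA direction; the magic numbers are presumably
 * carried over from Broadcom's reference driver.
 */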
16725 if (to_device) {
16726 test_desc.cqid_sqid = (13 << 8) | 2;
16727
16728 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16729 udelay(40);
16730 } else {
16731 test_desc.cqid_sqid = (16 << 8) | 7;
16732
16733 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16734 udelay(40);
16735 }
16736 test_desc.flags = 0x00000005;
16737
16738 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16739 u32 val;
16740
16741 val = *(((u32 *)&test_desc) + i);
16742 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16743 sram_dma_descs + (i * sizeof(u32)));
16744 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16745 }
16746 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16747
16748 if (to_device)
16749 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16750 else
16751 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16752
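/* Poll up to ~4 ms (40 * 100 us) for the descriptor to show up
 * on the completion FIFO before giving up with -ENODEV.
 */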
16753 ret = -ENODEV;
16754 for (i = 0; i < 40; i++) {
16755 u32 val;
16756
16757 if (to_device)
16758 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16759 else
16760 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16761 if ((val & 0xffff) == sram_dma_descs) {
16762 ret = 0;
16763 break;
16764 }
16765
16766 udelay(100);
16767 }
16768
16769 return ret;
16770 }
16771
16772 #define TEST_BUFFER_SIZE 0x2000
16773
16774 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16775 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16776 { },
16777 };
16778
16779 static int tg3_test_dma(struct tg3 *tp)
16780 {
16781 dma_addr_t buf_dma;
16782 u32 *buf, saved_dma_rwctrl;
16783 int ret = 0;
16784
16785 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16786 &buf_dma, GFP_KERNEL);
16787 if (!buf) {
16788 ret = -ENOMEM;
16789 goto out_nofree;
16790 }
16791
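/* Seed DMA_RW_CTRL with the PCI bus command codes used for DMA;
 * 0x7 and 0x6 appear to be the standard PCI Memory Write and
 * Memory Read command encodings.
 */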
16792 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16793 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16794
16795 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16796
16797 if (tg3_flag(tp, 57765_PLUS))
16798 goto out;
16799
16800 if (tg3_flag(tp, PCI_EXPRESS)) {
16801 /* DMA read watermark not used on PCIE */
16802 tp->dma_rwctrl |= 0x00180000;
16803 } else if (!tg3_flag(tp, PCIX_MODE)) {
16804 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16805 tg3_asic_rev(tp) == ASIC_REV_5750)
16806 tp->dma_rwctrl |= 0x003f0000;
16807 else
16808 tp->dma_rwctrl |= 0x003f000f;
16809 } else {
16810 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16811 tg3_asic_rev(tp) == ASIC_REV_5704) {
16812 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16813 u32 read_water = 0x7;
16814
16815 /* If the 5704 is behind the EPB bridge, we can
16816 * do the less restrictive ONE_DMA workaround for
16817 * better performance.
16818 */
16819 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16820 tg3_asic_rev(tp) == ASIC_REV_5704)
16821 tp->dma_rwctrl |= 0x8000;
16822 else if (ccval == 0x6 || ccval == 0x7)
16823 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16824
16825 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16826 read_water = 4;
16827 /* Set bit 23 to enable PCIX hw bug fix */
16828 tp->dma_rwctrl |=
16829 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16830 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16831 (1 << 23);
16832 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16833 /* 5780 always in PCIX mode */
16834 tp->dma_rwctrl |= 0x00144000;
16835 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16836 /* 5714 always in PCIX mode */
16837 tp->dma_rwctrl |= 0x00148000;
16838 } else {
16839 tp->dma_rwctrl |= 0x001b000f;
16840 }
16841 }
16842 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16843 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16844
16845 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16846 tg3_asic_rev(tp) == ASIC_REV_5704)
16847 tp->dma_rwctrl &= 0xfffffff0;
16848
16849 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16850 tg3_asic_rev(tp) == ASIC_REV_5701) {
16851 /* Remove this if it causes problems for some boards. */
16852 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16853
16854 /* On 5700/5701 chips, we need to set this bit.
16855 * Otherwise the chip will issue cacheline transactions
16856 * to streamable DMA memory without all the byte
16857 * enables turned on. This is an error on several
16858 * RISC PCI controllers, in particular sparc64.
16859 *
16860 * On 5703/5704 chips, this bit has been reassigned
16861 * a different meaning. In particular, it is used
16862 * on those chips to enable a PCI-X workaround.
16863 */
16864 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16865 }
16866
16867 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16868
16869 #if 0
16870 /* Unneeded, already done by tg3_get_invariants. */
16871 tg3_switch_clocks(tp);
16872 #endif
16873
16874 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16875 tg3_asic_rev(tp) != ASIC_REV_5701)
16876 goto out;
16877
16878 /* It is best to perform the DMA test with maximum write burst size
16879 * to expose the 5700/5701 write DMA bug.
16880 */
16881 saved_dma_rwctrl = tp->dma_rwctrl;
16882 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16883 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16884
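/* DMA an incrementing-word pattern to the chip and read it back,
 * retrying with the write boundary tightened to 16 bytes if
 * corruption is seen, until a clean pass or a hard failure.
 */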
16885 while (1) {
16886 u32 *p = buf, i;
16887
16888 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16889 p[i] = i;
16890
16891 /* Send the buffer to the chip. */
16892 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16893 if (ret) {
16894 dev_err(&tp->pdev->dev,
16895 "%s: Buffer write failed. err = %d\n",
16896 __func__, ret);
16897 break;
16898 }
16899
16900 #if 0
16901 /* validate data reached card RAM correctly. */
16902 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16903 u32 val;
16904 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16905 if (le32_to_cpu(val) != p[i]) {
16906 dev_err(&tp->pdev->dev,
16907 "%s: Buffer corrupted on device! "
16908 "(%d != %d)\n", __func__, val, i);
16909 /* ret = -ENODEV here? */
16910 }
16911 p[i] = 0;
16912 }
16913 #endif
16914 /* Now read it back. */
16915 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16916 if (ret) {
16917 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16918 "err = %d\n", __func__, ret);
16919 break;
16920 }
16921
16922 /* Verify it. */
16923 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16924 if (p[i] == i)
16925 continue;
16926
16927 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16928 DMA_RWCTRL_WRITE_BNDRY_16) {
16929 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16930 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16931 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16932 break;
16933 } else {
16934 dev_err(&tp->pdev->dev,
16935 "%s: Buffer corrupted on read back! "
16936 "(%d != %d)\n", __func__, p[i], i);
16937 ret = -ENODEV;
16938 goto out;
16939 }
16940 }
16941
16942 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16943 /* Success. */
16944 ret = 0;
16945 break;
16946 }
16947 }
16948 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16949 DMA_RWCTRL_WRITE_BNDRY_16) {
16950 /* DMA test passed without adjusting the DMA boundary;
16951 * now look for chipsets that are known to expose the
16952 * DMA bug without failing the test.
16953 */
16954 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16955 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16956 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16957 } else {
16958 /* Safe to use the calculated DMA boundary. */
16959 tp->dma_rwctrl = saved_dma_rwctrl;
16960 }
16961
16962 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16963 }
16964
16965 out:
16966 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16967 out_nofree:
16968 return ret;
16969 }
16970
16971 static void tg3_init_bufmgr_config(struct tg3 *tp)
16972 {
16973 if (tg3_flag(tp, 57765_PLUS)) {
16974 tp->bufmgr_config.mbuf_read_dma_low_water =
16975 DEFAULT_MB_RDMA_LOW_WATER_5705;
16976 tp->bufmgr_config.mbuf_mac_rx_low_water =
16977 DEFAULT_MB_MACRX_LOW_WATER_57765;
16978 tp->bufmgr_config.mbuf_high_water =
16979 DEFAULT_MB_HIGH_WATER_57765;
16980
16981 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16982 DEFAULT_MB_RDMA_LOW_WATER_5705;
16983 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16984 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16985 tp->bufmgr_config.mbuf_high_water_jumbo =
16986 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16987 } else if (tg3_flag(tp, 5705_PLUS)) {
16988 tp->bufmgr_config.mbuf_read_dma_low_water =
16989 DEFAULT_MB_RDMA_LOW_WATER_5705;
16990 tp->bufmgr_config.mbuf_mac_rx_low_water =
16991 DEFAULT_MB_MACRX_LOW_WATER_5705;
16992 tp->bufmgr_config.mbuf_high_water =
16993 DEFAULT_MB_HIGH_WATER_5705;
16994 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16995 tp->bufmgr_config.mbuf_mac_rx_low_water =
16996 DEFAULT_MB_MACRX_LOW_WATER_5906;
16997 tp->bufmgr_config.mbuf_high_water =
16998 DEFAULT_MB_HIGH_WATER_5906;
16999 }
17000
17001 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17002 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17003 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17004 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17005 tp->bufmgr_config.mbuf_high_water_jumbo =
17006 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17007 } else {
17008 tp->bufmgr_config.mbuf_read_dma_low_water =
17009 DEFAULT_MB_RDMA_LOW_WATER;
17010 tp->bufmgr_config.mbuf_mac_rx_low_water =
17011 DEFAULT_MB_MACRX_LOW_WATER;
17012 tp->bufmgr_config.mbuf_high_water =
17013 DEFAULT_MB_HIGH_WATER;
17014
17015 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17016 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17017 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17018 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17019 tp->bufmgr_config.mbuf_high_water_jumbo =
17020 DEFAULT_MB_HIGH_WATER_JUMBO;
17021 }
17022
17023 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17024 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17025 }
17026
17027 static char *tg3_phy_string(struct tg3 *tp)
17028 {
17029 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17030 case TG3_PHY_ID_BCM5400: return "5400";
17031 case TG3_PHY_ID_BCM5401: return "5401";
17032 case TG3_PHY_ID_BCM5411: return "5411";
17033 case TG3_PHY_ID_BCM5701: return "5701";
17034 case TG3_PHY_ID_BCM5703: return "5703";
17035 case TG3_PHY_ID_BCM5704: return "5704";
17036 case TG3_PHY_ID_BCM5705: return "5705";
17037 case TG3_PHY_ID_BCM5750: return "5750";
17038 case TG3_PHY_ID_BCM5752: return "5752";
17039 case TG3_PHY_ID_BCM5714: return "5714";
17040 case TG3_PHY_ID_BCM5780: return "5780";
17041 case TG3_PHY_ID_BCM5755: return "5755";
17042 case TG3_PHY_ID_BCM5787: return "5787";
17043 case TG3_PHY_ID_BCM5784: return "5784";
17044 case TG3_PHY_ID_BCM5756: return "5722/5756";
17045 case TG3_PHY_ID_BCM5906: return "5906";
17046 case TG3_PHY_ID_BCM5761: return "5761";
17047 case TG3_PHY_ID_BCM5718C: return "5718C";
17048 case TG3_PHY_ID_BCM5718S: return "5718S";
17049 case TG3_PHY_ID_BCM57765: return "57765";
17050 case TG3_PHY_ID_BCM5719C: return "5719C";
17051 case TG3_PHY_ID_BCM5720C: return "5720C";
17052 case TG3_PHY_ID_BCM5762: return "5762C";
17053 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17054 case 0: return "serdes";
17055 default: return "unknown";
17056 }
17057 }
17058
17059 static char *tg3_bus_string(struct tg3 *tp, char *str)
17060 {
17061 if (tg3_flag(tp, PCI_EXPRESS)) {
17062 strcpy(str, "PCI Express");
17063 return str;
17064 } else if (tg3_flag(tp, PCIX_MODE)) {
17065 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
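/* The low 5 bits of CLOCK_CTRL encode the sampled PCI-X bus
 * speed, decoded below (0 = 33MHz up through 7 = 133MHz).
 */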
17066
17067 strcpy(str, "PCIX:");
17068
17069 if ((clock_ctrl == 7) ||
17070 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17071 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17072 strcat(str, "133MHz");
17073 else if (clock_ctrl == 0)
17074 strcat(str, "33MHz");
17075 else if (clock_ctrl == 2)
17076 strcat(str, "50MHz");
17077 else if (clock_ctrl == 4)
17078 strcat(str, "66MHz");
17079 else if (clock_ctrl == 6)
17080 strcat(str, "100MHz");
17081 } else {
17082 strcpy(str, "PCI:");
17083 if (tg3_flag(tp, PCI_HIGH_SPEED))
17084 strcat(str, "66MHz");
17085 else
17086 strcat(str, "33MHz");
17087 }
17088 if (tg3_flag(tp, PCI_32BIT))
17089 strcat(str, ":32-bit");
17090 else
17091 strcat(str, ":64-bit");
17092 return str;
17093 }
17094
17095 static void tg3_init_coal(struct tg3 *tp)
17096 {
17097 struct ethtool_coalesce *ec = &tp->coal;
17098
17099 memset(ec, 0, sizeof(*ec));
17100 ec->cmd = ETHTOOL_GCOALESCE;
17101 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17102 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17103 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17104 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17105 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17106 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17107 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17108 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17109 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17110
17111 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17112 HOSTCC_MODE_CLRTICK_TXBD)) {
17113 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17114 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17115 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17116 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17117 }
17118
17119 if (tg3_flag(tp, 5705_PLUS)) {
17120 ec->rx_coalesce_usecs_irq = 0;
17121 ec->tx_coalesce_usecs_irq = 0;
17122 ec->stats_block_coalesce_usecs = 0;
17123 }
17124 }
17125
17126 static int tg3_init_one(struct pci_dev *pdev,
17127 const struct pci_device_id *ent)
17128 {
17129 struct net_device *dev;
17130 struct tg3 *tp;
17131 int i, err, pm_cap;
17132 u32 sndmbx, rcvmbx, intmbx;
17133 char str[40];
17134 u64 dma_mask, persist_dma_mask;
17135 netdev_features_t features = 0;
17136
17137 printk_once(KERN_INFO "%s\n", version);
17138
17139 err = pci_enable_device(pdev);
17140 if (err) {
17141 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17142 return err;
17143 }
17144
17145 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17146 if (err) {
17147 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17148 goto err_out_disable_pdev;
17149 }
17150
17151 pci_set_master(pdev);
17152
17153 /* Find power-management capability. */
17154 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17155 if (pm_cap == 0) {
17156 dev_err(&pdev->dev,
17157 "Cannot find Power Management capability, aborting\n");
17158 err = -EIO;
17159 goto err_out_free_res;
17160 }
17161
17162 err = pci_set_power_state(pdev, PCI_D0);
17163 if (err) {
17164 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17165 goto err_out_free_res;
17166 }
17167
17168 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17169 if (!dev) {
17170 err = -ENOMEM;
17171 goto err_out_power_down;
17172 }
17173
17174 SET_NETDEV_DEV(dev, &pdev->dev);
17175
17176 tp = netdev_priv(dev);
17177 tp->pdev = pdev;
17178 tp->dev = dev;
17179 tp->pm_cap = pm_cap;
17180 tp->rx_mode = TG3_DEF_RX_MODE;
17181 tp->tx_mode = TG3_DEF_TX_MODE;
17182 tp->irq_sync = 1;
17183
17184 if (tg3_debug > 0)
17185 tp->msg_enable = tg3_debug;
17186 else
17187 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17188
17189 if (pdev_is_ssb_gige_core(pdev)) {
17190 tg3_flag_set(tp, IS_SSB_CORE);
17191 if (ssb_gige_must_flush_posted_writes(pdev))
17192 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17193 if (ssb_gige_one_dma_at_once(pdev))
17194 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17195 if (ssb_gige_have_roboswitch(pdev))
17196 tg3_flag_set(tp, ROBOSWITCH);
17197 if (ssb_gige_is_rgmii(pdev))
17198 tg3_flag_set(tp, RGMII_MODE);
17199 }
17200
17201 /* The word/byte swap controls here control register access byte
17202 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17203 * setting below.
17204 */
17205 tp->misc_host_ctrl =
17206 MISC_HOST_CTRL_MASK_PCI_INT |
17207 MISC_HOST_CTRL_WORD_SWAP |
17208 MISC_HOST_CTRL_INDIR_ACCESS |
17209 MISC_HOST_CTRL_PCISTATE_RW;
17210
17211 /* The NONFRM (non-frame) byte/word swap controls take effect
17212 * on descriptor entries, anything which isn't packet data.
17213 *
17214 * The StrongARM chips on the board (one for tx, one for rx)
17215 * are running in big-endian mode.
17216 */
17217 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17218 GRC_MODE_WSWAP_NONFRM_DATA);
17219 #ifdef __BIG_ENDIAN
17220 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17221 #endif
17222 spin_lock_init(&tp->lock);
17223 spin_lock_init(&tp->indirect_lock);
17224 INIT_WORK(&tp->reset_task, tg3_reset_task);
17225
17226 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17227 if (!tp->regs) {
17228 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17229 err = -ENOMEM;
17230 goto err_out_free_dev;
17231 }
17232
17233 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17234 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17235 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17236 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17237 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17238 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17239 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17240 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17241 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17242 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17243 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17244 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17245 tg3_flag_set(tp, ENABLE_APE);
17246 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17247 if (!tp->aperegs) {
17248 dev_err(&pdev->dev,
17249 "Cannot map APE registers, aborting\n");
17250 err = -ENOMEM;
17251 goto err_out_iounmap;
17252 }
17253 }
17254
17255 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17256 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17257
17258 dev->ethtool_ops = &tg3_ethtool_ops;
17259 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17260 dev->netdev_ops = &tg3_netdev_ops;
17261 dev->irq = pdev->irq;
17262
17263 err = tg3_get_invariants(tp, ent);
17264 if (err) {
17265 dev_err(&pdev->dev,
17266 "Problem fetching invariants of chip, aborting\n");
17267 goto err_out_apeunmap;
17268 }
17269
17270 /* The EPB bridge inside 5714, 5715, and 5780 and any
17271 * device behind the EPB cannot support DMA addresses > 40-bit.
17272 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17273 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17274 * do DMA address check in tg3_start_xmit().
17275 */
17276 if (tg3_flag(tp, IS_5788))
17277 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17278 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17279 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17280 #ifdef CONFIG_HIGHMEM
17281 dma_mask = DMA_BIT_MASK(64);
17282 #endif
17283 } else
17284 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17285
17286 /* Configure DMA attributes. */
17287 if (dma_mask > DMA_BIT_MASK(32)) {
17288 err = pci_set_dma_mask(pdev, dma_mask);
17289 if (!err) {
17290 features |= NETIF_F_HIGHDMA;
17291 err = pci_set_consistent_dma_mask(pdev,
17292 persist_dma_mask);
17293 if (err < 0) {
17294 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17295 "DMA for consistent allocations\n");
17296 goto err_out_apeunmap;
17297 }
17298 }
17299 }
17300 if (err || dma_mask == DMA_BIT_MASK(32)) {
17301 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17302 if (err) {
17303 dev_err(&pdev->dev,
17304 "No usable DMA configuration, aborting\n");
17305 goto err_out_apeunmap;
17306 }
17307 }
17308
17309 tg3_init_bufmgr_config(tp);
17310
17311 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17312
17313 /* 5700 B0 chips do not support checksumming correctly due
17314 * to hardware bugs.
17315 */
17316 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17317 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17318
17319 if (tg3_flag(tp, 5755_PLUS))
17320 features |= NETIF_F_IPV6_CSUM;
17321 }
17322
17323 /* TSO is on by default on chips that support hardware TSO.
17324 * Firmware TSO on older chips gives lower performance, so it
17325 * is off by default, but can be enabled using ethtool.
17326 */
17327 if ((tg3_flag(tp, HW_TSO_1) ||
17328 tg3_flag(tp, HW_TSO_2) ||
17329 tg3_flag(tp, HW_TSO_3)) &&
17330 (features & NETIF_F_IP_CSUM))
17331 features |= NETIF_F_TSO;
17332 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17333 if (features & NETIF_F_IPV6_CSUM)
17334 features |= NETIF_F_TSO6;
17335 if (tg3_flag(tp, HW_TSO_3) ||
17336 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17337 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17338 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17339 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17340 tg3_asic_rev(tp) == ASIC_REV_57780)
17341 features |= NETIF_F_TSO_ECN;
17342 }
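/* Summary of the ladder above: any hardware TSO engine (HW_TSO_1/2/3)
 * plus NETIF_F_IP_CSUM yields NETIF_F_TSO; second- and third-generation
 * engines add NETIF_F_TSO6 when IPv6 checksumming is available; and
 * NETIF_F_TSO_ECN is advertised only for HW_TSO_3 or the explicitly
 * listed ASIC revisions.
 */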
17343
17344 dev->features |= features;
17345 dev->vlan_features |= features;
17346
17347 /*
17348 * Add loopback capability only for a subset of devices that support
17349 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17350 * loopback for the remaining devices.
17351 */
17352 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17353 !tg3_flag(tp, CPMU_PRESENT))
17354 /* Add the loopback capability */
17355 features |= NETIF_F_LOOPBACK;
17356
17357 dev->hw_features |= features;
17358
17359 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17360 !tg3_flag(tp, TSO_CAPABLE) &&
17361 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17362 tg3_flag_set(tp, MAX_RXPEND_64);
17363 tp->rx_pending = 63;
17364 }
17365
17366 err = tg3_get_device_address(tp);
17367 if (err) {
17368 dev_err(&pdev->dev,
17369 "Could not obtain valid ethernet address, aborting\n");
17370 goto err_out_apeunmap;
17371 }
17372
17373 /*
17374 * Reset the chip in case a UNDI or EFI driver did not shut it down;
17375 * otherwise the DMA self test will enable the WDMAC and we'll see
17376 * (spurious) pending DMA on the PCI bus at that point.
17377 */
17378 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17379 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17380 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17381 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17382 }
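/* Note (an inference from the surrounding code, not a documented
 * requirement): tg3_halt() reaches NIC-local memory through the memory
 * arbiter, so MEMARB_MODE_ENABLE is written first in case the
 * boot-time driver left the arbiter disabled.
 */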
17383
17384 err = tg3_test_dma(tp);
17385 if (err) {
17386 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17387 goto err_out_apeunmap;
17388 }
17389
17390 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17391 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17392 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17393 for (i = 0; i < tp->irq_max; i++) {
17394 struct tg3_napi *tnapi = &tp->napi[i];
17395
17396 tnapi->tp = tp;
17397 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17398
17399 tnapi->int_mbox = intmbx;
17400 if (i <= 4)
17401 intmbx += 0x8;
17402 else
17403 intmbx += 0x4;
17404
17405 tnapi->consmbox = rcvmbx;
17406 tnapi->prodmbox = sndmbx;
17407
17408 if (i)
17409 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17410 else
17411 tnapi->coal_now = HOSTCC_MODE_NOW;
17412
17413 if (!tg3_flag(tp, SUPPORT_MSIX))
17414 break;
17415
17416 /*
17417 * If we support MSIX, we'll be using RSS. If we're using
17418 * RSS, the first vector only handles link interrupts and the
17419 * remaining vectors handle rx and tx interrupts. Reuse the
17420 * mailbox values for the next iteration. The values we set up
17421 * above are still useful for the single vectored mode.
17422 */
17423 if (!i)
17424 continue;
17425
17426 rcvmbx += 0x8;
17427
17428 if (sndmbx & 0x4)
17429 sndmbx -= 0x4;
17430 else
17431 sndmbx += 0xc;
17432 }
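/* Worked example of the layout this loop produces, assuming the usual
 * tg3.h offsets (MAILBOX_INTERRUPT_0 at 0x200, MAILBOX_RCVRET_CON_IDX_0
 * at 0x280, MAILBOX_SNDHOST_PROD_IDX_0 at 0x300, each a 64-bit register
 * with its low half at +0x4):
 *   vec 0: int 0x204  cons 0x284  prod 0x304  (link-only with MSI-X)
 *   vec 1: int 0x20c  cons 0x284  prod 0x304  (reuses vec 0's values)
 *   vec 2: int 0x214  cons 0x28c  prod 0x300
 *   vec 3: int 0x21c  cons 0x294  prod 0x30c
 *   vec 4: int 0x224  cons 0x29c  prod 0x308
 * The +0xc/-0x4 alternation packs two producer mailboxes into each
 * 64-bit register by using both of its 32-bit halves.
 */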
17433
17434 tg3_init_coal(tp);
17435
17436 pci_set_drvdata(pdev, dev);
17437
17438 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17439 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17440 tg3_asic_rev(tp) == ASIC_REV_5762)
17441 tg3_flag_set(tp, PTP_CAPABLE);
17442
17443 if (tg3_flag(tp, 5717_PLUS)) {
17444 /* Resume a low-power mode */
17445 tg3_frob_aux_power(tp, false);
17446 }
17447
17448 tg3_timer_init(tp);
17449
17450 tg3_carrier_off(tp);
17451
17452 err = register_netdev(dev);
17453 if (err) {
17454 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17455 goto err_out_apeunmap;
17456 }
17457
17458 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17459 tp->board_part_number,
17460 tg3_chip_rev_id(tp),
17461 tg3_bus_string(tp, str),
17462 dev->dev_addr);
17463
17464 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17465 struct phy_device *phydev;
17466 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17467 netdev_info(dev,
17468 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17469 phydev->drv->name, dev_name(&phydev->dev));
17470 } else {
17471 char *ethtype;
17472
17473 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17474 ethtype = "10/100Base-TX";
17475 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17476 ethtype = "1000Base-SX";
17477 else
17478 ethtype = "10/100/1000Base-T";
17479
17480 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17481 "(WireSpeed[%d], EEE[%d])\n",
17482 tg3_phy_string(tp), ethtype,
17483 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17484 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17485 }
17486
17487 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17488 (dev->features & NETIF_F_RXCSUM) != 0,
17489 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17490 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17491 tg3_flag(tp, ENABLE_ASF) != 0,
17492 tg3_flag(tp, TSO_CAPABLE) != 0);
17493 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17494 tp->dma_rwctrl,
17495 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17496 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17497
17498 pci_save_state(pdev);
17499
17500 return 0;
17501
17502 err_out_apeunmap:
17503 if (tp->aperegs) {
17504 iounmap(tp->aperegs);
17505 tp->aperegs = NULL;
17506 }
17507
17508 err_out_iounmap:
17509 if (tp->regs) {
17510 iounmap(tp->regs);
17511 tp->regs = NULL;
17512 }
17513
17514 err_out_free_dev:
17515 free_netdev(dev);
17516
17517 err_out_power_down:
17518 pci_set_power_state(pdev, PCI_D3hot);
17519
17520 err_out_free_res:
17521 pci_release_regions(pdev);
17522
17523 err_out_disable_pdev:
17524 pci_disable_device(pdev);
17525 pci_set_drvdata(pdev, NULL);
17526 return err;
17527 }
17528
17529 static void tg3_remove_one(struct pci_dev *pdev)
17530 {
17531 struct net_device *dev = pci_get_drvdata(pdev);
17532
17533 if (dev) {
17534 struct tg3 *tp = netdev_priv(dev);
17535
17536 release_firmware(tp->fw);
17537
17538 tg3_reset_task_cancel(tp);
17539
17540 if (tg3_flag(tp, USE_PHYLIB)) {
17541 tg3_phy_fini(tp);
17542 tg3_mdio_fini(tp);
17543 }
17544
17545 unregister_netdev(dev);
17546 if (tp->aperegs) {
17547 iounmap(tp->aperegs);
17548 tp->aperegs = NULL;
17549 }
17550 if (tp->regs) {
17551 iounmap(tp->regs);
17552 tp->regs = NULL;
17553 }
17554 free_netdev(dev);
17555 pci_release_regions(pdev);
17556 pci_disable_device(pdev);
17557 pci_set_drvdata(pdev, NULL);
17558 }
17559 }
17560
17561 #ifdef CONFIG_PM_SLEEP
17562 static int tg3_suspend(struct device *device)
17563 {
17564 struct pci_dev *pdev = to_pci_dev(device);
17565 struct net_device *dev = pci_get_drvdata(pdev);
17566 struct tg3 *tp = netdev_priv(dev);
17567 int err;
17568
17569 if (!netif_running(dev))
17570 return 0;
17571
17572 tg3_reset_task_cancel(tp);
17573 tg3_phy_stop(tp);
17574 tg3_netif_stop(tp);
17575
17576 tg3_timer_stop(tp);
17577
17578 tg3_full_lock(tp, 1);
17579 tg3_disable_ints(tp);
17580 tg3_full_unlock(tp);
17581
17582 netif_device_detach(dev);
17583
17584 tg3_full_lock(tp, 0);
17585 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17586 tg3_flag_clear(tp, INIT_COMPLETE);
17587 tg3_full_unlock(tp);
17588
17589 err = tg3_power_down_prepare(tp);
17590 if (err) {
17591 int err2;
17592
17593 tg3_full_lock(tp, 0);
17594
17595 tg3_flag_set(tp, INIT_COMPLETE);
17596 err2 = tg3_restart_hw(tp, true);
17597 if (err2)
17598 goto out;
17599
17600 tg3_timer_start(tp);
17601
17602 netif_device_attach(dev);
17603 tg3_netif_start(tp);
17604
17605 out:
17606 tg3_full_unlock(tp);
17607
17608 if (!err2)
17609 tg3_phy_start(tp);
17610 }
17611
17612 return err;
17613 }
17614
17615 static int tg3_resume(struct device *device)
17616 {
17617 struct pci_dev *pdev = to_pci_dev(device);
17618 struct net_device *dev = pci_get_drvdata(pdev);
17619 struct tg3 *tp = netdev_priv(dev);
17620 int err;
17621
17622 if (!netif_running(dev))
17623 return 0;
17624
17625 netif_device_attach(dev);
17626
17627 tg3_full_lock(tp, 0);
17628
17629 tg3_flag_set(tp, INIT_COMPLETE);
17630 err = tg3_restart_hw(tp,
17631 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17632 if (err)
17633 goto out;
17634
17635 tg3_timer_start(tp);
17636
17637 tg3_netif_start(tp);
17638
17639 out:
17640 tg3_full_unlock(tp);
17641
17642 if (!err)
17643 tg3_phy_start(tp);
17644
17645 return err;
17646 }
17647 #endif /* CONFIG_PM_SLEEP */
17648
17649 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
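/* SIMPLE_DEV_PM_OPS() expands to a struct dev_pm_ops in which
 * tg3_suspend/tg3_resume back all of the system-sleep callbacks
 * (suspend/resume, freeze/thaw, poweroff/restore) when CONFIG_PM_SLEEP
 * is set, and to an empty table otherwise, which is why the two
 * functions above are guarded by the same #ifdef.
 */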
17650
17651 /**
17652 * tg3_io_error_detected - called when a PCI error is detected
17653 * @pdev: Pointer to PCI device
17654 * @state: The current PCI connection state
17655 *
17656 * This function is called after a PCI bus error affecting
17657 * this device has been detected.
17658 */
17659 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17660 pci_channel_state_t state)
17661 {
17662 struct net_device *netdev = pci_get_drvdata(pdev);
17663 struct tg3 *tp = netdev_priv(netdev);
17664 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17665
17666 netdev_info(netdev, "PCI I/O error detected\n");
17667
17668 rtnl_lock();
17669
17670 if (!netif_running(netdev))
17671 goto done;
17672
17673 tg3_phy_stop(tp);
17674
17675 tg3_netif_stop(tp);
17676
17677 tg3_timer_stop(tp);
17678
17679 /* Want to make sure that the reset task doesn't run */
17680 tg3_reset_task_cancel(tp);
17681
17682 netif_device_detach(netdev);
17683
17684 /* Clean up software state, even if MMIO is blocked */
17685 tg3_full_lock(tp, 0);
17686 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17687 tg3_full_unlock(tp);
17688
17689 done:
17690 if (state == pci_channel_io_perm_failure)
17691 err = PCI_ERS_RESULT_DISCONNECT;
17692 else
17693 pci_disable_device(pdev);
17694
17695 rtnl_unlock();
17696
17697 return err;
17698 }
17699
17700 /**
17701 * tg3_io_slot_reset - called after the PCI bus has been reset.
17702 * @pdev: Pointer to PCI device
17703 *
17704 * Restart the card from scratch, as if from a cold boot.
17705 * At this point, the card has experienced a hard reset,
17706 * followed by fixups by the BIOS, and has its config space
17707 * set up identically to what it was at cold boot.
17708 */
17709 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17710 {
17711 struct net_device *netdev = pci_get_drvdata(pdev);
17712 struct tg3 *tp = netdev_priv(netdev);
17713 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17714 int err;
17715
17716 rtnl_lock();
17717
17718 if (pci_enable_device(pdev)) {
17719 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17720 goto done;
17721 }
17722
17723 pci_set_master(pdev);
17724 pci_restore_state(pdev);
17725 pci_save_state(pdev);
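/* The restore/save pair follows the usual recovery pattern:
 * pci_restore_state() rewrites config space from the snapshot taken by
 * pci_save_state() at probe time and marks it consumed, so we
 * immediately save again to keep a valid snapshot around in case the
 * slot has to be reset a second time.
 */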
17726
17727 if (!netif_running(netdev)) {
17728 rc = PCI_ERS_RESULT_RECOVERED;
17729 goto done;
17730 }
17731
17732 err = tg3_power_up(tp);
17733 if (err)
17734 goto done;
17735
17736 rc = PCI_ERS_RESULT_RECOVERED;
17737
17738 done:
17739 rtnl_unlock();
17740
17741 return rc;
17742 }
17743
17744 /**
17745 * tg3_io_resume - called when traffic can start flowing again.
17746 * @pdev: Pointer to PCI device
17747 *
17748 * This callback is called when the error recovery driver tells
17749 * us that it's OK to resume normal operation.
17750 */
17751 static void tg3_io_resume(struct pci_dev *pdev)
17752 {
17753 struct net_device *netdev = pci_get_drvdata(pdev);
17754 struct tg3 *tp = netdev_priv(netdev);
17755 int err;
17756
17757 rtnl_lock();
17758
17759 if (!netif_running(netdev))
17760 goto done;
17761
17762 tg3_full_lock(tp, 0);
17763 tg3_flag_set(tp, INIT_COMPLETE);
17764 err = tg3_restart_hw(tp, true);
17765 if (err) {
17766 tg3_full_unlock(tp);
17767 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17768 goto done;
17769 }
17770
17771 netif_device_attach(netdev);
17772
17773 tg3_timer_start(tp);
17774
17775 tg3_netif_start(tp);
17776
17777 tg3_full_unlock(tp);
17778
17779 tg3_phy_start(tp);
17780
17781 done:
17782 rtnl_unlock();
17783 }
17784
17785 static const struct pci_error_handlers tg3_err_handler = {
17786 .error_detected = tg3_io_error_detected,
17787 .slot_reset = tg3_io_slot_reset,
17788 .resume = tg3_io_resume
17789 };
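/* For reference, the standard AER recovery sequence for these
 * callbacks: the PCI core invokes .error_detected first and, if it
 * returns PCI_ERS_RESULT_NEED_RESET, resets the link and calls
 * .slot_reset; .resume runs last, once the device is considered
 * recovered, to let traffic flow again.
 */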
17790
17791 static struct pci_driver tg3_driver = {
17792 .name = DRV_MODULE_NAME,
17793 .id_table = tg3_pci_tbl,
17794 .probe = tg3_init_one,
17795 .remove = tg3_remove_one,
17796 .err_handler = &tg3_err_handler,
17797 .driver.pm = &tg3_pm_ops,
17798 };
17799
17800 static int __init tg3_init(void)
17801 {
17802 return pci_register_driver(&tg3_driver);
17803 }
17804
17805 static void __exit tg3_cleanup(void)
17806 {
17807 pci_unregister_driver(&tg3_driver);
17808 }
17809
17810 module_init(tg3_init);
17811 module_exit(tg3_cleanup);