/* bnx2x_ethtool.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/crc32.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dump.h"
#include "bnx2x_init.h"

/* Note: in the format strings below %s is replaced by the queue-name which is
 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
 */
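/* Illustrative example (not part of the original sources): with the format
 * strings below, ethtool -S reports names such as "[2]: rx_bytes" for queue 2
 * and "[fcoe]: rx_bytes" for the FCoE queue.
 */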
#define MAX_QUEUE_NAME_LEN	4
static const struct {
	long offset;
	int size;
	char string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%s]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%s]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%s]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%s]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%s]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },

	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%s]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
						8, "[%s]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
						8, "[%s]: tx_bcast_packets" },
	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
						8, "[%s]: tpa_aggregations" },
	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
					8, "[%s]: tpa_aggregated_frames"},
	{ Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
	{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
					4, "[%s]: driver_filtered_tx_pkt" }
};

#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT	1
#define STATS_FLAGS_FUNC	2
#define STATS_FLAGS_BOTH	(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	char string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(mf_tag_discard),
				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
	{ STATS_OFFSET32(pfc_frames_received_hi),
				8, STATS_FLAGS_PORT, "pfc_frames_received" },
	{ STATS_OFFSET32(pfc_frames_sent_hi),
				8, STATS_FLAGS_PORT, "pfc_frames_sent" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" },
	{ STATS_OFFSET32(total_tpa_aggregations_hi),
			8, STATS_FLAGS_FUNC, "tpa_aggregations" },
	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
			8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
	{ STATS_OFFSET32(total_tpa_bytes_hi),
			8, STATS_FLAGS_FUNC, "tpa_bytes"},
	{ STATS_OFFSET32(recoverable_error),
			4, STATS_FLAGS_FUNC, "recoverable_errors" },
	{ STATS_OFFSET32(unrecoverable_error),
			4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
	{ STATS_OFFSET32(driver_filtered_tx_pkt),
			4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
	{ STATS_OFFSET32(eee_tx_lpi),
			4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};

#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)

static int bnx2x_get_port_type(struct bnx2x *bp)
{
	int port_type;
	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
	switch (bp->link_params.phy[phy_idx].media_type) {
	case ETH_PHY_SFPP_10G_FIBER:
	case ETH_PHY_SFP_1G_FIBER:
	case ETH_PHY_XFP_FIBER:
	case ETH_PHY_KR:
	case ETH_PHY_CX4:
		port_type = PORT_FIBRE;
		break;
	case ETH_PHY_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case ETH_PHY_BASE_T:
		port_type = PORT_TP;
		break;
	case ETH_PHY_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case ETH_PHY_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	int cfg_idx = bnx2x_get_link_cfg_idx(bp);

	/* Dual Media boards present all available port types */
	cmd->supported = bp->port.supported[cfg_idx] |
		(bp->port.supported[cfg_idx ^ 1] &
		 (SUPPORTED_TP | SUPPORTED_FIBRE));
	cmd->advertising = bp->port.advertising[cfg_idx];
	if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
	    ETH_PHY_SFP_1G_FIBER) {
		cmd->supported &= ~(SUPPORTED_10000baseT_Full);
		cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
	}

	if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
	    !(bp->flags & MF_FUNC_DIS)) {
		cmd->duplex = bp->link_vars.duplex;

		if (IS_MF(bp) && !BP_NOMCP(bp))
			ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
		else
			ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
	} else {
		cmd->duplex = DUPLEX_UNKNOWN;
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
	}

	cmd->port = bnx2x_get_port_type(bp);

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* Publish LP advertised speeds and FC */
	if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
		u32 status = bp->link_vars.link_status;

		cmd->lp_advertising |= ADVERTISED_Autoneg;
		if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Pause;
		if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Asym_Pause;

		if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_100baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_100baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
	}

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   " supported 0x%x advertising 0x%x speed %u\n"
	   " duplex %d port %d phy_address %d transceiver %d\n"
	   " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising,
	   ethtool_cmd_speed(cmd),
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
	u32 speed, phy_idx;

	if (IS_MF_SD(bp))
		return 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   " supported 0x%x advertising 0x%x speed %u\n"
	   " duplex %d port %d phy_address %d transceiver %d\n"
	   " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising,
	   ethtool_cmd_speed(cmd),
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	speed = ethtool_cmd_speed(cmd);

	/* If we received a request for an unknown duplex, assume full */
	if (cmd->duplex == DUPLEX_UNKNOWN)
		cmd->duplex = DUPLEX_FULL;

	if (IS_MF_SI(bp)) {
		u32 part;
		u32 line_speed = bp->link_vars.line_speed;

		/* use 10G if no link detected */
		if (!line_speed)
			line_speed = 10000;

		if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
			DP(BNX2X_MSG_ETHTOOL,
			   "To set speed BC %X or higher is required, please upgrade BC\n",
			   REQ_BC_VER_4_SET_MF_BW);
			return -EINVAL;
		}

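		/* Illustrative example (not from the original sources): with a
		 * 10000 Mbps line speed, a requested speed of 2500 gives
		 * part = (2500 * 100) / 10000 = 25, i.e. 25% of the line rate.
		 */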
		part = (speed * 100) / line_speed;

		if (line_speed < speed || !part) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
			return -EINVAL;
		}

		if (bp->state != BNX2X_STATE_OPEN)
			/* store value for following "load" */
			bp->pending_max = part;
		else
			bnx2x_update_max_mf_config(bp, part);

		return 0;
	}

	cfg_idx = bnx2x_get_link_cfg_idx(bp);
	old_multi_phy_config = bp->link_params.multi_phy_config;
	switch (cmd->port) {
	case PORT_TP:
		if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
			break; /* no port change */

		if (!(bp->port.supported[0] & SUPPORTED_TP ||
		      bp->port.supported[1] & SUPPORTED_TP)) {
			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
			return -EINVAL;
		}
		bp->link_params.multi_phy_config &=
			~PORT_HW_CFG_PHY_SELECTION_MASK;
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
		else
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
		break;
	case PORT_FIBRE:
	case PORT_DA:
		if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
			break; /* no port change */

		if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
		      bp->port.supported[1] & SUPPORTED_FIBRE)) {
			DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
			return -EINVAL;
		}
		bp->link_params.multi_phy_config &=
			~PORT_HW_CFG_PHY_SELECTION_MASK;
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
		else
			bp->link_params.multi_phy_config |=
				PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
		break;
	default:
		DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
		return -EINVAL;
	}
	/* Save new config in case command completes successfully */
	new_multi_phy_config = bp->link_params.multi_phy_config;
	/* Get the new cfg_idx */
	cfg_idx = bnx2x_get_link_cfg_idx(bp);
	/* Restore old config in case command failed */
	bp->link_params.multi_phy_config = old_multi_phy_config;
	DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		u32 an_supported_speed = bp->port.supported[cfg_idx];
		if (bp->link_params.phy[EXT_PHY1].type ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
			an_supported_speed |= (SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full);
		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
			DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		if (cmd->advertising & ~an_supported_speed) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Advertisement parameters are not supported\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
		bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
						 cmd->advertising);
		if (cmd->advertising) {

			bp->link_params.speed_cap_mask[cfg_idx] = 0;
			if (cmd->advertising & ADVERTISED_10baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
			}
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;

			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;

			if (cmd->advertising & ADVERTISED_100baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
			}
			if (cmd->advertising & ADVERTISED_1000baseT_Half) {
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
			}
			if (cmd->advertising & (ADVERTISED_1000baseT_Full |
						ADVERTISED_1000baseKX_Full))
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;

			if (cmd->advertising & (ADVERTISED_10000baseT_Full |
						ADVERTISED_10000baseKX4_Full |
						ADVERTISED_10000baseKR_Full))
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;

			if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
				bp->link_params.speed_cap_mask[cfg_idx] |=
					PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
		}
	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_10baseT_Full)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_10baseT_Half)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_100baseT_Full)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported[cfg_idx] &
				      SUPPORTED_100baseT_Half)) {
					DP(BNX2X_MSG_ETHTOOL,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported[cfg_idx] &
			      SUPPORTED_1000baseT_Full)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported[cfg_idx]
			      & SUPPORTED_2500baseX_Full)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(BNX2X_MSG_ETHTOOL,
				   "10G half not supported\n");
				return -EINVAL;
			}
			phy_idx = bnx2x_get_cur_phy_idx(bp);
			if (!(bp->port.supported[cfg_idx]
			      & SUPPORTED_10000baseT_Full) ||
			    (bp->link_params.phy[phy_idx].media_type ==
			     ETH_PHY_SFP_1G_FIBER)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
			return -EINVAL;
		}

		bp->link_params.req_line_speed[cfg_idx] = speed;
		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
		bp->port.advertising[cfg_idx] = advertising;
	}

	DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
	   " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed[cfg_idx],
	   bp->link_params.req_duplex[cfg_idx],
	   bp->port.advertising[cfg_idx]);

	/* Set new config */
	bp->link_params.multi_phy_config = new_multi_phy_config;
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define DUMP_ALL_PRESETS	0x1FFF
#define DUMP_MAX_PRESETS	13

static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
{
	if (CHIP_IS_E1(bp))
		return dump_num_registers[0][preset-1];
	else if (CHIP_IS_E1H(bp))
		return dump_num_registers[1][preset-1];
	else if (CHIP_IS_E2(bp))
		return dump_num_registers[2][preset-1];
	else if (CHIP_IS_E3A0(bp))
		return dump_num_registers[3][preset-1];
	else if (CHIP_IS_E3B0(bp))
		return dump_num_registers[4][preset-1];
	else
		return 0;
}

static int __bnx2x_get_regs_len(struct bnx2x *bp)
{
	u32 preset_idx;
	int regdump_len = 0;

	/* Calculate the total preset regs length */
	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
		regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);

	return regdump_len;
}

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;

	regdump_len = __bnx2x_get_regs_len(bp);
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_header);

	return regdump_len;
}

#define IS_E1_REG(chips)	((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
#define IS_E1H_REG(chips)	((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
#define IS_E2_REG(chips)	((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
#define IS_E3A0_REG(chips)	((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
#define IS_E3B0_REG(chips)	((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)

#define IS_REG_IN_PRESET(presets, idx)	\
		((presets & (1 << (idx-1))) == (1 << (idx-1)))
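/* Illustrative example (not from the original sources): preset index 3 maps
 * to bit 2 of the presets mask, so IS_REG_IN_PRESET(presets, 3) tests 0x4.
 */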

/******* Paged registers info selectors ********/
static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return page_vals_e2;
	else if (CHIP_IS_E3(bp))
		return page_vals_e3;
	else
		return NULL;
}

static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return PAGE_MODE_VALUES_E2;
	else if (CHIP_IS_E3(bp))
		return PAGE_MODE_VALUES_E3;
	else
		return 0;
}

static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return page_write_regs_e2;
	else if (CHIP_IS_E3(bp))
		return page_write_regs_e3;
	else
		return NULL;
}

static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return PAGE_WRITE_REGS_E2;
	else if (CHIP_IS_E3(bp))
		return PAGE_WRITE_REGS_E3;
	else
		return 0;
}

static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return page_read_regs_e2;
	else if (CHIP_IS_E3(bp))
		return page_read_regs_e3;
	else
		return NULL;
}

static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
{
	if (CHIP_IS_E2(bp))
		return PAGE_READ_REGS_E2;
	else if (CHIP_IS_E3(bp))
		return PAGE_READ_REGS_E3;
	else
		return 0;
}

static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
				 const struct reg_addr *reg_info)
{
	if (CHIP_IS_E1(bp))
		return IS_E1_REG(reg_info->chips);
	else if (CHIP_IS_E1H(bp))
		return IS_E1H_REG(reg_info->chips);
	else if (CHIP_IS_E2(bp))
		return IS_E2_REG(reg_info->chips);
	else if (CHIP_IS_E3A0(bp))
		return IS_E3A0_REG(reg_info->chips);
	else if (CHIP_IS_E3B0(bp))
		return IS_E3B0_REG(reg_info->chips);
	else
		return false;
}

static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
				  const struct wreg_addr *wreg_info)
{
	if (CHIP_IS_E1(bp))
		return IS_E1_REG(wreg_info->chips);
	else if (CHIP_IS_E1H(bp))
		return IS_E1H_REG(wreg_info->chips);
	else if (CHIP_IS_E2(bp))
		return IS_E2_REG(wreg_info->chips);
	else if (CHIP_IS_E3A0(bp))
		return IS_E3A0_REG(wreg_info->chips);
	else if (CHIP_IS_E3B0(bp))
		return IS_E3B0_REG(wreg_info->chips);
	else
		return false;
}

/**
 * bnx2x_read_pages_regs - read "paged" registers
 *
 * @bp		device handle
 * @p		output buffer
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, k, n;

	/* addresses of the paged registers */
	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
	/* number of paged registers */
	int num_pages = __bnx2x_get_page_reg_num(bp);
	/* write addresses */
	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
	/* number of write addresses */
	int write_num = __bnx2x_get_page_write_num(bp);
	/* read addresses info */
	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
	/* number of read addresses */
	int read_num = __bnx2x_get_page_read_num(bp);
	u32 addr, size;

	for (i = 0; i < num_pages; i++) {
		for (j = 0; j < write_num; j++) {
			REG_WR(bp, write_addr[j], page_addr[i]);

			for (k = 0; k < read_num; k++) {
				if (IS_REG_IN_PRESET(read_addr[k].presets,
						     preset)) {
					size = read_addr[k].size;
					for (n = 0; n < size; n++) {
						addr = read_addr[k].addr + n*4;
						*p++ = REG_RD(bp, addr);
					}
				}
			}
		}
	}
}

static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, addr;
	const struct wreg_addr *wreg_addr_p = NULL;

	if (CHIP_IS_E1(bp))
		wreg_addr_p = &wreg_addr_e1;
	else if (CHIP_IS_E1H(bp))
		wreg_addr_p = &wreg_addr_e1h;
	else if (CHIP_IS_E2(bp))
		wreg_addr_p = &wreg_addr_e2;
	else if (CHIP_IS_E3A0(bp))
		wreg_addr_p = &wreg_addr_e3;
	else if (CHIP_IS_E3B0(bp))
		wreg_addr_p = &wreg_addr_e3b0;

	/* Read the idle_chk registers */
	for (i = 0; i < IDLE_REGS_COUNT; i++) {
		if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
		    IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
			for (j = 0; j < idle_reg_addrs[i].size; j++)
				*p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
		}
	}

	/* Read the regular registers */
	for (i = 0; i < REGS_COUNT; i++) {
		if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
		    IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
			for (j = 0; j < reg_addrs[i].size; j++)
				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
		}
	}

	/* Read the CAM registers */
	if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
	    IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
		for (i = 0; i < wreg_addr_p->size; i++) {
			*p++ = REG_RD(bp, wreg_addr_p->addr + i*4);

			/* In case of wreg_addr register, read additional
			 * registers from read_regs array
			 */
			for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
				addr = *(wreg_addr_p->read_regs);
				*p++ = REG_RD(bp, addr + j*4);
			}
		}
	}

	/* Paged registers are supported in E2 & E3 only */
	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
		/* Read "paged" registers */
		bnx2x_read_pages_regs(bp, p, preset);
	}

	return 0;
}

static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
{
	u32 preset_idx;

	/* Read all registers, by reading all preset registers */
	for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
		/* Skip presets with IOR */
		if ((preset_idx == 2) ||
		    (preset_idx == 5) ||
		    (preset_idx == 8) ||
		    (preset_idx == 11))
			continue;
		__bnx2x_get_preset_regs(bp, p, preset_idx);
		p += __bnx2x_get_preset_regs_len(bp, preset_idx);
	}
}

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	regs->version = 2;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */

	/* Disable parity on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_disable_blocks_parity(bp);

	/* Disable parity on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_disable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = DUMP_ALL_PRESETS;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
			(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
			(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
			(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	p += dump_hdr.header_size + 1;
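	/* Note (illustrative, derived from the two lines above rather than the
	 * original sources): header_size holds the header length in dwords
	 * minus one, so advancing by header_size + 1 dwords skips exactly one
	 * struct dump_header and leaves p at the start of the register data.
	 */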

	/* Actually read the registers */
	__bnx2x_get_regs(bp, p);

	/* Re-enable parity attentions on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Re-enable parity attentions on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;

	regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_header);

	return regdump_len;
}

static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Use the ethtool_dump "flag" field as the dump preset index */
	bp->dump_preset_idx = val->flag;
	return 0;
}

static int bnx2x_get_dump_flag(struct net_device *dev,
			       struct ethtool_dump *dump)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Calculate the requested preset idx length */
	dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
	DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
	   bp->dump_preset_idx, dump->len);

	dump->flag = ETHTOOL_GET_DUMP_DATA;
	return 0;
}

static int bnx2x_get_dump_data(struct net_device *dev,
			       struct ethtool_dump *dump,
			       void *buffer)
{
	u32 *p = buffer;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	memset(p, 0, dump->len);

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */

	/* Disable parity on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_disable_blocks_parity(bp);

	/* Disable parity on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_disable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = bp->dump_preset_idx;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
			(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
			(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
			(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	p += dump_hdr.header_size + 1;

	/* Actually read the registers */
	__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);

	/* Re-enable parity attentions on path 0 */
	bnx2x_pretend_func(bp, 0);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Re-enable parity attentions on path 1 */
	bnx2x_pretend_func(bp, 1);
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	/* Return to current function */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	return 0;
}

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));

	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS(bp);
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC) {
		DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
		return -EINVAL;
	}

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG) {
			DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
			return -EINVAL;
		}
		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN)) {
		/* dump MCP trace */
		if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
			bnx2x_fw_dump_lvl(bp, KERN_INFO);
		bp->msg_enable = level;
	}
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_force_link_reset(bp);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
 * had we done things the other way around, if two pfs from the same port
 * would attempt to access nvram at the same time, we could run into a
 * scenario such as:
 * pf A takes the port lock.
 * pf B succeeds in taking the same lock since they are from the same port.
 * pf A takes the per pf misc lock. Performs eeprom access.
 * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
 * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B)
 */
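/* A minimal sketch of the resulting call order, as used by the NVRAM helpers
 * below (illustrative only, not taken verbatim from the original sources):
 *
 *	rc = bnx2x_acquire_nvram_lock(bp);	(takes the per-pf HW lock)
 *	bnx2x_enable_nvram_access(bp);
 *	... bnx2x_nvram_read_dword() or bnx2x_nvram_write_dword() calls ...
 *	bnx2x_disable_nvram_access(bp);
 *	bnx2x_release_nvram_lock(bp);
 */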
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val;

	/* acquire HW lock: protect against other PFs in PF Direct Assignment */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val;

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	/* release HW lock: protect against other PFs in PF Direct Assignment */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work
			 */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}
	if (rc == -EBUSY)
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "nvram read timeout expired\n");
	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
			      int buf_size)
{
	int rc;

	rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);

	if (!rc) {
		__be32 *be = (__be32 *)buf;

		while ((buf_size -= 4) >= 0)
			*buf++ = be32_to_cpu(*be++);
	}

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}

static int bnx2x_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = -EINVAL, phy_idx;
	u8 *user_data = data;
	unsigned int start_addr = ee->offset, xfer_size = 0;

	if (!netif_running(dev)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	phy_idx = bnx2x_get_cur_phy_idx(bp);

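	/* Layout note (illustrative, based on the standard ethtool SFF module
	 * constants rather than the original sources): the A0 page occupies
	 * offsets 0 .. ETH_MODULE_SFF_8079_LEN - 1 and the A2 diagnostics
	 * page is mapped directly after it, up to ETH_MODULE_SFF_8472_LEN - 1.
	 */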
	/* Read A0 section */
	if (start_addr < ETH_MODULE_SFF_8079_LEN) {
		/* Limit transfer size to the A0 section boundary */
		if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
			xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
		else
			xfer_size = ee->len;
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
						  &bp->link_params,
						  I2C_DEV_ADDR_A0,
						  start_addr,
						  xfer_size,
						  user_data);
		bnx2x_release_phy_lock(bp);
		if (rc) {
			DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");

			return -EINVAL;
		}
		user_data += xfer_size;
		start_addr += xfer_size;
	}

	/* Read A2 section */
	if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
	    (start_addr < ETH_MODULE_SFF_8472_LEN)) {
		xfer_size = ee->len - xfer_size;
		/* Limit transfer size to the A2 section boundary */
		if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
			xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
		start_addr -= ETH_MODULE_SFF_8079_LEN;
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
						  &bp->link_params,
						  I2C_DEV_ADDR_A2,
						  start_addr,
						  xfer_size,
						  user_data);
		bnx2x_release_phy_lock(bp);
		if (rc) {
			DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
			return -EINVAL;
		}
	}
	return rc;
}

static int bnx2x_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct bnx2x *bp = netdev_priv(dev);
	int phy_idx, rc;
	u8 sff8472_comp, diag_type;

	if (!netif_running(dev)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}
	phy_idx = bnx2x_get_cur_phy_idx(bp);
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
					  &bp->link_params,
					  I2C_DEV_ADDR_A0,
					  SFP_EEPROM_SFF_8472_COMP_ADDR,
					  SFP_EEPROM_SFF_8472_COMP_SIZE,
					  &sff8472_comp);
	bnx2x_release_phy_lock(bp);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
		return -EINVAL;
	}

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
					  &bp->link_params,
					  I2C_DEV_ADDR_A0,
					  SFP_EEPROM_DIAG_TYPE_ADDR,
					  SFP_EEPROM_DIAG_TYPE_SIZE,
					  &diag_type);
	bnx2x_release_phy_lock(bp);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
		return -EINVAL;
	}

	if (!sff8472_comp ||
	    (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}
	return 0;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	if (rc == -EBUSY)
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "nvram write timeout expired\n");
	return rc;
}

#define BYTE_OFFSET(offset)	(8 * (offset & 0x03))
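/* Illustrative example (not from the original sources): for offset 0x13,
 * BYTE_OFFSET(0x13) = 8 * 3 = 24, i.e. the byte is patched in at bit offset
 * 24 of the 32-bit word read back from the aligned offset 0x10 in
 * bnx2x_nvram_write1() below.
 */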

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags, align_offset, val;
	__be32 val_be;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);

	if (rc == 0) {
		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order
		 */
		val = be32_to_cpu(val_be);

		val &= ~le32_to_cpu(0xff << BYTE_OFFSET(offset));
		val |= le32_to_cpu(*data_buf << BYTE_OFFSET(offset));

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
1671
1672static int bnx2x_set_eeprom(struct net_device *dev,
1673 struct ethtool_eeprom *eeprom, u8 *eebuf)
1674{
1675 struct bnx2x *bp = netdev_priv(dev);
1676 int port = BP_PORT(bp);
1677 int rc = 0;
e10bc84d 1678 u32 ext_phy_config;
51c1a580
MS
1679 if (!netif_running(dev)) {
1680 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1681 "cannot access eeprom when the interface is down\n");
de0c62db 1682 return -EAGAIN;
51c1a580 1683 }
de0c62db 1684
51c1a580 1685 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
f1deab50 1686 " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
de0c62db
DK
1687 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
1688 eeprom->len, eeprom->len);
1689
1690 /* parameters already validated in ethtool_set_eeprom */
1691
1692 /* PHY eeprom can be accessed only by the PMF */
1693 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
51c1a580
MS
1694 !bp->port.pmf) {
1695 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1696 "wrong magic or interface is not pmf\n");
de0c62db 1697 return -EINVAL;
51c1a580 1698 }
de0c62db 1699
e10bc84d
YR
1700 ext_phy_config =
1701 SHMEM_RD(bp,
1702 dev_info.port_hw_config[port].external_phy_config);
1703
de0c62db
DK
1704 if (eeprom->magic == 0x50485950) {
1705 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
1706 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1707
1708 bnx2x_acquire_phy_lock(bp);
1709 rc |= bnx2x_link_reset(&bp->link_params,
1710 &bp->link_vars, 0);
e10bc84d 1711 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
de0c62db
DK
1712 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
1713 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1714 MISC_REGISTERS_GPIO_HIGH, port);
1715 bnx2x_release_phy_lock(bp);
1716 bnx2x_link_report(bp);
1717
1718 } else if (eeprom->magic == 0x50485952) {
1719 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
1720 if (bp->state == BNX2X_STATE_OPEN) {
1721 bnx2x_acquire_phy_lock(bp);
1722 rc |= bnx2x_link_reset(&bp->link_params,
1723 &bp->link_vars, 1);
1724
1725 rc |= bnx2x_phy_init(&bp->link_params,
1726 &bp->link_vars);
1727 bnx2x_release_phy_lock(bp);
1728 bnx2x_calc_fc_adv(bp);
1729 }
1730 } else if (eeprom->magic == 0x53985943) {
1731 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
e10bc84d 1732 if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
de0c62db 1733 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
de0c62db
DK
1734
1735 /* DSP Remove Download Mode */
1736 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
1737 MISC_REGISTERS_GPIO_LOW, port);
1738
1739 bnx2x_acquire_phy_lock(bp);
1740
e10bc84d
YR
1741 bnx2x_sfx7101_sp_sw_reset(bp,
1742 &bp->link_params.phy[EXT_PHY1]);
de0c62db
DK
1743
1744 /* wait 0.5 sec to allow it to run */
1745 msleep(500);
1746 bnx2x_ext_phy_hw_reset(bp, port);
1747 msleep(500);
1748 bnx2x_release_phy_lock(bp);
1749 }
1750 } else
1751 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
1752
1753 return rc;
1754}
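
The PHY firmware-upgrade hooks in bnx2x_set_eeprom() are selected by ASCII-looking magic numbers in ethtool_eeprom.magic (the PMF check above accepts the whole 0x50485900-0x504859FF, i.e. "PHY"-prefixed, range). Below is a small decoder for those constants, included purely as an illustration; note that the value the 'PHYC' branch tests for, 0x53985943, does not actually decode to the "PHYC" string its comment names.

#include <stdint.h>
#include <stdio.h>

static void decode_magic(uint32_t magic, const char *what)
{
	int i;

	printf("0x%08x = \"", magic);
	for (i = 24; i >= 0; i -= 8) {
		unsigned char c = (magic >> i) & 0xff;

		putchar((c >= 0x20 && c < 0x7f) ? c : '.');
	}
	printf("\"  %s\n", what);
}

int main(void)
{
	decode_magic(0x50485950, "prepare PHY for FW upgrade");
	decode_magic(0x50485952, "re-init link after FW upgrade");
	decode_magic(0x53985943, "value tested by the 'PHYC' branch");
	return 0;
}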
f85582f8 1755
de0c62db
DK
1756static int bnx2x_get_coalesce(struct net_device *dev,
1757 struct ethtool_coalesce *coal)
1758{
1759 struct bnx2x *bp = netdev_priv(dev);
1760
1761 memset(coal, 0, sizeof(struct ethtool_coalesce));
1762
1763 coal->rx_coalesce_usecs = bp->rx_ticks;
1764 coal->tx_coalesce_usecs = bp->tx_ticks;
1765
1766 return 0;
1767}
1768
1769static int bnx2x_set_coalesce(struct net_device *dev,
1770 struct ethtool_coalesce *coal)
1771{
1772 struct bnx2x *bp = netdev_priv(dev);
1773
1774 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
1775 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
1776 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
1777
1778 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
1779 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
1780 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
1781
1782 if (netif_running(dev))
1783 bnx2x_update_coalesce(bp);
1784
1785 return 0;
1786}
1787
1788static void bnx2x_get_ringparam(struct net_device *dev,
1789 struct ethtool_ringparam *ering)
1790{
1791 struct bnx2x *bp = netdev_priv(dev);
1792
1793 ering->rx_max_pending = MAX_RX_AVAIL;
de0c62db 1794
baaee1b7
MY
1795 /* If the size isn't set yet, give an estimate of the number of
1796 * buffers each queue will have. This neglects some possible
1797 * conditions [the number of queues could still shrink at this
1798 * point], but the estimate is correct for the likely
1799 * scenario.
1800 */
25141580
DK
1801 if (bp->rx_ring_size)
1802 ering->rx_pending = bp->rx_ring_size;
baaee1b7
MY
1803 else if (BNX2X_NUM_RX_QUEUES(bp))
1804 ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp);
25141580 1805 else
c2188952 1806 ering->rx_pending = MAX_RX_AVAIL;
25141580 1807
a3348722 1808 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
de0c62db
DK
1809 ering->tx_pending = bp->tx_ring_size;
1810}
1811
1812static int bnx2x_set_ringparam(struct net_device *dev,
1813 struct ethtool_ringparam *ering)
1814{
1815 struct bnx2x *bp = netdev_priv(dev);
de0c62db 1816
04c46736
YM
1817 DP(BNX2X_MSG_ETHTOOL,
1818 "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
1819 ering->rx_pending, ering->tx_pending);
1820
de0c62db 1821 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580
MS
1822 DP(BNX2X_MSG_ETHTOOL,
1823 "Handling parity error recovery. Try again later\n");
de0c62db
DK
1824 return -EAGAIN;
1825 }
1826
1827 if ((ering->rx_pending > MAX_RX_AVAIL) ||
b3b83c3f
DK
1828 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1829 MIN_RX_SIZE_TPA)) ||
a3348722 1830 (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
51c1a580
MS
1831 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
1832 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
de0c62db 1833 return -EINVAL;
51c1a580 1834 }
de0c62db
DK
1835
1836 bp->rx_ring_size = ering->rx_pending;
1837 bp->tx_ring_size = ering->tx_pending;
1838
a9fccec7 1839 return bnx2x_reload_if_running(dev);
de0c62db
DK
1840}
1841
1842static void bnx2x_get_pauseparam(struct net_device *dev,
1843 struct ethtool_pauseparam *epause)
1844{
1845 struct bnx2x *bp = netdev_priv(dev);
a22f0788 1846 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
9e7e8399
MY
1847 int cfg_reg;
1848
a22f0788
YR
1849 epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
1850 BNX2X_FLOW_CTRL_AUTO);
de0c62db 1851
9e7e8399 1852 if (!epause->autoneg)
241fb5d2 1853 cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
9e7e8399
MY
1854 else
1855 cfg_reg = bp->link_params.req_fc_auto_adv;
1856
1857 epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
de0c62db 1858 BNX2X_FLOW_CTRL_RX);
9e7e8399 1859 epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
de0c62db
DK
1860 BNX2X_FLOW_CTRL_TX);
1861
51c1a580 1862 DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
f1deab50 1863 " autoneg %d rx_pause %d tx_pause %d\n",
de0c62db
DK
1864 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1865}
1866
1867static int bnx2x_set_pauseparam(struct net_device *dev,
1868 struct ethtool_pauseparam *epause)
1869{
1870 struct bnx2x *bp = netdev_priv(dev);
a22f0788 1871 u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
fb3bff17 1872 if (IS_MF(bp))
de0c62db
DK
1873 return 0;
1874
51c1a580 1875 DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
f1deab50 1876 " autoneg %d rx_pause %d tx_pause %d\n",
de0c62db
DK
1877 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
1878
a22f0788 1879 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
de0c62db
DK
1880
1881 if (epause->rx_pause)
a22f0788 1882 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
de0c62db
DK
1883
1884 if (epause->tx_pause)
a22f0788 1885 bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
de0c62db 1886
a22f0788
YR
1887 if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
1888 bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
de0c62db
DK
1889
1890 if (epause->autoneg) {
a22f0788 1891 if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
51c1a580 1892 DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
de0c62db
DK
1893 return -EINVAL;
1894 }
1895
a22f0788
YR
1896 if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
1897 bp->link_params.req_flow_ctrl[cfg_idx] =
1898 BNX2X_FLOW_CTRL_AUTO;
1899 }
ba35a0fd 1900 bp->link_params.req_fc_auto_adv = 0;
5cd75f0c
YR
1901 if (epause->rx_pause)
1902 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;
1903
1904 if (epause->tx_pause)
1905 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;
ba35a0fd
YR
1906
1907 if (!bp->link_params.req_fc_auto_adv)
1908 bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
de0c62db
DK
1909 }
1910
51c1a580 1911 DP(BNX2X_MSG_ETHTOOL,
a22f0788 1912 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
de0c62db
DK
1913
1914 if (netif_running(dev)) {
1915 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1916 bnx2x_link_set(bp);
1917 }
1918
1919 return 0;
1920}
1921
5889335c 1922static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
cf2c1df6
MS
1923 "register_test (offline) ",
1924 "memory_test (offline) ",
1925 "int_loopback_test (offline)",
1926 "ext_loopback_test (offline)",
1927 "nvram_test (online) ",
1928 "interrupt_test (online) ",
1929 "link_test (online) "
de0c62db
DK
1930};
1931
e9939c80
YM
1932static u32 bnx2x_eee_to_adv(u32 eee_adv)
1933{
1934 u32 modes = 0;
1935
1936 if (eee_adv & SHMEM_EEE_100M_ADV)
1937 modes |= ADVERTISED_100baseT_Full;
1938 if (eee_adv & SHMEM_EEE_1G_ADV)
1939 modes |= ADVERTISED_1000baseT_Full;
1940 if (eee_adv & SHMEM_EEE_10G_ADV)
1941 modes |= ADVERTISED_10000baseT_Full;
1942
1943 return modes;
1944}
1945
1946static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
1947{
1948 u32 eee_adv = 0;
1949 if (modes & ADVERTISED_100baseT_Full)
1950 eee_adv |= SHMEM_EEE_100M_ADV;
1951 if (modes & ADVERTISED_1000baseT_Full)
1952 eee_adv |= SHMEM_EEE_1G_ADV;
1953 if (modes & ADVERTISED_10000baseT_Full)
1954 eee_adv |= SHMEM_EEE_10G_ADV;
1955
1956 return eee_adv << shift;
1957}
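
The pair of helpers above map between the ethtool ADVERTISED_* link-mode bits and the EEE advertisement bits kept in shared memory, shifted into whichever SHMEM field is being accessed. A self-contained round-trip sketch follows; the EEE bit positions defined locally are illustrative assumptions (the driver takes the real SHMEM_EEE_* definitions and shifts from its HSI headers), while the ADVERTISED_* values come from the ethtool UAPI header.

#include <stdint.h>
#include <stdio.h>
#include <linux/ethtool.h>	/* ADVERTISED_* link-mode bits */

/* Illustrative stand-ins for the SHMEM_EEE_*_ADV bits. */
#define EEE_100M_ADV	(1u << 0)
#define EEE_1G_ADV	(1u << 1)
#define EEE_10G_ADV	(1u << 2)

static uint32_t eee_to_adv(uint32_t eee_adv)
{
	uint32_t modes = 0;

	if (eee_adv & EEE_100M_ADV)
		modes |= ADVERTISED_100baseT_Full;
	if (eee_adv & EEE_1G_ADV)
		modes |= ADVERTISED_1000baseT_Full;
	if (eee_adv & EEE_10G_ADV)
		modes |= ADVERTISED_10000baseT_Full;
	return modes;
}

static uint32_t adv_to_eee(uint32_t modes, uint32_t shift)
{
	uint32_t eee_adv = 0;

	if (modes & ADVERTISED_100baseT_Full)
		eee_adv |= EEE_100M_ADV;
	if (modes & ADVERTISED_1000baseT_Full)
		eee_adv |= EEE_1G_ADV;
	if (modes & ADVERTISED_10000baseT_Full)
		eee_adv |= EEE_10G_ADV;
	return eee_adv << shift;
}

int main(void)
{
	uint32_t adv = ADVERTISED_1000baseT_Full | ADVERTISED_10000baseT_Full;

	/* A shift of 0 keeps the demo independent of the real SHMEM layout. */
	printf("adv 0x%x -> eee 0x%x -> adv 0x%x\n", adv,
	       adv_to_eee(adv, 0), eee_to_adv(adv_to_eee(adv, 0)));
	return 0;
}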
1958
1959static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
1960{
1961 struct bnx2x *bp = netdev_priv(dev);
1962 u32 eee_cfg;
1963
1964 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
1965 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
1966 return -EOPNOTSUPP;
1967 }
1968
08e9acc2 1969 eee_cfg = bp->link_vars.eee_status;
e9939c80
YM
1970
1971 edata->supported =
1972 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
1973 SHMEM_EEE_SUPPORTED_SHIFT);
1974
1975 edata->advertised =
1976 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
1977 SHMEM_EEE_ADV_STATUS_SHIFT);
1978 edata->lp_advertised =
1979 bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
1980 SHMEM_EEE_LP_ADV_STATUS_SHIFT);
1981
1982 /* SHMEM value is in 16u units --> Convert to 1u units. */
1983 edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
1984
1985 edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
1986 edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
1987 edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
1988
1989 return 0;
1990}
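
One detail worth calling out in the getter above: shared memory keeps the LPI timer in 16-unit steps (the "16u units" its comment refers to, versus the microsecond-based ethtool field), so the driver scales by 16 with a left shift of four. A one-line check of that conversion, with an invented register image and an assumed timer mask:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eee_cfg = 0x0000007d;		/* invented register image      */
	uint32_t timer_mask = 0x00ffffff;	/* assumed SHMEM_EEE_TIMER_MASK */

	/* 16-unit steps -> single units: 0x7d (125) becomes 2000 */
	printf("%u\n", (eee_cfg & timer_mask) << 4);
	return 0;
}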
1991
1992static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
1993{
1994 struct bnx2x *bp = netdev_priv(dev);
1995 u32 eee_cfg;
1996 u32 advertised;
1997
1998 if (IS_MF(bp))
1999 return 0;
2000
2001 if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
2002 DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
2003 return -EOPNOTSUPP;
2004 }
2005
08e9acc2 2006 eee_cfg = bp->link_vars.eee_status;
e9939c80
YM
2007
2008 if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
2009 DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
2010 return -EOPNOTSUPP;
2011 }
2012
2013 advertised = bnx2x_adv_to_eee(edata->advertised,
2014 SHMEM_EEE_ADV_STATUS_SHIFT);
2015 if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
2016 DP(BNX2X_MSG_ETHTOOL,
efc7ce03 2017 "Direct manipulation of EEE advertisement is not supported\n");
e9939c80
YM
2018 return -EINVAL;
2019 }
2020
2021 if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
2022 DP(BNX2X_MSG_ETHTOOL,
2023 "Maximal Tx Lpi timer supported is %x(u)\n",
2024 EEE_MODE_TIMER_MASK);
2025 return -EINVAL;
2026 }
2027 if (edata->tx_lpi_enabled &&
2028 (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
2029 DP(BNX2X_MSG_ETHTOOL,
2030 "Minimal Tx Lpi timer supported is %d(u)\n",
2031 EEE_MODE_NVRAM_AGGRESSIVE_TIME);
2032 return -EINVAL;
2033 }
2034
2035 /* All is well; apply changes */
2036 if (edata->eee_enabled)
2037 bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
2038 else
2039 bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
2040
2041 if (edata->tx_lpi_enabled)
2042 bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
2043 else
2044 bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
2045
2046 bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
2047 bp->link_params.eee_mode |= (edata->tx_lpi_timer &
2048 EEE_MODE_TIMER_MASK) |
2049 EEE_MODE_OVERRIDE_NVRAM |
2050 EEE_MODE_OUTPUT_TIME;
2051
2052 /* Restart link to propagate changes */
2053 if (netif_running(dev)) {
2054 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
5d07d868 2055 bnx2x_force_link_reset(bp);
e9939c80
YM
2056 bnx2x_link_set(bp);
2057 }
2058
2059 return 0;
2060}
2061
619c5cb6
VZ
2062enum {
2063 BNX2X_CHIP_E1_OFST = 0,
2064 BNX2X_CHIP_E1H_OFST,
2065 BNX2X_CHIP_E2_OFST,
2066 BNX2X_CHIP_E3_OFST,
2067 BNX2X_CHIP_E3B0_OFST,
2068 BNX2X_CHIP_MAX_OFST
2069};
2070
2071#define BNX2X_CHIP_MASK_E1 (1 << BNX2X_CHIP_E1_OFST)
2072#define BNX2X_CHIP_MASK_E1H (1 << BNX2X_CHIP_E1H_OFST)
2073#define BNX2X_CHIP_MASK_E2 (1 << BNX2X_CHIP_E2_OFST)
2074#define BNX2X_CHIP_MASK_E3 (1 << BNX2X_CHIP_E3_OFST)
2075#define BNX2X_CHIP_MASK_E3B0 (1 << BNX2X_CHIP_E3B0_OFST)
2076
2077#define BNX2X_CHIP_MASK_ALL ((1 << BNX2X_CHIP_MAX_OFST) - 1)
2078#define BNX2X_CHIP_MASK_E1X (BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
2079
de0c62db
DK
2080static int bnx2x_test_registers(struct bnx2x *bp)
2081{
2082 int idx, i, rc = -ENODEV;
619c5cb6 2083 u32 wr_val = 0, hw;
de0c62db
DK
2084 int port = BP_PORT(bp);
2085 static const struct {
619c5cb6 2086 u32 hw;
de0c62db
DK
2087 u32 offset0;
2088 u32 offset1;
2089 u32 mask;
2090 } reg_tbl[] = {
619c5cb6
VZ
2091/* 0 */ { BNX2X_CHIP_MASK_ALL,
2092 BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
2093 { BNX2X_CHIP_MASK_ALL,
2094 DORQ_REG_DB_ADDR0, 4, 0xffffffff },
2095 { BNX2X_CHIP_MASK_E1X,
2096 HC_REG_AGG_INT_0, 4, 0x000003ff },
2097 { BNX2X_CHIP_MASK_ALL,
2098 PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
2099 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
2100 PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
2101 { BNX2X_CHIP_MASK_E3B0,
2102 PBF_REG_INIT_CRD_Q0, 4, 0x000007ff },
2103 { BNX2X_CHIP_MASK_ALL,
2104 PRS_REG_CID_PORT_0, 4, 0x00ffffff },
2105 { BNX2X_CHIP_MASK_ALL,
2106 PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
2107 { BNX2X_CHIP_MASK_ALL,
2108 PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
2109 { BNX2X_CHIP_MASK_ALL,
2110 PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
2111/* 10 */ { BNX2X_CHIP_MASK_ALL,
2112 PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
2113 { BNX2X_CHIP_MASK_ALL,
2114 PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
2115 { BNX2X_CHIP_MASK_ALL,
2116 QM_REG_CONNNUM_0, 4, 0x000fffff },
2117 { BNX2X_CHIP_MASK_ALL,
2118 TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
2119 { BNX2X_CHIP_MASK_ALL,
2120 SRC_REG_KEYRSS0_0, 40, 0xffffffff },
2121 { BNX2X_CHIP_MASK_ALL,
2122 SRC_REG_KEYRSS0_7, 40, 0xffffffff },
2123 { BNX2X_CHIP_MASK_ALL,
2124 XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
2125 { BNX2X_CHIP_MASK_ALL,
2126 XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
2127 { BNX2X_CHIP_MASK_ALL,
2128 XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
2129 { BNX2X_CHIP_MASK_ALL,
2130 NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
2131/* 20 */ { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2132 NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
2133 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2134 NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
2135 { BNX2X_CHIP_MASK_ALL,
2136 NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
2137 { BNX2X_CHIP_MASK_ALL,
2138 NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
2139 { BNX2X_CHIP_MASK_ALL,
2140 NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
2141 { BNX2X_CHIP_MASK_ALL,
2142 NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
2143 { BNX2X_CHIP_MASK_ALL,
2144 NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
2145 { BNX2X_CHIP_MASK_ALL,
2146 NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
2147 { BNX2X_CHIP_MASK_ALL,
2148 NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
2149 { BNX2X_CHIP_MASK_ALL,
2150 NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
2151/* 30 */ { BNX2X_CHIP_MASK_ALL,
2152 NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
2153 { BNX2X_CHIP_MASK_ALL,
2154 NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
2155 { BNX2X_CHIP_MASK_ALL,
2156 NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
2157 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2158 NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
2159 { BNX2X_CHIP_MASK_ALL,
2160 NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
2161 { BNX2X_CHIP_MASK_ALL,
2162 NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
2163 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2164 NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
2165 { BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
2166 NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
2167
2168 { BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
de0c62db
DK
2169 };
2170
51c1a580
MS
2171 if (!netif_running(bp->dev)) {
2172 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2173 "cannot access eeprom when the interface is down\n");
de0c62db 2174 return rc;
51c1a580 2175 }
de0c62db 2176
619c5cb6
VZ
2177 if (CHIP_IS_E1(bp))
2178 hw = BNX2X_CHIP_MASK_E1;
2179 else if (CHIP_IS_E1H(bp))
2180 hw = BNX2X_CHIP_MASK_E1H;
2181 else if (CHIP_IS_E2(bp))
2182 hw = BNX2X_CHIP_MASK_E2;
2183 else if (CHIP_IS_E3B0(bp))
2184 hw = BNX2X_CHIP_MASK_E3B0;
2185 else /* e3 A0 */
2186 hw = BNX2X_CHIP_MASK_E3;
2187
de0c62db 2188 /* Repeat the test twice:
07ba6af4
MS
2189 * First by writing 0x00000000, second by writing 0xffffffff
2190 */
de0c62db
DK
2191 for (idx = 0; idx < 2; idx++) {
2192
2193 switch (idx) {
2194 case 0:
2195 wr_val = 0;
2196 break;
2197 case 1:
2198 wr_val = 0xffffffff;
2199 break;
2200 }
2201
2202 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
2203 u32 offset, mask, save_val, val;
619c5cb6 2204 if (!(hw & reg_tbl[i].hw))
f2e0899f 2205 continue;
de0c62db
DK
2206
2207 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
2208 mask = reg_tbl[i].mask;
2209
2210 save_val = REG_RD(bp, offset);
2211
ec6ba945 2212 REG_WR(bp, offset, wr_val & mask);
f85582f8 2213
de0c62db
DK
2214 val = REG_RD(bp, offset);
2215
2216 /* Restore the original register's value */
2217 REG_WR(bp, offset, save_val);
2218
2219 /* verify value is as expected */
2220 if ((val & mask) != (wr_val & mask)) {
51c1a580 2221 DP(BNX2X_MSG_ETHTOOL,
de0c62db
DK
2222 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
2223 offset, val, wr_val, mask);
2224 goto test_reg_exit;
2225 }
2226 }
2227 }
2228
2229 rc = 0;
2230
2231test_reg_exit:
2232 return rc;
2233}
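
bnx2x_test_registers() above drives each writable register with an all-zeros and an all-ones pattern, reads it back, restores the saved value, and fails if the read-back differs from the pattern under the register's writable-bit mask. The same write/read/restore/compare pattern, reduced to a self-contained sketch over an in-memory array standing in for REG_RD()/REG_WR():

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4];	/* fake register file */
static uint32_t reg_rd(unsigned int off)             { return regs[off]; }
static void     reg_wr(unsigned int off, uint32_t v) { regs[off] = v; }

/* Returns 0 when every register accepts both patterns under its mask. */
static int test_registers(const uint32_t *mask, unsigned int n)
{
	static const uint32_t patterns[] = { 0x00000000, 0xffffffff };
	unsigned int i, p;

	for (p = 0; p < 2; p++) {
		for (i = 0; i < n; i++) {
			uint32_t save = reg_rd(i);
			uint32_t val;

			reg_wr(i, patterns[p] & mask[i]);
			val = reg_rd(i);
			reg_wr(i, save);	/* always restore */

			if ((val & mask[i]) != (patterns[p] & mask[i])) {
				printf("reg %u: 0x%08x != 0x%08x (mask 0x%08x)\n",
				       i, val, patterns[p], mask[i]);
				return -1;
			}
		}
	}
	return 0;
}

int main(void)
{
	static const uint32_t mask[4] = {
		0x000003ff, 0xffffffff, 0x00000001, 0x000007ff
	};

	printf("result %d\n", test_registers(mask, 4));
	return 0;
}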
2234
2235static int bnx2x_test_memory(struct bnx2x *bp)
2236{
2237 int i, j, rc = -ENODEV;
619c5cb6 2238 u32 val, index;
de0c62db
DK
2239 static const struct {
2240 u32 offset;
2241 int size;
2242 } mem_tbl[] = {
2243 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
2244 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
2245 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
2246 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
2247 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
2248 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
2249 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
2250
2251 { 0xffffffff, 0 }
2252 };
619c5cb6 2253
de0c62db
DK
2254 static const struct {
2255 char *name;
2256 u32 offset;
619c5cb6 2257 u32 hw_mask[BNX2X_CHIP_MAX_OFST];
de0c62db 2258 } prty_tbl[] = {
619c5cb6
VZ
2259 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS,
2260 {0x3ffc0, 0, 0, 0} },
2261 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS,
2262 {0x2, 0x2, 0, 0} },
2263 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
2264 {0, 0, 0, 0} },
2265 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS,
2266 {0x3ffc0, 0, 0, 0} },
2267 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS,
2268 {0x3ffc0, 0, 0, 0} },
2269 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS,
2270 {0x3ffc1, 0, 0, 0} },
2271
2272 { NULL, 0xffffffff, {0, 0, 0, 0} }
de0c62db
DK
2273 };
2274
51c1a580
MS
2275 if (!netif_running(bp->dev)) {
2276 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2277 "cannot access eeprom when the interface is down\n");
de0c62db 2278 return rc;
51c1a580 2279 }
de0c62db 2280
619c5cb6
VZ
2281 if (CHIP_IS_E1(bp))
2282 index = BNX2X_CHIP_E1_OFST;
2283 else if (CHIP_IS_E1H(bp))
2284 index = BNX2X_CHIP_E1H_OFST;
2285 else if (CHIP_IS_E2(bp))
2286 index = BNX2X_CHIP_E2_OFST;
2287 else /* e3 */
2288 index = BNX2X_CHIP_E3_OFST;
2289
f2e0899f
DK
2290 /* Pre-check the parity status */
2291 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
2292 val = REG_RD(bp, prty_tbl[i].offset);
619c5cb6 2293 if (val & ~(prty_tbl[i].hw_mask[index])) {
51c1a580 2294 DP(BNX2X_MSG_ETHTOOL,
f2e0899f
DK
2295 "%s is 0x%x\n", prty_tbl[i].name, val);
2296 goto test_mem_exit;
2297 }
2298 }
2299
de0c62db
DK
2300 /* Go through all the memories */
2301 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
2302 for (j = 0; j < mem_tbl[i].size; j++)
2303 REG_RD(bp, mem_tbl[i].offset + j*4);
2304
2305 /* Check the parity status */
2306 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
2307 val = REG_RD(bp, prty_tbl[i].offset);
619c5cb6 2308 if (val & ~(prty_tbl[i].hw_mask[index])) {
51c1a580 2309 DP(BNX2X_MSG_ETHTOOL,
de0c62db
DK
2310 "%s is 0x%x\n", prty_tbl[i].name, val);
2311 goto test_mem_exit;
2312 }
2313 }
2314
2315 rc = 0;
2316
2317test_mem_exit:
2318 return rc;
2319}
2320
a22f0788 2321static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
de0c62db 2322{
f2e0899f 2323 int cnt = 1400;
de0c62db 2324
619c5cb6 2325 if (link_up) {
a22f0788 2326 while (bnx2x_link_test(bp, is_serdes) && cnt--)
619c5cb6
VZ
2327 msleep(20);
2328
2329 if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
51c1a580 2330 DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
8970b2e4
MS
2331
2332 cnt = 1400;
2333 while (!bp->link_vars.link_up && cnt--)
2334 msleep(20);
2335
2336 if (cnt <= 0 && !bp->link_vars.link_up)
2337 DP(BNX2X_MSG_ETHTOOL,
2338 "Timeout waiting for link init\n");
619c5cb6 2339 }
de0c62db
DK
2340}
2341
619c5cb6 2342static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
de0c62db
DK
2343{
2344 unsigned int pkt_size, num_pkts, i;
2345 struct sk_buff *skb;
2346 unsigned char *packet;
2347 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
2348 struct bnx2x_fastpath *fp_tx = &bp->fp[0];
65565884 2349 struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
de0c62db
DK
2350 u16 tx_start_idx, tx_idx;
2351 u16 rx_start_idx, rx_idx;
b0700b1e 2352 u16 pkt_prod, bd_prod;
de0c62db
DK
2353 struct sw_tx_bd *tx_buf;
2354 struct eth_tx_start_bd *tx_start_bd;
de0c62db
DK
2355 dma_addr_t mapping;
2356 union eth_rx_cqe *cqe;
619c5cb6 2357 u8 cqe_fp_flags, cqe_fp_type;
de0c62db
DK
2358 struct sw_rx_bd *rx_buf;
2359 u16 len;
2360 int rc = -ENODEV;
e52fcb24 2361 u8 *data;
8970b2e4
MS
2362 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
2363 txdata->txq_index);
de0c62db
DK
2364
2365 /* check the loopback mode */
2366 switch (loopback_mode) {
2367 case BNX2X_PHY_LOOPBACK:
8970b2e4
MS
2368 if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
2369 DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
de0c62db 2370 return -EINVAL;
8970b2e4 2371 }
de0c62db
DK
2372 break;
2373 case BNX2X_MAC_LOOPBACK:
32911333
YR
2374 if (CHIP_IS_E3(bp)) {
2375 int cfg_idx = bnx2x_get_link_cfg_idx(bp);
2376 if (bp->port.supported[cfg_idx] &
2377 (SUPPORTED_10000baseT_Full |
2378 SUPPORTED_20000baseMLD2_Full |
2379 SUPPORTED_20000baseKR2_Full))
2380 bp->link_params.loopback_mode = LOOPBACK_XMAC;
2381 else
2382 bp->link_params.loopback_mode = LOOPBACK_UMAC;
2383 } else
2384 bp->link_params.loopback_mode = LOOPBACK_BMAC;
2385
de0c62db
DK
2386 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2387 break;
8970b2e4
MS
2388 case BNX2X_EXT_LOOPBACK:
2389 if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
2390 DP(BNX2X_MSG_ETHTOOL,
2391 "Can't configure external loopback\n");
2392 return -EINVAL;
2393 }
2394 break;
de0c62db 2395 default:
51c1a580 2396 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
de0c62db
DK
2397 return -EINVAL;
2398 }
2399
2400 /* prepare the loopback packet */
2401 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
2402 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
a8c94b91 2403 skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
de0c62db 2404 if (!skb) {
51c1a580 2405 DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
de0c62db
DK
2406 rc = -ENOMEM;
2407 goto test_loopback_exit;
2408 }
2409 packet = skb_put(skb, pkt_size);
2410 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
2411 memset(packet + ETH_ALEN, 0, ETH_ALEN);
2412 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
2413 for (i = ETH_HLEN; i < pkt_size; i++)
2414 packet[i] = (unsigned char) (i & 0xff);
619c5cb6
VZ
2415 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2416 skb_headlen(skb), DMA_TO_DEVICE);
2417 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2418 rc = -ENOMEM;
2419 dev_kfree_skb(skb);
51c1a580 2420 DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
619c5cb6
VZ
2421 goto test_loopback_exit;
2422 }
de0c62db
DK
2423
2424 /* send the loopback packet */
2425 num_pkts = 0;
6383c0b3 2426 tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
de0c62db
DK
2427 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
2428
73dbb5e1
DK
2429 netdev_tx_sent_queue(txq, skb->len);
2430
6383c0b3
AE
2431 pkt_prod = txdata->tx_pkt_prod++;
2432 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2433 tx_buf->first_bd = txdata->tx_bd_prod;
de0c62db
DK
2434 tx_buf->skb = skb;
2435 tx_buf->flags = 0;
2436
6383c0b3
AE
2437 bd_prod = TX_BD(txdata->tx_bd_prod);
2438 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
de0c62db
DK
2439 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2440 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2441 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
2442 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
523224a3 2443 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
de0c62db 2444 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
523224a3
DK
2445 SET_FLAG(tx_start_bd->general_data,
2446 ETH_TX_START_BD_HDR_NBDS,
2447 1);
96bed4b9
YM
2448 SET_FLAG(tx_start_bd->general_data,
2449 ETH_TX_START_BD_PARSE_NBDS,
2450 0);
de0c62db
DK
2451
2452 /* turn on parsing and get a BD */
2453 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
f85582f8 2454
96bed4b9
YM
2455 if (CHIP_IS_E1x(bp)) {
2456 u16 global_data = 0;
2457 struct eth_tx_parse_bd_e1x *pbd_e1x =
2458 &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
2459 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2460 SET_FLAG(global_data,
2461 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2462 pbd_e1x->global_data = cpu_to_le16(global_data);
2463 } else {
2464 u32 parsing_data = 0;
2465 struct eth_tx_parse_bd_e2 *pbd_e2 =
2466 &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
2467 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2468 SET_FLAG(parsing_data,
2469 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
2470 pbd_e2->parsing_data = cpu_to_le32(parsing_data);
2471 }
de0c62db
DK
2472 wmb();
2473
6383c0b3 2474 txdata->tx_db.data.prod += 2;
de0c62db 2475 barrier();
6383c0b3 2476 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
de0c62db
DK
2477
2478 mmiowb();
619c5cb6 2479 barrier();
de0c62db
DK
2480
2481 num_pkts++;
6383c0b3 2482 txdata->tx_bd_prod += 2; /* start + pbd */
de0c62db
DK
2483
2484 udelay(100);
2485
6383c0b3 2486 tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
de0c62db
DK
2487 if (tx_idx != tx_start_idx + num_pkts)
2488 goto test_loopback_exit;
2489
f2e0899f
DK
2490 /* Unlike HC IGU won't generate an interrupt for status block
2491 * updates that have been performed while interrupts were
2492 * disabled.
2493 */
e1210d12
ED
2494 if (bp->common.int_block == INT_BLOCK_IGU) {
2495 /* Disable local BHes to prevent a dead-lock situation between
2496 * sch_direct_xmit() and bnx2x_run_loopback() (calling
2497 * bnx2x_tx_int()), as both are taking netif_tx_lock().
2498 */
2499 local_bh_disable();
6383c0b3 2500 bnx2x_tx_int(bp, txdata);
e1210d12
ED
2501 local_bh_enable();
2502 }
f2e0899f 2503
de0c62db
DK
2504 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
2505 if (rx_idx != rx_start_idx + num_pkts)
2506 goto test_loopback_exit;
2507
b0700b1e 2508 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
de0c62db 2509 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
619c5cb6
VZ
2510 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
2511 if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
de0c62db
DK
2512 goto test_loopback_rx_exit;
2513
621b4d66 2514 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
de0c62db
DK
2515 if (len != pkt_size)
2516 goto test_loopback_rx_exit;
2517
2518 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9924cafc 2519 dma_sync_single_for_cpu(&bp->pdev->dev,
619c5cb6
VZ
2520 dma_unmap_addr(rx_buf, mapping),
2521 fp_rx->rx_buf_size, DMA_FROM_DEVICE);
e52fcb24 2522 data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
de0c62db 2523 for (i = ETH_HLEN; i < pkt_size; i++)
e52fcb24 2524 if (*(data + i) != (unsigned char) (i & 0xff))
de0c62db
DK
2525 goto test_loopback_rx_exit;
2526
2527 rc = 0;
2528
2529test_loopback_rx_exit:
2530
2531 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
2532 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
2533 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
2534 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
2535
2536 /* Update producers */
2537 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
2538 fp_rx->rx_sge_prod);
2539
2540test_loopback_exit:
2541 bp->link_params.loopback_mode = LOOPBACK_NONE;
2542
2543 return rc;
2544}
2545
619c5cb6 2546static int bnx2x_test_loopback(struct bnx2x *bp)
de0c62db
DK
2547{
2548 int rc = 0, res;
2549
2550 if (BP_NOMCP(bp))
2551 return rc;
2552
2553 if (!netif_running(bp->dev))
2554 return BNX2X_LOOPBACK_FAILED;
2555
2556 bnx2x_netif_stop(bp, 1);
2557 bnx2x_acquire_phy_lock(bp);
2558
619c5cb6 2559 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
de0c62db 2560 if (res) {
51c1a580 2561 DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
de0c62db
DK
2562 rc |= BNX2X_PHY_LOOPBACK_FAILED;
2563 }
2564
619c5cb6 2565 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
de0c62db 2566 if (res) {
51c1a580 2567 DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
de0c62db
DK
2568 rc |= BNX2X_MAC_LOOPBACK_FAILED;
2569 }
2570
2571 bnx2x_release_phy_lock(bp);
2572 bnx2x_netif_start(bp);
2573
2574 return rc;
2575}
2576
8970b2e4
MS
2577static int bnx2x_test_ext_loopback(struct bnx2x *bp)
2578{
2579 int rc;
2580 u8 is_serdes =
2581 (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
2582
2583 if (BP_NOMCP(bp))
2584 return -ENODEV;
2585
2586 if (!netif_running(bp->dev))
2587 return BNX2X_EXT_LOOPBACK_FAILED;
2588
5d07d868 2589 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
8970b2e4
MS
2590 rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
2591 if (rc) {
2592 DP(BNX2X_MSG_ETHTOOL,
2593 "Can't perform self-test, nic_load (for external lb) failed\n");
2594 return -ENODEV;
2595 }
2596 bnx2x_wait_for_link(bp, 1, is_serdes);
2597
2598 bnx2x_netif_stop(bp, 1);
2599
2600 rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
2601 if (rc)
2602 DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
2603
2604 bnx2x_netif_start(bp);
2605
2606 return rc;
2607}
2608
edb944d2
DK
2609struct code_entry {
2610 u32 sram_start_addr;
2611 u32 code_attribute;
2612#define CODE_IMAGE_TYPE_MASK 0xf0800003
2613#define CODE_IMAGE_VNTAG_PROFILES_DATA 0xd0000003
2614#define CODE_IMAGE_LENGTH_MASK 0x007ffffc
2615#define CODE_IMAGE_TYPE_EXTENDED_DIR 0xe0000000
2616 u32 nvm_start_addr;
2617};
2618
2619#define CODE_ENTRY_MAX 16
2620#define CODE_ENTRY_EXTENDED_DIR_IDX 15
2621#define MAX_IMAGES_IN_EXTENDED_DIR 64
2622#define NVRAM_DIR_OFFSET 0x14
2623
2624#define EXTENDED_DIR_EXISTS(code) \
2625 ((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
2626 (code & CODE_IMAGE_LENGTH_MASK) != 0)
2627
de0c62db 2628#define CRC32_RESIDUAL 0xdebb20e3
edb944d2
DK
2629#define CRC_BUFF_SIZE 256
2630
2631static int bnx2x_nvram_crc(struct bnx2x *bp,
2632 int offset,
2633 int size,
2634 u8 *buff)
2635{
2636 u32 crc = ~0;
2637 int rc = 0, done = 0;
2638
2639 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2640 "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
2641
2642 while (done < size) {
2643 int count = min_t(int, size - done, CRC_BUFF_SIZE);
2644
2645 rc = bnx2x_nvram_read(bp, offset + done, buff, count);
2646
2647 if (rc)
2648 return rc;
2649
2650 crc = crc32_le(crc, buff, count);
2651 done += count;
2652 }
2653
2654 if (crc != CRC32_RESIDUAL)
2655 rc = -EINVAL;
2656
2657 return rc;
2658}
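
bnx2x_nvram_crc() never parses a stored checksum out of the image; it runs crc32_le() from an all-ones seed across the whole region and expects the register to land on CRC32_RESIDUAL (0xdebb20e3). That works because, presumably, each region carries its own little-endian CRC-32 at its end, and feeding data plus appended CRC through the reflected CRC always collapses to that fixed residual. A user-space sketch of the same property, with a bit-by-bit routine standing in for the kernel's crc32_le():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-32 (poly 0xedb88320), no final inversion, like crc32_le(). */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		int i;

		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t buf[16];
	uint32_t crc;
	int i;

	memset(buf, 0x5a, 12);				/* 12 payload bytes */
	crc = ~crc32_le_bitwise(~0u, buf, 12);		/* standard CRC-32  */
	for (i = 0; i < 4; i++)				/* append it, LE    */
		buf[12 + i] = crc >> (8 * i);

	/* Over payload plus stored CRC the register hits the residual. */
	printf("0x%08x (expect 0xdebb20e3)\n", crc32_le_bitwise(~0u, buf, 16));
	return 0;
}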
2659
2660static int bnx2x_test_nvram_dir(struct bnx2x *bp,
2661 struct code_entry *entry,
2662 u8 *buff)
2663{
2664 size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
2665 u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
2666 int rc;
2667
2668 /* Zero-length images and AFEX profiles do not have CRC */
2669 if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
2670 return 0;
2671
2672 rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
2673 if (rc)
2674 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2675 "image %x has failed crc test (rc %d)\n", type, rc);
2676
2677 return rc;
2678}
2679
2680static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
2681{
2682 int rc;
2683 struct code_entry entry;
2684
2685 rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
2686 if (rc)
2687 return rc;
2688
2689 return bnx2x_test_nvram_dir(bp, &entry, buff);
2690}
2691
2692static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
2693{
2694 u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
2695 struct code_entry entry;
2696 int i;
2697
2698 rc = bnx2x_nvram_read32(bp,
2699 dir_offset +
2700 sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
2701 (u32 *)&entry, sizeof(entry));
2702 if (rc)
2703 return rc;
2704
2705 if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
2706 return 0;
2707
2708 rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
2709 &cnt, sizeof(u32));
2710 if (rc)
2711 return rc;
2712
2713 dir_offset = entry.nvm_start_addr + 8;
2714
2715 for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
2716 rc = bnx2x_test_dir_entry(bp, dir_offset +
2717 sizeof(struct code_entry) * i,
2718 buff);
2719 if (rc)
2720 return rc;
2721 }
2722
2723 return 0;
2724}
2725
2726static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
2727{
2728 u32 rc, dir_offset = NVRAM_DIR_OFFSET;
2729 int i;
2730
2731 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
2732
2733 for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
2734 rc = bnx2x_test_dir_entry(bp, dir_offset +
2735 sizeof(struct code_entry) * i,
2736 buff);
2737 if (rc)
2738 return rc;
2739 }
2740
2741 return bnx2x_test_nvram_ext_dirs(bp, buff);
2742}
2743
2744struct crc_pair {
2745 int offset;
2746 int size;
2747};
2748
2749static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
2750 const struct crc_pair *nvram_tbl, u8 *buf)
2751{
2752 int i;
2753
2754 for (i = 0; nvram_tbl[i].size; i++) {
2755 int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
2756 nvram_tbl[i].size, buf);
2757 if (rc) {
2758 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2759 "nvram_tbl[%d] has failed crc test (rc %d)\n",
2760 i, rc);
2761 return rc;
2762 }
2763 }
2764
2765 return 0;
2766}
de0c62db
DK
2767
2768static int bnx2x_test_nvram(struct bnx2x *bp)
2769{
edb944d2 2770 const struct crc_pair nvram_tbl[] = {
de0c62db
DK
2771 { 0, 0x14 }, /* bootstrap */
2772 { 0x14, 0xec }, /* dir */
2773 { 0x100, 0x350 }, /* manuf_info */
2774 { 0x450, 0xf0 }, /* feature_info */
2775 { 0x640, 0x64 }, /* upgrade_key_info */
de0c62db 2776 { 0x708, 0x70 }, /* manuf_key_info */
de0c62db
DK
2777 { 0, 0 }
2778 };
edb944d2
DK
2779 const struct crc_pair nvram_tbl2[] = {
2780 { 0x7e8, 0x350 }, /* manuf_info2 */
2781 { 0xb38, 0xf0 }, /* feature_info */
2782 { 0, 0 }
2783 };
2784
85640952 2785 u8 *buf;
edb944d2
DK
2786 int rc;
2787 u32 magic;
de0c62db
DK
2788
2789 if (BP_NOMCP(bp))
2790 return 0;
2791
edb944d2 2792 buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
afa13b4b 2793 if (!buf) {
51c1a580 2794 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
afa13b4b
MY
2795 rc = -ENOMEM;
2796 goto test_nvram_exit;
2797 }
afa13b4b 2798
85640952 2799 rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
de0c62db 2800 if (rc) {
51c1a580
MS
2801 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2802 "magic value read (rc %d)\n", rc);
de0c62db
DK
2803 goto test_nvram_exit;
2804 }
2805
de0c62db 2806 if (magic != 0x669955aa) {
51c1a580
MS
2807 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2808 "wrong magic value (0x%08x)\n", magic);
de0c62db
DK
2809 rc = -ENODEV;
2810 goto test_nvram_exit;
2811 }
2812
edb944d2
DK
2813 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
2814 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
2815 if (rc)
2816 goto test_nvram_exit;
de0c62db 2817
edb944d2
DK
2818 if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
2819 u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
2820 SHARED_HW_CFG_HIDE_PORT1;
de0c62db 2821
edb944d2 2822 if (!hide) {
51c1a580 2823 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
edb944d2
DK
2824 "Port 1 CRC test-set\n");
2825 rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
2826 if (rc)
2827 goto test_nvram_exit;
de0c62db
DK
2828 }
2829 }
2830
edb944d2
DK
2831 rc = bnx2x_test_nvram_dirs(bp, buf);
2832
de0c62db 2833test_nvram_exit:
afa13b4b 2834 kfree(buf);
de0c62db
DK
2835 return rc;
2836}
2837
619c5cb6 2838/* Send an EMPTY ramrod on the first queue */
de0c62db
DK
2839static int bnx2x_test_intr(struct bnx2x *bp)
2840{
3b603066 2841 struct bnx2x_queue_state_params params = {NULL};
de0c62db 2842
51c1a580
MS
2843 if (!netif_running(bp->dev)) {
2844 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
2845 "cannot access eeprom when the interface is down\n");
de0c62db 2846 return -ENODEV;
51c1a580 2847 }
de0c62db 2848
15192a8c 2849 params.q_obj = &bp->sp_objs->q_obj;
619c5cb6 2850 params.cmd = BNX2X_Q_CMD_EMPTY;
de0c62db 2851
619c5cb6
VZ
2852 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2853
2854 return bnx2x_queue_state_change(bp, &params);
de0c62db
DK
2855}
2856
2857static void bnx2x_self_test(struct net_device *dev,
2858 struct ethtool_test *etest, u64 *buf)
2859{
2860 struct bnx2x *bp = netdev_priv(dev);
a336ca7c
YR
2861 u8 is_serdes, link_up;
2862 int rc, cnt = 0;
cf2c1df6 2863
de0c62db 2864 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580
MS
2865 netdev_err(bp->dev,
2866 "Handling parity error recovery. Try again later\n");
de0c62db
DK
2867 etest->flags |= ETH_TEST_FL_FAILED;
2868 return;
2869 }
2de67439 2870
8970b2e4
MS
2871 DP(BNX2X_MSG_ETHTOOL,
2872 "Self-test command parameters: offline = %d, external_lb = %d\n",
2873 (etest->flags & ETH_TEST_FL_OFFLINE),
2874 (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
de0c62db 2875
cf2c1df6 2876 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
de0c62db 2877
cf2c1df6 2878 if (!netif_running(dev)) {
97cd1ee6
DK
2879 DP(BNX2X_MSG_ETHTOOL,
2880 "Can't perform self-test when interface is down\n");
de0c62db 2881 return;
cf2c1df6 2882 }
de0c62db 2883
a22f0788 2884 is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
a336ca7c 2885 link_up = bp->link_vars.link_up;
cf2c1df6
MS
2886 /* offline tests are not supported in MF mode */
2887 if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
de0c62db
DK
2888 int port = BP_PORT(bp);
2889 u32 val;
de0c62db
DK
2890
2891 /* save current value of input enable for TX port IF */
2892 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
2893 /* disable input for TX port IF */
2894 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
2895
5d07d868 2896 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
cf2c1df6
MS
2897 rc = bnx2x_nic_load(bp, LOAD_DIAG);
2898 if (rc) {
2899 etest->flags |= ETH_TEST_FL_FAILED;
2900 DP(BNX2X_MSG_ETHTOOL,
2901 "Can't perform self-test, nic_load (for offline) failed\n");
2902 return;
2903 }
2904
de0c62db 2905 /* wait until link state is restored */
619c5cb6 2906 bnx2x_wait_for_link(bp, 1, is_serdes);
de0c62db
DK
2907
2908 if (bnx2x_test_registers(bp) != 0) {
2909 buf[0] = 1;
2910 etest->flags |= ETH_TEST_FL_FAILED;
2911 }
2912 if (bnx2x_test_memory(bp) != 0) {
2913 buf[1] = 1;
2914 etest->flags |= ETH_TEST_FL_FAILED;
2915 }
f85582f8 2916
8970b2e4 2917 buf[2] = bnx2x_test_loopback(bp); /* internal LB */
de0c62db
DK
2918 if (buf[2] != 0)
2919 etest->flags |= ETH_TEST_FL_FAILED;
2920
8970b2e4
MS
2921 if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
2922 buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
2923 if (buf[3] != 0)
2924 etest->flags |= ETH_TEST_FL_FAILED;
2925 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
2926 }
2927
5d07d868 2928 bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
de0c62db
DK
2929
2930 /* restore input for TX port IF */
2931 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
cf2c1df6
MS
2932 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2933 if (rc) {
2934 etest->flags |= ETH_TEST_FL_FAILED;
2935 DP(BNX2X_MSG_ETHTOOL,
2936 "Can't perform self-test, nic_load (for online) failed\n");
2937 return;
2938 }
de0c62db 2939 /* wait until link state is restored */
a22f0788 2940 bnx2x_wait_for_link(bp, link_up, is_serdes);
de0c62db 2941 }
97cd1ee6
DK
2942 if (bnx2x_test_nvram(bp) != 0) {
2943 if (!IS_MF(bp))
2944 buf[4] = 1;
2945 else
2946 buf[0] = 1;
2947 etest->flags |= ETH_TEST_FL_FAILED;
2948 }
de0c62db 2949 if (bnx2x_test_intr(bp) != 0) {
cf2c1df6
MS
2950 if (!IS_MF(bp))
2951 buf[5] = 1;
2952 else
2953 buf[1] = 1;
de0c62db
DK
2954 etest->flags |= ETH_TEST_FL_FAILED;
2955 }
633ac363 2956
a336ca7c
YR
2957 if (link_up) {
2958 cnt = 100;
2959 while (bnx2x_link_test(bp, is_serdes) && --cnt)
2960 msleep(20);
2961 }
2962
2963 if (!cnt) {
cf2c1df6
MS
2964 if (!IS_MF(bp))
2965 buf[6] = 1;
2966 else
2967 buf[2] = 1;
633ac363
DK
2968 etest->flags |= ETH_TEST_FL_FAILED;
2969 }
de0c62db
DK
2970}
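
bnx2x_self_test() reports through the u64 result array that ethtool pairs with the ETH_SS_TEST strings; because the four offline tests are skipped in multi-function mode (and bnx2x_get_strings() starts the string table at index 4 there), the same test lands at a different slot depending on the mode. A small sketch of the resulting layout, derived from the buf[] assignments above:

#include <stdio.h>

int main(void)
{
	static const char *sf[] = { "register", "memory", "int_loopback",
				    "ext_loopback", "nvram", "interrupt",
				    "link" };
	static const char *mf[] = { "nvram", "interrupt", "link" };
	unsigned int i;

	for (i = 0; i < 7; i++)		/* single-function mode */
		printf("SF buf[%u] = %s_test\n", i, sf[i]);
	for (i = 0; i < 3; i++)		/* multi-function mode  */
		printf("MF buf[%u] = %s_test\n", i, mf[i]);
	return 0;
}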
2971
de0c62db
DK
2972#define IS_PORT_STAT(i) \
2973 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
2974#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
fb3bff17
DK
2975#define IS_MF_MODE_STAT(bp) \
2976 (IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
de0c62db 2977
619c5cb6
VZ
2978/* ethtool statistics are displayed for all regular ethernet queues and the
2979 * fcoe L2 queue if not disabled
2980 */
1191cb83 2981static int bnx2x_num_stat_queues(struct bnx2x *bp)
619c5cb6
VZ
2982{
2983 return BNX2X_NUM_ETH_QUEUES(bp);
2984}
2985
de0c62db
DK
2986static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
2987{
2988 struct bnx2x *bp = netdev_priv(dev);
2989 int i, num_stats;
2990
2991 switch (stringset) {
2992 case ETH_SS_STATS:
2993 if (is_multi(bp)) {
619c5cb6 2994 num_stats = bnx2x_num_stat_queues(bp) *
d5e83632
YM
2995 BNX2X_NUM_Q_STATS;
2996 } else
2997 num_stats = 0;
2998 if (IS_MF_MODE_STAT(bp)) {
2999 for (i = 0; i < BNX2X_NUM_STATS; i++)
3000 if (IS_FUNC_STAT(i))
3001 num_stats++;
3002 } else
3003 num_stats += BNX2X_NUM_STATS;
3004
de0c62db
DK
3005 return num_stats;
3006
3007 case ETH_SS_TEST:
cf2c1df6 3008 return BNX2X_NUM_TESTS(bp);
de0c62db
DK
3009
3010 default:
3011 return -EINVAL;
3012 }
3013}
3014
3015static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
3016{
3017 struct bnx2x *bp = netdev_priv(dev);
5889335c 3018 int i, j, k, start;
ec6ba945 3019 char queue_name[MAX_QUEUE_NAME_LEN+1];
de0c62db
DK
3020
3021 switch (stringset) {
3022 case ETH_SS_STATS:
d5e83632 3023 k = 0;
de0c62db 3024 if (is_multi(bp)) {
619c5cb6 3025 for_each_eth_queue(bp, i) {
ec6ba945 3026 memset(queue_name, 0, sizeof(queue_name));
619c5cb6 3027 sprintf(queue_name, "%d", i);
de0c62db 3028 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
ec6ba945
VZ
3029 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
3030 ETH_GSTRING_LEN,
3031 bnx2x_q_stats_arr[j].string,
3032 queue_name);
de0c62db
DK
3033 k += BNX2X_NUM_Q_STATS;
3034 }
de0c62db 3035 }
d5e83632
YM
3036
3037
3038 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3039 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
3040 continue;
3041 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
3042 bnx2x_stats_arr[i].string);
3043 j++;
3044 }
3045
de0c62db
DK
3046 break;
3047
3048 case ETH_SS_TEST:
cf2c1df6
MS
3049 /* First 4 tests cannot be done in MF mode */
3050 if (!IS_MF(bp))
3051 start = 0;
3052 else
3053 start = 4;
5889335c
MS
3054 memcpy(buf, bnx2x_tests_str_arr + start,
3055 ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
de0c62db
DK
3056 }
3057}
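
bnx2x_get_strings() packs each name into a fixed ETH_GSTRING_LEN slot of the flat buffer, and the per-queue names are produced by feeding the queue number into the printf-style format stored in the stats table. A stripped-down sketch of that slot packing; the two format strings and the 32-byte slot size here are hypothetical placeholders, not the driver's actual table entries:

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32			/* stands in for ETH_GSTRING_LEN */

int main(void)
{
	/* hypothetical per-queue formats standing in for the stats table */
	static const char *fmt[] = { "[%s]: sample_rx", "[%s]: sample_tx" };
	char buf[4 * 2 * GSTRING_LEN];	/* 4 queues x 2 counters */
	char qname[8];
	unsigned int i, j, k = 0;

	memset(buf, 0, sizeof(buf));
	for (i = 0; i < 4; i++) {			/* for_each_eth_queue() */
		snprintf(qname, sizeof(qname), "%u", i);
		for (j = 0; j < 2; j++)			/* per-queue counters   */
			snprintf(buf + (k + j) * GSTRING_LEN, GSTRING_LEN,
				 fmt[j], qname);
		k += 2;
	}
	printf("%s / %s\n", buf, buf + 3 * GSTRING_LEN);	/* slots 0 and 3 */
	return 0;
}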
3058
3059static void bnx2x_get_ethtool_stats(struct net_device *dev,
3060 struct ethtool_stats *stats, u64 *buf)
3061{
3062 struct bnx2x *bp = netdev_priv(dev);
3063 u32 *hw_stats, *offset;
d5e83632 3064 int i, j, k = 0;
de0c62db
DK
3065
3066 if (is_multi(bp)) {
619c5cb6 3067 for_each_eth_queue(bp, i) {
15192a8c 3068 hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
de0c62db
DK
3069 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
3070 if (bnx2x_q_stats_arr[j].size == 0) {
3071 /* skip this counter */
3072 buf[k + j] = 0;
3073 continue;
3074 }
3075 offset = (hw_stats +
3076 bnx2x_q_stats_arr[j].offset);
3077 if (bnx2x_q_stats_arr[j].size == 4) {
3078 /* 4-byte counter */
3079 buf[k + j] = (u64) *offset;
3080 continue;
3081 }
3082 /* 8-byte counter */
3083 buf[k + j] = HILO_U64(*offset, *(offset + 1));
3084 }
3085 k += BNX2X_NUM_Q_STATS;
3086 }
d5e83632
YM
3087 }
3088
3089 hw_stats = (u32 *)&bp->eth_stats;
3090 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
3091 if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
3092 continue;
3093 if (bnx2x_stats_arr[i].size == 0) {
3094 /* skip this counter */
3095 buf[k + j] = 0;
3096 j++;
3097 continue;
de0c62db 3098 }
d5e83632
YM
3099 offset = (hw_stats + bnx2x_stats_arr[i].offset);
3100 if (bnx2x_stats_arr[i].size == 4) {
3101 /* 4-byte counter */
3102 buf[k + j] = (u64) *offset;
de0c62db 3103 j++;
d5e83632 3104 continue;
de0c62db 3105 }
d5e83632
YM
3106 /* 8-byte counter */
3107 buf[k + j] = HILO_U64(*offset, *(offset + 1));
3108 j++;
de0c62db
DK
3109 }
3110}
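
In the stats gatherer above, 4-byte counters are copied straight into the u64 result slot while 8-byte counters are stored as two consecutive u32 words, high word first, and recombined via HILO_U64(). A minimal sketch of that recombination, assuming HILO_U64(hi, lo) expands to (((u64)(hi) << 32) + (lo)):

#include <stdint.h>
#include <stdio.h>

/* assumed expansion of the driver's HILO_U64() helper */
#define HILO_U64(hi, lo)	((((uint64_t)(hi)) << 32) + (lo))

int main(void)
{
	/* an 8-byte counter kept as two consecutive u32s, high word first */
	uint32_t counter[2] = { 0x00000001, 0x23456789 };

	printf("%llu\n", (unsigned long long)HILO_U64(counter[0], counter[1]));
	return 0;
}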
3111
32d36134 3112static int bnx2x_set_phys_id(struct net_device *dev,
3113 enum ethtool_phys_id_state state)
de0c62db
DK
3114{
3115 struct bnx2x *bp = netdev_priv(dev);
de0c62db 3116
51c1a580
MS
3117 if (!netif_running(dev)) {
3118 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
3119 "cannot access eeprom when the interface is down\n");
32d36134 3120 return -EAGAIN;
51c1a580 3121 }
de0c62db 3122
51c1a580
MS
3123 if (!bp->port.pmf) {
3124 DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
32d36134 3125 return -EOPNOTSUPP;
51c1a580 3126 }
de0c62db 3127
32d36134 3128 switch (state) {
3129 case ETHTOOL_ID_ACTIVE:
fce55922 3130 return 1; /* cycle on/off once per second */
de0c62db 3131
32d36134 3132 case ETHTOOL_ID_ON:
8203c4b6 3133 bnx2x_acquire_phy_lock(bp);
32d36134 3134 bnx2x_set_led(&bp->link_params, &bp->link_vars,
e1943424 3135 LED_MODE_ON, SPEED_1000);
8203c4b6 3136 bnx2x_release_phy_lock(bp);
32d36134 3137 break;
de0c62db 3138
32d36134 3139 case ETHTOOL_ID_OFF:
8203c4b6 3140 bnx2x_acquire_phy_lock(bp);
32d36134 3141 bnx2x_set_led(&bp->link_params, &bp->link_vars,
e1943424 3142 LED_MODE_FRONT_PANEL_OFF, 0);
8203c4b6 3143 bnx2x_release_phy_lock(bp);
32d36134 3144 break;
3145
3146 case ETHTOOL_ID_INACTIVE:
8203c4b6 3147 bnx2x_acquire_phy_lock(bp);
e1943424
DM
3148 bnx2x_set_led(&bp->link_params, &bp->link_vars,
3149 LED_MODE_OPER,
3150 bp->link_vars.line_speed);
8203c4b6 3151 bnx2x_release_phy_lock(bp);
32d36134 3152 }
de0c62db
DK
3153
3154 return 0;
3155}
3156
5d317c6a
MS
3157static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3158{
3159
3160 switch (info->flow_type) {
3161 case TCP_V4_FLOW:
3162 case TCP_V6_FLOW:
3163 info->data = RXH_IP_SRC | RXH_IP_DST |
3164 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3165 break;
3166 case UDP_V4_FLOW:
3167 if (bp->rss_conf_obj.udp_rss_v4)
3168 info->data = RXH_IP_SRC | RXH_IP_DST |
3169 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3170 else
3171 info->data = RXH_IP_SRC | RXH_IP_DST;
3172 break;
3173 case UDP_V6_FLOW:
3174 if (bp->rss_conf_obj.udp_rss_v6)
3175 info->data = RXH_IP_SRC | RXH_IP_DST |
3176 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3177 else
3178 info->data = RXH_IP_SRC | RXH_IP_DST;
3179 break;
3180 case IPV4_FLOW:
3181 case IPV6_FLOW:
3182 info->data = RXH_IP_SRC | RXH_IP_DST;
3183 break;
3184 default:
3185 info->data = 0;
3186 break;
3187 }
3188
3189 return 0;
3190}
3191
ab532cf3 3192static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
815c7db5 3193 u32 *rules __always_unused)
ab532cf3
TH
3194{
3195 struct bnx2x *bp = netdev_priv(dev);
3196
3197 switch (info->cmd) {
3198 case ETHTOOL_GRXRINGS:
3199 info->data = BNX2X_NUM_ETH_QUEUES(bp);
3200 return 0;
5d317c6a
MS
3201 case ETHTOOL_GRXFH:
3202 return bnx2x_get_rss_flags(bp, info);
3203 default:
3204 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
3205 return -EOPNOTSUPP;
3206 }
3207}
3208
3209static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3210{
3211 int udp_rss_requested;
3212
3213 DP(BNX2X_MSG_ETHTOOL,
3214 "Set rss flags command parameters: flow type = %d, data = %llu\n",
3215 info->flow_type, info->data);
3216
3217 switch (info->flow_type) {
3218 case TCP_V4_FLOW:
3219 case TCP_V6_FLOW:
3220 /* For TCP only 4-tuple hash is supported */
3221 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
3222 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
3223 DP(BNX2X_MSG_ETHTOOL,
3224 "Command parameters not supported\n");
3225 return -EINVAL;
5d317c6a 3226 }
2de67439 3227 return 0;
5d317c6a
MS
3228
3229 case UDP_V4_FLOW:
3230 case UDP_V6_FLOW:
3231 /* For UDP either 2-tuple hash or 4-tuple hash is supported */
3232 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
2de67439 3233 RXH_L4_B_0_1 | RXH_L4_B_2_3))
5d317c6a
MS
3234 udp_rss_requested = 1;
3235 else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
3236 udp_rss_requested = 0;
3237 else
3238 return -EINVAL;
3239 if ((info->flow_type == UDP_V4_FLOW) &&
3240 (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
3241 bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
3242 DP(BNX2X_MSG_ETHTOOL,
3243 "rss re-configured, UDP 4-tupple %s\n",
3244 udp_rss_requested ? "enabled" : "disabled");
3245 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
3246 } else if ((info->flow_type == UDP_V6_FLOW) &&
3247 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
3248 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
5d317c6a
MS
3249 DP(BNX2X_MSG_ETHTOOL,
3250 "rss re-configured, UDP 4-tupple %s\n",
3251 udp_rss_requested ? "enabled" : "disabled");
337da3e3 3252 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
5d317c6a 3253 }
924d75ab
YM
3254 return 0;
3255
5d317c6a
MS
3256 case IPV4_FLOW:
3257 case IPV6_FLOW:
3258 /* For IP only 2-tuple hash is supported */
3259 if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
3260 DP(BNX2X_MSG_ETHTOOL,
3261 "Command parameters not supported\n");
3262 return -EINVAL;
5d317c6a 3263 }
924d75ab
YM
3264 return 0;
3265
5d317c6a
MS
3266 case SCTP_V4_FLOW:
3267 case AH_ESP_V4_FLOW:
3268 case AH_V4_FLOW:
3269 case ESP_V4_FLOW:
3270 case SCTP_V6_FLOW:
3271 case AH_ESP_V6_FLOW:
3272 case AH_V6_FLOW:
3273 case ESP_V6_FLOW:
3274 case IP_USER_FLOW:
3275 case ETHER_FLOW:
3276 /* RSS is not supported for these protocols */
3277 if (info->data) {
3278 DP(BNX2X_MSG_ETHTOOL,
3279 "Command parameters not supported\n");
3280 return -EINVAL;
5d317c6a 3281 }
924d75ab
YM
3282 return 0;
3283
5d317c6a
MS
3284 default:
3285 return -EINVAL;
3286 }
3287}
3288
3289static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
3290{
3291 struct bnx2x *bp = netdev_priv(dev);
ab532cf3 3292
5d317c6a
MS
3293 switch (info->cmd) {
3294 case ETHTOOL_SRXFH:
3295 return bnx2x_set_rss_flags(bp, info);
ab532cf3 3296 default:
51c1a580 3297 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
ab532cf3
TH
3298 return -EOPNOTSUPP;
3299 }
3300}
3301
7850f63f
BH
3302static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
3303{
96305234 3304 return T_ETH_INDIRECTION_TABLE_SIZE;
7850f63f
BH
3305}
3306
3307static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
ab532cf3
TH
3308{
3309 struct bnx2x *bp = netdev_priv(dev);
619c5cb6
VZ
3310 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
3311 size_t i;
ab532cf3 3312
619c5cb6
VZ
3313 /* Get the current configuration of the RSS indirection table */
3314 bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
3315
3316 /*
3317 * We can't use a memcpy() because the internal storage of the
3318 * indirection table is a u8 array while indir points to an
3319 * array of u32.
3320 *
3321 * Indirection table contains the FW Client IDs, so we need to
3322 * align the returned table to the Client ID of the leading RSS
3323 * queue.
3324 */
7850f63f
BH
3325 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
3326 indir[i] = ind_table[i] - bp->fp->cl_id;
619c5cb6 3327
ab532cf3
TH
3328 return 0;
3329}
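
The getter above (and the setter that follows) cannot simply memcpy() the table: the object stores FW client IDs in a u8 array while ethtool works with u32 ring indices, so each entry is rebased by the leading RSS queue's client ID on the way out and rebased back on the way in. A self-contained sketch of that conversion, with invented values for the table size and the leading client ID:

#include <stdint.h>
#include <stdio.h>

#define TBL_SIZE 8	/* stands in for T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
	uint8_t fw_table[TBL_SIZE] = { 17, 18, 19, 20, 17, 18, 19, 20 };
	uint32_t indir[TBL_SIZE];
	uint8_t leading_cl_id = 17;	/* hypothetical bp->fp->cl_id */
	unsigned int i;

	/* get: FW client IDs -> ring indices relative to the leading queue */
	for (i = 0; i < TBL_SIZE; i++)
		indir[i] = fw_table[i] - leading_cl_id;

	/* set: ring indices -> FW client IDs */
	for (i = 0; i < TBL_SIZE; i++)
		fw_table[i] = (uint8_t)(indir[i] + leading_cl_id);

	for (i = 0; i < TBL_SIZE; i++)
		printf("%u ", indir[i]);
	printf("\n");	/* prints: 0 1 2 3 0 1 2 3 */
	return 0;
}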
3330
7850f63f 3331static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
ab532cf3
TH
3332{
3333 struct bnx2x *bp = netdev_priv(dev);
3334 size_t i;
619c5cb6
VZ
3335
3336 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
619c5cb6
VZ
3337 /*
3338 * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
3339 * because the internal storage of the indirection table is a u8
3340 * array while indir points to an array of u32.
3341 *
3342 * Indirection table contains the FW Client IDs, so we need to
3343 * align the received table to the Client ID of the leading RSS
3344 * queue
3345 */
5d317c6a 3346 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
619c5cb6 3347 }
ab532cf3 3348
5d317c6a 3349 return bnx2x_config_rss_eth(bp, false);
ab532cf3
TH
3350}
3351
0e8d2ec5
MS
3352/**
3353 * bnx2x_get_channels - gets the number of RSS queues.
3354 *
3355 * @dev: net device
3356 * @channels: returns the number of max / current queues
3357 */
3358static void bnx2x_get_channels(struct net_device *dev,
3359 struct ethtool_channels *channels)
3360{
3361 struct bnx2x *bp = netdev_priv(dev);
3362
3363 channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
3364 channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
3365}
3366
3367/**
3368 * bnx2x_change_num_queues - change the number of RSS queues.
3369 *
3370 * @bp: bnx2x private structure
3371 *
3372 * Re-configure interrupt mode to get the new number of MSI-X
3373 * vectors and re-add NAPI objects.
3374 */
3375static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
3376{
0e8d2ec5 3377 bnx2x_disable_msi(bp);
55c11941
MS
3378 bp->num_ethernet_queues = num_rss;
3379 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
3380 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
0e8d2ec5 3381 bnx2x_set_int_mode(bp);
0e8d2ec5
MS
3382}
3383
3384/**
3385 * bnx2x_set_channels - sets the number of RSS queues.
3386 *
3387 * @dev: net device
3388 * @channels: includes the number of queues requested
3389 */
3390static int bnx2x_set_channels(struct net_device *dev,
3391 struct ethtool_channels *channels)
3392{
3393 struct bnx2x *bp = netdev_priv(dev);
3394
3395
3396 DP(BNX2X_MSG_ETHTOOL,
3397 "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
3398 channels->rx_count, channels->tx_count, channels->other_count,
3399 channels->combined_count);
3400
3401 /* We don't support separate rx / tx channels.
3402 * We don't allow setting 'other' channels.
3403 */
3404 if (channels->rx_count || channels->tx_count || channels->other_count
3405 || (channels->combined_count == 0) ||
3406 (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
3407 DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
3408 return -EINVAL;
3409 }
3410
3411 /* Check if there was a change in the active parameters */
3412 if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
3413 DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
3414 return 0;
3415 }
3416
3417 /* Set the requested number of queues in bp context.
3418 * Note that the actual number of queues created during load may be
3419 * less than requested if memory is low.
3420 */
3421 if (unlikely(!netif_running(dev))) {
3422 bnx2x_change_num_queues(bp, channels->combined_count);
3423 return 0;
3424 }
5d07d868 3425 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
0e8d2ec5
MS
3426 bnx2x_change_num_queues(bp, channels->combined_count);
3427 return bnx2x_nic_load(bp, LOAD_NORMAL);
3428}
3429
de0c62db
DK
3430static const struct ethtool_ops bnx2x_ethtool_ops = {
3431 .get_settings = bnx2x_get_settings,
3432 .set_settings = bnx2x_set_settings,
3433 .get_drvinfo = bnx2x_get_drvinfo,
3434 .get_regs_len = bnx2x_get_regs_len,
3435 .get_regs = bnx2x_get_regs,
07ba6af4
MS
3436 .get_dump_flag = bnx2x_get_dump_flag,
3437 .get_dump_data = bnx2x_get_dump_data,
3438 .set_dump = bnx2x_set_dump,
de0c62db
DK
3439 .get_wol = bnx2x_get_wol,
3440 .set_wol = bnx2x_set_wol,
3441 .get_msglevel = bnx2x_get_msglevel,
3442 .set_msglevel = bnx2x_set_msglevel,
3443 .nway_reset = bnx2x_nway_reset,
3444 .get_link = bnx2x_get_link,
3445 .get_eeprom_len = bnx2x_get_eeprom_len,
3446 .get_eeprom = bnx2x_get_eeprom,
3447 .set_eeprom = bnx2x_set_eeprom,
3448 .get_coalesce = bnx2x_get_coalesce,
3449 .set_coalesce = bnx2x_set_coalesce,
3450 .get_ringparam = bnx2x_get_ringparam,
3451 .set_ringparam = bnx2x_set_ringparam,
3452 .get_pauseparam = bnx2x_get_pauseparam,
3453 .set_pauseparam = bnx2x_set_pauseparam,
de0c62db
DK
3454 .self_test = bnx2x_self_test,
3455 .get_sset_count = bnx2x_get_sset_count,
3456 .get_strings = bnx2x_get_strings,
32d36134 3457 .set_phys_id = bnx2x_set_phys_id,
de0c62db 3458 .get_ethtool_stats = bnx2x_get_ethtool_stats,
ab532cf3 3459 .get_rxnfc = bnx2x_get_rxnfc,
5d317c6a 3460 .set_rxnfc = bnx2x_set_rxnfc,
7850f63f 3461 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
ab532cf3
TH
3462 .get_rxfh_indir = bnx2x_get_rxfh_indir,
3463 .set_rxfh_indir = bnx2x_set_rxfh_indir,
0e8d2ec5
MS
3464 .get_channels = bnx2x_get_channels,
3465 .set_channels = bnx2x_set_channels,
24ea818e
YM
3466 .get_module_info = bnx2x_get_module_info,
3467 .get_module_eeprom = bnx2x_get_module_eeprom,
e9939c80
YM
3468 .get_eee = bnx2x_get_eee,
3469 .set_eee = bnx2x_set_eee,
be53ce1e 3470 .get_ts_info = ethtool_op_get_ts_info,
de0c62db
DK
3471};
3472
005a07ba
AE
3473static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
3474 .get_settings = bnx2x_get_settings,
3475 .set_settings = bnx2x_set_settings,
3476 .get_drvinfo = bnx2x_get_drvinfo,
3477 .get_msglevel = bnx2x_get_msglevel,
3478 .set_msglevel = bnx2x_set_msglevel,
3479 .get_link = bnx2x_get_link,
3480 .get_coalesce = bnx2x_get_coalesce,
3481 .get_ringparam = bnx2x_get_ringparam,
3482 .set_ringparam = bnx2x_set_ringparam,
3483 .get_sset_count = bnx2x_get_sset_count,
3484 .get_strings = bnx2x_get_strings,
3485 .get_ethtool_stats = bnx2x_get_ethtool_stats,
3486 .get_rxnfc = bnx2x_get_rxnfc,
3487 .set_rxnfc = bnx2x_set_rxnfc,
3488 .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
3489 .get_rxfh_indir = bnx2x_get_rxfh_indir,
3490 .set_rxfh_indir = bnx2x_set_rxfh_indir,
3491 .get_channels = bnx2x_get_channels,
3492 .set_channels = bnx2x_set_channels,
3493};
3494
3495void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
de0c62db 3496{
005a07ba
AE
3497 if (IS_PF(bp))
3498 SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
3499 else /* vf */
3500 SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
de0c62db 3501}