drivers/net/e1000/e1000_ethtool.c
1 /*******************************************************************************
2
3
4 Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 2 of the License, or (at your option)
9 any later version.
10
11 This program is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc., 59
18 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19
20 The full GNU General Public License is included in this distribution in the
21 file called LICENSE.
22
23 Contact Information:
24 Linux NICS <linux.nics@intel.com>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 /* ethtool support for e1000 */
30
31 #include "e1000.h"
32
33 #include <asm/uaccess.h>
34
35 struct e1000_stats {
36 char stat_string[ETH_GSTRING_LEN];
37 int sizeof_stat;
38 int stat_offset;
39 };
40
41 #define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
42 offsetof(struct e1000_adapter, m)
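/* As a worked example of the macro above (illustrative only), the table
 * entry
 *
 *	{ "rx_packets", E1000_STAT(net_stats.rx_packets) }
 *
 * expands to
 *
 *	{ "rx_packets",
 *	  sizeof(((struct e1000_adapter *)0)->net_stats.rx_packets),
 *	  offsetof(struct e1000_adapter, net_stats.rx_packets) }
 *
 * i.e. each entry records the ethtool string plus the size and byte offset
 * of the backing field, which is all e1000_get_ethtool_stats() needs in
 * order to copy the value out later.
 */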
43 static const struct e1000_stats e1000_gstrings_stats[] = {
44 { "rx_packets", E1000_STAT(net_stats.rx_packets) },
45 { "tx_packets", E1000_STAT(net_stats.tx_packets) },
46 { "rx_bytes", E1000_STAT(net_stats.rx_bytes) },
47 { "tx_bytes", E1000_STAT(net_stats.tx_bytes) },
48 { "rx_errors", E1000_STAT(net_stats.rx_errors) },
49 { "tx_errors", E1000_STAT(net_stats.tx_errors) },
50 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
51 { "multicast", E1000_STAT(net_stats.multicast) },
52 { "collisions", E1000_STAT(net_stats.collisions) },
53 { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
54 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
55 { "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
56 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
57 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
58 { "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
59 { "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
60 { "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
61 { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
62 { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
63 { "tx_window_errors", E1000_STAT(net_stats.tx_window_errors) },
64 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
65 { "tx_deferred_ok", E1000_STAT(stats.dc) },
66 { "tx_single_coll_ok", E1000_STAT(stats.scc) },
67 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
68 { "tx_timeout_count", E1000_STAT(tx_timeout_count) },
69 { "rx_long_length_errors", E1000_STAT(stats.roc) },
70 { "rx_short_length_errors", E1000_STAT(stats.ruc) },
71 { "rx_align_errors", E1000_STAT(stats.algnerrc) },
72 { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
73 { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
74 { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
75 { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
76 { "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
77 { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
78 { "rx_long_byte_count", E1000_STAT(stats.gorcl) },
79 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
80 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
81 { "rx_header_split", E1000_STAT(rx_hdr_split) },
82 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
83 };
84
85 #define E1000_QUEUE_STATS_LEN 0
86 #define E1000_GLOBAL_STATS_LEN \
87 sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
88 #define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
89 static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
90 "Register test (offline)", "Eeprom test (offline)",
91 "Interrupt test (offline)", "Loopback test (offline)",
92 "Link test (on/offline)"
93 };
94 #define E1000_TEST_LEN sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN
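/* E1000_STATS_LEN and E1000_TEST_LEN are reported back to user space by
 * e1000_get_stats_count() and e1000_diag_test_count() below, so the ethtool
 * utility knows how many values and strings to expect.  A rough usage sketch
 * (ethX is a placeholder interface name):
 *
 *	ethtool -S ethX          # dumps the e1000_gstrings_stats values
 *	ethtool -t ethX offline  # runs the five tests named above
 */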
95
96 static int
97 e1000_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
98 {
99 struct e1000_adapter *adapter = netdev_priv(netdev);
100 struct e1000_hw *hw = &adapter->hw;
101
102 if (hw->media_type == e1000_media_type_copper) {
103
104 ecmd->supported = (SUPPORTED_10baseT_Half |
105 SUPPORTED_10baseT_Full |
106 SUPPORTED_100baseT_Half |
107 SUPPORTED_100baseT_Full |
108 SUPPORTED_1000baseT_Full|
109 SUPPORTED_Autoneg |
110 SUPPORTED_TP);
111
112 ecmd->advertising = ADVERTISED_TP;
113
114 if (hw->autoneg == 1) {
115 ecmd->advertising |= ADVERTISED_Autoneg;
116
117 /* the e1000 autoneg seems to match ethtool nicely */
118
119 ecmd->advertising |= hw->autoneg_advertised;
120 }
121
122 ecmd->port = PORT_TP;
123 ecmd->phy_address = hw->phy_addr;
124
125 if (hw->mac_type == e1000_82543)
126 ecmd->transceiver = XCVR_EXTERNAL;
127 else
128 ecmd->transceiver = XCVR_INTERNAL;
129
130 } else {
131 ecmd->supported = (SUPPORTED_1000baseT_Full |
132 SUPPORTED_FIBRE |
133 SUPPORTED_Autoneg);
134
135 ecmd->advertising = (ADVERTISED_1000baseT_Full |
136 ADVERTISED_FIBRE |
137 ADVERTISED_Autoneg);
138
139 ecmd->port = PORT_FIBRE;
140
141 if (hw->mac_type >= e1000_82545)
142 ecmd->transceiver = XCVR_INTERNAL;
143 else
144 ecmd->transceiver = XCVR_EXTERNAL;
145 }
146
147 if (netif_carrier_ok(adapter->netdev)) {
148
149 e1000_get_speed_and_duplex(hw, &adapter->link_speed,
150 &adapter->link_duplex);
151 ecmd->speed = adapter->link_speed;
152
153 /* unfortunately FULL_DUPLEX != DUPLEX_FULL
154 * and HALF_DUPLEX != DUPLEX_HALF */
155
156 if (adapter->link_duplex == FULL_DUPLEX)
157 ecmd->duplex = DUPLEX_FULL;
158 else
159 ecmd->duplex = DUPLEX_HALF;
160 } else {
161 ecmd->speed = -1;
162 ecmd->duplex = -1;
163 }
164
165 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
166 hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
167 return 0;
168 }
169
170 static int
171 e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
172 {
173 struct e1000_adapter *adapter = netdev_priv(netdev);
174 struct e1000_hw *hw = &adapter->hw;
175
176 /* When SoL/IDER sessions are active, autoneg/speed/duplex
177 * cannot be changed */
178 if (e1000_check_phy_reset_block(hw)) {
179 DPRINTK(DRV, ERR, "Cannot change link characteristics "
180 "when SoL/IDER is active.\n");
181 return -EINVAL;
182 }
183
184 if (ecmd->autoneg == AUTONEG_ENABLE) {
185 hw->autoneg = 1;
186 if (hw->media_type == e1000_media_type_fiber)
187 hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
188 ADVERTISED_FIBRE |
189 ADVERTISED_Autoneg;
190 else
191 hw->autoneg_advertised = ADVERTISED_10baseT_Half |
192 ADVERTISED_10baseT_Full |
193 ADVERTISED_100baseT_Half |
194 ADVERTISED_100baseT_Full |
195 ADVERTISED_1000baseT_Full|
196 ADVERTISED_Autoneg |
197 ADVERTISED_TP;
198 ecmd->advertising = hw->autoneg_advertised;
199 } else
200 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex))
201 return -EINVAL;
202
203 /* reset the link */
204
205 if (netif_running(adapter->netdev)) {
206 e1000_down(adapter);
207 e1000_reset(adapter);
208 e1000_up(adapter);
209 } else
210 e1000_reset(adapter);
211
212 return 0;
213 }
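/* Note on the forced-speed path above (illustration only, ethX is a
 * placeholder): e1000_set_spd_dplx(), implemented elsewhere in the driver,
 * is handed the sum ecmd->speed + ecmd->duplex.  Since DUPLEX_HALF is 0 and
 * DUPLEX_FULL is 1 in <linux/ethtool.h>, forcing 100 Mb/s full duplex, e.g.
 * via
 *
 *	ethtool -s ethX speed 100 duplex full autoneg off
 *
 * passes the value 101 (100 + DUPLEX_FULL), and every supported
 * speed/duplex pair maps to a distinct sum.
 */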
214
215 static void
216 e1000_get_pauseparam(struct net_device *netdev,
217 struct ethtool_pauseparam *pause)
218 {
219 struct e1000_adapter *adapter = netdev_priv(netdev);
220 struct e1000_hw *hw = &adapter->hw;
221
222 pause->autoneg =
223 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
224
225 if (hw->fc == e1000_fc_rx_pause)
226 pause->rx_pause = 1;
227 else if (hw->fc == e1000_fc_tx_pause)
228 pause->tx_pause = 1;
229 else if (hw->fc == e1000_fc_full) {
230 pause->rx_pause = 1;
231 pause->tx_pause = 1;
232 }
233 }
234
235 static int
236 e1000_set_pauseparam(struct net_device *netdev,
237 struct ethtool_pauseparam *pause)
238 {
239 struct e1000_adapter *adapter = netdev_priv(netdev);
240 struct e1000_hw *hw = &adapter->hw;
241
242 adapter->fc_autoneg = pause->autoneg;
243
244 if (pause->rx_pause && pause->tx_pause)
245 hw->fc = e1000_fc_full;
246 else if (pause->rx_pause && !pause->tx_pause)
247 hw->fc = e1000_fc_rx_pause;
248 else if (!pause->rx_pause && pause->tx_pause)
249 hw->fc = e1000_fc_tx_pause;
250 else if (!pause->rx_pause && !pause->tx_pause)
251 hw->fc = e1000_fc_none;
252
253 hw->original_fc = hw->fc;
254
255 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
256 if (netif_running(adapter->netdev)) {
257 e1000_down(adapter);
258 e1000_up(adapter);
259 } else
260 e1000_reset(adapter);
261 } else
262 return ((hw->media_type == e1000_media_type_fiber) ?
263 e1000_setup_link(hw) : e1000_force_mac_fc(hw));
264
265 return 0;
266 }
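/* The mapping implemented above, summarised (ethX is a placeholder):
 *
 *	ethtool -A ethX autoneg off rx on  tx on   ->  e1000_fc_full
 *	ethtool -A ethX autoneg off rx on  tx off  ->  e1000_fc_rx_pause
 *	ethtool -A ethX autoneg off rx off tx on   ->  e1000_fc_tx_pause
 *	ethtool -A ethX autoneg off rx off tx off  ->  e1000_fc_none
 *
 * With flow-control autoneg enabled, the requested mode is only a starting
 * point and the link is brought down and back up to renegotiate rather than
 * forced.
 */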
267
268 static uint32_t
269 e1000_get_rx_csum(struct net_device *netdev)
270 {
271 struct e1000_adapter *adapter = netdev_priv(netdev);
272 return adapter->rx_csum;
273 }
274
275 static int
276 e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
277 {
278 struct e1000_adapter *adapter = netdev_priv(netdev);
279 adapter->rx_csum = data;
280
281 if (netif_running(netdev)) {
282 e1000_down(adapter);
283 e1000_up(adapter);
284 } else
285 e1000_reset(adapter);
286 return 0;
287 }
288
289 static uint32_t
290 e1000_get_tx_csum(struct net_device *netdev)
291 {
292 return (netdev->features & NETIF_F_HW_CSUM) != 0;
293 }
294
295 static int
296 e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
297 {
298 struct e1000_adapter *adapter = netdev_priv(netdev);
299
300 if (adapter->hw.mac_type < e1000_82543) {
301 if (!data)
302 return -EINVAL;
303 return 0;
304 }
305
306 if (data)
307 netdev->features |= NETIF_F_HW_CSUM;
308 else
309 netdev->features &= ~NETIF_F_HW_CSUM;
310
311 return 0;
312 }
313
314 #ifdef NETIF_F_TSO
315 static int
316 e1000_set_tso(struct net_device *netdev, uint32_t data)
317 {
318 struct e1000_adapter *adapter = netdev_priv(netdev);
319 if ((adapter->hw.mac_type < e1000_82544) ||
320 (adapter->hw.mac_type == e1000_82547))
321 return data ? -EINVAL : 0;
322
323 if (data)
324 netdev->features |= NETIF_F_TSO;
325 else
326 netdev->features &= ~NETIF_F_TSO;
327
328 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
329 adapter->tso_force = TRUE;
330 return 0;
331 }
332 #endif /* NETIF_F_TSO */
333
334 static uint32_t
335 e1000_get_msglevel(struct net_device *netdev)
336 {
337 struct e1000_adapter *adapter = netdev_priv(netdev);
338 return adapter->msg_enable;
339 }
340
341 static void
342 e1000_set_msglevel(struct net_device *netdev, uint32_t data)
343 {
344 struct e1000_adapter *adapter = netdev_priv(netdev);
345 adapter->msg_enable = data;
346 }
347
348 static int
349 e1000_get_regs_len(struct net_device *netdev)
350 {
351 #define E1000_REGS_LEN 32
352 return E1000_REGS_LEN * sizeof(uint32_t);
353 }
354
355 static void
356 e1000_get_regs(struct net_device *netdev,
357 struct ethtool_regs *regs, void *p)
358 {
359 struct e1000_adapter *adapter = netdev_priv(netdev);
360 struct e1000_hw *hw = &adapter->hw;
361 uint32_t *regs_buff = p;
362 uint16_t phy_data;
363
364 memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
365
366 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
367
368 regs_buff[0] = E1000_READ_REG(hw, CTRL);
369 regs_buff[1] = E1000_READ_REG(hw, STATUS);
370
371 regs_buff[2] = E1000_READ_REG(hw, RCTL);
372 regs_buff[3] = E1000_READ_REG(hw, RDLEN);
373 regs_buff[4] = E1000_READ_REG(hw, RDH);
374 regs_buff[5] = E1000_READ_REG(hw, RDT);
375 regs_buff[6] = E1000_READ_REG(hw, RDTR);
376
377 regs_buff[7] = E1000_READ_REG(hw, TCTL);
378 regs_buff[8] = E1000_READ_REG(hw, TDLEN);
379 regs_buff[9] = E1000_READ_REG(hw, TDH);
380 regs_buff[10] = E1000_READ_REG(hw, TDT);
381 regs_buff[11] = E1000_READ_REG(hw, TIDV);
382
383 regs_buff[12] = adapter->hw.phy_type; /* PHY type (IGP=1, M88=0) */
384 if (hw->phy_type == e1000_phy_igp) {
385 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
386 IGP01E1000_PHY_AGC_A);
387 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
388 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
389 regs_buff[13] = (uint32_t)phy_data; /* cable length */
390 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
391 IGP01E1000_PHY_AGC_B);
392 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
393 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
394 regs_buff[14] = (uint32_t)phy_data; /* cable length */
395 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
396 IGP01E1000_PHY_AGC_C);
397 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
398 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
399 regs_buff[15] = (uint32_t)phy_data; /* cable length */
400 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
401 IGP01E1000_PHY_AGC_D);
402 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
403 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
404 regs_buff[16] = (uint32_t)phy_data; /* cable length */
405 regs_buff[17] = 0; /* extended 10bt distance (not needed) */
406 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
407 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
408 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
409 regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
410 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
411 IGP01E1000_PHY_PCS_INIT_REG);
412 e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
413 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
414 regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
415 regs_buff[20] = 0; /* polarity correction enabled (always) */
416 regs_buff[22] = 0; /* phy receive errors (unavailable) */
417 regs_buff[23] = regs_buff[18]; /* mdix mode */
418 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
419 } else {
420 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
421 regs_buff[13] = (uint32_t)phy_data; /* cable length */
422 regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
423 regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
424 regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
425 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
426 regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
427 regs_buff[18] = regs_buff[13]; /* cable polarity */
428 regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
429 regs_buff[20] = regs_buff[17]; /* polarity correction */
430 /* phy receive errors */
431 regs_buff[22] = adapter->phy_stats.receive_errors;
432 regs_buff[23] = regs_buff[13]; /* mdix mode */
433 }
434 regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
435 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
436 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
437 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
438 if (hw->mac_type >= e1000_82540 &&
439 hw->media_type == e1000_media_type_copper) {
440 regs_buff[26] = E1000_READ_REG(hw, MANC);
441 }
442 }
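/* Layout of the dump produced above, for reference: regs_buff[0..11] are raw
 * MAC registers (CTRL, STATUS, RCTL, RDLEN, RDH, RDT, RDTR, TCTL, TDLEN,
 * TDH, TDT, TIDV), regs_buff[12..25] are values derived from the PHY, and
 * regs_buff[26] is MANC on 82540-and-later copper parts.  regs->version
 * packs (1 << 24) | (revision_id << 16) | device_id, so a rev 0x02 part with
 * device id 0x100E would report 0x0102100E (illustrative values).
 */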
443
444 static int
445 e1000_get_eeprom_len(struct net_device *netdev)
446 {
447 struct e1000_adapter *adapter = netdev_priv(netdev);
448 return adapter->hw.eeprom.word_size * 2;
449 }
450
451 static int
452 e1000_get_eeprom(struct net_device *netdev,
453 struct ethtool_eeprom *eeprom, uint8_t *bytes)
454 {
455 struct e1000_adapter *adapter = netdev_priv(netdev);
456 struct e1000_hw *hw = &adapter->hw;
457 uint16_t *eeprom_buff;
458 int first_word, last_word;
459 int ret_val = 0;
460 uint16_t i;
461
462 if (eeprom->len == 0)
463 return -EINVAL;
464
465 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
466
467 first_word = eeprom->offset >> 1;
468 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
469
470 eeprom_buff = kmalloc(sizeof(uint16_t) *
471 (last_word - first_word + 1), GFP_KERNEL);
472 if (!eeprom_buff)
473 return -ENOMEM;
474
475 if (hw->eeprom.type == e1000_eeprom_spi)
476 ret_val = e1000_read_eeprom(hw, first_word,
477 last_word - first_word + 1,
478 eeprom_buff);
479 else {
480 for (i = 0; i < last_word - first_word + 1; i++)
481 if ((ret_val = e1000_read_eeprom(hw, first_word + i, 1,
482 &eeprom_buff[i])))
483 break;
484 }
485
486 /* Device's eeprom is always little-endian, word addressable */
487 for (i = 0; i < last_word - first_word + 1; i++)
488 le16_to_cpus(&eeprom_buff[i]);
489
490 memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
491 eeprom->len);
492 kfree(eeprom_buff);
493
494 return ret_val;
495 }
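/* Worked example of the word arithmetic above (illustrative only): a request
 * with eeprom->offset = 3 and eeprom->len = 4 gives first_word = 1 and
 * last_word = (3 + 4 - 1) >> 1 = 3, so words 1..3 (six bytes) are read from
 * the EEPROM, and the memcpy() then copies the four requested bytes starting
 * at byte (offset & 1) = 1 of the CPU-order buffer.
 */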
496
497 static int
498 e1000_set_eeprom(struct net_device *netdev,
499 struct ethtool_eeprom *eeprom, uint8_t *bytes)
500 {
501 struct e1000_adapter *adapter = netdev_priv(netdev);
502 struct e1000_hw *hw = &adapter->hw;
503 uint16_t *eeprom_buff;
504 void *ptr;
505 int max_len, first_word, last_word, ret_val = 0;
506 uint16_t i;
507
508 if (eeprom->len == 0)
509 return -EOPNOTSUPP;
510
511 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
512 return -EFAULT;
513
514 max_len = hw->eeprom.word_size * 2;
515
516 first_word = eeprom->offset >> 1;
517 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
518 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
519 if (!eeprom_buff)
520 return -ENOMEM;
521
522 ptr = (void *)eeprom_buff;
523
524 if (eeprom->offset & 1) {
525 /* need read/modify/write of first changed EEPROM word */
526 /* only the second byte of the word is being modified */
527 ret_val = e1000_read_eeprom(hw, first_word, 1,
528 &eeprom_buff[0]);
529 ptr++;
530 }
531 if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
532 /* need read/modify/write of last changed EEPROM word */
533 /* only the first byte of the word is being modified */
534 ret_val = e1000_read_eeprom(hw, last_word, 1,
535 &eeprom_buff[last_word - first_word]);
536 }
537
538 /* Device's eeprom is always little-endian, word addressable */
539 for (i = 0; i < last_word - first_word + 1; i++)
540 le16_to_cpus(&eeprom_buff[i]);
541
542 memcpy(ptr, bytes, eeprom->len);
543
544 for (i = 0; i < last_word - first_word + 1; i++)
545 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
546
547 ret_val = e1000_write_eeprom(hw, first_word,
548 last_word - first_word + 1, eeprom_buff);
549
550 /* Update the checksum over the first part of the EEPROM if needed
551 * and flush shadow RAM for 82573 controllers */
552 if ((ret_val == 0) && ((first_word <= EEPROM_CHECKSUM_REG) ||
553 (hw->mac_type == e1000_82573)))
554 e1000_update_eeprom_checksum(hw);
555
556 kfree(eeprom_buff);
557 return ret_val;
558 }
559
560 static void
561 e1000_get_drvinfo(struct net_device *netdev,
562 struct ethtool_drvinfo *drvinfo)
563 {
564 struct e1000_adapter *adapter = netdev_priv(netdev);
565 char firmware_version[32];
566 uint16_t eeprom_data;
567
568 strncpy(drvinfo->driver, e1000_driver_name, 32);
569 strncpy(drvinfo->version, e1000_driver_version, 32);
570
571 /* EEPROM image version # is reported as firmware version # for
572 * 8257{1|2|3} controllers */
573 e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
574 switch (adapter->hw.mac_type) {
575 case e1000_82571:
576 case e1000_82572:
577 case e1000_82573:
578 case e1000_80003es2lan:
579 sprintf(firmware_version, "%d.%d-%d",
580 (eeprom_data & 0xF000) >> 12,
581 (eeprom_data & 0x0FF0) >> 4,
582 eeprom_data & 0x000F);
583 break;
584 default:
585 sprintf(firmware_version, "N/A");
586 }
587
588 strncpy(drvinfo->fw_version, firmware_version, 32);
589 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
590 drvinfo->n_stats = E1000_STATS_LEN;
591 drvinfo->testinfo_len = E1000_TEST_LEN;
592 drvinfo->regdump_len = e1000_get_regs_len(netdev);
593 drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
594 }
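/* Example of the firmware-version decoding above (illustrative value): an
 * EEPROM word of 0x1234 at offset 5 would be reported as "1.35-4", i.e.
 * major = (0x1234 & 0xF000) >> 12 = 1, minor = (0x1234 & 0x0FF0) >> 4 = 35,
 * build = 0x1234 & 0x000F = 4.
 */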
595
596 static void
597 e1000_get_ringparam(struct net_device *netdev,
598 struct ethtool_ringparam *ring)
599 {
600 struct e1000_adapter *adapter = netdev_priv(netdev);
601 e1000_mac_type mac_type = adapter->hw.mac_type;
602 struct e1000_tx_ring *txdr = adapter->tx_ring;
603 struct e1000_rx_ring *rxdr = adapter->rx_ring;
604
605 ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
606 E1000_MAX_82544_RXD;
607 ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
608 E1000_MAX_82544_TXD;
609 ring->rx_mini_max_pending = 0;
610 ring->rx_jumbo_max_pending = 0;
611 ring->rx_pending = rxdr->count;
612 ring->tx_pending = txdr->count;
613 ring->rx_mini_pending = 0;
614 ring->rx_jumbo_pending = 0;
615 }
616
617 static int
618 e1000_set_ringparam(struct net_device *netdev,
619 struct ethtool_ringparam *ring)
620 {
621 struct e1000_adapter *adapter = netdev_priv(netdev);
622 e1000_mac_type mac_type = adapter->hw.mac_type;
623 struct e1000_tx_ring *txdr, *tx_old, *tx_new;
624 struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
625 int i, err, tx_ring_size, rx_ring_size;
626
627 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
628 return -EINVAL;
629
630 tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
631 rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
632
633 if (netif_running(adapter->netdev))
634 e1000_down(adapter);
635
636 tx_old = adapter->tx_ring;
637 rx_old = adapter->rx_ring;
638
639 adapter->tx_ring = kmalloc(tx_ring_size, GFP_KERNEL);
640 if (!adapter->tx_ring) {
641 err = -ENOMEM;
642 goto err_setup_rx;
643 }
644 memset(adapter->tx_ring, 0, tx_ring_size);
645
646 adapter->rx_ring = kmalloc(rx_ring_size, GFP_KERNEL);
647 if (!adapter->rx_ring) {
648 kfree(adapter->tx_ring);
649 err = -ENOMEM;
650 goto err_setup_rx;
651 }
652 memset(adapter->rx_ring, 0, rx_ring_size);
653
654 txdr = adapter->tx_ring;
655 rxdr = adapter->rx_ring;
656
657 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
658 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
659 E1000_MAX_RXD : E1000_MAX_82544_RXD));
660 E1000_ROUNDUP(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
661
662 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
663 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
664 E1000_MAX_TXD : E1000_MAX_82544_TXD));
665 E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
666
667 for (i = 0; i < adapter->num_tx_queues; i++)
668 txdr[i].count = txdr->count;
669 for (i = 0; i < adapter->num_rx_queues; i++)
670 rxdr[i].count = rxdr->count;
671
672 if (netif_running(adapter->netdev)) {
673 /* Try to get new resources before deleting old */
674 if ((err = e1000_setup_all_rx_resources(adapter)))
675 goto err_setup_rx;
676 if ((err = e1000_setup_all_tx_resources(adapter)))
677 goto err_setup_tx;
678
679 /* save the new, restore the old in order to free it,
680 * then restore the new back again */
681
682 rx_new = adapter->rx_ring;
683 tx_new = adapter->tx_ring;
684 adapter->rx_ring = rx_old;
685 adapter->tx_ring = tx_old;
686 e1000_free_all_rx_resources(adapter);
687 e1000_free_all_tx_resources(adapter);
688 kfree(tx_old);
689 kfree(rx_old);
690 adapter->rx_ring = rx_new;
691 adapter->tx_ring = tx_new;
692 if ((err = e1000_up(adapter)))
693 return err;
694 }
695
696 return 0;
697 err_setup_tx:
698 e1000_free_all_rx_resources(adapter);
699 err_setup_rx:
700 adapter->rx_ring = rx_old;
701 adapter->tx_ring = tx_old;
702 e1000_up(adapter);
703 return err;
704 }
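/* Sizing note for the function above: the requested counts are first raised
 * to at least E1000_MIN_RXD/E1000_MIN_TXD, capped at the per-MAC maximum,
 * and then rounded up by E1000_ROUNDUP() to the next
 * REQ_*_DESCRIPTOR_MULTIPLE boundary, so the ring actually allocated can be
 * slightly larger than what user space asked for with, e.g.,
 * "ethtool -G ethX rx 1000 tx 1000" (ethX and the counts are placeholders).
 */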
705
706 #define REG_PATTERN_TEST(R, M, W) \
707 { \
708 uint32_t pat, value; \
709 uint32_t test[] = \
710 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
711 for (pat = 0; pat < sizeof(test)/sizeof(test[0]); pat++) { \
712 E1000_WRITE_REG(&adapter->hw, R, (test[pat] & W)); \
713 value = E1000_READ_REG(&adapter->hw, R); \
714 if (value != (test[pat] & W & M)) { \
715 DPRINTK(DRV, ERR, "pattern test reg %04X failed: got " \
716 "0x%08X expected 0x%08X\n", \
717 E1000_##R, value, (test[pat] & W & M)); \
718 *data = (adapter->hw.mac_type < e1000_82543) ? \
719 E1000_82542_##R : E1000_##R; \
720 return 1; \
721 } \
722 } \
723 }
724
725 #define REG_SET_AND_CHECK(R, M, W) \
726 { \
727 uint32_t value; \
728 E1000_WRITE_REG(&adapter->hw, R, W & M); \
729 value = E1000_READ_REG(&adapter->hw, R); \
730 if ((W & M) != (value & M)) { \
731 DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X "\
732 "expected 0x%08X\n", E1000_##R, (value & M), (W & M)); \
733 *data = (adapter->hw.mac_type < e1000_82543) ? \
734 E1000_82542_##R : E1000_##R; \
735 return 1; \
736 } \
737 }
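/* Both macros above expect to be expanded inside a function that has
 * "adapter" and "data" in scope and returns int.  For example,
 *
 *	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
 *
 * writes each of the four patterns (masked with the write mask W) to RDTR,
 * reads the register back, and requires the result to equal
 * (pattern & W & M); on a mismatch it records the failing register offset
 * in *data and returns 1 from the enclosing test function.
 */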
738
739 static int
740 e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
741 {
742 uint32_t value, before, after;
743 uint32_t i, toggle;
744
745 /* The status register is Read Only, so a write should fail.
746 * Some bits that get toggled are ignored.
747 */
748 switch (adapter->hw.mac_type) {
749 /* there are several bits on newer hardware that are r/w */
750 case e1000_82571:
751 case e1000_82572:
752 case e1000_80003es2lan:
753 toggle = 0x7FFFF3FF;
754 break;
755 case e1000_82573:
756 toggle = 0x7FFFF033;
757 break;
758 default:
759 toggle = 0xFFFFF833;
760 break;
761 }
762
763 before = E1000_READ_REG(&adapter->hw, STATUS);
764 value = (E1000_READ_REG(&adapter->hw, STATUS) & toggle);
765 E1000_WRITE_REG(&adapter->hw, STATUS, toggle);
766 after = E1000_READ_REG(&adapter->hw, STATUS) & toggle;
767 if (value != after) {
768 DPRINTK(DRV, ERR, "failed STATUS register test got: "
769 "0x%08X expected: 0x%08X\n", after, value);
770 *data = 1;
771 return 1;
772 }
773 /* restore previous status */
774 E1000_WRITE_REG(&adapter->hw, STATUS, before);
775
776 REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
777 REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
778 REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
779 REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);
780 REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
781 REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
782 REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
783 REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
784 REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
785 REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
786 REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
787 REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
788 REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
789 REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);
790
791 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);
792 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0x003FFFFB);
793 REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
794
795 if (adapter->hw.mac_type >= e1000_82543) {
796
797 REG_SET_AND_CHECK(RCTL, 0x06DFB3FE, 0xFFFFFFFF);
798 REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
799 REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
800 REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
801 REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
802
803 for (i = 0; i < E1000_RAR_ENTRIES; i++) {
804 REG_PATTERN_TEST(RA + ((i << 1) << 2), 0xFFFFFFFF,
805 0xFFFFFFFF);
806 REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
807 0xFFFFFFFF);
808 }
809
810 } else {
811
812 REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
813 REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
814 REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
815 REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
816
817 }
818
819 for (i = 0; i < E1000_MC_TBL_SIZE; i++)
820 REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);
821
822 *data = 0;
823 return 0;
824 }
825
826 static int
827 e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
828 {
829 uint16_t temp;
830 uint16_t checksum = 0;
831 uint16_t i;
832
833 *data = 0;
834 /* Read and add up the contents of the EEPROM */
835 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
836 if ((e1000_read_eeprom(&adapter->hw, i, 1, &temp)) < 0) {
837 *data = 1;
838 break;
839 }
840 checksum += temp;
841 }
842
843 /* If the checksum is not correct, report an error; otherwise the test passed */
844 if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
845 *data = 2;
846
847 return *data;
848 }
849
850 static irqreturn_t
851 e1000_test_intr(int irq,
852 void *data,
853 struct pt_regs *regs)
854 {
855 struct net_device *netdev = (struct net_device *) data;
856 struct e1000_adapter *adapter = netdev_priv(netdev);
857
858 adapter->test_icr |= E1000_READ_REG(&adapter->hw, ICR);
859
860 return IRQ_HANDLED;
861 }
862
863 static int
864 e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
865 {
866 struct net_device *netdev = adapter->netdev;
867 uint32_t mask, i=0, shared_int = TRUE;
868 uint32_t irq = adapter->pdev->irq;
869
870 *data = 0;
871
872 /* Hook up test interrupt handler just for this test */
873 if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
874 shared_int = FALSE;
875 } else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
876 netdev->name, netdev)){
877 *data = 1;
878 return -1;
879 }
880
881 /* Disable all the interrupts */
882 E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
883 msec_delay(10);
884
885 /* Test each interrupt */
886 for (; i < 10; i++) {
887
888 /* Interrupt to test */
889 mask = 1 << i;
890
891 if (!shared_int) {
892 /* Disable the interrupt to be reported in
893 * the cause register and then force the same
894 * interrupt and see if one gets posted. If
895 * an interrupt was posted to the bus, the
896 * test failed.
897 */
898 adapter->test_icr = 0;
899 E1000_WRITE_REG(&adapter->hw, IMC, mask);
900 E1000_WRITE_REG(&adapter->hw, ICS, mask);
901 msec_delay(10);
902
903 if (adapter->test_icr & mask) {
904 *data = 3;
905 break;
906 }
907 }
908
909 /* Enable the interrupt to be reported in
910 * the cause register and then force the same
911 * interrupt and see if one gets posted. If
912 * an interrupt was not posted to the bus, the
913 * test failed.
914 */
915 adapter->test_icr = 0;
916 E1000_WRITE_REG(&adapter->hw, IMS, mask);
917 E1000_WRITE_REG(&adapter->hw, ICS, mask);
918 msec_delay(10);
919
920 if (!(adapter->test_icr & mask)) {
921 *data = 4;
922 break;
923 }
924
925 if (!shared_int) {
926 /* Disable the other interrupts to be reported in
927 * the cause register and then force the other
928 * interrupts and see if any get posted. If
929 * an interrupt was posted to the bus, the
930 * test failed.
931 */
932 adapter->test_icr = 0;
933 E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
934 E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
935 msec_delay(10);
936
937 if (adapter->test_icr) {
938 *data = 5;
939 break;
940 }
941 }
942 }
943
944 /* Disable all the interrupts */
945 E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
946 msec_delay(10);
947
948 /* Unhook test interrupt handler */
949 free_irq(irq, netdev);
950
951 return *data;
952 }
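/* Failure codes used above: *data = 1 means the test IRQ could not be
 * requested, 3 means a masked interrupt was still reported, 4 means an
 * unmasked interrupt was never reported, and 5 means other interrupt causes
 * fired when only the one under test was forced (the shared-IRQ case skips
 * the masked checks).
 */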
953
954 static void
955 e1000_free_desc_rings(struct e1000_adapter *adapter)
956 {
957 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
958 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
959 struct pci_dev *pdev = adapter->pdev;
960 int i;
961
962 if (txdr->desc && txdr->buffer_info) {
963 for (i = 0; i < txdr->count; i++) {
964 if (txdr->buffer_info[i].dma)
965 pci_unmap_single(pdev, txdr->buffer_info[i].dma,
966 txdr->buffer_info[i].length,
967 PCI_DMA_TODEVICE);
968 if (txdr->buffer_info[i].skb)
969 dev_kfree_skb(txdr->buffer_info[i].skb);
970 }
971 }
972
973 if (rxdr->desc && rxdr->buffer_info) {
974 for (i = 0; i < rxdr->count; i++) {
975 if (rxdr->buffer_info[i].dma)
976 pci_unmap_single(pdev, rxdr->buffer_info[i].dma,
977 rxdr->buffer_info[i].length,
978 PCI_DMA_FROMDEVICE);
979 if (rxdr->buffer_info[i].skb)
980 dev_kfree_skb(rxdr->buffer_info[i].skb);
981 }
982 }
983
984 if (txdr->desc) {
985 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
986 txdr->desc = NULL;
987 }
988 if (rxdr->desc) {
989 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
990 rxdr->desc = NULL;
991 }
992
993 kfree(txdr->buffer_info);
994 txdr->buffer_info = NULL;
995 kfree(rxdr->buffer_info);
996 rxdr->buffer_info = NULL;
997
998 return;
999 }
1000
1001 static int
1002 e1000_setup_desc_rings(struct e1000_adapter *adapter)
1003 {
1004 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
1005 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
1006 struct pci_dev *pdev = adapter->pdev;
1007 uint32_t rctl;
1008 int size, i, ret_val;
1009
1010 /* Setup Tx descriptor ring and Tx buffers */
1011
1012 if (!txdr->count)
1013 txdr->count = E1000_DEFAULT_TXD;
1014
1015 size = txdr->count * sizeof(struct e1000_buffer);
1016 if (!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1017 ret_val = 1;
1018 goto err_nomem;
1019 }
1020 memset(txdr->buffer_info, 0, size);
1021
1022 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1023 E1000_ROUNDUP(txdr->size, 4096);
1024 if (!(txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma))) {
1025 ret_val = 2;
1026 goto err_nomem;
1027 }
1028 memset(txdr->desc, 0, txdr->size);
1029 txdr->next_to_use = txdr->next_to_clean = 0;
1030
1031 E1000_WRITE_REG(&adapter->hw, TDBAL,
1032 ((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
1033 E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
1034 E1000_WRITE_REG(&adapter->hw, TDLEN,
1035 txdr->count * sizeof(struct e1000_tx_desc));
1036 E1000_WRITE_REG(&adapter->hw, TDH, 0);
1037 E1000_WRITE_REG(&adapter->hw, TDT, 0);
1038 E1000_WRITE_REG(&adapter->hw, TCTL,
1039 E1000_TCTL_PSP | E1000_TCTL_EN |
1040 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1041 E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1042
1043 for (i = 0; i < txdr->count; i++) {
1044 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
1045 struct sk_buff *skb;
1046 unsigned int size = 1024;
1047
1048 if (!(skb = alloc_skb(size, GFP_KERNEL))) {
1049 ret_val = 3;
1050 goto err_nomem;
1051 }
1052 skb_put(skb, size);
1053 txdr->buffer_info[i].skb = skb;
1054 txdr->buffer_info[i].length = skb->len;
1055 txdr->buffer_info[i].dma =
1056 pci_map_single(pdev, skb->data, skb->len,
1057 PCI_DMA_TODEVICE);
1058 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
1059 tx_desc->lower.data = cpu_to_le32(skb->len);
1060 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
1061 E1000_TXD_CMD_IFCS |
1062 E1000_TXD_CMD_RPS);
1063 tx_desc->upper.data = 0;
1064 }
1065
1066 /* Setup Rx descriptor ring and Rx buffers */
1067
1068 if (!rxdr->count)
1069 rxdr->count = E1000_DEFAULT_RXD;
1070
1071 size = rxdr->count * sizeof(struct e1000_buffer);
1072 if (!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
1073 ret_val = 4;
1074 goto err_nomem;
1075 }
1076 memset(rxdr->buffer_info, 0, size);
1077
1078 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1079 if (!(rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma))) {
1080 ret_val = 5;
1081 goto err_nomem;
1082 }
1083 memset(rxdr->desc, 0, rxdr->size);
1084 rxdr->next_to_use = rxdr->next_to_clean = 0;
1085
1086 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1087 E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
1088 E1000_WRITE_REG(&adapter->hw, RDBAL,
1089 ((uint64_t) rxdr->dma & 0xFFFFFFFF));
1090 E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
1091 E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
1092 E1000_WRITE_REG(&adapter->hw, RDH, 0);
1093 E1000_WRITE_REG(&adapter->hw, RDT, 0);
1094 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1095 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1096 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1097 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1098
1099 for (i = 0; i < rxdr->count; i++) {
1100 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1101 struct sk_buff *skb;
1102
1103 if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
1104 GFP_KERNEL))) {
1105 ret_val = 6;
1106 goto err_nomem;
1107 }
1108 skb_reserve(skb, NET_IP_ALIGN);
1109 rxdr->buffer_info[i].skb = skb;
1110 rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
1111 rxdr->buffer_info[i].dma =
1112 pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
1113 PCI_DMA_FROMDEVICE);
1114 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
1115 memset(skb->data, 0x00, skb->len);
1116 }
1117
1118 return 0;
1119
1120 err_nomem:
1121 e1000_free_desc_rings(adapter);
1122 return ret_val;
1123 }
1124
1125 static void
1126 e1000_phy_disable_receiver(struct e1000_adapter *adapter)
1127 {
1128 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1129 e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
1130 e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
1131 e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
1132 e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
1133 }
1134
1135 static void
1136 e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
1137 {
1138 uint16_t phy_reg;
1139
1140 /* Because we reset the PHY above, we need to re-force TX_CLK in the
1141 * Extended PHY Specific Control Register to 25MHz clock. This
1142 * value defaults back to a 2.5MHz clock when the PHY is reset.
1143 */
1144 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
1145 phy_reg |= M88E1000_EPSCR_TX_CLK_25;
1146 e1000_write_phy_reg(&adapter->hw,
1147 M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
1148
1149 /* In addition, because of the s/w reset above, we need to enable
1150 * CRS on TX. This must be set for both full and half duplex
1151 * operation.
1152 */
1153 e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
1154 phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
1155 e1000_write_phy_reg(&adapter->hw,
1156 M88E1000_PHY_SPEC_CTRL, phy_reg);
1157 }
1158
1159 static int
1160 e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1161 {
1162 uint32_t ctrl_reg;
1163 uint16_t phy_reg;
1164
1165 /* Setup the Device Control Register for PHY loopback test. */
1166
1167 ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
1168 ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */
1169 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1170 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1171 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
1172 E1000_CTRL_FD); /* Force Duplex to FULL */
1173
1174 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
1175
1176 /* Read the PHY Specific Control Register (0x10) */
1177 e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
1178
1179 /* Clear Auto-Crossover bits in PHY Specific Control Register
1180 * (bits 6:5).
1181 */
1182 phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
1183 e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
1184
1185 /* Perform software reset on the PHY */
1186 e1000_phy_reset(&adapter->hw);
1187
1188 /* Have to setup TX_CLK and TX_CRS after software reset */
1189 e1000_phy_reset_clk_and_crs(adapter);
1190
1191 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8100);
1192
1193 /* Wait for reset to complete. */
1194 udelay(500);
1195
1196 /* Have to setup TX_CLK and TX_CRS after software reset */
1197 e1000_phy_reset_clk_and_crs(adapter);
1198
1199 /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1200 e1000_phy_disable_receiver(adapter);
1201
1202 /* Set the loopback bit in the PHY control register. */
1203 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1204 phy_reg |= MII_CR_LOOPBACK;
1205 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
1206
1207 /* Setup TX_CLK and TX_CRS one more time. */
1208 e1000_phy_reset_clk_and_crs(adapter);
1209
1210 /* Check Phy Configuration */
1211 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1212 if (phy_reg != 0x4100)
1213 return 9;
1214
1215 e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
1216 if (phy_reg != 0x0070)
1217 return 10;
1218
1219 e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
1220 if (phy_reg != 0x001A)
1221 return 11;
1222
1223 return 0;
1224 }
1225
1226 static int
1227 e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1228 {
1229 uint32_t ctrl_reg = 0;
1230 uint32_t stat_reg = 0;
1231
1232 adapter->hw.autoneg = FALSE;
1233
1234 if (adapter->hw.phy_type == e1000_phy_m88) {
1235 /* Auto-MDI/MDIX Off */
1236 e1000_write_phy_reg(&adapter->hw,
1237 M88E1000_PHY_SPEC_CTRL, 0x0808);
1238 /* reset to update Auto-MDI/MDIX */
1239 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x9140);
1240 /* autoneg off */
1241 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x8140);
1242 } else if (adapter->hw.phy_type == e1000_phy_gg82563) {
1243 e1000_write_phy_reg(&adapter->hw,
1244 GG82563_PHY_KMRN_MODE_CTRL,
1245 0x1CE);
1246 }
1247 /* force 1000, set loopback */
1248 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, 0x4140);
1249
1250 /* Now set up the MAC to the same speed/duplex as the PHY. */
1251 ctrl_reg = E1000_READ_REG(&adapter->hw, CTRL);
1252 ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1253 ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1254 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1255 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1256 E1000_CTRL_FD); /* Force Duplex to FULL */
1257
1258 if (adapter->hw.media_type == e1000_media_type_copper &&
1259 adapter->hw.phy_type == e1000_phy_m88) {
1260 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1261 } else {
1262 /* Set the ILOS bit on the fiber NIC if a half
1263 * duplex link is detected. */
1264 stat_reg = E1000_READ_REG(&adapter->hw, STATUS);
1265 if ((stat_reg & E1000_STATUS_FD) == 0)
1266 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1267 }
1268
1269 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl_reg);
1270
1271 /* Disable the receiver on the PHY so that the PHY does not begin to
1272 * autoneg when a cable is plugged back into the NIC.
1273 */
1274 if (adapter->hw.phy_type == e1000_phy_m88)
1275 e1000_phy_disable_receiver(adapter);
1276
1277 udelay(500);
1278
1279 return 0;
1280 }
1281
1282 static int
1283 e1000_set_phy_loopback(struct e1000_adapter *adapter)
1284 {
1285 uint16_t phy_reg = 0;
1286 uint16_t count = 0;
1287
1288 switch (adapter->hw.mac_type) {
1289 case e1000_82543:
1290 if (adapter->hw.media_type == e1000_media_type_copper) {
1291 /* Attempt to setup Loopback mode on Non-integrated PHY.
1292 * Some PHY registers get corrupted at random, so
1293 * attempt this 10 times.
1294 */
1295 while (e1000_nonintegrated_phy_loopback(adapter) &&
1296 count++ < 10);
1297 if (count < 11)
1298 return 0;
1299 }
1300 break;
1301
1302 case e1000_82544:
1303 case e1000_82540:
1304 case e1000_82545:
1305 case e1000_82545_rev_3:
1306 case e1000_82546:
1307 case e1000_82546_rev_3:
1308 case e1000_82541:
1309 case e1000_82541_rev_2:
1310 case e1000_82547:
1311 case e1000_82547_rev_2:
1312 case e1000_82571:
1313 case e1000_82572:
1314 case e1000_82573:
1315 case e1000_80003es2lan:
1316 return e1000_integrated_phy_loopback(adapter);
1317 break;
1318
1319 default:
1320 /* Default PHY loopback work is to read the MII
1321 * control register and assert bit 14 (loopback mode).
1322 */
1323 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_reg);
1324 phy_reg |= MII_CR_LOOPBACK;
1325 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_reg);
1326 return 0;
1327 break;
1328 }
1329
1330 return 8;
1331 }
1332
1333 static int
1334 e1000_setup_loopback_test(struct e1000_adapter *adapter)
1335 {
1336 struct e1000_hw *hw = &adapter->hw;
1337 uint32_t rctl;
1338
1339 if (hw->media_type == e1000_media_type_fiber ||
1340 hw->media_type == e1000_media_type_internal_serdes) {
1341 switch (hw->mac_type) {
1342 case e1000_82545:
1343 case e1000_82546:
1344 case e1000_82545_rev_3:
1345 case e1000_82546_rev_3:
1346 return e1000_set_phy_loopback(adapter);
1347 break;
1348 case e1000_82571:
1349 case e1000_82572:
1350 #define E1000_SERDES_LB_ON 0x410
1351 e1000_set_phy_loopback(adapter);
1352 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_ON);
1353 msec_delay(10);
1354 return 0;
1355 break;
1356 default:
1357 rctl = E1000_READ_REG(hw, RCTL);
1358 rctl |= E1000_RCTL_LBM_TCVR;
1359 E1000_WRITE_REG(hw, RCTL, rctl);
1360 return 0;
1361 }
1362 } else if (hw->media_type == e1000_media_type_copper)
1363 return e1000_set_phy_loopback(adapter);
1364
1365 return 7;
1366 }
1367
1368 static void
1369 e1000_loopback_cleanup(struct e1000_adapter *adapter)
1370 {
1371 struct e1000_hw *hw = &adapter->hw;
1372 uint32_t rctl;
1373 uint16_t phy_reg;
1374
1375 rctl = E1000_READ_REG(hw, RCTL);
1376 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1377 E1000_WRITE_REG(hw, RCTL, rctl);
1378
1379 switch (hw->mac_type) {
1380 case e1000_82571:
1381 case e1000_82572:
1382 if (hw->media_type == e1000_media_type_fiber ||
1383 hw->media_type == e1000_media_type_internal_serdes) {
1384 #define E1000_SERDES_LB_OFF 0x400
1385 E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
1386 msec_delay(10);
1387 break;
1388 }
1389 /* Fall Through */
1390 case e1000_82545:
1391 case e1000_82546:
1392 case e1000_82545_rev_3:
1393 case e1000_82546_rev_3:
1394 default:
1395 hw->autoneg = TRUE;
1396 if (hw->phy_type == e1000_phy_gg82563) {
1397 e1000_write_phy_reg(hw,
1398 GG82563_PHY_KMRN_MODE_CTRL,
1399 0x180);
1400 }
1401 e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
1402 if (phy_reg & MII_CR_LOOPBACK) {
1403 phy_reg &= ~MII_CR_LOOPBACK;
1404 e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
1405 e1000_phy_reset(hw);
1406 }
1407 break;
1408 }
1409 }
1410
1411 static void
1412 e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1413 {
1414 memset(skb->data, 0xFF, frame_size);
1415 frame_size &= ~1;
1416 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1417 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1418 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1419 }
1420
1421 static int
1422 e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1423 {
1424 frame_size &= ~1;
1425 if (*(skb->data + 3) == 0xFF) {
1426 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1427 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
1428 return 0;
1429 }
1430 }
1431 return 13;
1432 }
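/* Loopback frame layout produced and verified above (for a 1024-byte frame):
 * the whole buffer is first filled with 0xFF, most of the second half is
 * overwritten with 0xAA, and two marker bytes 0xBE and 0xAF are planted at
 * offsets frame_size/2 + 10 and frame_size/2 + 12.  The checker only
 * requires byte 3 to still be 0xFF and the two markers to be intact; 13 is
 * the mis-compare error code.
 */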
1433
1434 static int
1435 e1000_run_loopback_test(struct e1000_adapter *adapter)
1436 {
1437 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
1438 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
1439 struct pci_dev *pdev = adapter->pdev;
1440 int i, j, k, l, lc, good_cnt, ret_val=0;
1441 unsigned long time;
1442
1443 E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
1444
1445 /* Calculate the loop count based on the largest descriptor ring.
1446 * The idea is to wrap the largest ring a number of times using 64
1447 * send/receive pairs during each loop.
1448 */
1449
1450 if (rxdr->count <= txdr->count)
1451 lc = ((txdr->count / 64) * 2) + 1;
1452 else
1453 lc = ((rxdr->count / 64) * 2) + 1;
1454
1455 k = l = 0;
1456 for (j = 0; j <= lc; j++) { /* loop count loop */
1457 for (i = 0; i < 64; i++) { /* send the packets */
1458 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1459 1024);
1460 pci_dma_sync_single_for_device(pdev,
1461 txdr->buffer_info[k].dma,
1462 txdr->buffer_info[k].length,
1463 PCI_DMA_TODEVICE);
1464 if (unlikely(++k == txdr->count)) k = 0;
1465 }
1466 E1000_WRITE_REG(&adapter->hw, TDT, k);
1467 msec_delay(200);
1468 time = jiffies; /* set the start time for the receive */
1469 good_cnt = 0;
1470 do { /* receive the sent packets */
1471 pci_dma_sync_single_for_cpu(pdev,
1472 rxdr->buffer_info[l].dma,
1473 rxdr->buffer_info[l].length,
1474 PCI_DMA_FROMDEVICE);
1475
1476 ret_val = e1000_check_lbtest_frame(
1477 rxdr->buffer_info[l].skb,
1478 1024);
1479 if (!ret_val)
1480 good_cnt++;
1481 if (unlikely(++l == rxdr->count)) l = 0;
1482 /* time + 20 msecs (200 msecs on 2.4) is more than
1483 * enough time to complete the receives; if it is
1484 * exceeded, break out and report an error
1485 */
1486 } while (good_cnt < 64 && jiffies < (time + 20));
1487 if (good_cnt != 64) {
1488 ret_val = 13; /* ret_val is the same as mis-compare */
1489 break;
1490 }
1491 if (jiffies >= (time + 2)) {
1492 ret_val = 14; /* error code for time out error */
1493 break;
1494 }
1495 } /* end loop count loop */
1496 return ret_val;
1497 }
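/* Loop-count arithmetic above, assuming the default 256-descriptor test
 * rings: lc = (256 / 64) * 2 + 1 = 9, i.e. nine bursts of 64 frames are sent
 * and polled back, which wraps the larger ring a little over twice.
 */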
1498
1499 static int
1500 e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
1501 {
1502 /* PHY loopback cannot be performed if SoL/IDER
1503 * sessions are active */
1504 if (e1000_check_phy_reset_block(&adapter->hw)) {
1505 DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
1506 "when SoL/IDER is active.\n");
1507 *data = 0;
1508 goto out;
1509 }
1510
1511 if ((*data = e1000_setup_desc_rings(adapter)))
1512 goto out;
1513 if ((*data = e1000_setup_loopback_test(adapter)))
1514 goto err_loopback;
1515 *data = e1000_run_loopback_test(adapter);
1516 e1000_loopback_cleanup(adapter);
1517
1518 err_loopback:
1519 e1000_free_desc_rings(adapter);
1520 out:
1521 return *data;
1522 }
1523
1524 static int
1525 e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
1526 {
1527 *data = 0;
1528 if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
1529 int i = 0;
1530 adapter->hw.serdes_link_down = TRUE;
1531
1532 /* On some blade server designs, link establishment
1533 * could take as long as 2-3 minutes */
1534 do {
1535 e1000_check_for_link(&adapter->hw);
1536 if (adapter->hw.serdes_link_down == FALSE)
1537 return *data;
1538 msec_delay(20);
1539 } while (i++ < 3750);
1540
1541 *data = 1;
1542 } else {
1543 e1000_check_for_link(&adapter->hw);
1544 if (adapter->hw.autoneg) /* if auto_neg is set wait for it */
1545 msec_delay(4000);
1546
1547 if (!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
1548 *data = 1;
1549 }
1550 }
1551 return *data;
1552 }
1553
1554 static int
1555 e1000_diag_test_count(struct net_device *netdev)
1556 {
1557 return E1000_TEST_LEN;
1558 }
1559
1560 static void
1561 e1000_diag_test(struct net_device *netdev,
1562 struct ethtool_test *eth_test, uint64_t *data)
1563 {
1564 struct e1000_adapter *adapter = netdev_priv(netdev);
1565 boolean_t if_running = netif_running(netdev);
1566
1567 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1568 /* Offline tests */
1569
1570 /* save speed, duplex, autoneg settings */
1571 uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
1572 uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
1573 uint8_t autoneg = adapter->hw.autoneg;
1574
1575 /* Link test performed before hardware reset so autoneg doesn't
1576 * interfere with test result */
1577 if (e1000_link_test(adapter, &data[4]))
1578 eth_test->flags |= ETH_TEST_FL_FAILED;
1579
1580 if (if_running)
1581 e1000_down(adapter);
1582 else
1583 e1000_reset(adapter);
1584
1585 if (e1000_reg_test(adapter, &data[0]))
1586 eth_test->flags |= ETH_TEST_FL_FAILED;
1587
1588 e1000_reset(adapter);
1589 if (e1000_eeprom_test(adapter, &data[1]))
1590 eth_test->flags |= ETH_TEST_FL_FAILED;
1591
1592 e1000_reset(adapter);
1593 if (e1000_intr_test(adapter, &data[2]))
1594 eth_test->flags |= ETH_TEST_FL_FAILED;
1595
1596 e1000_reset(adapter);
1597 if (e1000_loopback_test(adapter, &data[3]))
1598 eth_test->flags |= ETH_TEST_FL_FAILED;
1599
1600 /* restore speed, duplex, autoneg settings */
1601 adapter->hw.autoneg_advertised = autoneg_advertised;
1602 adapter->hw.forced_speed_duplex = forced_speed_duplex;
1603 adapter->hw.autoneg = autoneg;
1604
1605 e1000_reset(adapter);
1606 if (if_running)
1607 e1000_up(adapter);
1608 } else {
1609 /* Online tests */
1610 if (e1000_link_test(adapter, &data[4]))
1611 eth_test->flags |= ETH_TEST_FL_FAILED;
1612
1613 /* Offline tests aren't run; pass by default */
1614 data[0] = 0;
1615 data[1] = 0;
1616 data[2] = 0;
1617 data[3] = 0;
1618 }
1619 msleep_interruptible(4 * 1000);
1620 }
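/* The result slots filled in above follow the order of e1000_gstrings_test:
 * data[0] register test, data[1] EEPROM test, data[2] interrupt test,
 * data[3] loopback test, data[4] link test; a non-zero value marks a
 * failure.  User space typically triggers this with "ethtool -t ethX
 * offline" or "ethtool -t ethX online" (ethX is a placeholder).
 */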
1621
1622 static void
1623 e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1624 {
1625 struct e1000_adapter *adapter = netdev_priv(netdev);
1626 struct e1000_hw *hw = &adapter->hw;
1627
1628 switch (adapter->hw.device_id) {
1629 case E1000_DEV_ID_82542:
1630 case E1000_DEV_ID_82543GC_FIBER:
1631 case E1000_DEV_ID_82543GC_COPPER:
1632 case E1000_DEV_ID_82544EI_FIBER:
1633 case E1000_DEV_ID_82546EB_QUAD_COPPER:
1634 case E1000_DEV_ID_82545EM_FIBER:
1635 case E1000_DEV_ID_82545EM_COPPER:
1636 case E1000_DEV_ID_82546GB_QUAD_COPPER:
1637 wol->supported = 0;
1638 wol->wolopts = 0;
1639 return;
1640
1641 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1642 /* device id 10B5 port-A supports wol */
1643 if (!adapter->ksp3_port_a) {
1644 wol->supported = 0;
1645 return;
1646 }
1647 /* KSP3 does not support UCAST wake-ups for any interface */
1648 wol->supported = WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
1649
1650 if (adapter->wol & E1000_WUFC_EX)
1651 DPRINTK(DRV, ERR, "Interface does not support "
1652 "directed (unicast) frame wake-up packets\n");
1653 wol->wolopts = 0;
1654 goto do_defaults;
1655
1656 case E1000_DEV_ID_82546EB_FIBER:
1657 case E1000_DEV_ID_82546GB_FIBER:
1658 case E1000_DEV_ID_82571EB_FIBER:
1659 /* Wake events only supported on port A for dual fiber */
1660 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
1661 wol->supported = 0;
1662 wol->wolopts = 0;
1663 return;
1664 }
1665 /* Fall Through */
1666
1667 default:
1668 wol->supported = WAKE_UCAST | WAKE_MCAST |
1669 WAKE_BCAST | WAKE_MAGIC;
1670 wol->wolopts = 0;
1671
1672 do_defaults:
1673 if (adapter->wol & E1000_WUFC_EX)
1674 wol->wolopts |= WAKE_UCAST;
1675 if (adapter->wol & E1000_WUFC_MC)
1676 wol->wolopts |= WAKE_MCAST;
1677 if (adapter->wol & E1000_WUFC_BC)
1678 wol->wolopts |= WAKE_BCAST;
1679 if (adapter->wol & E1000_WUFC_MAG)
1680 wol->wolopts |= WAKE_MAGIC;
1681 return;
1682 }
1683 }
1684
1685 static int
1686 e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1687 {
1688 struct e1000_adapter *adapter = netdev_priv(netdev);
1689 struct e1000_hw *hw = &adapter->hw;
1690
1691 switch (adapter->hw.device_id) {
1692 case E1000_DEV_ID_82542:
1693 case E1000_DEV_ID_82543GC_FIBER:
1694 case E1000_DEV_ID_82543GC_COPPER:
1695 case E1000_DEV_ID_82544EI_FIBER:
1696 case E1000_DEV_ID_82546EB_QUAD_COPPER:
1697 case E1000_DEV_ID_82546GB_QUAD_COPPER:
1698 case E1000_DEV_ID_82545EM_FIBER:
1699 case E1000_DEV_ID_82545EM_COPPER:
1700 return wol->wolopts ? -EOPNOTSUPP : 0;
1701
1702 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1703 /* device id 10B5 port-A supports wol */
1704 if (!adapter->ksp3_port_a)
1705 return wol->wolopts ? -EOPNOTSUPP : 0;
1706
1707 if (wol->wolopts & WAKE_UCAST) {
1708 DPRINTK(DRV, ERR, "Interface does not support "
1709 "directed (unicast) frame wake-up packets\n");
1710 return -EOPNOTSUPP;
1711 }
1712
1713 case E1000_DEV_ID_82546EB_FIBER:
1714 case E1000_DEV_ID_82546GB_FIBER:
1715 case E1000_DEV_ID_82571EB_FIBER:
1716 /* Wake events only supported on port A for dual fiber */
1717 if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
1718 return wol->wolopts ? -EOPNOTSUPP : 0;
1719 /* Fall Through */
1720
1721 default:
1722 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1723 return -EOPNOTSUPP;
1724
1725 adapter->wol = 0;
1726
1727 if (wol->wolopts & WAKE_UCAST)
1728 adapter->wol |= E1000_WUFC_EX;
1729 if (wol->wolopts & WAKE_MCAST)
1730 adapter->wol |= E1000_WUFC_MC;
1731 if (wol->wolopts & WAKE_BCAST)
1732 adapter->wol |= E1000_WUFC_BC;
1733 if (wol->wolopts & WAKE_MAGIC)
1734 adapter->wol |= E1000_WUFC_MAG;
1735 }
1736
1737 return 0;
1738 }
1739
1740 /* toggle LED 4 times per second = 2 "blinks" per second */
1741 #define E1000_ID_INTERVAL (HZ/4)
1742
1743 /* bit defines for adapter->led_status */
1744 #define E1000_LED_ON 0
1745
1746 static void
1747 e1000_led_blink_callback(unsigned long data)
1748 {
1749 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1750
1751 if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
1752 e1000_led_off(&adapter->hw);
1753 else
1754 e1000_led_on(&adapter->hw);
1755
1756 mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
1757 }
1758
1759 static int
1760 e1000_phys_id(struct net_device *netdev, uint32_t data)
1761 {
1762 struct e1000_adapter *adapter = netdev_priv(netdev);
1763
1764 if (!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
1765 data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
1766
1767 if (adapter->hw.mac_type < e1000_82571) {
1768 if (!adapter->blink_timer.function) {
1769 init_timer(&adapter->blink_timer);
1770 adapter->blink_timer.function = e1000_led_blink_callback;
1771 adapter->blink_timer.data = (unsigned long) adapter;
1772 }
1773 e1000_setup_led(&adapter->hw);
1774 mod_timer(&adapter->blink_timer, jiffies);
1775 msleep_interruptible(data * 1000);
1776 del_timer_sync(&adapter->blink_timer);
1777 } else if (adapter->hw.mac_type < e1000_82573) {
1778 E1000_WRITE_REG(&adapter->hw, LEDCTL,
1779 (E1000_LEDCTL_LED2_BLINK_RATE |
1780 E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
1781 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1782 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
1783 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
1784 msleep_interruptible(data * 1000);
1785 } else {
1786 E1000_WRITE_REG(&adapter->hw, LEDCTL,
1787 (E1000_LEDCTL_LED2_BLINK_RATE |
1788 E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
1789 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
1790 (E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
1791 (E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
1792 msleep_interruptible(data * 1000);
1793 }
1794
1795 e1000_led_off(&adapter->hw);
1796 clear_bit(E1000_LED_ON, &adapter->led_status);
1797 e1000_cleanup_led(&adapter->hw);
1798
1799 return 0;
1800 }
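/* The "data" argument above is the requested blink time in seconds (0 or an
 * out-of-range value means "as long as possible"); user space drives this
 * with, e.g., "ethtool -p ethX 5" to blink the identify LED for five seconds
 * (ethX is a placeholder).
 */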
1801
1802 static int
1803 e1000_nway_reset(struct net_device *netdev)
1804 {
1805 struct e1000_adapter *adapter = netdev_priv(netdev);
1806 if (netif_running(netdev)) {
1807 e1000_down(adapter);
1808 e1000_up(adapter);
1809 }
1810 return 0;
1811 }
1812
1813 static int
1814 e1000_get_stats_count(struct net_device *netdev)
1815 {
1816 return E1000_STATS_LEN;
1817 }
1818
1819 static void
1820 e1000_get_ethtool_stats(struct net_device *netdev,
1821 struct ethtool_stats *stats, uint64_t *data)
1822 {
1823 struct e1000_adapter *adapter = netdev_priv(netdev);
1824 int i;
1825
1826 e1000_update_stats(adapter);
1827 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1828 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
1829 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1830 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
1831 }
1832 /* BUG_ON(i != E1000_STATS_LEN); */
1833 }
1834
1835 static void
1836 e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
1837 {
1838 uint8_t *p = data;
1839 int i;
1840
1841 switch (stringset) {
1842 case ETH_SS_TEST:
1843 memcpy(data, *e1000_gstrings_test,
1844 E1000_TEST_LEN*ETH_GSTRING_LEN);
1845 break;
1846 case ETH_SS_STATS:
1847 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1848 memcpy(p, e1000_gstrings_stats[i].stat_string,
1849 ETH_GSTRING_LEN);
1850 p += ETH_GSTRING_LEN;
1851 }
1852 /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
1853 break;
1854 }
1855 }
1856
1857 static struct ethtool_ops e1000_ethtool_ops = {
1858 .get_settings = e1000_get_settings,
1859 .set_settings = e1000_set_settings,
1860 .get_drvinfo = e1000_get_drvinfo,
1861 .get_regs_len = e1000_get_regs_len,
1862 .get_regs = e1000_get_regs,
1863 .get_wol = e1000_get_wol,
1864 .set_wol = e1000_set_wol,
1865 .get_msglevel = e1000_get_msglevel,
1866 .set_msglevel = e1000_set_msglevel,
1867 .nway_reset = e1000_nway_reset,
1868 .get_link = ethtool_op_get_link,
1869 .get_eeprom_len = e1000_get_eeprom_len,
1870 .get_eeprom = e1000_get_eeprom,
1871 .set_eeprom = e1000_set_eeprom,
1872 .get_ringparam = e1000_get_ringparam,
1873 .set_ringparam = e1000_set_ringparam,
1874 .get_pauseparam = e1000_get_pauseparam,
1875 .set_pauseparam = e1000_set_pauseparam,
1876 .get_rx_csum = e1000_get_rx_csum,
1877 .set_rx_csum = e1000_set_rx_csum,
1878 .get_tx_csum = e1000_get_tx_csum,
1879 .set_tx_csum = e1000_set_tx_csum,
1880 .get_sg = ethtool_op_get_sg,
1881 .set_sg = ethtool_op_set_sg,
1882 #ifdef NETIF_F_TSO
1883 .get_tso = ethtool_op_get_tso,
1884 .set_tso = e1000_set_tso,
1885 #endif
1886 .self_test_count = e1000_diag_test_count,
1887 .self_test = e1000_diag_test,
1888 .get_strings = e1000_get_strings,
1889 .phys_id = e1000_phys_id,
1890 .get_stats_count = e1000_get_stats_count,
1891 .get_ethtool_stats = e1000_get_ethtool_stats,
1892 .get_perm_addr = ethtool_op_get_perm_addr,
1893 };
1894
1895 void e1000_set_ethtool_ops(struct net_device *netdev)
1896 {
1897 SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
1898 }