drivers/net/e1000e/lib.c
1 /*******************************************************************************
2
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2008 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include <linux/netdevice.h>
30 #include <linux/ethtool.h>
31 #include <linux/delay.h>
32 #include <linux/pci.h>
33
34 #include "e1000.h"
35
36 enum e1000_mng_mode {
37 e1000_mng_mode_none = 0,
38 e1000_mng_mode_asf,
39 e1000_mng_mode_pt,
40 e1000_mng_mode_ipmi,
41 e1000_mng_mode_host_if_only
42 };
43
44 #define E1000_FACTPS_MNGCG 0x20000000
45
46 /* Intel(R) Active Management Technology signature */
47 #define E1000_IAMT_SIGNATURE 0x544D4149
48
49 /**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information
51 * @hw: pointer to the HW structure
52 *
53 * Determines and stores the system bus information for a particular
54 * network interface. The following bus information is determined and stored:
55 * bus speed, bus width, type (PCIe), and PCIe function.
56 **/
57 s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
58 {
59 struct e1000_bus_info *bus = &hw->bus;
60 struct e1000_adapter *adapter = hw->adapter;
61 u32 status;
62 u16 pcie_link_status, pci_header_type, cap_offset;
63
64 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
65 if (!cap_offset) {
66 bus->width = e1000_bus_width_unknown;
67 } else {
68 pci_read_config_word(adapter->pdev,
69 cap_offset + PCIE_LINK_STATUS,
70 &pcie_link_status);
71 bus->width = (enum e1000_bus_width)((pcie_link_status &
72 PCIE_LINK_WIDTH_MASK) >>
73 PCIE_LINK_WIDTH_SHIFT);
74 }
75
76 pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
77 &pci_header_type);
78 if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
79 status = er32(STATUS);
80 bus->func = (status & E1000_STATUS_FUNC_MASK)
81 >> E1000_STATUS_FUNC_SHIFT;
82 } else {
83 bus->func = 0;
84 }
85
86 return 0;
87 }
88
89 /**
90 * e1000e_write_vfta - Write value to VLAN filter table
91 * @hw: pointer to the HW structure
92 * @offset: register offset in VLAN filter table
93 * @value: register value written to VLAN filter table
94 *
95 * Writes value at the given offset in the register array which stores
96 * the VLAN filter table.
97 **/
98 void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
99 {
100 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
101 e1e_flush();
102 }
103
104 /**
105  * e1000e_init_rx_addrs - Initialize receive addresses
106 * @hw: pointer to the HW structure
107 * @rar_count: receive address registers
108 *
109  * Sets up the receive address registers by setting the base receive address
110  * register to the device's MAC address and clearing all the other receive
111 * address registers to 0.
112 **/
113 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
114 {
115 u32 i;
116
117 /* Setup the receive address */
118 hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
119
120 e1000e_rar_set(hw, hw->mac.addr, 0);
121
122 /* Zero out the other (rar_entry_count - 1) receive addresses */
123 hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
124 for (i = 1; i < rar_count; i++) {
125 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
126 e1e_flush();
127 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
128 e1e_flush();
129 }
130 }
131
132 /**
133 * e1000e_rar_set - Set receive address register
134 * @hw: pointer to the HW structure
135 * @addr: pointer to the receive address
136 * @index: receive address array register
137 *
138 * Sets the receive address array register at index to the address passed
139 * in by addr.
140 **/
141 void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142 {
143 u32 rar_low, rar_high;
144
145 /*
146 * HW expects these in little endian so we reverse the byte order
147 * from network order (big endian) to little endian
148 */
149 rar_low = ((u32) addr[0] |
150 ((u32) addr[1] << 8) |
151 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
152
153 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
154
155 rar_high |= E1000_RAH_AV;
156
157 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
158 E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
159 }
160
161 /**
162 * e1000_hash_mc_addr - Generate a multicast hash value
163 * @hw: pointer to the HW structure
164 * @mc_addr: pointer to a multicast address
165 *
166 * Generates a multicast address hash value which is used to determine
167 * the multicast filter table array address and new table value. See
168 * e1000_mta_set_generic()
169 **/
170 static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
171 {
172 u32 hash_value, hash_mask;
173 u8 bit_shift = 0;
174
175 /* Register count multiplied by bits per register */
176 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
177
178 /*
179 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
180 * where 0xFF would still fall within the hash mask.
181 */
182 while (hash_mask >> bit_shift != 0xFF)
183 bit_shift++;
184
185 /*
186 * The portion of the address that is used for the hash table
187 * is determined by the mc_filter_type setting.
188 * The algorithm is such that there is a total of 8 bits of shifting.
189 * The bit_shift for a mc_filter_type of 0 represents the number of
190 * left-shifts where the MSB of mc_addr[5] would still fall within
191 * the hash_mask. Case 0 does this exactly. Since there are a total
192 * of 8 bits of shifting, then mc_addr[4] will shift right the
193 * remaining number of bits. Thus 8 - bit_shift. The rest of the
194 * cases are a variation of this algorithm...essentially raising the
195 * number of bits to shift mc_addr[5] left, while still keeping the
196 * 8-bit shifting total.
197 *
198 * For example, given the following Destination MAC Address and an
199 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
200 * we can see that the bit_shift for case 0 is 4. These are the hash
201 * values resulting from each mc_filter_type...
202 * [0] [1] [2] [3] [4] [5]
203 * 01 AA 00 12 34 56
204 * LSB MSB
205 *
206 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
207 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
208 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
209 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
210 */
211 switch (hw->mac.mc_filter_type) {
212 default:
213 case 0:
214 break;
215 case 1:
216 bit_shift += 1;
217 break;
218 case 2:
219 bit_shift += 2;
220 break;
221 case 3:
222 bit_shift += 4;
223 break;
224 }
225
226 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
227 (((u16) mc_addr[5]) << bit_shift)));
228
229 return hash_value;
230 }
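/*
 * Illustrative sketch (not part of the driver): continuing the case-0
 * example above, hash_value = 0x563 with a 128-register MTA is mapped
 * to a table location by the caller below as
 *
 *	hash_reg = (0x563 >> 5) & (128 - 1) = 0x2B  (register 43)
 *	hash_bit =  0x563 & 0x1F            = 0x03  (bit 3)
 *	mta      =  1 << hash_bit           = 0x08  (OR'ed into mcarray[0x2B])
 */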
231
232 /**
233 * e1000e_update_mc_addr_list_generic - Update Multicast addresses
234 * @hw: pointer to the HW structure
235 * @mc_addr_list: array of multicast addresses to program
236 * @mc_addr_count: number of multicast addresses to program
237 * @rar_used_count: the first RAR register free to program
238 * @rar_count: total number of supported Receive Address Registers
239 *
240 * Updates the Receive Address Registers and Multicast Table Array.
241 * The caller must have a packed mc_addr_list of multicast addresses.
242 * The parameter rar_count will usually be hw->mac.rar_entry_count
243 * unless there are workarounds that change this.
244 **/
245 void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
246 u8 *mc_addr_list, u32 mc_addr_count,
247 u32 rar_used_count, u32 rar_count)
248 {
249 u32 i;
250 u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
251
252 if (!mcarray) {
253 printk(KERN_ERR "multicast array memory allocation failed\n");
254 return;
255 }
256
257 /*
258 * Load the first set of multicast addresses into the exact
259 * filters (RAR). If there are not enough to fill the RAR
260 * array, clear the filters.
261 */
262 for (i = rar_used_count; i < rar_count; i++) {
263 if (mc_addr_count) {
264 e1000e_rar_set(hw, mc_addr_list, i);
265 mc_addr_count--;
266 mc_addr_list += ETH_ALEN;
267 } else {
268 E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
269 e1e_flush();
270 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
271 e1e_flush();
272 }
273 }
274
275 /* Load any remaining multicast addresses into the hash table. */
276 for (; mc_addr_count > 0; mc_addr_count--) {
277 u32 hash_value, hash_reg, hash_bit, mta;
278 hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
279 hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
280 hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
281 hash_bit = hash_value & 0x1F;
282 mta = (1 << hash_bit);
283 mcarray[hash_reg] |= mta;
284 mc_addr_list += ETH_ALEN;
285 }
286
287 /* write the hash table completely */
288 for (i = 0; i < hw->mac.mta_reg_count; i++)
289 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
290
291 e1e_flush();
292 kfree(mcarray);
293 }
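/*
 * Illustrative sketch (not part of the driver): with rar_used_count = 1,
 * rar_count = 15 and a packed list of 20 multicast addresses, the first
 * loop above programs addresses 0-13 into RAR[1]-RAR[14], and the
 * remaining 6 addresses are hashed into the multicast table array (MTA).
 */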
294
295 /**
296 * e1000e_clear_hw_cntrs_base - Clear base hardware counters
297 * @hw: pointer to the HW structure
298 *
299 * Clears the base hardware counters by reading the counter registers.
300 **/
301 void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
302 {
303 u32 temp;
304
305 temp = er32(CRCERRS);
306 temp = er32(SYMERRS);
307 temp = er32(MPC);
308 temp = er32(SCC);
309 temp = er32(ECOL);
310 temp = er32(MCC);
311 temp = er32(LATECOL);
312 temp = er32(COLC);
313 temp = er32(DC);
314 temp = er32(SEC);
315 temp = er32(RLEC);
316 temp = er32(XONRXC);
317 temp = er32(XONTXC);
318 temp = er32(XOFFRXC);
319 temp = er32(XOFFTXC);
320 temp = er32(FCRUC);
321 temp = er32(GPRC);
322 temp = er32(BPRC);
323 temp = er32(MPRC);
324 temp = er32(GPTC);
325 temp = er32(GORCL);
326 temp = er32(GORCH);
327 temp = er32(GOTCL);
328 temp = er32(GOTCH);
329 temp = er32(RNBC);
330 temp = er32(RUC);
331 temp = er32(RFC);
332 temp = er32(ROC);
333 temp = er32(RJC);
334 temp = er32(TORL);
335 temp = er32(TORH);
336 temp = er32(TOTL);
337 temp = er32(TOTH);
338 temp = er32(TPR);
339 temp = er32(TPT);
340 temp = er32(MPTC);
341 temp = er32(BPTC);
342 }
343
344 /**
345 * e1000e_check_for_copper_link - Check for link (Copper)
346 * @hw: pointer to the HW structure
347 *
348  * Checks to see if the link status of the hardware has changed. If a
349 * change in link status has been detected, then we read the PHY registers
350 * to get the current speed/duplex if link exists.
351 **/
352 s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
353 {
354 struct e1000_mac_info *mac = &hw->mac;
355 s32 ret_val;
356 bool link;
357
358 /*
359 * We only want to go out to the PHY registers to see if Auto-Neg
360 * has completed and/or if our link status has changed. The
361 * get_link_status flag is set upon receiving a Link Status
362 * Change or Rx Sequence Error interrupt.
363 */
364 if (!mac->get_link_status)
365 return 0;
366
367 /*
368 * First we want to see if the MII Status Register reports
369 * link. If so, then we want to get the current speed/duplex
370 * of the PHY.
371 */
372 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
373 if (ret_val)
374 return ret_val;
375
376 if (!link)
377 return ret_val; /* No link detected */
378
379 mac->get_link_status = 0;
380
381 /*
382 * Check if there was DownShift, must be checked
383 * immediately after link-up
384 */
385 e1000e_check_downshift(hw);
386
387 /*
388 * If we are forcing speed/duplex, then we simply return since
389 * we have already determined whether we have link or not.
390 */
391 if (!mac->autoneg) {
392 ret_val = -E1000_ERR_CONFIG;
393 return ret_val;
394 }
395
396 /*
397 * Auto-Neg is enabled. Auto Speed Detection takes care
398 * of MAC speed/duplex configuration. So we only need to
399 * configure Collision Distance in the MAC.
400 */
401 e1000e_config_collision_dist(hw);
402
403 /*
404 * Configure Flow Control now that Auto-Neg has completed.
405 * First, we need to restore the desired flow control
406 * settings because we may have had to re-autoneg with a
407 * different link partner.
408 */
409 ret_val = e1000e_config_fc_after_link_up(hw);
410 if (ret_val) {
411 hw_dbg(hw, "Error configuring flow control\n");
412 }
413
414 return ret_val;
415 }
416
417 /**
418 * e1000e_check_for_fiber_link - Check for link (Fiber)
419 * @hw: pointer to the HW structure
420 *
421 * Checks for link up on the hardware. If link is not up and we have
422 * a signal, then we need to force link up.
423 **/
424 s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
425 {
426 struct e1000_mac_info *mac = &hw->mac;
427 u32 rxcw;
428 u32 ctrl;
429 u32 status;
430 s32 ret_val;
431
432 ctrl = er32(CTRL);
433 status = er32(STATUS);
434 rxcw = er32(RXCW);
435
436 /*
437 * If we don't have link (auto-negotiation failed or link partner
438 * cannot auto-negotiate), the cable is plugged in (we have signal),
439 * and our link partner is not trying to auto-negotiate with us (we
440 * are receiving idles or data), we need to force link up. We also
441 * need to give auto-negotiation time to complete, in case the cable
442 * was just plugged in. The autoneg_failed flag does this.
443 */
444 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
445 if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
446 (!(rxcw & E1000_RXCW_C))) {
447 if (mac->autoneg_failed == 0) {
448 mac->autoneg_failed = 1;
449 return 0;
450 }
451 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
452
453 /* Disable auto-negotiation in the TXCW register */
454 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
455
456 /* Force link-up and also force full-duplex. */
457 ctrl = er32(CTRL);
458 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
459 ew32(CTRL, ctrl);
460
461 /* Configure Flow Control after forcing link up. */
462 ret_val = e1000e_config_fc_after_link_up(hw);
463 if (ret_val) {
464 hw_dbg(hw, "Error configuring flow control\n");
465 return ret_val;
466 }
467 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
468 /*
469 * If we are forcing link and we are receiving /C/ ordered
470 * sets, re-enable auto-negotiation in the TXCW register
471 * and disable forced link in the Device Control register
472 * in an attempt to auto-negotiate with our link partner.
473 */
474 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
475 ew32(TXCW, mac->txcw);
476 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
477
478 mac->serdes_has_link = true;
479 }
480
481 return 0;
482 }
483
484 /**
485 * e1000e_check_for_serdes_link - Check for link (Serdes)
486 * @hw: pointer to the HW structure
487 *
488 * Checks for link up on the hardware. If link is not up and we have
489 * a signal, then we need to force link up.
490 **/
491 s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
492 {
493 struct e1000_mac_info *mac = &hw->mac;
494 u32 rxcw;
495 u32 ctrl;
496 u32 status;
497 s32 ret_val;
498
499 ctrl = er32(CTRL);
500 status = er32(STATUS);
501 rxcw = er32(RXCW);
502
503 /*
504 * If we don't have link (auto-negotiation failed or link partner
505 * cannot auto-negotiate), and our link partner is not trying to
506 * auto-negotiate with us (we are receiving idles or data),
507 * we need to force link up. We also need to give auto-negotiation
508 * time to complete.
509 */
510 /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
511 if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
512 if (mac->autoneg_failed == 0) {
513 mac->autoneg_failed = 1;
514 return 0;
515 }
516 hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
517
518 /* Disable auto-negotiation in the TXCW register */
519 ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
520
521 /* Force link-up and also force full-duplex. */
522 ctrl = er32(CTRL);
523 ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
524 ew32(CTRL, ctrl);
525
526 /* Configure Flow Control after forcing link up. */
527 ret_val = e1000e_config_fc_after_link_up(hw);
528 if (ret_val) {
529 hw_dbg(hw, "Error configuring flow control\n");
530 return ret_val;
531 }
532 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
533 /*
534 * If we are forcing link and we are receiving /C/ ordered
535 * sets, re-enable auto-negotiation in the TXCW register
536 * and disable forced link in the Device Control register
537 * in an attempt to auto-negotiate with our link partner.
538 */
539 hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
540 ew32(TXCW, mac->txcw);
541 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
542
543 mac->serdes_has_link = true;
544 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
545 /*
546 * If we force link for non-auto-negotiation switch, check
547 * link status based on MAC synchronization for internal
548 * serdes media type.
549 */
550 /* SYNCH bit and IV bit are sticky. */
551 udelay(10);
552 rxcw = er32(RXCW);
553 if (rxcw & E1000_RXCW_SYNCH) {
554 if (!(rxcw & E1000_RXCW_IV)) {
555 mac->serdes_has_link = true;
556 hw_dbg(hw, "SERDES: Link up - forced.\n");
557 }
558 } else {
559 mac->serdes_has_link = false;
560 hw_dbg(hw, "SERDES: Link down - force failed.\n");
561 }
562 }
563
564 if (E1000_TXCW_ANE & er32(TXCW)) {
565 status = er32(STATUS);
566 if (status & E1000_STATUS_LU) {
567 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
568 udelay(10);
569 rxcw = er32(RXCW);
570 if (rxcw & E1000_RXCW_SYNCH) {
571 if (!(rxcw & E1000_RXCW_IV)) {
572 mac->serdes_has_link = true;
573 hw_dbg(hw, "SERDES: Link up - autoneg "
574 "completed sucessfully.\n");
575 } else {
576 mac->serdes_has_link = false;
577 hw_dbg(hw, "SERDES: Link down - invalid"
578 "codewords detected in autoneg.\n");
579 }
580 } else {
581 mac->serdes_has_link = false;
582 hw_dbg(hw, "SERDES: Link down - no sync.\n");
583 }
584 } else {
585 mac->serdes_has_link = false;
586 hw_dbg(hw, "SERDES: Link down - autoneg failed\n");
587 }
588 }
589
590 return 0;
591 }
592
593 /**
594 * e1000_set_default_fc_generic - Set flow control default values
595 * @hw: pointer to the HW structure
596 *
597 * Read the EEPROM for the default values for flow control and store the
598 * values.
599 **/
600 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
601 {
602 s32 ret_val;
603 u16 nvm_data;
604
605 /*
606 * Read and store word 0x0F of the EEPROM. This word contains bits
607 * that determine the hardware's default PAUSE (flow control) mode,
608 * a bit that determines whether the HW defaults to enabling or
609 * disabling auto-negotiation, and the direction of the
610 * SW defined pins. If there is no SW over-ride of the flow
611 * control setting, then the variable hw->fc will
612 * be initialized based on a value in the EEPROM.
613 */
614 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
615
616 if (ret_val) {
617 hw_dbg(hw, "NVM Read Error\n");
618 return ret_val;
619 }
620
621 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
622 hw->fc.requested_mode = e1000_fc_none;
623 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
624 NVM_WORD0F_ASM_DIR)
625 hw->fc.requested_mode = e1000_fc_tx_pause;
626 else
627 hw->fc.requested_mode = e1000_fc_full;
628
629 return 0;
630 }
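/*
 * Illustrative sketch (not part of the driver): the decode above means
 * that, in NVM word 0x0F,
 *
 *	pause field == 0                    -> e1000_fc_none
 *	pause field == NVM_WORD0F_ASM_DIR   -> e1000_fc_tx_pause
 *	any other pause field value         -> e1000_fc_full
 *
 * where "pause field" is nvm_data & NVM_WORD0F_PAUSE_MASK.
 */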
631
632 /**
633 * e1000e_setup_link - Setup flow control and link settings
634 * @hw: pointer to the HW structure
635 *
636 * Determines which flow control settings to use, then configures flow
637 * control. Calls the appropriate media-specific link configuration
638 * function. Assuming the adapter has a valid link partner, a valid link
639 * should be established. Assumes the hardware has previously been reset
640 * and the transmitter and receiver are not enabled.
641 **/
642 s32 e1000e_setup_link(struct e1000_hw *hw)
643 {
644 struct e1000_mac_info *mac = &hw->mac;
645 s32 ret_val;
646
647 /*
648 * In the case of the phy reset being blocked, we already have a link.
649 * We do not need to set it up again.
650 */
651 if (e1000_check_reset_block(hw))
652 return 0;
653
654 /*
655 * If requested flow control is set to default, set flow control
656 * based on the EEPROM flow control settings.
657 */
658 if (hw->fc.requested_mode == e1000_fc_default) {
659 ret_val = e1000_set_default_fc_generic(hw);
660 if (ret_val)
661 return ret_val;
662 }
663
664 /*
665 * Save off the requested flow control mode for use later. Depending
666 * on the link partner's capabilities, we may or may not use this mode.
667 */
668 hw->fc.current_mode = hw->fc.requested_mode;
669
670 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n",
671 hw->fc.current_mode);
672
673 /* Call the necessary media_type subroutine to configure the link. */
674 ret_val = mac->ops.setup_physical_interface(hw);
675 if (ret_val)
676 return ret_val;
677
678 /*
679 * Initialize the flow control address, type, and PAUSE timer
680 * registers to their default values. This is done even if flow
681 * control is disabled, because it does not hurt anything to
682 * initialize these registers.
683 */
684 hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
685 ew32(FCT, FLOW_CONTROL_TYPE);
686 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
687 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
688
689 ew32(FCTTV, hw->fc.pause_time);
690
691 return e1000e_set_fc_watermarks(hw);
692 }
693
694 /**
695 * e1000_commit_fc_settings_generic - Configure flow control
696 * @hw: pointer to the HW structure
697 *
698 * Write the flow control settings to the Transmit Config Word Register (TXCW)
699  * based on the flow control settings in e1000_mac_info.
700 **/
701 static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
702 {
703 struct e1000_mac_info *mac = &hw->mac;
704 u32 txcw;
705
706 /*
707 * Check for a software override of the flow control settings, and
708 * setup the device accordingly. If auto-negotiation is enabled, then
709 * software will have to set the "PAUSE" bits to the correct value in
710 * the Transmit Config Word Register (TXCW) and re-start auto-
711 * negotiation. However, if auto-negotiation is disabled, then
712 * software will have to manually configure the two flow control enable
713 * bits in the CTRL register.
714 *
715 * The possible values of the "fc" parameter are:
716 * 0: Flow control is completely disabled
717 * 1: Rx flow control is enabled (we can receive pause frames,
718 * but not send pause frames).
719 * 2: Tx flow control is enabled (we can send pause frames but we
720 * do not support receiving pause frames).
721 * 3: Both Rx and Tx flow control (symmetric) are enabled.
722 */
723 switch (hw->fc.current_mode) {
724 case e1000_fc_none:
725 /* Flow control completely disabled by a software over-ride. */
726 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
727 break;
728 case e1000_fc_rx_pause:
729 /*
730 * Rx Flow control is enabled and Tx Flow control is disabled
731 * by a software over-ride. Since there really isn't a way to
732 * advertise that we are capable of Rx Pause ONLY, we will
733 * advertise that we support both symmetric and asymmetric Rx
734 * PAUSE. Later, we will disable the adapter's ability to send
735 * PAUSE frames.
736 */
737 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
738 break;
739 case e1000_fc_tx_pause:
740 /*
741 * Tx Flow control is enabled, and Rx Flow control is disabled,
742 * by a software over-ride.
743 */
744 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
745 break;
746 case e1000_fc_full:
747 /*
748 * Flow control (both Rx and Tx) is enabled by a software
749 * over-ride.
750 */
751 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
752 break;
753 default:
754 hw_dbg(hw, "Flow control param set incorrectly\n");
755 return -E1000_ERR_CONFIG;
756 break;
757 }
758
759 ew32(TXCW, txcw);
760 mac->txcw = txcw;
761
762 return 0;
763 }
764
765 /**
766 * e1000_poll_fiber_serdes_link_generic - Poll for link up
767 * @hw: pointer to the HW structure
768 *
769 * Polls for link up by reading the status register, if link fails to come
770 * up with auto-negotiation, then the link is forced if a signal is detected.
771 **/
772 static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
773 {
774 struct e1000_mac_info *mac = &hw->mac;
775 u32 i, status;
776 s32 ret_val;
777
778 /*
779 * If we have a signal (the cable is plugged in, or assumed true for
780 * serdes media) then poll for a "Link-Up" indication in the Device
781 	 * Status Register. Time-out if a link isn't seen in 500 milliseconds
782 	 * (Auto-negotiation should complete in less than 500
783 * milliseconds even if the other end is doing it in SW).
784 */
785 for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
786 msleep(10);
787 status = er32(STATUS);
788 if (status & E1000_STATUS_LU)
789 break;
790 }
791 if (i == FIBER_LINK_UP_LIMIT) {
792 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
793 mac->autoneg_failed = 1;
794 /*
795 * AutoNeg failed to achieve a link, so we'll call
796 * mac->check_for_link. This routine will force the
797 * link up if we detect a signal. This will allow us to
798 * communicate with non-autonegotiating link partners.
799 */
800 ret_val = mac->ops.check_for_link(hw);
801 if (ret_val) {
802 hw_dbg(hw, "Error while checking for link\n");
803 return ret_val;
804 }
805 mac->autoneg_failed = 0;
806 } else {
807 mac->autoneg_failed = 0;
808 hw_dbg(hw, "Valid Link Found\n");
809 }
810
811 return 0;
812 }
813
814 /**
815 * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
816 * @hw: pointer to the HW structure
817 *
818 * Configures collision distance and flow control for fiber and serdes
819 * links. Upon successful setup, poll for link.
820 **/
821 s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
822 {
823 u32 ctrl;
824 s32 ret_val;
825
826 ctrl = er32(CTRL);
827
828 /* Take the link out of reset */
829 ctrl &= ~E1000_CTRL_LRST;
830
831 e1000e_config_collision_dist(hw);
832
833 ret_val = e1000_commit_fc_settings_generic(hw);
834 if (ret_val)
835 return ret_val;
836
837 /*
838 * Since auto-negotiation is enabled, take the link out of reset (the
839 * link will be in reset, because we previously reset the chip). This
840 * will restart auto-negotiation. If auto-negotiation is successful
841 * then the link-up status bit will be set and the flow control enable
842 * bits (RFCE and TFCE) will be set according to their negotiated value.
843 */
844 hw_dbg(hw, "Auto-negotiation enabled\n");
845
846 ew32(CTRL, ctrl);
847 e1e_flush();
848 msleep(1);
849
850 /*
851 * For these adapters, the SW definable pin 1 is set when the optics
852 * detect a signal. If we have a signal, then poll for a "Link-Up"
853 * indication.
854 */
855 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
856 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
857 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
858 } else {
859 hw_dbg(hw, "No signal detected\n");
860 }
861
862 return 0;
863 }
864
865 /**
866 * e1000e_config_collision_dist - Configure collision distance
867 * @hw: pointer to the HW structure
868 *
869 * Configures the collision distance to the default value and is used
870 * during link setup. Currently no func pointer exists and all
871 * implementations are handled in the generic version of this function.
872 **/
873 void e1000e_config_collision_dist(struct e1000_hw *hw)
874 {
875 u32 tctl;
876
877 tctl = er32(TCTL);
878
879 tctl &= ~E1000_TCTL_COLD;
880 tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
881
882 ew32(TCTL, tctl);
883 e1e_flush();
884 }
885
886 /**
887 * e1000e_set_fc_watermarks - Set flow control high/low watermarks
888 * @hw: pointer to the HW structure
889 *
890 * Sets the flow control high/low threshold (watermark) registers. If
891 * flow control XON frame transmission is enabled, then set XON frame
892 * transmission as well.
893 **/
894 s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
895 {
896 u32 fcrtl = 0, fcrth = 0;
897
898 /*
899 * Set the flow control receive threshold registers. Normally,
900 * these registers will be set to a default threshold that may be
901 * adjusted later by the driver's runtime code. However, if the
902 * ability to transmit pause frames is not enabled, then these
903 * registers will be set to 0.
904 */
905 if (hw->fc.current_mode & e1000_fc_tx_pause) {
906 /*
907 * We need to set up the Receive Threshold high and low water
908 * marks as well as (optionally) enabling the transmission of
909 * XON frames.
910 */
911 fcrtl = hw->fc.low_water;
912 fcrtl |= E1000_FCRTL_XONE;
913 fcrth = hw->fc.high_water;
914 }
915 ew32(FCRTL, fcrtl);
916 ew32(FCRTH, fcrth);
917
918 return 0;
919 }
920
921 /**
922 * e1000e_force_mac_fc - Force the MAC's flow control settings
923 * @hw: pointer to the HW structure
924 *
925 * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
926 * device control register to reflect the adapter settings. TFCE and RFCE
927 * need to be explicitly set by software when a copper PHY is used because
928 * autonegotiation is managed by the PHY rather than the MAC. Software must
929 * also configure these bits when link is forced on a fiber connection.
930 **/
931 s32 e1000e_force_mac_fc(struct e1000_hw *hw)
932 {
933 u32 ctrl;
934
935 ctrl = er32(CTRL);
936
937 /*
938 * Because we didn't get link via the internal auto-negotiation
939 * mechanism (we either forced link or we got link via PHY
940 	 * auto-neg), we have to manually enable/disable transmit and
941 * receive flow control.
942 *
943 * The "Case" statement below enables/disable flow control
944 * according to the "hw->fc.current_mode" parameter.
945 *
946 * The possible values of the "fc" parameter are:
947 * 0: Flow control is completely disabled
948 * 1: Rx flow control is enabled (we can receive pause
949 * frames but not send pause frames).
950 * 2: Tx flow control is enabled (we can send pause frames
951 	 *          but we do not receive pause frames).
952 * 3: Both Rx and Tx flow control (symmetric) is enabled.
953 * other: No other values should be possible at this point.
954 */
955 hw_dbg(hw, "hw->fc.current_mode = %u\n", hw->fc.current_mode);
956
957 switch (hw->fc.current_mode) {
958 case e1000_fc_none:
959 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
960 break;
961 case e1000_fc_rx_pause:
962 ctrl &= (~E1000_CTRL_TFCE);
963 ctrl |= E1000_CTRL_RFCE;
964 break;
965 case e1000_fc_tx_pause:
966 ctrl &= (~E1000_CTRL_RFCE);
967 ctrl |= E1000_CTRL_TFCE;
968 break;
969 case e1000_fc_full:
970 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
971 break;
972 default:
973 hw_dbg(hw, "Flow control param set incorrectly\n");
974 return -E1000_ERR_CONFIG;
975 }
976
977 ew32(CTRL, ctrl);
978
979 return 0;
980 }
981
982 /**
983 * e1000e_config_fc_after_link_up - Configures flow control after link
984 * @hw: pointer to the HW structure
985 *
986 * Checks the status of auto-negotiation after link up to ensure that the
987 * speed and duplex were not forced. If the link needed to be forced, then
988 * flow control needs to be forced also. If auto-negotiation is enabled
989 * and did not fail, then we configure flow control based on our link
990 * partner.
991 **/
992 s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
993 {
994 struct e1000_mac_info *mac = &hw->mac;
995 s32 ret_val = 0;
996 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
997 u16 speed, duplex;
998
999 /*
1000 * Check for the case where we have fiber media and auto-neg failed
1001 * so we had to force link. In this case, we need to force the
1002 * configuration of the MAC to match the "fc" parameter.
1003 */
1004 if (mac->autoneg_failed) {
1005 if (hw->phy.media_type == e1000_media_type_fiber ||
1006 hw->phy.media_type == e1000_media_type_internal_serdes)
1007 ret_val = e1000e_force_mac_fc(hw);
1008 } else {
1009 if (hw->phy.media_type == e1000_media_type_copper)
1010 ret_val = e1000e_force_mac_fc(hw);
1011 }
1012
1013 if (ret_val) {
1014 hw_dbg(hw, "Error forcing flow control settings\n");
1015 return ret_val;
1016 }
1017
1018 /*
1019 * Check for the case where we have copper media and auto-neg is
1020 * enabled. In this case, we need to check and see if Auto-Neg
1021 	 * has completed, and if so, how the PHY and link partner have
1022 * flow control configured.
1023 */
1024 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
1025 /*
1026 * Read the MII Status Register and check to see if AutoNeg
1027 * has completed. We read this twice because this reg has
1028 * some "sticky" (latched) bits.
1029 */
1030 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1031 if (ret_val)
1032 return ret_val;
1033 ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
1034 if (ret_val)
1035 return ret_val;
1036
1037 if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
1038 hw_dbg(hw, "Copper PHY and Auto Neg "
1039 "has not completed.\n");
1040 return ret_val;
1041 }
1042
1043 /*
1044 * The AutoNeg process has completed, so we now need to
1045 * read both the Auto Negotiation Advertisement
1046 * Register (Address 4) and the Auto_Negotiation Base
1047 * Page Ability Register (Address 5) to determine how
1048 * flow control was negotiated.
1049 */
1050 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1051 if (ret_val)
1052 return ret_val;
1053 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1054 if (ret_val)
1055 return ret_val;
1056
1057 /*
1058 * Two bits in the Auto Negotiation Advertisement Register
1059 * (Address 4) and two bits in the Auto Negotiation Base
1060 * Page Ability Register (Address 5) determine flow control
1061 * for both the PHY and the link partner. The following
1062 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
1063 * 1999, describes these PAUSE resolution bits and how flow
1064 * control is determined based upon these settings.
1065 * NOTE: DC = Don't Care
1066 *
1067 * LOCAL DEVICE | LINK PARTNER
1068 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
1069 *-------|---------|-------|---------|--------------------
1070 * 0 | 0 | DC | DC | e1000_fc_none
1071 * 0 | 1 | 0 | DC | e1000_fc_none
1072 * 0 | 1 | 1 | 0 | e1000_fc_none
1073 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1074 * 1 | 0 | 0 | DC | e1000_fc_none
1075 * 1 | DC | 1 | DC | e1000_fc_full
1076 * 1 | 1 | 0 | 0 | e1000_fc_none
1077 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1078 *
1079 *
1080 * Are both PAUSE bits set to 1? If so, this implies
1081 * Symmetric Flow Control is enabled at both ends. The
1082 * ASM_DIR bits are irrelevant per the spec.
1083 *
1084 * For Symmetric Flow Control:
1085 *
1086 * LOCAL DEVICE | LINK PARTNER
1087 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1088 *-------|---------|-------|---------|--------------------
1089 * 1 | DC | 1 | DC | E1000_fc_full
1090 *
1091 */
1092 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1093 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1094 /*
1095 			 * Now we need to check if the user selected reception of
1096 			 * PAUSE frames only. In this case, we had to advertise
1097 * FULL flow control because we could not advertise Rx
1098 * ONLY. Hence, we must now check to see if we need to
1099 * turn OFF the TRANSMISSION of PAUSE frames.
1100 */
1101 if (hw->fc.requested_mode == e1000_fc_full) {
1102 hw->fc.current_mode = e1000_fc_full;
1103 hw_dbg(hw, "Flow Control = FULL.\r\n");
1104 } else {
1105 hw->fc.current_mode = e1000_fc_rx_pause;
1106 hw_dbg(hw, "Flow Control = "
1107 "RX PAUSE frames only.\r\n");
1108 }
1109 }
1110 /*
1111 * For receiving PAUSE frames ONLY.
1112 *
1113 * LOCAL DEVICE | LINK PARTNER
1114 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1115 *-------|---------|-------|---------|--------------------
1116 * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
1117 *
1118 */
1119 else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1120 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1121 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1122 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1123 hw->fc.current_mode = e1000_fc_tx_pause;
1124 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
1125 }
1126 /*
1127 * For transmitting PAUSE frames ONLY.
1128 *
1129 * LOCAL DEVICE | LINK PARTNER
1130 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
1131 *-------|---------|-------|---------|--------------------
1132 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1133 *
1134 */
1135 else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1136 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1137 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1138 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1139 hw->fc.current_mode = e1000_fc_rx_pause;
1140 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
1141 } else {
1142 /*
1143 * Per the IEEE spec, at this point flow control
1144 * should be disabled.
1145 */
1146 hw->fc.current_mode = e1000_fc_none;
1147 hw_dbg(hw, "Flow Control = NONE.\r\n");
1148 }
1149
1150 /*
1151 * Now we need to do one last check... If we auto-
1152 * negotiated to HALF DUPLEX, flow control should not be
1153 * enabled per IEEE 802.3 spec.
1154 */
1155 ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
1156 if (ret_val) {
1157 hw_dbg(hw, "Error getting link speed and duplex\n");
1158 return ret_val;
1159 }
1160
1161 if (duplex == HALF_DUPLEX)
1162 hw->fc.current_mode = e1000_fc_none;
1163
1164 /*
1165 * Now we call a subroutine to actually force the MAC
1166 * controller to use the correct flow control settings.
1167 */
1168 ret_val = e1000e_force_mac_fc(hw);
1169 if (ret_val) {
1170 hw_dbg(hw, "Error forcing flow control settings\n");
1171 return ret_val;
1172 }
1173 }
1174
1175 return 0;
1176 }
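/*
 * Illustrative sketch (not part of the driver): the PAUSE/ASM_DIR
 * resolution implemented above reduces to roughly
 *
 *	if (local PAUSE && partner PAUSE)
 *		fc = (requested == full) ? full : rx_pause;
 *	else if (!local PAUSE && local ASM_DIR &&
 *		 partner PAUSE && partner ASM_DIR)
 *		fc = tx_pause;
 *	else if (local PAUSE && local ASM_DIR &&
 *		 !partner PAUSE && partner ASM_DIR)
 *		fc = rx_pause;
 *	else
 *		fc = none;
 *
 * followed by the half-duplex override and e1000e_force_mac_fc().
 */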
1177
1178 /**
1179 * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
1180 * @hw: pointer to the HW structure
1181 * @speed: stores the current speed
1182 * @duplex: stores the current duplex
1183 *
1184 * Read the status register for the current speed/duplex and store the current
1185 * speed and duplex for copper connections.
1186 **/
1187 s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1188 {
1189 u32 status;
1190
1191 status = er32(STATUS);
1192 if (status & E1000_STATUS_SPEED_1000) {
1193 *speed = SPEED_1000;
1194 hw_dbg(hw, "1000 Mbs, ");
1195 } else if (status & E1000_STATUS_SPEED_100) {
1196 *speed = SPEED_100;
1197 hw_dbg(hw, "100 Mbs, ");
1198 } else {
1199 *speed = SPEED_10;
1200 hw_dbg(hw, "10 Mbs, ");
1201 }
1202
1203 if (status & E1000_STATUS_FD) {
1204 *duplex = FULL_DUPLEX;
1205 hw_dbg(hw, "Full Duplex\n");
1206 } else {
1207 *duplex = HALF_DUPLEX;
1208 hw_dbg(hw, "Half Duplex\n");
1209 }
1210
1211 return 0;
1212 }
1213
1214 /**
1215 * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
1216 * @hw: pointer to the HW structure
1217 * @speed: stores the current speed
1218 * @duplex: stores the current duplex
1219 *
1220 * Sets the speed and duplex to gigabit full duplex (the only possible option)
1221 * for fiber/serdes links.
1222 **/
1223 s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
1224 {
1225 *speed = SPEED_1000;
1226 *duplex = FULL_DUPLEX;
1227
1228 return 0;
1229 }
1230
1231 /**
1232 * e1000e_get_hw_semaphore - Acquire hardware semaphore
1233 * @hw: pointer to the HW structure
1234 *
1235 * Acquire the HW semaphore to access the PHY or NVM
1236 **/
1237 s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1238 {
1239 u32 swsm;
1240 s32 timeout = hw->nvm.word_size + 1;
1241 s32 i = 0;
1242
1243 /* Get the SW semaphore */
1244 while (i < timeout) {
1245 swsm = er32(SWSM);
1246 if (!(swsm & E1000_SWSM_SMBI))
1247 break;
1248
1249 udelay(50);
1250 i++;
1251 }
1252
1253 if (i == timeout) {
1254 hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
1255 return -E1000_ERR_NVM;
1256 }
1257
1258 /* Get the FW semaphore. */
1259 for (i = 0; i < timeout; i++) {
1260 swsm = er32(SWSM);
1261 ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
1262
1263 /* Semaphore acquired if bit latched */
1264 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1265 break;
1266
1267 udelay(50);
1268 }
1269
1270 if (i == timeout) {
1271 /* Release semaphores */
1272 e1000e_put_hw_semaphore(hw);
1273 hw_dbg(hw, "Driver can't access the NVM\n");
1274 return -E1000_ERR_NVM;
1275 }
1276
1277 return 0;
1278 }
1279
1280 /**
1281 * e1000e_put_hw_semaphore - Release hardware semaphore
1282 * @hw: pointer to the HW structure
1283 *
1284 * Release hardware semaphore used to access the PHY or NVM
1285 **/
1286 void e1000e_put_hw_semaphore(struct e1000_hw *hw)
1287 {
1288 u32 swsm;
1289
1290 swsm = er32(SWSM);
1291 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
1292 ew32(SWSM, swsm);
1293 }
1294
1295 /**
1296 * e1000e_get_auto_rd_done - Check for auto read completion
1297 * @hw: pointer to the HW structure
1298 *
1299 * Check EEPROM for Auto Read done bit.
1300 **/
1301 s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
1302 {
1303 s32 i = 0;
1304
1305 while (i < AUTO_READ_DONE_TIMEOUT) {
1306 if (er32(EECD) & E1000_EECD_AUTO_RD)
1307 break;
1308 msleep(1);
1309 i++;
1310 }
1311
1312 if (i == AUTO_READ_DONE_TIMEOUT) {
1313 hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
1314 return -E1000_ERR_RESET;
1315 }
1316
1317 return 0;
1318 }
1319
1320 /**
1321 * e1000e_valid_led_default - Verify a valid default LED config
1322 * @hw: pointer to the HW structure
1323 * @data: pointer to the NVM (EEPROM)
1324 *
1325 * Read the EEPROM for the current default LED configuration. If the
1326 * LED configuration is not valid, set to a valid LED configuration.
1327 **/
1328 s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1329 {
1330 s32 ret_val;
1331
1332 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1333 if (ret_val) {
1334 hw_dbg(hw, "NVM Read Error\n");
1335 return ret_val;
1336 }
1337
1338 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1339 *data = ID_LED_DEFAULT;
1340
1341 return 0;
1342 }
1343
1344 /**
1345  * e1000e_id_led_init - Initialize ID LED settings from the NVM
1346  * @hw: pointer to the HW structure
1347  * Derives mac->ledctl_mode1 and mac->ledctl_mode2 from the ID LED default word.
1348 **/
1349 s32 e1000e_id_led_init(struct e1000_hw *hw)
1350 {
1351 struct e1000_mac_info *mac = &hw->mac;
1352 s32 ret_val;
1353 const u32 ledctl_mask = 0x000000FF;
1354 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
1355 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
1356 u16 data, i, temp;
1357 const u16 led_mask = 0x0F;
1358
1359 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
1360 if (ret_val)
1361 return ret_val;
1362
1363 mac->ledctl_default = er32(LEDCTL);
1364 mac->ledctl_mode1 = mac->ledctl_default;
1365 mac->ledctl_mode2 = mac->ledctl_default;
1366
1367 for (i = 0; i < 4; i++) {
1368 temp = (data >> (i << 2)) & led_mask;
1369 switch (temp) {
1370 case ID_LED_ON1_DEF2:
1371 case ID_LED_ON1_ON2:
1372 case ID_LED_ON1_OFF2:
1373 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1374 mac->ledctl_mode1 |= ledctl_on << (i << 3);
1375 break;
1376 case ID_LED_OFF1_DEF2:
1377 case ID_LED_OFF1_ON2:
1378 case ID_LED_OFF1_OFF2:
1379 mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
1380 mac->ledctl_mode1 |= ledctl_off << (i << 3);
1381 break;
1382 default:
1383 /* Do nothing */
1384 break;
1385 }
1386 switch (temp) {
1387 case ID_LED_DEF1_ON2:
1388 case ID_LED_ON1_ON2:
1389 case ID_LED_OFF1_ON2:
1390 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1391 mac->ledctl_mode2 |= ledctl_on << (i << 3);
1392 break;
1393 case ID_LED_DEF1_OFF2:
1394 case ID_LED_ON1_OFF2:
1395 case ID_LED_OFF1_OFF2:
1396 mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
1397 mac->ledctl_mode2 |= ledctl_off << (i << 3);
1398 break;
1399 default:
1400 /* Do nothing */
1401 break;
1402 }
1403 }
1404
1405 return 0;
1406 }
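/*
 * Illustrative sketch (not part of the driver): each 4-bit field i of
 * the ID LED word configures byte i of the two LEDCTL images.  If, for
 * example, field 1 decodes to ID_LED_ON1_OFF2, the loops above set
 *
 *	ledctl_mode1: byte 1 = E1000_LEDCTL_MODE_LED_ON   (first switch)
 *	ledctl_mode2: byte 1 = E1000_LEDCTL_MODE_LED_OFF  (second switch)
 *
 * while fields that fall through to the default case leave both images
 * at the LEDCTL default value.
 */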
1407
1408 /**
1409 * e1000e_setup_led_generic - Configures SW controllable LED
1410 * @hw: pointer to the HW structure
1411 *
1412 * This prepares the SW controllable LED for use and saves the current state
1413 * of the LED so it can be later restored.
1414 **/
1415 s32 e1000e_setup_led_generic(struct e1000_hw *hw)
1416 {
1417 u32 ledctl;
1418
1419 if (hw->mac.ops.setup_led != e1000e_setup_led_generic) {
1420 return -E1000_ERR_CONFIG;
1421 }
1422
1423 if (hw->phy.media_type == e1000_media_type_fiber) {
1424 ledctl = er32(LEDCTL);
1425 hw->mac.ledctl_default = ledctl;
1426 /* Turn off LED0 */
1427 ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
1428 E1000_LEDCTL_LED0_BLINK |
1429 E1000_LEDCTL_LED0_MODE_MASK);
1430 ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
1431 E1000_LEDCTL_LED0_MODE_SHIFT);
1432 ew32(LEDCTL, ledctl);
1433 } else if (hw->phy.media_type == e1000_media_type_copper) {
1434 ew32(LEDCTL, hw->mac.ledctl_mode1);
1435 }
1436
1437 return 0;
1438 }
1439
1440 /**
1441 * e1000e_cleanup_led_generic - Set LED config to default operation
1442 * @hw: pointer to the HW structure
1443 *
1444 * Remove the current LED configuration and set the LED configuration
1445 * to the default value, saved from the EEPROM.
1446 **/
1447 s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
1448 {
1449 ew32(LEDCTL, hw->mac.ledctl_default);
1450 return 0;
1451 }
1452
1453 /**
1454 * e1000e_blink_led - Blink LED
1455 * @hw: pointer to the HW structure
1456 *
1457 * Blink the LEDs which are set to be on.
1458 **/
1459 s32 e1000e_blink_led(struct e1000_hw *hw)
1460 {
1461 u32 ledctl_blink = 0;
1462 u32 i;
1463
1464 if (hw->phy.media_type == e1000_media_type_fiber) {
1465 /* always blink LED0 for PCI-E fiber */
1466 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1467 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1468 } else {
1469 /*
1470 * set the blink bit for each LED that's "on" (0x0E)
1471 * in ledctl_mode2
1472 */
1473 ledctl_blink = hw->mac.ledctl_mode2;
1474 for (i = 0; i < 4; i++)
1475 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
1476 E1000_LEDCTL_MODE_LED_ON)
1477 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
1478 (i * 8));
1479 }
1480
1481 ew32(LEDCTL, ledctl_blink);
1482
1483 return 0;
1484 }
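/*
 * Illustrative sketch (not part of the driver): in the copper case, if
 * byte 2 of ledctl_mode2 equals E1000_LEDCTL_MODE_LED_ON (0x0E, per the
 * comment above), the loop sets the corresponding blink request with
 *
 *	ledctl_blink |= E1000_LEDCTL_LED0_BLINK << (2 * 8);
 */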
1485
1486 /**
1487 * e1000e_led_on_generic - Turn LED on
1488 * @hw: pointer to the HW structure
1489 *
1490 * Turn LED on.
1491 **/
1492 s32 e1000e_led_on_generic(struct e1000_hw *hw)
1493 {
1494 u32 ctrl;
1495
1496 switch (hw->phy.media_type) {
1497 case e1000_media_type_fiber:
1498 ctrl = er32(CTRL);
1499 ctrl &= ~E1000_CTRL_SWDPIN0;
1500 ctrl |= E1000_CTRL_SWDPIO0;
1501 ew32(CTRL, ctrl);
1502 break;
1503 case e1000_media_type_copper:
1504 ew32(LEDCTL, hw->mac.ledctl_mode2);
1505 break;
1506 default:
1507 break;
1508 }
1509
1510 return 0;
1511 }
1512
1513 /**
1514 * e1000e_led_off_generic - Turn LED off
1515 * @hw: pointer to the HW structure
1516 *
1517 * Turn LED off.
1518 **/
1519 s32 e1000e_led_off_generic(struct e1000_hw *hw)
1520 {
1521 u32 ctrl;
1522
1523 switch (hw->phy.media_type) {
1524 case e1000_media_type_fiber:
1525 ctrl = er32(CTRL);
1526 ctrl |= E1000_CTRL_SWDPIN0;
1527 ctrl |= E1000_CTRL_SWDPIO0;
1528 ew32(CTRL, ctrl);
1529 break;
1530 case e1000_media_type_copper:
1531 ew32(LEDCTL, hw->mac.ledctl_mode1);
1532 break;
1533 default:
1534 break;
1535 }
1536
1537 return 0;
1538 }
1539
1540 /**
1541 * e1000e_set_pcie_no_snoop - Set PCI-express capabilities
1542 * @hw: pointer to the HW structure
1543 * @no_snoop: bitmap of snoop events
1544 *
1545 * Set the PCI-express register to snoop for events enabled in 'no_snoop'.
1546 **/
1547 void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
1548 {
1549 u32 gcr;
1550
1551 if (no_snoop) {
1552 gcr = er32(GCR);
1553 gcr &= ~(PCIE_NO_SNOOP_ALL);
1554 gcr |= no_snoop;
1555 ew32(GCR, gcr);
1556 }
1557 }
1558
1559 /**
1560 * e1000e_disable_pcie_master - Disables PCI-express master access
1561 * @hw: pointer to the HW structure
1562 *
1563 * Returns 0 if successful, else returns -10
1564 * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
1565 * the master requests to be disabled.
1566 *
1567 * Disables PCI-Express master access and verifies there are no pending
1568 * requests.
1569 **/
1570 s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1571 {
1572 u32 ctrl;
1573 s32 timeout = MASTER_DISABLE_TIMEOUT;
1574
1575 ctrl = er32(CTRL);
1576 ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
1577 ew32(CTRL, ctrl);
1578
1579 while (timeout) {
1580 if (!(er32(STATUS) &
1581 E1000_STATUS_GIO_MASTER_ENABLE))
1582 break;
1583 udelay(100);
1584 timeout--;
1585 }
1586
1587 if (!timeout) {
1588 hw_dbg(hw, "Master requests are pending.\n");
1589 return -E1000_ERR_MASTER_REQUESTS_PENDING;
1590 }
1591
1592 return 0;
1593 }
1594
1595 /**
1596 * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
1597 * @hw: pointer to the HW structure
1598 *
1599 * Reset the Adaptive Interframe Spacing throttle to default values.
1600 **/
1601 void e1000e_reset_adaptive(struct e1000_hw *hw)
1602 {
1603 struct e1000_mac_info *mac = &hw->mac;
1604
1605 mac->current_ifs_val = 0;
1606 mac->ifs_min_val = IFS_MIN;
1607 mac->ifs_max_val = IFS_MAX;
1608 mac->ifs_step_size = IFS_STEP;
1609 mac->ifs_ratio = IFS_RATIO;
1610
1611 mac->in_ifs_mode = 0;
1612 ew32(AIT, 0);
1613 }
1614
1615 /**
1616 * e1000e_update_adaptive - Update Adaptive Interframe Spacing
1617 * @hw: pointer to the HW structure
1618 *
1619 * Update the Adaptive Interframe Spacing Throttle value based on the
1620 * time between transmitted packets and time between collisions.
1621 **/
1622 void e1000e_update_adaptive(struct e1000_hw *hw)
1623 {
1624 struct e1000_mac_info *mac = &hw->mac;
1625
1626 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
1627 if (mac->tx_packet_delta > MIN_NUM_XMITS) {
1628 mac->in_ifs_mode = 1;
1629 if (mac->current_ifs_val < mac->ifs_max_val) {
1630 if (!mac->current_ifs_val)
1631 mac->current_ifs_val = mac->ifs_min_val;
1632 else
1633 mac->current_ifs_val +=
1634 mac->ifs_step_size;
1635 ew32(AIT, mac->current_ifs_val);
1636 }
1637 }
1638 } else {
1639 if (mac->in_ifs_mode &&
1640 (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
1641 mac->current_ifs_val = 0;
1642 mac->in_ifs_mode = 0;
1643 ew32(AIT, 0);
1644 }
1645 }
1646 }
1647
1648 /**
1649 * e1000_raise_eec_clk - Raise EEPROM clock
1650 * @hw: pointer to the HW structure
1651 * @eecd: pointer to the EEPROM
1652 *
1653 * Enable/Raise the EEPROM clock bit.
1654 **/
1655 static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
1656 {
1657 *eecd = *eecd | E1000_EECD_SK;
1658 ew32(EECD, *eecd);
1659 e1e_flush();
1660 udelay(hw->nvm.delay_usec);
1661 }
1662
1663 /**
1664 * e1000_lower_eec_clk - Lower EEPROM clock
1665 * @hw: pointer to the HW structure
1666 * @eecd: pointer to the EEPROM
1667 *
1668 * Clear/Lower the EEPROM clock bit.
1669 **/
1670 static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
1671 {
1672 *eecd = *eecd & ~E1000_EECD_SK;
1673 ew32(EECD, *eecd);
1674 e1e_flush();
1675 udelay(hw->nvm.delay_usec);
1676 }
1677
1678 /**
1679  * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
1680 * @hw: pointer to the HW structure
1681 * @data: data to send to the EEPROM
1682 * @count: number of bits to shift out
1683 *
1684 * We need to shift 'count' bits out to the EEPROM. So, the value in the
1685 * "data" parameter will be shifted out to the EEPROM one bit at a time.
1686 * In order to do this, "data" must be broken down into bits.
1687 **/
1688 static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
1689 {
1690 struct e1000_nvm_info *nvm = &hw->nvm;
1691 u32 eecd = er32(EECD);
1692 u32 mask;
1693
1694 mask = 0x01 << (count - 1);
1695 if (nvm->type == e1000_nvm_eeprom_spi)
1696 eecd |= E1000_EECD_DO;
1697
1698 do {
1699 eecd &= ~E1000_EECD_DI;
1700
1701 if (data & mask)
1702 eecd |= E1000_EECD_DI;
1703
1704 ew32(EECD, eecd);
1705 e1e_flush();
1706
1707 udelay(nvm->delay_usec);
1708
1709 e1000_raise_eec_clk(hw, &eecd);
1710 e1000_lower_eec_clk(hw, &eecd);
1711
1712 mask >>= 1;
1713 } while (mask);
1714
1715 eecd &= ~E1000_EECD_DI;
1716 ew32(EECD, eecd);
1717 }
1718
1719 /**
1720 * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
1721 * @hw: pointer to the HW structure
1722 * @count: number of bits to shift in
1723 *
1724 * In order to read a register from the EEPROM, we need to shift 'count' bits
1725 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
1726 * the EEPROM (setting the SK bit), and then reading the value of the data out
1727 * "DO" bit. During this "shifting in" process the data in "DI" bit should
1728 * always be clear.
1729 **/
1730 static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
1731 {
1732 u32 eecd;
1733 u32 i;
1734 u16 data;
1735
1736 eecd = er32(EECD);
1737
1738 eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
1739 data = 0;
1740
1741 for (i = 0; i < count; i++) {
1742 data <<= 1;
1743 e1000_raise_eec_clk(hw, &eecd);
1744
1745 eecd = er32(EECD);
1746
1747 eecd &= ~E1000_EECD_DI;
1748 if (eecd & E1000_EECD_DO)
1749 data |= 1;
1750
1751 e1000_lower_eec_clk(hw, &eecd);
1752 }
1753
1754 return data;
1755 }
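/*
 * Illustrative sketch (not part of this excerpt): a SPI word read built
 * on the two helpers above would, after e1000_ready_nvm_eeprom() and
 * e1000_standby_nvm(), look roughly like
 *
 *	e1000_shift_out_eec_bits(hw, NVM_READ_OPCODE_SPI,
 *				 hw->nvm.opcode_bits);
 *	e1000_shift_out_eec_bits(hw, (u16)(offset << 1),
 *				 hw->nvm.address_bits);
 *	word_in = e1000_shift_in_eec_bits(hw, 16);
 *
 * "offset" and "word_in" are placeholder names; byte-swapping of the
 * returned word and per-device addressing details are handled by the
 * full NVM read path and are not shown here.
 */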
1756
1757 /**
1758 * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
1759 * @hw: pointer to the HW structure
1760 * @ee_reg: EEPROM flag for polling
1761 *
1762 * Polls the EEPROM status bit for either read or write completion based
1763 * upon the value of 'ee_reg'.
1764 **/
1765 s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
1766 {
1767 u32 attempts = 100000;
1768 u32 i, reg = 0;
1769
1770 for (i = 0; i < attempts; i++) {
1771 if (ee_reg == E1000_NVM_POLL_READ)
1772 reg = er32(EERD);
1773 else
1774 reg = er32(EEWR);
1775
1776 if (reg & E1000_NVM_RW_REG_DONE)
1777 return 0;
1778
1779 udelay(5);
1780 }
1781
1782 return -E1000_ERR_NVM;
1783 }
1784
1785 /**
1786 * e1000e_acquire_nvm - Generic request for access to EEPROM
1787 * @hw: pointer to the HW structure
1788 *
1789 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1790 * Return successful if access grant bit set, else clear the request for
1791 * EEPROM access and return -E1000_ERR_NVM (-1).
1792 **/
1793 s32 e1000e_acquire_nvm(struct e1000_hw *hw)
1794 {
1795 u32 eecd = er32(EECD);
1796 s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
1797
1798 ew32(EECD, eecd | E1000_EECD_REQ);
1799 eecd = er32(EECD);
1800
1801 while (timeout) {
1802 if (eecd & E1000_EECD_GNT)
1803 break;
1804 udelay(5);
1805 eecd = er32(EECD);
1806 timeout--;
1807 }
1808
1809 if (!timeout) {
1810 eecd &= ~E1000_EECD_REQ;
1811 ew32(EECD, eecd);
1812 hw_dbg(hw, "Could not acquire NVM grant\n");
1813 return -E1000_ERR_NVM;
1814 }
1815
1816 return 0;
1817 }
1818
1819 /**
1820 * e1000_standby_nvm - Return EEPROM to standby state
1821 * @hw: pointer to the HW structure
1822 *
1823 * Return the EEPROM to a standby state.
1824 **/
1825 static void e1000_standby_nvm(struct e1000_hw *hw)
1826 {
1827 struct e1000_nvm_info *nvm = &hw->nvm;
1828 u32 eecd = er32(EECD);
1829
1830 if (nvm->type == e1000_nvm_eeprom_spi) {
1831 /* Toggle CS to flush commands */
1832 eecd |= E1000_EECD_CS;
1833 ew32(EECD, eecd);
1834 e1e_flush();
1835 udelay(nvm->delay_usec);
1836 eecd &= ~E1000_EECD_CS;
1837 ew32(EECD, eecd);
1838 e1e_flush();
1839 udelay(nvm->delay_usec);
1840 }
1841 }
1842
1843 /**
1844 * e1000_stop_nvm - Terminate EEPROM command
1845 * @hw: pointer to the HW structure
1846 *
1847 * Terminates the current command by inverting the EEPROM's chip select pin.
1848 **/
1849 static void e1000_stop_nvm(struct e1000_hw *hw)
1850 {
1851 u32 eecd;
1852
1853 eecd = er32(EECD);
1854 if (hw->nvm.type == e1000_nvm_eeprom_spi) {
1855 /* Pull CS high */
1856 eecd |= E1000_EECD_CS;
1857 e1000_lower_eec_clk(hw, &eecd);
1858 }
1859 }
1860
1861 /**
1862 * e1000e_release_nvm - Release exclusive access to EEPROM
1863 * @hw: pointer to the HW structure
1864 *
1865 * Stop any current commands to the EEPROM and clear the EEPROM request bit.
1866 **/
1867 void e1000e_release_nvm(struct e1000_hw *hw)
1868 {
1869 u32 eecd;
1870
1871 e1000_stop_nvm(hw);
1872
1873 eecd = er32(EECD);
1874 eecd &= ~E1000_EECD_REQ;
1875 ew32(EECD, eecd);
1876 }
1877
1878 /**
1879 * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
1880 * @hw: pointer to the HW structure
1881 *
1882  * Sets up the EEPROM for reading and writing.
1883 **/
1884 static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1885 {
1886 struct e1000_nvm_info *nvm = &hw->nvm;
1887 u32 eecd = er32(EECD);
1888 u16 timeout = 0;
1889 u8 spi_stat_reg;
1890
1891 if (nvm->type == e1000_nvm_eeprom_spi) {
1892 /* Clear SK and CS */
1893 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
1894 ew32(EECD, eecd);
1895 udelay(1);
1896 timeout = NVM_MAX_RETRY_SPI;
1897
1898 /*
1899 * Read "Status Register" repeatedly until the LSB is cleared.
1900 * The EEPROM will signal that the command has been completed
1901 * by clearing bit 0 of the internal status register. If it's
1902 * not cleared within 'timeout', then error out.
1903 */
1904 while (timeout) {
1905 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1906 hw->nvm.opcode_bits);
1907 spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
1908 if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
1909 break;
1910
1911 udelay(5);
1912 e1000_standby_nvm(hw);
1913 timeout--;
1914 }
1915
1916 if (!timeout) {
1917 hw_dbg(hw, "SPI NVM Status error\n");
1918 return -E1000_ERR_NVM;
1919 }
1920 }
1921
1922 return 0;
1923 }
1924
1925 /**
1926 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1927 * @hw: pointer to the HW structure
1928 * @offset: offset of word in the EEPROM to read
1929 * @words: number of words to read
1930 * @data: word(s) read from the EEPROM
1931 *
1932 * Reads 16 bit word(s) from the EEPROM using the EERD register.
1933 **/
1934 s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1935 {
1936 struct e1000_nvm_info *nvm = &hw->nvm;
1937 u32 i, eerd = 0;
1938 s32 ret_val = 0;
1939
1940 /*
1941 * A check for invalid values: offset too large, too many words,
1942 * too many words for the offset, and not enough words.
1943 */
1944 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1945 (words == 0)) {
1946 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1947 return -E1000_ERR_NVM;
1948 }
1949
1950 for (i = 0; i < words; i++) {
1951 eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
1952 E1000_NVM_RW_REG_START;
1953
1954 ew32(EERD, eerd);
1955 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
1956 if (ret_val)
1957 break;
1958
1959 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
1960 }
1961
1962 return ret_val;
1963 }
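
As a usage sketch (hypothetical caller, not code from this driver), the three MAC-address words at the start of the EEPROM could be fetched in one call:

/*
 * Hypothetical caller: pull NVM words 0-2 in one request.  The routine
 * above issues one EERD transaction per word and stops at the first
 * word that never reports E1000_NVM_RW_REG_DONE.
 */
static s32 example_read_mac_words(struct e1000_hw *hw, u16 mac_words[3])
{
	return e1000e_read_nvm_eerd(hw, 0, 3, mac_words);
}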
1964
1965 /**
1966 * e1000e_write_nvm_spi - Write to EEPROM using SPI
1967 * @hw: pointer to the HW structure
1968 * @offset: offset within the EEPROM to be written to
1969 * @words: number of words to write
1970 * @data: 16 bit word(s) to be written to the EEPROM
1971 *
1972 * Writes data to EEPROM at offset using SPI interface.
1973 *
1974 * If e1000e_update_nvm_checksum is not called after this function, the
1975 * EEPROM will most likely contain an invalid checksum.
1976 **/
1977 s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1978 {
1979 struct e1000_nvm_info *nvm = &hw->nvm;
1980 s32 ret_val;
1981 u16 widx = 0;
1982
1983 /*
1984 * A check for invalid values: offset too large, too many words,
1985 * and not enough words.
1986 */
1987 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1988 (words == 0)) {
1989 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1990 return -E1000_ERR_NVM;
1991 }
1992
1993 ret_val = nvm->ops.acquire_nvm(hw);
1994 if (ret_val)
1995 return ret_val;
1996
1997 msleep(10);
1998
1999 while (widx < words) {
2000 u8 write_opcode = NVM_WRITE_OPCODE_SPI;
2001
2002 ret_val = e1000_ready_nvm_eeprom(hw);
2003 if (ret_val) {
2004 nvm->ops.release_nvm(hw);
2005 return ret_val;
2006 }
2007
2008 e1000_standby_nvm(hw);
2009
2010 /* Send the WRITE ENABLE command (8 bit opcode) */
2011 e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
2012 nvm->opcode_bits);
2013
2014 e1000_standby_nvm(hw);
2015
2016 /*
2017 * Some SPI eeproms use the 8th address bit embedded in the
2018 * opcode
2019 */
2020 if ((nvm->address_bits == 8) && (offset >= 128))
2021 write_opcode |= NVM_A8_OPCODE_SPI;
2022
2023 /* Send the Write command (8-bit opcode + addr) */
2024 e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
2025 e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
2026 nvm->address_bits);
2027
2028 /* Loop to allow for up to a whole-page write of the EEPROM */
2029 while (widx < words) {
2030 u16 word_out = data[widx];
2031 word_out = (word_out >> 8) | (word_out << 8);
2032 e1000_shift_out_eec_bits(hw, word_out, 16);
2033 widx++;
2034
2035 if ((((offset + widx) * 2) % nvm->page_size) == 0) {
2036 e1000_standby_nvm(hw);
2037 break;
2038 }
2039 }
2040 }
2041
2042 msleep(10);
2043 nvm->ops.release_nvm(hw);
2044 return 0;
2045 }
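
Because the kerneldoc above warns that the image checksum goes stale, a write is normally followed by a checksum update. A minimal sketch with a hypothetical helper name:

/*
 * Illustrative only: write one word over SPI, then recompute and store
 * the checksum word so the image stays valid.
 */
static s32 example_write_word(struct e1000_hw *hw, u16 offset, u16 value)
{
	s32 ret_val = e1000e_write_nvm_spi(hw, offset, 1, &value);

	if (ret_val)
		return ret_val;

	return e1000e_update_nvm_checksum_generic(hw);
}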
2046
2047 /**
2048 * e1000e_read_mac_addr - Read device MAC address
2049 * @hw: pointer to the HW structure
2050 *
2051 * Reads the device MAC address from the EEPROM and stores the value.
2052 * Since devices with two ports use the same EEPROM, we flip the least
2053 * significant bit of the MAC address for the second port.
2054 **/
2055 s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2056 {
2057 s32 ret_val;
2058 u16 offset, nvm_data, i;
2059 u16 mac_addr_offset = 0;
2060
2061 if (hw->mac.type == e1000_82571) {
2062 /* Check for an alternate MAC address. An alternate MAC
2063 * address can be set up by pre-boot software and must be
2064 * treated like a permanent address, overriding the
2065 * actual permanent MAC address. */
2066 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2067 &mac_addr_offset);
2068 if (ret_val) {
2069 hw_dbg(hw, "NVM Read Error\n");
2070 return ret_val;
2071 }
2072 if (mac_addr_offset == 0xFFFF)
2073 mac_addr_offset = 0;
2074
2075 if (mac_addr_offset) {
2076 if (hw->bus.func == E1000_FUNC_1)
2077 mac_addr_offset += ETH_ALEN/sizeof(u16);
2078
2079 /* make sure we have a valid mac address here
2080 * before using it */
2081 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2082 &nvm_data);
2083 if (ret_val) {
2084 hw_dbg(hw, "NVM Read Error\n");
2085 return ret_val;
2086 }
2087 if (nvm_data & 0x0001)
2088 mac_addr_offset = 0;
2089 }
2090
2091 if (mac_addr_offset)
2092 hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
2093 }
2094
2095 for (i = 0; i < ETH_ALEN; i += 2) {
2096 offset = mac_addr_offset + (i >> 1);
2097 ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
2098 if (ret_val) {
2099 hw_dbg(hw, "NVM Read Error\n");
2100 return ret_val;
2101 }
2102 hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
2103 hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
2104 }
2105
2106 /* Flip last bit of mac address if we're on second port */
2107 if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
2108 hw->mac.perm_addr[5] ^= 1;
2109
2110 for (i = 0; i < ETH_ALEN; i++)
2111 hw->mac.addr[i] = hw->mac.perm_addr[i];
2112
2113 return 0;
2114 }
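
The byte packing and the second-port adjustment above can be reproduced with a small standalone program (made-up NVM words, not real EEPROM contents):

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone illustration: each 16 bit NVM word holds two MAC bytes,
 * low byte first, and a second port without an alternate MAC address
 * gets its last byte XORed with 1 (0x66 -> 0x67 below).
 */
int main(void)
{
	uint16_t nvm_words[3] = { 0x2211, 0x4433, 0x6655 };
	uint8_t mac[6];
	int i, is_second_port = 1;

	for (i = 0; i < 6; i += 2) {
		mac[i] = (uint8_t)(nvm_words[i >> 1] & 0xFF);
		mac[i + 1] = (uint8_t)(nvm_words[i >> 1] >> 8);
	}
	if (is_second_port)
		mac[5] ^= 1;

	/* Prints 11:22:33:44:55:67 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}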
2115
2116 /**
2117 * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
2118 * @hw: pointer to the HW structure
2119 *
2120 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2121 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2122 **/
2123 s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
2124 {
2125 s32 ret_val;
2126 u16 checksum = 0;
2127 u16 i, nvm_data;
2128
2129 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
2130 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2131 if (ret_val) {
2132 hw_dbg(hw, "NVM Read Error\n");
2133 return ret_val;
2134 }
2135 checksum += nvm_data;
2136 }
2137
2138 if (checksum != (u16) NVM_SUM) {
2139 hw_dbg(hw, "NVM Checksum Invalid\n");
2140 return -E1000_ERR_NVM;
2141 }
2142
2143 return 0;
2144 }
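
A caller sketch (hypothetical; not code from this file) showing how the check is typically used to gate further setup:

/* Hypothetical caller: reject an image whose checked words do not sum to NVM_SUM. */
static s32 example_check_nvm_image(struct e1000_hw *hw)
{
	s32 ret_val = e1000e_validate_nvm_checksum_generic(hw);

	if (ret_val)
		hw_dbg(hw, "NVM image rejected: bad checksum\n");

	return ret_val;
}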
2145
2146 /**
2147 * e1000e_update_nvm_checksum_generic - Update EEPROM checksum
2148 * @hw: pointer to the HW structure
2149 *
2150 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2151 * up to the checksum. Then calculates the EEPROM checksum and writes the
2152 * value to the EEPROM.
2153 **/
2154 s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
2155 {
2156 s32 ret_val;
2157 u16 checksum = 0;
2158 u16 i, nvm_data;
2159
2160 for (i = 0; i < NVM_CHECKSUM_REG; i++) {
2161 ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
2162 if (ret_val) {
2163 hw_dbg(hw, "NVM Read Error while updating checksum.\n");
2164 return ret_val;
2165 }
2166 checksum += nvm_data;
2167 }
2168 checksum = (u16) NVM_SUM - checksum;
2169 ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
2170 if (ret_val)
2171 hw_dbg(hw, "NVM Write Error while updating checksum.\n");
2172
2173 return ret_val;
2174 }
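
The arithmetic behind the two checksum routines can be verified standalone. Assuming (made-up value) that the data words below the checksum word sum to 0x1234 modulo 2^16, the word written at NVM_CHECKSUM_REG must be 0xBABA - 0x1234 = 0xA886:

#include <stdint.h>
#include <stdio.h>

/* Standalone arithmetic check with a hypothetical data-word sum. */
int main(void)
{
	uint16_t data_sum = 0x1234;	/* assumed sum of the data words */
	uint16_t checksum = (uint16_t)(0xBABA - data_sum);

	printf("checksum word = 0x%04X\n", checksum);                 /* 0xA886 */
	printf("total = 0x%04X\n", (uint16_t)(data_sum + checksum));  /* 0xBABA */
	return 0;
}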
2175
2176 /**
2177 * e1000e_reload_nvm - Reloads EEPROM
2178 * @hw: pointer to the HW structure
2179 *
2180 * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
2181 * extended control register.
2182 **/
2183 void e1000e_reload_nvm(struct e1000_hw *hw)
2184 {
2185 u32 ctrl_ext;
2186
2187 udelay(10);
2188 ctrl_ext = er32(CTRL_EXT);
2189 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
2190 ew32(CTRL_EXT, ctrl_ext);
2191 e1e_flush();
2192 }
2193
2194 /**
2195 * e1000_calculate_checksum - Calculate checksum for buffer
2196 * @buffer: pointer to EEPROM
2197 * @length: size of EEPROM to calculate a checksum for
2198 *
2199 * Calculates the checksum over a buffer of the specified length and
2200 * returns the calculated checksum.
2201 **/
2202 static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
2203 {
2204 u32 i;
2205 u8 sum = 0;
2206
2207 if (!buffer)
2208 return 0;
2209
2210 for (i = 0; i < length; i++)
2211 sum += buffer[i];
2212
2213 return (u8) (0 - sum);
2214 }
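
A standalone illustration of the two's-complement byte checksum above, using made-up bytes: the buffer plus the returned checksum always sums to zero modulo 256.

#include <stdint.h>
#include <stdio.h>

/*
 * For bytes {0x10, 0x20, 0x30} the sum is 0x60, so the checksum is
 * 0x100 - 0x60 = 0xA0 and the verification sum wraps to 0x00.
 */
int main(void)
{
	uint8_t buf[] = { 0x10, 0x20, 0x30 };
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++)
		sum += buf[i];

	printf("checksum = 0x%02X, verify = 0x%02X\n",
	       (uint8_t)(0 - sum), (uint8_t)(sum + (uint8_t)(0 - sum)));
	return 0;
}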
2215
2216 /**
2217 * e1000_mng_enable_host_if - Checks host interface is enabled
2218 * @hw: pointer to the HW structure
2219 *
2220 * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND.
2221 *
2222 * This function checks whether the host interface is enabled for command
2223 * operation and also checks whether the previous command has completed.
2224 * It busy-waits while the previous command has not yet completed.
2225 **/
2226 static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2227 {
2228 u32 hicr;
2229 u8 i;
2230
2231 /* Check that the host interface is enabled. */
2232 hicr = er32(HICR);
2233 if ((hicr & E1000_HICR_EN) == 0) {
2234 hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
2235 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2236 }
2237 /* check the previous command is completed */
2238 for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
2239 hicr = er32(HICR);
2240 if (!(hicr & E1000_HICR_C))
2241 break;
2242 mdelay(1);
2243 }
2244
2245 if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
2246 hw_dbg(hw, "Previous command timed out.\n");
2247 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2248 }
2249
2250 return 0;
2251 }
2252
2253 /**
2254 * e1000e_check_mng_mode_generic - check management mode
2255 * @hw: pointer to the HW structure
2256 *
2257 * Reads the firmware semaphore register and returns true (>0) if
2258 * manageability is enabled, else false (0).
2259 **/
2260 bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
2261 {
2262 u32 fwsm = er32(FWSM);
2263
2264 return (fwsm & E1000_FWSM_MODE_MASK) ==
2265 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
2266 }
2267
2268 /**
2269 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
2270 * @hw: pointer to the HW structure
2271 *
2272 * Enables packet filtering on transmit packets if manageability is enabled
2273 * and host interface is enabled.
2274 **/
2275 bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2276 {
2277 struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
2278 u32 *buffer = (u32 *)&hw->mng_cookie;
2279 u32 offset;
2280 s32 ret_val, hdr_csum, csum;
2281 u8 i, len;
2282
2283 /* No manageability, no filtering */
2284 if (!e1000e_check_mng_mode(hw)) {
2285 hw->mac.tx_pkt_filtering = 0;
2286 return 0;
2287 }
2288
2289 /*
2290 * If we can't read from the host interface for whatever
2291 * reason, disable filtering.
2292 */
2293 ret_val = e1000_mng_enable_host_if(hw);
2294 if (ret_val != 0) {
2295 hw->mac.tx_pkt_filtering = 0;
2296 return ret_val;
2297 }
2298
2299 /* Read in the header. Length and offset are in dwords. */
2300 len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
2301 offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
2302 for (i = 0; i < len; i++)
2303 *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
2304 hdr_csum = hdr->checksum;
2305 hdr->checksum = 0;
2306 csum = e1000_calculate_checksum((u8 *)hdr,
2307 E1000_MNG_DHCP_COOKIE_LENGTH);
2308 /*
2309 * If either the checksums or signature don't match, then
2310 * the cookie area isn't considered valid, in which case we
2311 * take the safe route of assuming Tx filtering is enabled.
2312 */
2313 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
2314 hw->mac.tx_pkt_filtering = 1;
2315 return 1;
2316 }
2317
2318 /* Cookie area is valid, make the final check for filtering. */
2319 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
2320 hw->mac.tx_pkt_filtering = 0;
2321 return 0;
2322 }
2323
2324 hw->mac.tx_pkt_filtering = 1;
2325 return 1;
2326 }
2327
2328 /**
2329 * e1000_mng_write_cmd_header - Writes manageability command header
2330 * @hw: pointer to the HW structure
2331 * @hdr: pointer to the host interface command header
2332 *
2333 * Writes the command header after computing its checksum.
2334 **/
2335 static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
2336 struct e1000_host_mng_command_header *hdr)
2337 {
2338 u16 i, length = sizeof(struct e1000_host_mng_command_header);
2339
2340 /* Write the whole command header structure with new checksum. */
2341
2342 hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
2343
2344 length >>= 2;
2345 /* Write the relevant command block into the ram area. */
2346 for (i = 0; i < length; i++) {
2347 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
2348 *((u32 *) hdr + i));
2349 e1e_flush();
2350 }
2351
2352 return 0;
2353 }
2354
2355 /**
2356 * e1000_mng_host_if_write - Writes to the manageability host interface
2357 * @hw: pointer to the HW structure
2358 * @buffer: pointer to the host interface buffer
2359 * @length: size of the buffer
2360 * @offset: location in the buffer to write to
2361 * @sum: sum of the data (not checksum)
2362 *
2363 * This function writes the buffer content at the given offset within the
2364 * host interface. It handles alignment so the writes are done efficiently,
2365 * and it accumulates the sum of the written data in the *sum parameter.
2366 **/
2367 static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2368 u16 length, u16 offset, u8 *sum)
2369 {
2370 u8 *tmp;
2371 u8 *bufptr = buffer;
2372 u32 data = 0;
2373 u16 remaining, i, j, prev_bytes;
2374
2375 /* sum = only sum of the data and it is not checksum */
2376
2377 if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
2378 return -E1000_ERR_PARAM;
2379
2380 tmp = (u8 *)&data;
2381 prev_bytes = offset & 0x3;
2382 offset >>= 2;
2383
2384 if (prev_bytes) {
2385 data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
2386 for (j = prev_bytes; j < sizeof(u32); j++) {
2387 *(tmp + j) = *bufptr++;
2388 *sum += *(tmp + j);
2389 }
2390 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
2391 length -= j - prev_bytes;
2392 offset++;
2393 }
2394
2395 remaining = length & 0x3;
2396 length -= remaining;
2397
2398 /* Calculate length in DWORDs */
2399 length >>= 2;
2400
2401 /*
2402 * The device driver writes the relevant command block into the
2403 * ram area.
2404 */
2405 for (i = 0; i < length; i++) {
2406 for (j = 0; j < sizeof(u32); j++) {
2407 *(tmp + j) = *bufptr++;
2408 *sum += *(tmp + j);
2409 }
2410
2411 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2412 }
2413 if (remaining) {
2414 for (j = 0; j < sizeof(u32); j++) {
2415 if (j < remaining)
2416 *(tmp + j) = *bufptr++;
2417 else
2418 *(tmp + j) = 0;
2419
2420 *sum += *(tmp + j);
2421 }
2422 E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
2423 }
2424
2425 return 0;
2426 }
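
The alignment handling above splits a write into a partial leading dword, whole dwords, and a zero-padded trailing dword. A standalone walk-through with made-up sizes (offset 6, length 11) reproduces the bookkeeping:

#include <stdio.h>

/*
 * For offset = 6 and length = 11 the routine above folds 2 bytes into
 * dword 1, writes 2 full dwords at indices 2 and 3, and pads 1 tail
 * byte out to a full dword at index 4.
 */
int main(void)
{
	unsigned int offset = 6, length = 11;
	unsigned int prev_bytes = offset & 0x3;	/* bytes already used in the first dword */
	unsigned int dword_index = offset >> 2;	/* first dword touched */
	unsigned int remaining, full_dwords;

	if (prev_bytes) {
		length -= 4 - prev_bytes;	/* bytes folded into the leading dword */
		dword_index++;
	}
	remaining = length & 0x3;		/* tail bytes to zero-pad */
	full_dwords = (length - remaining) >> 2;

	printf("full dwords: %u starting at index %u, tail bytes: %u\n",
	       full_dwords, dword_index, remaining);	/* 2, 2, 1 */
	return 0;
}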
2427
2428 /**
2429 * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
2430 * @hw: pointer to the HW structure
2431 * @buffer: pointer to the host interface
2432 * @length: size of the buffer
2433 *
2434 * Writes the DHCP information to the host interface.
2435 **/
2436 s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2437 {
2438 struct e1000_host_mng_command_header hdr;
2439 s32 ret_val;
2440 u32 hicr;
2441
2442 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
2443 hdr.command_length = length;
2444 hdr.reserved1 = 0;
2445 hdr.reserved2 = 0;
2446 hdr.checksum = 0;
2447
2448 /* Enable the host interface */
2449 ret_val = e1000_mng_enable_host_if(hw);
2450 if (ret_val)
2451 return ret_val;
2452
2453 /* Populate the host interface with the contents of "buffer". */
2454 ret_val = e1000_mng_host_if_write(hw, buffer, length,
2455 sizeof(hdr), &(hdr.checksum));
2456 if (ret_val)
2457 return ret_val;
2458
2459 /* Write the manageability command header */
2460 ret_val = e1000_mng_write_cmd_header(hw, &hdr);
2461 if (ret_val)
2462 return ret_val;
2463
2464 /* Tell the ARC a new command is pending. */
2465 hicr = er32(HICR);
2466 ew32(HICR, hicr | E1000_HICR_C);
2467
2468 return 0;
2469 }
2470
2471 /**
2472 * e1000e_enable_mng_pass_thru - Enable processing of ARPs
2473 * @hw: pointer to the HW structure
2474 *
2475 * Verifies that the hardware needs to allow ARPs to be processed by the host.
2476 **/
2477 bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2478 {
2479 u32 manc;
2480 u32 fwsm, factps;
2481 bool ret_val = 0;
2482
2483 manc = er32(MANC);
2484
2485 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
2486 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
2487 return ret_val;
2488
2489 if (hw->mac.arc_subsystem_valid) {
2490 fwsm = er32(FWSM);
2491 factps = er32(FACTPS);
2492
2493 if (!(factps & E1000_FACTPS_MNGCG) &&
2494 ((fwsm & E1000_FWSM_MODE_MASK) ==
2495 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2496 ret_val = 1;
2497 return ret_val;
2498 }
2499 } else {
2500 if ((manc & E1000_MANC_SMBUS_EN) &&
2501 !(manc & E1000_MANC_ASF_EN)) {
2502 ret_val = 1;
2503 return ret_val;
2504 }
2505 }
2506
2507 return ret_val;
2508 }
2509
2510 s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2511 {
2512 s32 ret_val;
2513 u16 nvm_data;
2514
2515 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2516 if (ret_val) {
2517 hw_dbg(hw, "NVM Read Error\n");
2518 return ret_val;
2519 }
2520 *pba_num = (u32)(nvm_data << 16);
2521
2522 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2523 if (ret_val) {
2524 hw_dbg(hw, "NVM Read Error\n");
2525 return ret_val;
2526 }
2527 *pba_num |= nvm_data;
2528
2529 return 0;
2530 }
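
A standalone illustration of how the two PBA words combine (made-up word values; the first word lands in the upper 16 bits):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t word0 = 0x8086;	/* hypothetical word at NVM_PBA_OFFSET_0 */
	uint16_t word1 = 0x1234;	/* hypothetical word at NVM_PBA_OFFSET_1 */
	uint32_t pba_num = ((uint32_t)word0 << 16) | word1;

	printf("pba_num = 0x%08X\n", (unsigned int)pba_num);	/* 0x80861234 */
	return 0;
}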