/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define DRV_VERSION "2.1.0-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_q_vector *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */
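/* Illustrative usage (assumed, not from the original source): loading the
 * module with "modprobe igb max_vfs=2" asks the driver to enable two SR-IOV
 * virtual functions per physical port, subject to hardware support. */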

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};


static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp = 0;
	int shift = 0;

	/*
	 * The timestamp latches on lowest register read. For the 82580
	 * the lowest register is SYSTIMR instead of SYSTIML.  However we never
	 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
	 */
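	/*
	 * Illustrative layout (assumed from the reads below): the returned
	 * 64-bit count is SYSTIMH:SYSTIML shifted up by 'shift', with the
	 * 82580's sub-cycle SYSTIMR bits (>> 8) filling the low-order bits.
	 */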
	if (hw->mac.type == e1000_82580) {
		stamp = rd32(E1000_SYSTIMR) >> 8;
		shift = IGB_82580_TSYNC_SHIFT;
	}

	stamp |= (u64)rd32(E1000_SYSTIML) << shift;
	stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
	return stamp;
}

#ifdef DEBUG
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
	        "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
	        hw,
	        (long)nic.tv_sec, nic.tv_nsec,
	        (long)sys.tv_sec, sys.tv_nsec,
	        (long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
#endif

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
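/*
 * Worked example (derived from the macro above):
 *   Q_IDX_82576(0) -> 0, Q_IDX_82576(1) -> 8,
 *   Q_IDX_82576(2) -> 1, Q_IDX_82576(3) -> 9, ...
 * Even indices land in queues 0-7 and odd indices in queues 8-15, which
 * interleaves PF queues around the per-VF queue pairs described below.
 */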
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i].reg_idx = rbase_offset +
				                              Q_IDX_82576(i);
			for (; j < adapter->rss_queues; j++)
				adapter->tx_ring[j].reg_idx = rbase_offset +
				                              Q_IDX_82576(j);
		}
	case e1000_82575:
	case e1000_82580:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j].reg_idx = rbase_offset + j;
		break;
	}
}

static void igb_free_queues(struct igb_adapter *adapter)
{
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	adapter->tx_ring = NULL;
	adapter->rx_ring = NULL;

	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
	                           sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->pdev = adapter->pdev;
		ring->netdev = adapter->netdev;
		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	u32 msixbm = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;

	if (q_vector->rx_ring)
		rx_queue = q_vector->rx_ring->reg_idx;
	if (q_vector->tx_ring)
		tx_queue = q_vector->tx_ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
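		/* e.g. (illustrative): RX queue 0 and TX queue 0 on one vector
		 * yields msixbm = E1000_EICR_RX_QUEUE0 | E1000_EICR_TX_QUEUE0. */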
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		   Each queue has a single entry in the table to which we write
		   a vector number along with a "valid" bit.  Sadly, the layout
		   of the table is somewhat counterintuitive. */
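		/* Byte layout per IVAR entry (as implied by the writes below):
		 *   byte 0: RX queue N      byte 1: TX queue N
		 *   byte 2: RX queue N + 8  byte 3: TX queue N + 8 */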
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue < 8) {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			} else {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue & 0x7);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue < 8) {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			} else {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
		/* 82580 uses the same table-based approach as the 82576 but
		   has fewer entries; as a result we carry over for queues
		   greater than 4. */
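		/* e.g. (illustrative): queues 0 and 1 share IVAR entry 0 --
		 * queue 0 uses bytes 0 (RX) and 1 (TX), queue 1 uses bytes
		 * 2 (RX) and 3 (TX). */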
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1);
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			array_wr32(E1000_IVAR0, index, ivar);
		}
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		igb_assign_vector(q_vector, vector++);
		adapter->eims_enable_mask |= q_vector->eims_value;
	}

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx_ring && q_vector->tx_ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else if (q_vector->tx_ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx_ring->queue_index);
		else if (q_vector->rx_ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx_ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
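	/* e.g. (assumed configuration): 4 RSS queues without queue pairing
	 * needs 4 RX + 4 TX + 1 link = 9 MSI-X vectors. */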
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
	                                GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
	                      adapter->msix_entries,
	                      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
	return;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		q_vector->set_itr = 1;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}
	return 0;

err_out:
	while (v_idx) {
		v_idx--;
		q_vector = adapter->q_vector[v_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[v_idx] = NULL;
	}
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->rx_ring = &adapter->rx_ring[ring_idx];
	q_vector->rx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
                                      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector;

	q_vector = adapter->q_vector[v_idx];
	q_vector->tx_ring = &adapter->tx_ring[ring_idx];
	q_vector->tx_ring->q_vector = q_vector;
	q_vector->itr_val = adapter->tx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

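	/* Example (assumed counts): with 4 RX and 4 TX queues and 8 q_vectors
	 * every ring gets a private vector; with only 4 q_vectors, TX ring i
	 * shares vector i with RX ring i. */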
	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 |
			      E1000_EICR_TX_QUEUE0 |
			      E1000_EIMS_OTHER));
			break;
		case e1000_82580:
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, igb_intr_msi, 0,
		                  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, igb_intr, IRQF_SHARED,
	                  netdev->name, adapter);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
		        err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++) {
			struct igb_q_vector *q_vector = adapter->q_vector[i];
			free_irq(adapter->msix_entries[vector++].vector,
			         q_vector);
		}
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		if (adapter->hw.mac.type == e1000_82580)
			ims |= E1000_IMS_DRSTA;

		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
		     E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
		     E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !vlan_group_get_device(adapter->vlgrp, old_vid)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
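	/* e.g. (assumed default) a 256-descriptor RX ring is refilled with at
	 * most 255 buffers, so the two indices can never alias. */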
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_enable(&q_vector->napi);
	}
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		napi_disable(&q_vector->napi);
	}

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset*/
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
		                sizeof(union e1000_adv_tx_desc) -
		                ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

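		/* Worked example (assumed 9018-byte jumbo frame, 16-byte
		 * descriptor, 4-byte FCS):
		 *   min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10 = 18 KB
		 *   min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB */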
		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
	          ((pba << 10) - 2 * adapter->max_frame_size));

	fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
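	/* Worked example (assumed values): pba = 34 KB with a 1522-byte max
	 * frame gives hwm = min(31334, 31772) = 31334, so high_water is
	 * masked to 31328 bytes and low_water becomes 31312 bytes. */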
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags = 0;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);
		wr32(E1000_PCIEMISC,
		     reg & ~E1000_PCIEMISC_LX_DECISION);
	}
	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(hw);
	igb_get_phy_info(hw);
}

static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_multicast_list	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};

/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
	                                   IORESOURCE_MEM),
	                                   igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
	                           IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			"PHY reset is blocked due to SOL/IDER session.\n");

	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CSUM;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
	            (unsigned long) adapter);
	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
	            (unsigned long) adapter);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* Initial Wake on LAN setting.  If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
		                 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
		                 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
		                                            "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		  (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
		  (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
		                                               "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	igb_clear_interrupt_scheme(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
	                             pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}

1681/**
1682 * igb_remove - Device Removal Routine
1683 * @pdev: PCI device information struct
1684 *
1685 * igb_remove is called by the PCI subsystem to alert the driver
 1686 * that it should release a PCI device. This could be caused by a
1687 * Hot-Plug event, or because the driver is going to be removed from
1688 * memory.
1689 **/
1690static void __devexit igb_remove(struct pci_dev *pdev)
1691{
1692 struct net_device *netdev = pci_get_drvdata(pdev);
1693 struct igb_adapter *adapter = netdev_priv(netdev);
fe4506b6 1694 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
1695
 1696 /* flush_scheduled_work() may reschedule our watchdog task, so
1697 * explicitly disable watchdog tasks from being rescheduled */
1698 set_bit(__IGB_DOWN, &adapter->state);
1699 del_timer_sync(&adapter->watchdog_timer);
1700 del_timer_sync(&adapter->phy_info_timer);
1701
1702 flush_scheduled_work();
1703
421e02f0 1704#ifdef CONFIG_IGB_DCA
7dfc16fa 1705 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6
JC
1706 dev_info(&pdev->dev, "DCA disabled\n");
1707 dca_remove_requester(&pdev->dev);
7dfc16fa 1708 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 1709 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
1710 }
1711#endif
1712
9d5c8243
AK
1713 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1714 * would have already happened in close and is redundant. */
1715 igb_release_hw_control(adapter);
1716
1717 unregister_netdev(netdev);
1718
330a6d6a
AD
1719 if (!igb_check_reset_block(hw))
1720 igb_reset_phy(hw);
9d5c8243 1721
047e0030 1722 igb_clear_interrupt_scheme(adapter);
9d5c8243 1723
37680117
AD
1724#ifdef CONFIG_PCI_IOV
1725 /* reclaim resources allocated to VFs */
1726 if (adapter->vf_data) {
1727 /* disable iov and allow time for transactions to clear */
1728 pci_disable_sriov(pdev);
1729 msleep(500);
1730
1731 kfree(adapter->vf_data);
1732 adapter->vf_data = NULL;
1733 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
1734 msleep(100);
1735 dev_info(&pdev->dev, "IOV Disabled\n");
1736 }
1737#endif
559e9c49 1738
28b0759c
AD
1739 iounmap(hw->hw_addr);
1740 if (hw->flash_address)
1741 iounmap(hw->flash_address);
559e9c49
AD
1742 pci_release_selected_regions(pdev,
1743 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243
AK
1744
1745 free_netdev(netdev);
1746
19d5afd4 1747 pci_disable_pcie_error_reporting(pdev);
40a914fa 1748
9d5c8243
AK
1749 pci_disable_device(pdev);
1750}
1751
a6b623e0
AD
1752/**
1753 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1754 * @adapter: board private structure to initialize
1755 *
1756 * This function initializes the vf specific data storage and then attempts to
 1757 * allocate the VFs. The reason for this ordering is that it is much
 1758 * more expensive time-wise to disable SR-IOV than it is to allocate and free
1759 * the memory for the VFs.
1760 **/
1761static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1762{
1763#ifdef CONFIG_PCI_IOV
1764 struct pci_dev *pdev = adapter->pdev;
1765
1766 if (adapter->vfs_allocated_count > 7)
1767 adapter->vfs_allocated_count = 7;
1768
1769 if (adapter->vfs_allocated_count) {
1770 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1771 sizeof(struct vf_data_storage),
1772 GFP_KERNEL);
1773 /* if allocation failed then we do not support SR-IOV */
1774 if (!adapter->vf_data) {
1775 adapter->vfs_allocated_count = 0;
1776 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1777 "Data Storage\n");
1778 }
1779 }
1780
1781 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1782 kfree(adapter->vf_data);
1783 adapter->vf_data = NULL;
1784#endif /* CONFIG_PCI_IOV */
1785 adapter->vfs_allocated_count = 0;
1786#ifdef CONFIG_PCI_IOV
1787 } else {
1788 unsigned char mac_addr[ETH_ALEN];
1789 int i;
1790 dev_info(&pdev->dev, "%d vfs allocated\n",
1791 adapter->vfs_allocated_count);
1792 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1793 random_ether_addr(mac_addr);
1794 igb_set_vf_mac(adapter, i, mac_addr);
1795 }
1796 }
1797#endif /* CONFIG_PCI_IOV */
1798}
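/* [Editor's sketch] A minimal userspace illustration of the ordering
 * argument in the comment above: do the cheap allocation first and roll
 * it back if the expensive SR-IOV enable fails. All names below are
 * hypothetical stand-ins, not driver or PCI API.
 */
#include <stdio.h>
#include <stdlib.h>

struct vf_state { int id; };

static int enable_sriov_stub(int nvfs)  /* stands in for pci_enable_sriov() */
{
        return (nvfs > 7) ? -1 : 0;     /* pretend more than 7 VFs fails */
}

int main(void)
{
        int nvfs = 4;
        struct vf_state *vfs = calloc(nvfs, sizeof(*vfs)); /* cheap step first */
        if (!vfs)
                return 1;
        if (enable_sriov_stub(nvfs)) {  /* expensive step second */
                free(vfs);              /* rollback of the cheap step */
                return 1;
        }
        printf("%d vfs allocated\n", nvfs);
        free(vfs);
        return 0;
}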
1799
115f459a
AD
1800
1801/**
1802 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1803 * @adapter: board private structure to initialize
1804 *
1805 * igb_init_hw_timer initializes the function pointer and values for the hw
1806 * timer found in hardware.
1807 **/
1808static void igb_init_hw_timer(struct igb_adapter *adapter)
1809{
1810 struct e1000_hw *hw = &adapter->hw;
1811
1812 switch (hw->mac.type) {
55cac248
AD
1813 case e1000_82580:
1814 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1815 adapter->cycles.read = igb_read_clock;
1816 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1817 adapter->cycles.mult = 1;
1818 /*
 1819 * The 82580 timesync hardware advances the system timer in 8 ns increments
1820 * and the value cannot be shifted. Instead we need to shift
1821 * the registers to generate a 64bit timer value. As a result
1822 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
1823 * 24 in order to generate a larger value for synchronization.
1824 */
1825 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
1826 /* disable system timer temporarily by setting bit 31 */
1827 wr32(E1000_TSAUXC, 0x80000000);
1828 wrfl();
1829
1830 /* Set registers so that rollover occurs soon to test this. */
1831 wr32(E1000_SYSTIMR, 0x00000000);
1832 wr32(E1000_SYSTIML, 0x80000000);
1833 wr32(E1000_SYSTIMH, 0x000000FF);
1834 wrfl();
1835
1836 /* enable system timer by clearing bit 31 */
1837 wr32(E1000_TSAUXC, 0x0);
1838 wrfl();
1839
1840 timecounter_init(&adapter->clock,
1841 &adapter->cycles,
1842 ktime_to_ns(ktime_get_real()));
1843 /*
1844 * Synchronize our NIC clock against system wall clock. NIC
1845 * time stamp reading requires ~3us per sample, each sample
1846 * was pretty stable even under load => only require 10
1847 * samples for each offset comparison.
1848 */
1849 memset(&adapter->compare, 0, sizeof(adapter->compare));
1850 adapter->compare.source = &adapter->clock;
1851 adapter->compare.target = ktime_get_real;
1852 adapter->compare.num_samples = 10;
1853 timecompare_update(&adapter->compare, 0);
1854 break;
115f459a
AD
1855 case e1000_82576:
1856 /*
1857 * Initialize hardware timer: we keep it running just in case
1858 * that some program needs it later on.
1859 */
1860 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1861 adapter->cycles.read = igb_read_clock;
1862 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1863 adapter->cycles.mult = 1;
 1864 /*
1865 * Scale the NIC clock cycle by a large factor so that
1866 * relatively small clock corrections can be added or
 1867 * subtracted at each clock tick. The drawbacks of a large
1868 * factor are a) that the clock register overflows more quickly
1869 * (not such a big deal) and b) that the increment per tick has
1870 * to fit into 24 bits. As a result we need to use a shift of
1871 * 19 so we can fit a value of 16 into the TIMINCA register.
1872 */
1873 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1874 wr32(E1000_TIMINCA,
1875 (1 << E1000_TIMINCA_16NS_SHIFT) |
1876 (16 << IGB_82576_TSYNC_SHIFT));
1877
1878 /* Set registers so that rollover occurs soon to test this. */
1879 wr32(E1000_SYSTIML, 0x00000000);
1880 wr32(E1000_SYSTIMH, 0xFF800000);
1881 wrfl();
1882
1883 timecounter_init(&adapter->clock,
1884 &adapter->cycles,
1885 ktime_to_ns(ktime_get_real()));
1886 /*
1887 * Synchronize our NIC clock against system wall clock. NIC
1888 * time stamp reading requires ~3us per sample, each sample
1889 * was pretty stable even under load => only require 10
1890 * samples for each offset comparison.
1891 */
1892 memset(&adapter->compare, 0, sizeof(adapter->compare));
1893 adapter->compare.source = &adapter->clock;
1894 adapter->compare.target = ktime_get_real;
1895 adapter->compare.num_samples = 10;
1896 timecompare_update(&adapter->compare, 0);
1897 break;
1898 case e1000_82575:
1899 /* 82575 does not support timesync */
1900 default:
1901 break;
1902 }
1903
1904}
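/* [Editor's sketch] A standalone illustration of the cyclecounter math
 * above: with mult == 1, ns = cycles >> shift. The 82576 pairing of
 * shift 19 with a TIMINCA increment of 16 per 16 ns tick is taken from
 * the code above; the arithmetic here is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;  /* same form as the kernel's conversion */
}

int main(void)
{
        /* 82576: a 16 ns tick adds 16 << 19 to SYSTIM, and one second of
         * wall time is 62,500,000 such ticks */
        uint64_t one_second = 62500000ULL * (16ULL << 19);
        printf("ns per second: %llu\n",
               (unsigned long long)cycles_to_ns(one_second, 1, 19));
        /* prints 1000000000, confirming the shift/increment pairing */
        return 0;
}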
1905
9d5c8243
AK
1906/**
1907 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1908 * @adapter: board private structure to initialize
1909 *
1910 * igb_sw_init initializes the Adapter private data structure.
1911 * Fields are initialized based on PCI device information and
1912 * OS network device settings (MTU size).
1913 **/
1914static int __devinit igb_sw_init(struct igb_adapter *adapter)
1915{
1916 struct e1000_hw *hw = &adapter->hw;
1917 struct net_device *netdev = adapter->netdev;
1918 struct pci_dev *pdev = adapter->pdev;
1919
1920 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1921
68fd9910
AD
1922 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1923 adapter->rx_ring_count = IGB_DEFAULT_RXD;
4fc82adf
AD
1924 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1925 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1926
9d5c8243
AK
1927 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1928 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1929
a6b623e0
AD
1930#ifdef CONFIG_PCI_IOV
1931 if (hw->mac.type == e1000_82576)
1932 adapter->vfs_allocated_count = max_vfs;
1933
1934#endif /* CONFIG_PCI_IOV */
a99955fc
AD
1935 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1936
1937 /*
 1938 * if rss_queues > 4, or VFs are going to be allocated alongside multiple
 1939 * RSS queues, combine the Tx/Rx queues into queue pairs to conserve the
 1940 * limited supply of interrupt vectors; see the sketch after this function
1941 */
1942 if ((adapter->rss_queues > 4) ||
1943 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1944 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1945
a6b623e0 1946 /* This call may decrease the number of queues */
047e0030 1947 if (igb_init_interrupt_scheme(adapter)) {
9d5c8243
AK
1948 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1949 return -ENOMEM;
1950 }
1951
115f459a 1952 igb_init_hw_timer(adapter);
a6b623e0
AD
1953 igb_probe_vfs(adapter);
1954
9d5c8243
AK
1955 /* Explicitly disable IRQ since the NIC can be in any state. */
1956 igb_irq_disable(adapter);
1957
1958 set_bit(__IGB_DOWN, &adapter->state);
1959 return 0;
1960}
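/* [Editor's sketch] The queue-pair decision from igb_sw_init() restated
 * as a standalone predicate, using the same thresholds (more than 4 RSS
 * queues, or RSS together with more than 6 VFs). The helper name is
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static bool wants_queue_pairs(unsigned rss_queues, unsigned vfs)
{
        /* pair each Tx ring with an Rx ring on one vector when vectors are scarce */
        return (rss_queues > 4) || (rss_queues > 1 && vfs > 6);
}

int main(void)
{
        printf("%d\n", wants_queue_pairs(8, 0));  /* 1: more than 4 RSS queues */
        printf("%d\n", wants_queue_pairs(2, 7));  /* 1: RSS plus many VFs */
        printf("%d\n", wants_queue_pairs(4, 0));  /* 0: enough vectors for all */
        return 0;
}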
1961
1962/**
1963 * igb_open - Called when a network interface is made active
1964 * @netdev: network interface device structure
1965 *
1966 * Returns 0 on success, negative value on failure
1967 *
1968 * The open entry point is called when a network interface is made
1969 * active by the system (IFF_UP). At this point all resources needed
1970 * for transmit and receive operations are allocated, the interrupt
1971 * handler is registered with the OS, the watchdog timer is started,
1972 * and the stack is notified that the interface is ready.
1973 **/
1974static int igb_open(struct net_device *netdev)
1975{
1976 struct igb_adapter *adapter = netdev_priv(netdev);
1977 struct e1000_hw *hw = &adapter->hw;
1978 int err;
1979 int i;
1980
1981 /* disallow open during test */
1982 if (test_bit(__IGB_TESTING, &adapter->state))
1983 return -EBUSY;
1984
b168dfc5
JB
1985 netif_carrier_off(netdev);
1986
9d5c8243
AK
1987 /* allocate transmit descriptors */
1988 err = igb_setup_all_tx_resources(adapter);
1989 if (err)
1990 goto err_setup_tx;
1991
1992 /* allocate receive descriptors */
1993 err = igb_setup_all_rx_resources(adapter);
1994 if (err)
1995 goto err_setup_rx;
1996
1997 /* e1000_power_up_phy(adapter); */
1998
9d5c8243
AK
1999 /* before we allocate an interrupt, we must be ready to handle it.
2000 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
 2001 * as soon as we call pci_request_irq, so we have to set up our
2002 * clean_rx handler before we do so. */
2003 igb_configure(adapter);
2004
2005 err = igb_request_irq(adapter);
2006 if (err)
2007 goto err_req_irq;
2008
2009 /* From here on the code is the same as igb_up() */
2010 clear_bit(__IGB_DOWN, &adapter->state);
2011
047e0030
AD
2012 for (i = 0; i < adapter->num_q_vectors; i++) {
2013 struct igb_q_vector *q_vector = adapter->q_vector[i];
2014 napi_enable(&q_vector->napi);
2015 }
9d5c8243
AK
2016
2017 /* Clear any pending interrupts. */
2018 rd32(E1000_ICR);
844290e5
PW
2019
2020 igb_irq_enable(adapter);
2021
d4960307
AD
2022 /* notify VFs that reset has been completed */
2023 if (adapter->vfs_allocated_count) {
2024 u32 reg_data = rd32(E1000_CTRL_EXT);
2025 reg_data |= E1000_CTRL_EXT_PFRSTD;
2026 wr32(E1000_CTRL_EXT, reg_data);
2027 }
2028
d55b53ff
JK
2029 netif_tx_start_all_queues(netdev);
2030
25568a53
AD
2031 /* start the watchdog. */
2032 hw->mac.get_link_status = 1;
2033 schedule_work(&adapter->watchdog_task);
9d5c8243
AK
2034
2035 return 0;
2036
2037err_req_irq:
2038 igb_release_hw_control(adapter);
2039 /* e1000_power_down_phy(adapter); */
2040 igb_free_all_rx_resources(adapter);
2041err_setup_rx:
2042 igb_free_all_tx_resources(adapter);
2043err_setup_tx:
2044 igb_reset(adapter);
2045
2046 return err;
2047}
2048
2049/**
2050 * igb_close - Disables a network interface
2051 * @netdev: network interface device structure
2052 *
2053 * Returns 0, this is not allowed to fail
2054 *
2055 * The close entry point is called when an interface is de-activated
2056 * by the OS. The hardware is still under the driver's control, but
2057 * needs to be disabled. A global MAC reset is issued to stop the
2058 * hardware, and all transmit and receive resources are freed.
2059 **/
2060static int igb_close(struct net_device *netdev)
2061{
2062 struct igb_adapter *adapter = netdev_priv(netdev);
2063
2064 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
2065 igb_down(adapter);
2066
2067 igb_free_irq(adapter);
2068
2069 igb_free_all_tx_resources(adapter);
2070 igb_free_all_rx_resources(adapter);
2071
9d5c8243
AK
2072 return 0;
2073}
2074
2075/**
2076 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
9d5c8243
AK
2077 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2078 *
2079 * Return 0 on success, negative on failure
2080 **/
80785298 2081int igb_setup_tx_resources(struct igb_ring *tx_ring)
9d5c8243 2082{
80785298 2083 struct pci_dev *pdev = tx_ring->pdev;
9d5c8243
AK
2084 int size;
2085
2086 size = sizeof(struct igb_buffer) * tx_ring->count;
2087 tx_ring->buffer_info = vmalloc(size);
2088 if (!tx_ring->buffer_info)
2089 goto err;
2090 memset(tx_ring->buffer_info, 0, size);
2091
2092 /* round up to nearest 4K */
85e8d004 2093 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
9d5c8243
AK
2094 tx_ring->size = ALIGN(tx_ring->size, 4096);
2095
439705e1
AD
2096 tx_ring->desc = pci_alloc_consistent(pdev,
2097 tx_ring->size,
9d5c8243
AK
2098 &tx_ring->dma);
2099
2100 if (!tx_ring->desc)
2101 goto err;
2102
9d5c8243
AK
2103 tx_ring->next_to_use = 0;
2104 tx_ring->next_to_clean = 0;
9d5c8243
AK
2105 return 0;
2106
2107err:
2108 vfree(tx_ring->buffer_info);
047e0030 2109 dev_err(&pdev->dev,
9d5c8243
AK
2110 "Unable to allocate memory for the transmit descriptor ring\n");
2111 return -ENOMEM;
2112}
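/* [Editor's sketch] The 4K rounding used above, reimplemented for
 * illustration. Advanced descriptors are 16 bytes each, so a ring that
 * is not a multiple of 256 descriptors gets padded up to the next page.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        printf("%u\n", ALIGN_UP(256u * 16u, 4096u)); /* 4096: already aligned */
        printf("%u\n", ALIGN_UP(320u * 16u, 4096u)); /* 8192: 5120 rounded up */
        return 0;
}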
2113
2114/**
2115 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2116 * (Descriptors) for all queues
2117 * @adapter: board private structure
2118 *
2119 * Return 0 on success, negative on failure
2120 **/
2121static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2122{
439705e1 2123 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2124 int i, err = 0;
2125
2126 for (i = 0; i < adapter->num_tx_queues; i++) {
80785298 2127 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
9d5c8243 2128 if (err) {
439705e1 2129 dev_err(&pdev->dev,
9d5c8243
AK
2130 "Allocation for Tx Queue %u failed\n", i);
2131 for (i--; i >= 0; i--)
3b644cf6 2132 igb_free_tx_resources(&adapter->tx_ring[i]);
9d5c8243
AK
2133 break;
2134 }
2135 }
2136
a99955fc 2137 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
439705e1 2138 int r_idx = i % adapter->num_tx_queues;
661086df 2139 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
eebbbdba 2140 }
9d5c8243
AK
2141 return err;
2142}
2143
2144/**
85b430b4
AD
2145 * igb_setup_tctl - configure the transmit control registers
2146 * @adapter: Board private structure
9d5c8243 2147 **/
d7ee5b3a 2148void igb_setup_tctl(struct igb_adapter *adapter)
9d5c8243 2149{
9d5c8243
AK
2150 struct e1000_hw *hw = &adapter->hw;
2151 u32 tctl;
9d5c8243 2152
85b430b4
AD
2153 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2154 wr32(E1000_TXDCTL(0), 0);
9d5c8243
AK
2155
2156 /* Program the Transmit Control Register */
9d5c8243
AK
2157 tctl = rd32(E1000_TCTL);
2158 tctl &= ~E1000_TCTL_CT;
2159 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2160 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2161
2162 igb_config_collision_dist(hw);
2163
9d5c8243
AK
2164 /* Enable transmits */
2165 tctl |= E1000_TCTL_EN;
2166
2167 wr32(E1000_TCTL, tctl);
2168}
2169
85b430b4
AD
2170/**
2171 * igb_configure_tx_ring - Configure transmit ring after Reset
2172 * @adapter: board private structure
2173 * @ring: tx ring to configure
2174 *
2175 * Configure a transmit ring after a reset.
2176 **/
d7ee5b3a
AD
2177void igb_configure_tx_ring(struct igb_adapter *adapter,
2178 struct igb_ring *ring)
85b430b4
AD
2179{
2180 struct e1000_hw *hw = &adapter->hw;
2181 u32 txdctl;
2182 u64 tdba = ring->dma;
2183 int reg_idx = ring->reg_idx;
2184
2185 /* disable the queue */
2186 txdctl = rd32(E1000_TXDCTL(reg_idx));
2187 wr32(E1000_TXDCTL(reg_idx),
2188 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2189 wrfl();
2190 mdelay(10);
2191
2192 wr32(E1000_TDLEN(reg_idx),
2193 ring->count * sizeof(union e1000_adv_tx_desc));
2194 wr32(E1000_TDBAL(reg_idx),
2195 tdba & 0x00000000ffffffffULL);
2196 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2197
fce99e34
AD
2198 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2199 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2200 writel(0, ring->head);
2201 writel(0, ring->tail);
85b430b4
AD
2202
2203 txdctl |= IGB_TX_PTHRESH;
2204 txdctl |= IGB_TX_HTHRESH << 8;
2205 txdctl |= IGB_TX_WTHRESH << 16;
2206
2207 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2208 wr32(E1000_TXDCTL(reg_idx), txdctl);
2209}
2210
2211/**
2212 * igb_configure_tx - Configure transmit Unit after Reset
2213 * @adapter: board private structure
2214 *
2215 * Configure the Tx unit of the MAC after a reset.
2216 **/
2217static void igb_configure_tx(struct igb_adapter *adapter)
2218{
2219 int i;
2220
2221 for (i = 0; i < adapter->num_tx_queues; i++)
2222 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
85b430b4
AD
2223}
2224
9d5c8243
AK
2225/**
2226 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
9d5c8243
AK
2227 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2228 *
2229 * Returns 0 on success, negative on failure
2230 **/
80785298 2231int igb_setup_rx_resources(struct igb_ring *rx_ring)
9d5c8243 2232{
80785298 2233 struct pci_dev *pdev = rx_ring->pdev;
9d5c8243
AK
2234 int size, desc_len;
2235
2236 size = sizeof(struct igb_buffer) * rx_ring->count;
2237 rx_ring->buffer_info = vmalloc(size);
2238 if (!rx_ring->buffer_info)
2239 goto err;
2240 memset(rx_ring->buffer_info, 0, size);
2241
2242 desc_len = sizeof(union e1000_adv_rx_desc);
2243
2244 /* Round up to nearest 4K */
2245 rx_ring->size = rx_ring->count * desc_len;
2246 rx_ring->size = ALIGN(rx_ring->size, 4096);
2247
2248 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
2249 &rx_ring->dma);
2250
2251 if (!rx_ring->desc)
2252 goto err;
2253
2254 rx_ring->next_to_clean = 0;
2255 rx_ring->next_to_use = 0;
9d5c8243 2256
9d5c8243
AK
2257 return 0;
2258
2259err:
2260 vfree(rx_ring->buffer_info);
439705e1 2261 rx_ring->buffer_info = NULL;
80785298 2262 dev_err(&pdev->dev, "Unable to allocate memory for "
9d5c8243
AK
2263 "the receive descriptor ring\n");
2264 return -ENOMEM;
2265}
2266
2267/**
2268 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2269 * (Descriptors) for all queues
2270 * @adapter: board private structure
2271 *
2272 * Return 0 on success, negative on failure
2273 **/
2274static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2275{
439705e1 2276 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2277 int i, err = 0;
2278
2279 for (i = 0; i < adapter->num_rx_queues; i++) {
80785298 2280 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
9d5c8243 2281 if (err) {
439705e1 2282 dev_err(&pdev->dev,
9d5c8243
AK
2283 "Allocation for Rx Queue %u failed\n", i);
2284 for (i--; i >= 0; i--)
3b644cf6 2285 igb_free_rx_resources(&adapter->rx_ring[i]);
9d5c8243
AK
2286 break;
2287 }
2288 }
2289
2290 return err;
2291}
2292
06cf2666
AD
2293/**
2294 * igb_setup_mrqc - configure the multiple receive queue control registers
2295 * @adapter: Board private structure
2296 **/
2297static void igb_setup_mrqc(struct igb_adapter *adapter)
2298{
2299 struct e1000_hw *hw = &adapter->hw;
2300 u32 mrqc, rxcsum;
2301 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2302 union e1000_reta {
2303 u32 dword;
2304 u8 bytes[4];
2305 } reta;
2306 static const u8 rsshash[40] = {
2307 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2308 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2309 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2310 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2311
2312 /* Fill out hash function seeds */
2313 for (j = 0; j < 10; j++) {
2314 u32 rsskey = rsshash[(j * 4)];
2315 rsskey |= rsshash[(j * 4) + 1] << 8;
2316 rsskey |= rsshash[(j * 4) + 2] << 16;
2317 rsskey |= rsshash[(j * 4) + 3] << 24;
2318 array_wr32(E1000_RSSRK(0), j, rsskey);
2319 }
2320
a99955fc 2321 num_rx_queues = adapter->rss_queues;
06cf2666
AD
2322
2323 if (adapter->vfs_allocated_count) {
 2324 /* 82575 and 82576 support 2 RSS queues for VMDq */
2325 switch (hw->mac.type) {
55cac248
AD
2326 case e1000_82580:
2327 num_rx_queues = 1;
2328 shift = 0;
2329 break;
06cf2666
AD
2330 case e1000_82576:
2331 shift = 3;
2332 num_rx_queues = 2;
2333 break;
2334 case e1000_82575:
2335 shift = 2;
2336 shift2 = 6;
2337 default:
2338 break;
2339 }
2340 } else {
2341 if (hw->mac.type == e1000_82575)
2342 shift = 6;
2343 }
2344
2345 for (j = 0; j < (32 * 4); j++) {
2346 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2347 if (shift2)
2348 reta.bytes[j & 3] |= num_rx_queues << shift2;
2349 if ((j & 3) == 3)
2350 wr32(E1000_RETA(j >> 2), reta.dword);
2351 }
2352
2353 /*
2354 * Disable raw packet checksumming so that RSS hash is placed in
2355 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2356 * offloads as they are enabled by default
2357 */
2358 rxcsum = rd32(E1000_RXCSUM);
2359 rxcsum |= E1000_RXCSUM_PCSD;
2360
2361 if (adapter->hw.mac.type >= e1000_82576)
2362 /* Enable Receive Checksum Offload for SCTP */
2363 rxcsum |= E1000_RXCSUM_CRCOFL;
2364
2365 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2366 wr32(E1000_RXCSUM, rxcsum);
2367
2368 /* If VMDq is enabled then we set the appropriate mode for that, else
2369 * we default to RSS so that an RSS hash is calculated per packet even
2370 * if we are only using one queue */
2371 if (adapter->vfs_allocated_count) {
2372 if (hw->mac.type > e1000_82575) {
2373 /* Set the default pool for the PF's first queue */
2374 u32 vtctl = rd32(E1000_VT_CTL);
2375 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2376 E1000_VT_CTL_DISABLE_DEF_POOL);
2377 vtctl |= adapter->vfs_allocated_count <<
2378 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2379 wr32(E1000_VT_CTL, vtctl);
2380 }
a99955fc 2381 if (adapter->rss_queues > 1)
06cf2666
AD
2382 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2383 else
2384 mrqc = E1000_MRQC_ENABLE_VMDQ;
2385 } else {
2386 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2387 }
2388 igb_vmm_control(adapter);
2389
2390 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2391 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2392 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2393 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2394 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2395 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2396 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2397 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2398
2399 wr32(E1000_MRQC, mrqc);
2400}
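/* [Editor's sketch] A self-contained version of the redirection-table
 * fill above: 128 one-byte entries spread RSS hash buckets round-robin
 * across the receive queues, with the queue index positioned by the
 * per-MAC shift. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned num_rx_queues = 4, shift = 0;
        uint8_t reta[128];

        for (unsigned j = 0; j < 128; j++)
                reta[j] = (j % num_rx_queues) << shift;

        /* hash bucket j lands on queue reta[j]: 0,1,2,3,0,1,2,3,... */
        for (unsigned j = 0; j < 8; j++)
                printf("bucket %u -> queue %u\n", j, reta[j]);
        return 0;
}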
2401
9d5c8243
AK
2402/**
2403 * igb_setup_rctl - configure the receive control registers
2404 * @adapter: Board private structure
2405 **/
d7ee5b3a 2406void igb_setup_rctl(struct igb_adapter *adapter)
9d5c8243
AK
2407{
2408 struct e1000_hw *hw = &adapter->hw;
2409 u32 rctl;
9d5c8243
AK
2410
2411 rctl = rd32(E1000_RCTL);
2412
2413 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
69d728ba 2414 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
9d5c8243 2415
69d728ba 2416 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
28b0759c 2417 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
9d5c8243 2418
87cb7e8c
AK
2419 /*
2420 * enable stripping of CRC. It's unlikely this will break BMC
2421 * redirection as it did with e1000. Newer features require
2422 * that the HW strips the CRC.
73cd78f1 2423 */
87cb7e8c 2424 rctl |= E1000_RCTL_SECRC;
9d5c8243 2425
559e9c49 2426 /* disable store bad packets and clear size bits. */
ec54d7d6 2427 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
9d5c8243 2428
6ec43fe6
AD
2429 /* enable LPE to prevent packets larger than max_frame_size */
2430 rctl |= E1000_RCTL_LPE;
9d5c8243 2431
952f72a8
AD
2432 /* disable queue 0 to prevent tail write w/o re-config */
2433 wr32(E1000_RXDCTL(0), 0);
9d5c8243 2434
e1739522
AD
2435 /* Attention!!! For SR-IOV PF driver operations you must enable
 2436 * queue drop for all VF and PF queues to prevent head-of-line blocking
 2437 * if an untrusted VF does not provide descriptors to hardware.
2438 */
2439 if (adapter->vfs_allocated_count) {
e1739522
AD
2440 /* set all queue drop enable bits */
2441 wr32(E1000_QDE, ALL_QUEUES);
e1739522
AD
2442 }
2443
9d5c8243
AK
2444 wr32(E1000_RCTL, rctl);
2445}
2446
7d5753f0
AD
2447static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2448 int vfn)
2449{
2450 struct e1000_hw *hw = &adapter->hw;
2451 u32 vmolr;
2452
 2453 /* if it isn't the PF, check to see whether VFs are enabled and
 2454 * increase the size to support VLAN tags */
2455 if (vfn < adapter->vfs_allocated_count &&
2456 adapter->vf_data[vfn].vlans_enabled)
2457 size += VLAN_TAG_SIZE;
2458
2459 vmolr = rd32(E1000_VMOLR(vfn));
2460 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2461 vmolr |= size | E1000_VMOLR_LPE;
2462 wr32(E1000_VMOLR(vfn), vmolr);
2463
2464 return 0;
2465}
2466
e1739522
AD
2467/**
2468 * igb_rlpml_set - set maximum receive packet size
2469 * @adapter: board private structure
2470 *
2471 * Configure maximum receivable packet size.
2472 **/
2473static void igb_rlpml_set(struct igb_adapter *adapter)
2474{
2475 u32 max_frame_size = adapter->max_frame_size;
2476 struct e1000_hw *hw = &adapter->hw;
2477 u16 pf_id = adapter->vfs_allocated_count;
2478
2479 if (adapter->vlgrp)
2480 max_frame_size += VLAN_TAG_SIZE;
2481
2482 /* if vfs are enabled we set RLPML to the largest possible request
2483 * size and set the VMOLR RLPML to the size we need */
2484 if (pf_id) {
2485 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
7d5753f0 2486 max_frame_size = MAX_JUMBO_FRAME_SIZE;
e1739522
AD
2487 }
2488
2489 wr32(E1000_RLPML, max_frame_size);
2490}
2491
7d5753f0
AD
2492static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2493{
2494 struct e1000_hw *hw = &adapter->hw;
2495 u32 vmolr;
2496
2497 /*
 2498 * This register exists only on 82576 and newer, so on older hardware
 2499 * we should exit and do nothing
2500 */
2501 if (hw->mac.type < e1000_82576)
2502 return;
2503
2504 vmolr = rd32(E1000_VMOLR(vfn));
2505 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2506 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2507
2508 /* clear all bits that might not be set */
2509 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2510
a99955fc 2511 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
7d5753f0
AD
2512 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2513 /*
2514 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2515 * multicast packets
2516 */
2517 if (vfn <= adapter->vfs_allocated_count)
2518 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2519
2520 wr32(E1000_VMOLR(vfn), vmolr);
2521}
2522
85b430b4
AD
2523/**
2524 * igb_configure_rx_ring - Configure a receive ring after Reset
2525 * @adapter: board private structure
2526 * @ring: receive ring to be configured
2527 *
2528 * Configure the Rx unit of the MAC after a reset.
2529 **/
d7ee5b3a
AD
2530void igb_configure_rx_ring(struct igb_adapter *adapter,
2531 struct igb_ring *ring)
85b430b4
AD
2532{
2533 struct e1000_hw *hw = &adapter->hw;
2534 u64 rdba = ring->dma;
2535 int reg_idx = ring->reg_idx;
952f72a8 2536 u32 srrctl, rxdctl;
85b430b4
AD
2537
2538 /* disable the queue */
2539 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2540 wr32(E1000_RXDCTL(reg_idx),
2541 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2542
2543 /* Set DMA base address registers */
2544 wr32(E1000_RDBAL(reg_idx),
2545 rdba & 0x00000000ffffffffULL);
2546 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2547 wr32(E1000_RDLEN(reg_idx),
2548 ring->count * sizeof(union e1000_adv_rx_desc));
2549
2550 /* initialize head and tail */
fce99e34
AD
2551 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2552 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2553 writel(0, ring->head);
2554 writel(0, ring->tail);
85b430b4 2555
952f72a8 2556 /* set descriptor configuration */
4c844851
AD
2557 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2558 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
952f72a8
AD
2559 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2560#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2561 srrctl |= IGB_RXBUFFER_16384 >>
2562 E1000_SRRCTL_BSIZEPKT_SHIFT;
2563#else
2564 srrctl |= (PAGE_SIZE / 2) >>
2565 E1000_SRRCTL_BSIZEPKT_SHIFT;
2566#endif
2567 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2568 } else {
4c844851 2569 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
952f72a8
AD
2570 E1000_SRRCTL_BSIZEPKT_SHIFT;
2571 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2572 }
2573
2574 wr32(E1000_SRRCTL(reg_idx), srrctl);
2575
7d5753f0
AD
2576 /* set filtering for VMDQ pools */
2577 igb_set_vmolr(adapter, reg_idx & 0x7);
2578
85b430b4
AD
2579 /* enable receive descriptor fetching */
2580 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2581 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2582 rxdctl &= 0xFFF00000;
2583 rxdctl |= IGB_RX_PTHRESH;
2584 rxdctl |= IGB_RX_HTHRESH << 8;
2585 rxdctl |= IGB_RX_WTHRESH << 16;
2586 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2587}
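/* [Editor's sketch] The SRRCTL packet-buffer field above encodes the
 * buffer size in 1 KB units: align the byte length to 1024, then shift
 * right. A minimal sketch assuming a BSIZEPKT shift of 10, as in the
 * driver headers.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)          (((x) + (a) - 1) & ~((a) - 1))
#define SRRCTL_BSIZEPKT_SHIFT   10      /* assumed: 1 KB granularity */

int main(void)
{
        unsigned rx_buffer_len = 2048;  /* a typical one-buffer Rx size */
        printf("SRRCTL.BSIZEPKT = %u (x 1KB)\n",
               ALIGN_UP(rx_buffer_len, 1024u) >> SRRCTL_BSIZEPKT_SHIFT); /* 2 */
        return 0;
}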
2588
9d5c8243
AK
2589/**
2590 * igb_configure_rx - Configure receive Unit after Reset
2591 * @adapter: board private structure
2592 *
2593 * Configure the Rx unit of the MAC after a reset.
2594 **/
2595static void igb_configure_rx(struct igb_adapter *adapter)
2596{
9107584e 2597 int i;
9d5c8243 2598
68d480c4
AD
2599 /* set UTA to appropriate mode */
2600 igb_set_uta(adapter);
2601
26ad9178
AD
2602 /* set the correct pool for the PF default MAC address in entry 0 */
2603 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2604 adapter->vfs_allocated_count);
2605
06cf2666
AD
2606 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2607 * the Base and Length of the Rx Descriptor Ring */
2608 for (i = 0; i < adapter->num_rx_queues; i++)
2609 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
9d5c8243
AK
2610}
2611
2612/**
2613 * igb_free_tx_resources - Free Tx Resources per Queue
9d5c8243
AK
2614 * @tx_ring: Tx descriptor ring for a specific queue
2615 *
2616 * Free all transmit software resources
2617 **/
68fd9910 2618void igb_free_tx_resources(struct igb_ring *tx_ring)
9d5c8243 2619{
3b644cf6 2620 igb_clean_tx_ring(tx_ring);
9d5c8243
AK
2621
2622 vfree(tx_ring->buffer_info);
2623 tx_ring->buffer_info = NULL;
2624
439705e1
AD
2625 /* if not set, then don't free */
2626 if (!tx_ring->desc)
2627 return;
2628
80785298
AD
2629 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2630 tx_ring->desc, tx_ring->dma);
9d5c8243
AK
2631
2632 tx_ring->desc = NULL;
2633}
2634
2635/**
2636 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2637 * @adapter: board private structure
2638 *
2639 * Free all transmit software resources
2640 **/
2641static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2642{
2643 int i;
2644
2645 for (i = 0; i < adapter->num_tx_queues; i++)
3b644cf6 2646 igb_free_tx_resources(&adapter->tx_ring[i]);
9d5c8243
AK
2647}
2648
b1a436c3
AD
2649void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2650 struct igb_buffer *buffer_info)
9d5c8243 2651{
6366ad33
AD
2652 if (buffer_info->dma) {
2653 if (buffer_info->mapped_as_page)
2654 pci_unmap_page(tx_ring->pdev,
2655 buffer_info->dma,
2656 buffer_info->length,
2657 PCI_DMA_TODEVICE);
2658 else
2659 pci_unmap_single(tx_ring->pdev,
2660 buffer_info->dma,
2661 buffer_info->length,
2662 PCI_DMA_TODEVICE);
2663 buffer_info->dma = 0;
2664 }
9d5c8243
AK
2665 if (buffer_info->skb) {
2666 dev_kfree_skb_any(buffer_info->skb);
2667 buffer_info->skb = NULL;
2668 }
2669 buffer_info->time_stamp = 0;
6366ad33
AD
2670 buffer_info->length = 0;
2671 buffer_info->next_to_watch = 0;
2672 buffer_info->mapped_as_page = false;
9d5c8243
AK
2673}
2674
2675/**
2676 * igb_clean_tx_ring - Free Tx Buffers
9d5c8243
AK
2677 * @tx_ring: ring to be cleaned
2678 **/
3b644cf6 2679static void igb_clean_tx_ring(struct igb_ring *tx_ring)
9d5c8243
AK
2680{
2681 struct igb_buffer *buffer_info;
2682 unsigned long size;
2683 unsigned int i;
2684
2685 if (!tx_ring->buffer_info)
2686 return;
2687 /* Free all the Tx ring sk_buffs */
2688
2689 for (i = 0; i < tx_ring->count; i++) {
2690 buffer_info = &tx_ring->buffer_info[i];
80785298 2691 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
9d5c8243
AK
2692 }
2693
2694 size = sizeof(struct igb_buffer) * tx_ring->count;
2695 memset(tx_ring->buffer_info, 0, size);
2696
2697 /* Zero out the descriptor ring */
9d5c8243
AK
2698 memset(tx_ring->desc, 0, tx_ring->size);
2699
2700 tx_ring->next_to_use = 0;
2701 tx_ring->next_to_clean = 0;
9d5c8243
AK
2702}
2703
2704/**
2705 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2706 * @adapter: board private structure
2707 **/
2708static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2709{
2710 int i;
2711
2712 for (i = 0; i < adapter->num_tx_queues; i++)
3b644cf6 2713 igb_clean_tx_ring(&adapter->tx_ring[i]);
9d5c8243
AK
2714}
2715
2716/**
2717 * igb_free_rx_resources - Free Rx Resources
9d5c8243
AK
2718 * @rx_ring: ring to clean the resources from
2719 *
2720 * Free all receive software resources
2721 **/
68fd9910 2722void igb_free_rx_resources(struct igb_ring *rx_ring)
9d5c8243 2723{
3b644cf6 2724 igb_clean_rx_ring(rx_ring);
9d5c8243
AK
2725
2726 vfree(rx_ring->buffer_info);
2727 rx_ring->buffer_info = NULL;
2728
439705e1
AD
2729 /* if not set, then don't free */
2730 if (!rx_ring->desc)
2731 return;
2732
80785298
AD
2733 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2734 rx_ring->desc, rx_ring->dma);
9d5c8243
AK
2735
2736 rx_ring->desc = NULL;
2737}
2738
2739/**
2740 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2741 * @adapter: board private structure
2742 *
2743 * Free all receive software resources
2744 **/
2745static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2746{
2747 int i;
2748
2749 for (i = 0; i < adapter->num_rx_queues; i++)
3b644cf6 2750 igb_free_rx_resources(&adapter->rx_ring[i]);
9d5c8243
AK
2751}
2752
2753/**
2754 * igb_clean_rx_ring - Free Rx Buffers per Queue
9d5c8243
AK
2755 * @rx_ring: ring to free buffers from
2756 **/
3b644cf6 2757static void igb_clean_rx_ring(struct igb_ring *rx_ring)
9d5c8243
AK
2758{
2759 struct igb_buffer *buffer_info;
9d5c8243
AK
2760 unsigned long size;
2761 unsigned int i;
2762
2763 if (!rx_ring->buffer_info)
2764 return;
439705e1 2765
9d5c8243
AK
2766 /* Free all the Rx ring sk_buffs */
2767 for (i = 0; i < rx_ring->count; i++) {
2768 buffer_info = &rx_ring->buffer_info[i];
2769 if (buffer_info->dma) {
80785298
AD
2770 pci_unmap_single(rx_ring->pdev,
2771 buffer_info->dma,
4c844851 2772 rx_ring->rx_buffer_len,
6ec43fe6 2773 PCI_DMA_FROMDEVICE);
9d5c8243
AK
2774 buffer_info->dma = 0;
2775 }
2776
2777 if (buffer_info->skb) {
2778 dev_kfree_skb(buffer_info->skb);
2779 buffer_info->skb = NULL;
2780 }
6ec43fe6 2781 if (buffer_info->page_dma) {
80785298
AD
2782 pci_unmap_page(rx_ring->pdev,
2783 buffer_info->page_dma,
6ec43fe6
AD
2784 PAGE_SIZE / 2,
2785 PCI_DMA_FROMDEVICE);
2786 buffer_info->page_dma = 0;
2787 }
9d5c8243 2788 if (buffer_info->page) {
9d5c8243
AK
2789 put_page(buffer_info->page);
2790 buffer_info->page = NULL;
bf36c1a0 2791 buffer_info->page_offset = 0;
9d5c8243
AK
2792 }
2793 }
2794
9d5c8243
AK
2795 size = sizeof(struct igb_buffer) * rx_ring->count;
2796 memset(rx_ring->buffer_info, 0, size);
2797
2798 /* Zero out the descriptor ring */
2799 memset(rx_ring->desc, 0, rx_ring->size);
2800
2801 rx_ring->next_to_clean = 0;
2802 rx_ring->next_to_use = 0;
9d5c8243
AK
2803}
2804
2805/**
2806 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2807 * @adapter: board private structure
2808 **/
2809static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2810{
2811 int i;
2812
2813 for (i = 0; i < adapter->num_rx_queues; i++)
3b644cf6 2814 igb_clean_rx_ring(&adapter->rx_ring[i]);
9d5c8243
AK
2815}
2816
2817/**
2818 * igb_set_mac - Change the Ethernet Address of the NIC
2819 * @netdev: network interface device structure
2820 * @p: pointer to an address structure
2821 *
2822 * Returns 0 on success, negative on failure
2823 **/
2824static int igb_set_mac(struct net_device *netdev, void *p)
2825{
2826 struct igb_adapter *adapter = netdev_priv(netdev);
28b0759c 2827 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
2828 struct sockaddr *addr = p;
2829
2830 if (!is_valid_ether_addr(addr->sa_data))
2831 return -EADDRNOTAVAIL;
2832
2833 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
28b0759c 2834 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
9d5c8243 2835
26ad9178
AD
2836 /* set the correct pool for the new PF MAC address in entry 0 */
2837 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2838 adapter->vfs_allocated_count);
e1739522 2839
9d5c8243
AK
2840 return 0;
2841}
2842
2843/**
68d480c4 2844 * igb_write_mc_addr_list - write multicast addresses to MTA
9d5c8243
AK
2845 * @netdev: network interface device structure
2846 *
68d480c4
AD
2847 * Writes multicast address list to the MTA hash table.
2848 * Returns: -ENOMEM on failure
2849 * 0 on no addresses written
2850 * X on writing X addresses to MTA
9d5c8243 2851 **/
68d480c4 2852static int igb_write_mc_addr_list(struct net_device *netdev)
9d5c8243
AK
2853{
2854 struct igb_adapter *adapter = netdev_priv(netdev);
2855 struct e1000_hw *hw = &adapter->hw;
ff41f8dc 2856 struct dev_mc_list *mc_ptr = netdev->mc_list;
68d480c4
AD
2857 u8 *mta_list;
2858 u32 vmolr = 0;
9d5c8243
AK
2859 int i;
2860
68d480c4
AD
2861 if (!netdev->mc_count) {
2862 /* nothing to program, so clear mc list */
2863 igb_update_mc_addr_list(hw, NULL, 0);
2864 igb_restore_vf_multicasts(adapter);
2865 return 0;
2866 }
9d5c8243 2867
68d480c4
AD
2868 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2869 if (!mta_list)
2870 return -ENOMEM;
ff41f8dc 2871
68d480c4
AD
2872 /* set vmolr receive overflow multicast bit */
2873 vmolr |= E1000_VMOLR_ROMPE;
2874
2875 /* The shared function expects a packed array of only addresses. */
2876 mc_ptr = netdev->mc_list;
2877
2878 for (i = 0; i < netdev->mc_count; i++) {
2879 if (!mc_ptr)
2880 break;
2881 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2882 mc_ptr = mc_ptr->next;
746b9f02 2883 }
68d480c4
AD
2884 igb_update_mc_addr_list(hw, mta_list, i);
2885 kfree(mta_list);
2886
2887 return netdev->mc_count;
2888}
2889
2890/**
2891 * igb_write_uc_addr_list - write unicast addresses to RAR table
2892 * @netdev: network interface device structure
2893 *
2894 * Writes unicast address list to the RAR table.
2895 * Returns: -ENOMEM on failure/insufficient address space
2896 * 0 on no addresses written
2897 * X on writing X addresses to the RAR table
2898 **/
2899static int igb_write_uc_addr_list(struct net_device *netdev)
2900{
2901 struct igb_adapter *adapter = netdev_priv(netdev);
2902 struct e1000_hw *hw = &adapter->hw;
2903 unsigned int vfn = adapter->vfs_allocated_count;
2904 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2905 int count = 0;
2906
2907 /* return ENOMEM indicating insufficient memory for addresses */
2908 if (netdev->uc.count > rar_entries)
2909 return -ENOMEM;
9d5c8243 2910
ff41f8dc
AD
2911 if (netdev->uc.count && rar_entries) {
2912 struct netdev_hw_addr *ha;
2913 list_for_each_entry(ha, &netdev->uc.list, list) {
2914 if (!rar_entries)
2915 break;
26ad9178
AD
2916 igb_rar_set_qsel(adapter, ha->addr,
2917 rar_entries--,
68d480c4
AD
2918 vfn);
2919 count++;
ff41f8dc
AD
2920 }
2921 }
2922 /* write the addresses in reverse order to avoid write combining */
2923 for (; rar_entries > 0 ; rar_entries--) {
2924 wr32(E1000_RAH(rar_entries), 0);
2925 wr32(E1000_RAL(rar_entries), 0);
2926 }
2927 wrfl();
2928
68d480c4
AD
2929 return count;
2930}
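/* [Editor's sketch] The RAR budgeting above: the top vfs_allocated_count + 1
 * receive-address registers are reserved for the PF/VF MAC addresses, and
 * only the remainder can hold secondary unicast filters. The entry count
 * below is an example value, not read from hardware.
 */
#include <stdio.h>

int main(void)
{
        unsigned rar_entry_count = 24;  /* e.g. an 82576-class MAC */
        unsigned vfn = 7;               /* VFs in use */
        printf("unicast filters available: %u\n",
               rar_entry_count - (vfn + 1));    /* 16 */
        return 0;
}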
2931
2932/**
2933 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2934 * @netdev: network interface device structure
2935 *
2936 * The set_rx_mode entry point is called whenever the unicast or multicast
2937 * address lists or the network interface flags are updated. This routine is
2938 * responsible for configuring the hardware for proper unicast, multicast,
2939 * promiscuous mode, and all-multi behavior.
2940 **/
2941static void igb_set_rx_mode(struct net_device *netdev)
2942{
2943 struct igb_adapter *adapter = netdev_priv(netdev);
2944 struct e1000_hw *hw = &adapter->hw;
2945 unsigned int vfn = adapter->vfs_allocated_count;
2946 u32 rctl, vmolr = 0;
2947 int count;
2948
2949 /* Check for Promiscuous and All Multicast modes */
2950 rctl = rd32(E1000_RCTL);
2951
 2952 /* clear the affected bits */
2953 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2954
2955 if (netdev->flags & IFF_PROMISC) {
2956 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2957 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2958 } else {
2959 if (netdev->flags & IFF_ALLMULTI) {
2960 rctl |= E1000_RCTL_MPE;
2961 vmolr |= E1000_VMOLR_MPME;
2962 } else {
2963 /*
2964 * Write addresses to the MTA, if the attempt fails
 2965 * then we should just turn on promiscuous mode so
2966 * that we can at least receive multicast traffic
2967 */
2968 count = igb_write_mc_addr_list(netdev);
2969 if (count < 0) {
2970 rctl |= E1000_RCTL_MPE;
2971 vmolr |= E1000_VMOLR_MPME;
2972 } else if (count) {
2973 vmolr |= E1000_VMOLR_ROMPE;
2974 }
2975 }
2976 /*
2977 * Write addresses to available RAR registers, if there is not
2978 * sufficient space to store all the addresses then enable
 2979 * unicast promiscuous mode
2980 */
2981 count = igb_write_uc_addr_list(netdev);
2982 if (count < 0) {
2983 rctl |= E1000_RCTL_UPE;
2984 vmolr |= E1000_VMOLR_ROPE;
2985 }
2986 rctl |= E1000_RCTL_VFE;
28fc06f5 2987 }
68d480c4 2988 wr32(E1000_RCTL, rctl);
28fc06f5 2989
68d480c4
AD
2990 /*
2991 * In order to support SR-IOV and eventually VMDq it is necessary to set
2992 * the VMOLR to enable the appropriate modes. Without this workaround
2993 * we will have issues with VLAN tag stripping not being done for frames
 2994 * that are only arriving because we are the default pool.
2995 */
2996 if (hw->mac.type < e1000_82576)
28fc06f5 2997 return;
9d5c8243 2998
68d480c4
AD
2999 vmolr |= rd32(E1000_VMOLR(vfn)) &
3000 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3001 wr32(E1000_VMOLR(vfn), vmolr);
28fc06f5 3002 igb_restore_vf_multicasts(adapter);
9d5c8243
AK
3003}
3004
3005/* Need to wait a few seconds after link up to get diagnostic information from
3006 * the phy */
3007static void igb_update_phy_info(unsigned long data)
3008{
3009 struct igb_adapter *adapter = (struct igb_adapter *) data;
f5f4cf08 3010 igb_get_phy_info(&adapter->hw);
9d5c8243
AK
3011}
3012
4d6b725e
AD
3013/**
3014 * igb_has_link - check shared code for link and determine up/down
3015 * @adapter: pointer to driver private info
3016 **/
3017static bool igb_has_link(struct igb_adapter *adapter)
3018{
3019 struct e1000_hw *hw = &adapter->hw;
3020 bool link_active = false;
3021 s32 ret_val = 0;
3022
3023 /* get_link_status is set on LSC (link status) interrupt or
3024 * rx sequence error interrupt. get_link_status will stay
3025 * false until the e1000_check_for_link establishes link
3026 * for copper adapters ONLY
3027 */
3028 switch (hw->phy.media_type) {
3029 case e1000_media_type_copper:
3030 if (hw->mac.get_link_status) {
3031 ret_val = hw->mac.ops.check_for_link(hw);
3032 link_active = !hw->mac.get_link_status;
3033 } else {
3034 link_active = true;
3035 }
3036 break;
4d6b725e
AD
3037 case e1000_media_type_internal_serdes:
3038 ret_val = hw->mac.ops.check_for_link(hw);
3039 link_active = hw->mac.serdes_has_link;
3040 break;
3041 default:
3042 case e1000_media_type_unknown:
3043 break;
3044 }
3045
3046 return link_active;
3047}
3048
9d5c8243
AK
3049/**
3050 * igb_watchdog - Timer Call-back
3051 * @data: pointer to adapter cast into an unsigned long
3052 **/
3053static void igb_watchdog(unsigned long data)
3054{
3055 struct igb_adapter *adapter = (struct igb_adapter *)data;
3056 /* Do the rest outside of interrupt context */
3057 schedule_work(&adapter->watchdog_task);
3058}
3059
3060static void igb_watchdog_task(struct work_struct *work)
3061{
3062 struct igb_adapter *adapter = container_of(work,
559e9c49
AD
3063 struct igb_adapter,
3064 watchdog_task);
9d5c8243 3065 struct e1000_hw *hw = &adapter->hw;
9d5c8243 3066 struct net_device *netdev = adapter->netdev;
9d5c8243 3067 u32 link;
7a6ea550 3068 int i;
9d5c8243 3069
4d6b725e 3070 link = igb_has_link(adapter);
9d5c8243
AK
3071 if (link) {
3072 if (!netif_carrier_ok(netdev)) {
3073 u32 ctrl;
330a6d6a
AD
3074 hw->mac.ops.get_speed_and_duplex(hw,
3075 &adapter->link_speed,
3076 &adapter->link_duplex);
9d5c8243
AK
3077
3078 ctrl = rd32(E1000_CTRL);
527d47c1
AD
 3079 /* Link status message must follow this format */
3080 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
9d5c8243 3081 "Flow Control: %s\n",
559e9c49
AD
3082 netdev->name,
3083 adapter->link_speed,
3084 adapter->link_duplex == FULL_DUPLEX ?
9d5c8243 3085 "Full Duplex" : "Half Duplex",
559e9c49
AD
3086 ((ctrl & E1000_CTRL_TFCE) &&
3087 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
3088 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
3089 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
9d5c8243
AK
3090
3091 /* tweak tx_queue_len according to speed/duplex and
3092 * adjust the timeout factor */
3093 netdev->tx_queue_len = adapter->tx_queue_len;
3094 adapter->tx_timeout_factor = 1;
3095 switch (adapter->link_speed) {
3096 case SPEED_10:
3097 netdev->tx_queue_len = 10;
3098 adapter->tx_timeout_factor = 14;
3099 break;
3100 case SPEED_100:
3101 netdev->tx_queue_len = 100;
3102 /* maybe add some timeout factor ? */
3103 break;
3104 }
3105
3106 netif_carrier_on(netdev);
9d5c8243 3107
4ae196df
AD
3108 igb_ping_all_vfs(adapter);
3109
4b1a9877 3110 /* link state has changed, schedule phy info update */
9d5c8243
AK
3111 if (!test_bit(__IGB_DOWN, &adapter->state))
3112 mod_timer(&adapter->phy_info_timer,
3113 round_jiffies(jiffies + 2 * HZ));
3114 }
3115 } else {
3116 if (netif_carrier_ok(netdev)) {
3117 adapter->link_speed = 0;
3118 adapter->link_duplex = 0;
527d47c1
AD
 3119 /* Link status message must follow this format */
3120 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3121 netdev->name);
9d5c8243 3122 netif_carrier_off(netdev);
4b1a9877 3123
4ae196df
AD
3124 igb_ping_all_vfs(adapter);
3125
4b1a9877 3126 /* link state has changed, schedule phy info update */
9d5c8243
AK
3127 if (!test_bit(__IGB_DOWN, &adapter->state))
3128 mod_timer(&adapter->phy_info_timer,
3129 round_jiffies(jiffies + 2 * HZ));
3130 }
3131 }
3132
9d5c8243 3133 igb_update_stats(adapter);
645a3abd 3134 igb_update_adaptive(hw);
9d5c8243 3135
dbabb065
AD
3136 for (i = 0; i < adapter->num_tx_queues; i++) {
3137 struct igb_ring *tx_ring = &adapter->tx_ring[i];
3138 if (!netif_carrier_ok(netdev)) {
9d5c8243
AK
3139 /* We've lost link, so the controller stops DMA,
3140 * but we've got queued Tx work that's never going
3141 * to get done, so reset controller to flush Tx.
3142 * (Do the reset outside of interrupt context). */
dbabb065
AD
3143 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3144 adapter->tx_timeout_count++;
3145 schedule_work(&adapter->reset_task);
3146 /* return immediately since reset is imminent */
3147 return;
3148 }
9d5c8243 3149 }
9d5c8243 3150
dbabb065
AD
3151 /* Force detection of hung controller every watchdog period */
3152 tx_ring->detect_tx_hung = true;
3153 }
f7ba205e 3154
9d5c8243 3155 /* Cause software interrupt to ensure rx ring is cleaned */
7a6ea550 3156 if (adapter->msix_entries) {
047e0030
AD
3157 u32 eics = 0;
3158 for (i = 0; i < adapter->num_q_vectors; i++) {
3159 struct igb_q_vector *q_vector = adapter->q_vector[i];
3160 eics |= q_vector->eims_value;
3161 }
7a6ea550
AD
3162 wr32(E1000_EICS, eics);
3163 } else {
3164 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3165 }
9d5c8243 3166
9d5c8243
AK
3167 /* Reset the timer */
3168 if (!test_bit(__IGB_DOWN, &adapter->state))
3169 mod_timer(&adapter->watchdog_timer,
3170 round_jiffies(jiffies + 2 * HZ));
3171}
3172
3173enum latency_range {
3174 lowest_latency = 0,
3175 low_latency = 1,
3176 bulk_latency = 2,
3177 latency_invalid = 255
3178};
3179
6eb5a7f1
AD
3180/**
3181 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3182 *
 3183 * Stores a new ITR value based strictly on packet size. This
3184 * algorithm is less sophisticated than that used in igb_update_itr,
3185 * due to the difficulty of synchronizing statistics across multiple
 3186 * receive rings. The divisors and thresholds used by this function
3187 * were determined based on theoretical maximum wire speed and testing
3188 * data, in order to minimize response time while increasing bulk
3189 * throughput.
3190 * This functionality is controlled by the InterruptThrottleRate module
3191 * parameter (see igb_param.c)
3192 * NOTE: This function is called only when operating in a multiqueue
3193 * receive environment.
047e0030 3194 * @q_vector: pointer to q_vector
6eb5a7f1 3195 **/
047e0030 3196static void igb_update_ring_itr(struct igb_q_vector *q_vector)
9d5c8243 3197{
047e0030 3198 int new_val = q_vector->itr_val;
6eb5a7f1 3199 int avg_wire_size = 0;
047e0030 3200 struct igb_adapter *adapter = q_vector->adapter;
9d5c8243 3201
6eb5a7f1
AD
3202 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3203 * ints/sec - ITR timer value of 120 ticks.
3204 */
3205 if (adapter->link_speed != SPEED_1000) {
047e0030 3206 new_val = 976;
6eb5a7f1 3207 goto set_itr_val;
9d5c8243 3208 }
047e0030
AD
3209
3210 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3211 struct igb_ring *ring = q_vector->rx_ring;
3212 avg_wire_size = ring->total_bytes / ring->total_packets;
3213 }
3214
3215 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3216 struct igb_ring *ring = q_vector->tx_ring;
3217 avg_wire_size = max_t(u32, avg_wire_size,
3218 (ring->total_bytes /
3219 ring->total_packets));
3220 }
3221
3222 /* if avg_wire_size isn't set no work was done */
3223 if (!avg_wire_size)
3224 goto clear_counts;
9d5c8243 3225
6eb5a7f1
AD
3226 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3227 avg_wire_size += 24;
3228
3229 /* Don't starve jumbo frames */
3230 avg_wire_size = min(avg_wire_size, 3000);
9d5c8243 3231
6eb5a7f1
AD
3232 /* Give a little boost to mid-size frames */
3233 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3234 new_val = avg_wire_size / 3;
3235 else
3236 new_val = avg_wire_size / 2;
9d5c8243 3237
6eb5a7f1 3238set_itr_val:
047e0030
AD
3239 if (new_val != q_vector->itr_val) {
3240 q_vector->itr_val = new_val;
3241 q_vector->set_itr = 1;
9d5c8243 3242 }
6eb5a7f1 3243clear_counts:
047e0030
AD
3244 if (q_vector->rx_ring) {
3245 q_vector->rx_ring->total_bytes = 0;
3246 q_vector->rx_ring->total_packets = 0;
3247 }
3248 if (q_vector->tx_ring) {
3249 q_vector->tx_ring->total_bytes = 0;
3250 q_vector->tx_ring->total_packets = 0;
3251 }
9d5c8243
AK
3252}
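/* [Editor's sketch] The ring-ITR heuristic above as a pure function,
 * with the same constants (24-byte framing overhead, 3000-byte clamp,
 * /3 boost for mid-size frames, /2 otherwise). Illustrative only.
 */
#include <stdio.h>

static int itr_from_avg_wire_size(int avg_wire_size)
{
        avg_wire_size += 24;                    /* CRC, preamble, and gap */
        if (avg_wire_size > 3000)
                avg_wire_size = 3000;           /* don't starve jumbo frames */
        if (avg_wire_size > 300 && avg_wire_size < 1200)
                return avg_wire_size / 3;       /* boost mid-size frames */
        return avg_wire_size / 2;
}

int main(void)
{
        printf("64B   -> itr %d\n", itr_from_avg_wire_size(64));   /* 44 */
        printf("512B  -> itr %d\n", itr_from_avg_wire_size(512));  /* 178 */
        printf("1500B -> itr %d\n", itr_from_avg_wire_size(1500)); /* 762 */
        return 0;
}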
3253
3254/**
3255 * igb_update_itr - update the dynamic ITR value based on statistics
 3256 * Stores a new ITR value based on packet and byte
 3257 * counts during the last interrupt. The advantage of per-interrupt
3258 * computation is faster updates and more accurate ITR for the current
3259 * traffic pattern. Constants in this function were computed
3260 * based on theoretical maximum wire speed and thresholds were set based
3261 * on testing data as well as attempting to minimize response time
3262 * while increasing bulk throughput.
 3263 * This functionality is controlled by the InterruptThrottleRate module
3264 * parameter (see igb_param.c)
3265 * NOTE: These calculations are only valid when operating in a single-
3266 * queue environment.
3267 * @adapter: pointer to adapter
047e0030 3268 * @itr_setting: current q_vector->itr_val
9d5c8243
AK
3269 * @packets: the number of packets during this measurement interval
3270 * @bytes: the number of bytes during this measurement interval
3271 **/
3272static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
3273 int packets, int bytes)
3274{
3275 unsigned int retval = itr_setting;
3276
3277 if (packets == 0)
3278 goto update_itr_done;
3279
3280 switch (itr_setting) {
3281 case lowest_latency:
3282 /* handle TSO and jumbo frames */
3283 if (bytes/packets > 8000)
3284 retval = bulk_latency;
3285 else if ((packets < 5) && (bytes > 512))
3286 retval = low_latency;
3287 break;
3288 case low_latency: /* 50 usec aka 20000 ints/s */
3289 if (bytes > 10000) {
3290 /* this if handles the TSO accounting */
3291 if (bytes/packets > 8000) {
3292 retval = bulk_latency;
3293 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
3294 retval = bulk_latency;
3295 } else if ((packets > 35)) {
3296 retval = lowest_latency;
3297 }
3298 } else if (bytes/packets > 2000) {
3299 retval = bulk_latency;
3300 } else if (packets <= 2 && bytes < 512) {
3301 retval = lowest_latency;
3302 }
3303 break;
3304 case bulk_latency: /* 250 usec aka 4000 ints/s */
3305 if (bytes > 25000) {
3306 if (packets > 35)
3307 retval = low_latency;
1e5c3d21 3308 } else if (bytes < 1500) {
9d5c8243
AK
3309 retval = low_latency;
3310 }
3311 break;
3312 }
3313
3314update_itr_done:
3315 return retval;
3316}

static void igb_set_itr(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	u16 current_itr;
	u32 new_itr = q_vector->itr_val;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->rx_itr = igb_update_itr(adapter,
					 adapter->rx_itr,
					 adapter->rx_ring->total_packets,
					 adapter->rx_ring->total_bytes);

	adapter->tx_itr = igb_update_itr(adapter,
					 adapter->tx_itr,
					 adapter->tx_ring->total_packets,
					 adapter->tx_ring->total_bytes);
	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 56;  /* aka 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = 196; /* aka 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = 980; /* aka 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	adapter->rx_ring->total_bytes = 0;
	adapter->rx_ring->total_packets = 0;
	adapter->tx_ring->total_bytes = 0;
	adapter->tx_ring->total_packets = 0;

	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing */
		new_itr = new_itr > q_vector->itr_val ?
			     max((new_itr * q_vector->itr_val) /
				 (new_itr + (q_vector->itr_val >> 2)),
				 new_itr) :
			     new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

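/*
 * Editorial note (not in the original source): the constants above are
 * consistent with an ITR granularity of roughly 256 ns per unit, e.g.
 * 56 * 256 ns ~= 14.3 us between interrupts (~70,000 ints/sec) and
 * 980 * 256 ns ~= 250 us (~4,000 ints/sec).  The smoothing step when the
 * rate increases also damps jumps: moving from itr_val 980 (~4k ints/s)
 * towards 196 (~20k ints/s) first yields
 *   max(196 * 980 / (196 + (980 >> 2)), 196) == 435,
 * an intermediate value of roughly 9k ints/sec.
 */
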
#define IGB_TX_FLAGS_CSUM		0x00000001
#define IGB_TX_FLAGS_VLAN		0x00000002
#define IGB_TX_FLAGS_TSO		0x00000004
#define IGB_TX_FLAGS_IPV4		0x00000008
#define IGB_TX_FLAGS_TSTAMP		0x00000010
#define IGB_TX_FLAGS_VLAN_MASK		0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT		16

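/*
 * Editorial example (hypothetical tag value, not in the original source):
 * the upper 16 bits of tx_flags carry the 802.1Q tag, so a frame with tag
 * 0x0065 that needs checksum offload is encoded as
 *   tx_flags = IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN |
 *              (0x0065 << IGB_TX_FLAGS_VLAN_SHIFT);
 * the tag is later recovered with (tx_flags & IGB_TX_FLAGS_VLAN_MASK).
 */
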
static inline int igb_tso_adv(struct igb_ring *tx_ring,
			      struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct e1000_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	u32 mss_l4len_idx, l4len;
	*hdr_len = 0;

	if (skb_header_cloned(skb)) {
		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (err)
			return err;
	}

	l4len = tcp_hdrlen(skb);
	*hdr_len += l4len;

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = ip_hdr(skb);
		iph->tot_len = 0;
		iph->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
							 iph->daddr, 0,
							 IPPROTO_TCP,
							 0);
	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						       &ipv6_hdr(skb)->daddr,
						       0, IPPROTO_TCP, 0);
	}

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
	/* VLAN MACLEN IPLEN */
	if (tx_flags & IGB_TX_FLAGS_VLAN)
		info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
	info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
	*hdr_len += skb_network_offset(skb);
	info |= skb_network_header_len(skb);
	*hdr_len += skb_network_header_len(skb);
	context_desc->vlan_macip_lens = cpu_to_le32(info);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

	if (skb->protocol == htons(ETH_P_IP))
		tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
	tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;

	context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);

	/* MSS L4LEN IDX */
	mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);

	/* For 82575, context index must be unique per ring. */
	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
	context_desc->seqnum_seed = 0;

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = 0;
	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	return true;
}

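/*
 * Editorial example (not in the original source): for a TSO frame carried
 * as Ethernet + IPv4 + TCP with no options, igb_tso_adv() accumulates
 * *hdr_len = 20 (tcp_hdrlen) + 14 (skb_network_offset) +
 * 20 (skb_network_header_len) = 54 bytes, the full per-segment header that
 * the hardware replicates in front of each generated segment.
 */
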
static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
				   struct sk_buff *skb, u32 tx_flags)
{
	struct e1000_adv_tx_context_desc *context_desc;
	struct pci_dev *pdev = tx_ring->pdev;
	struct igb_buffer *buffer_info;
	u32 info = 0, tu_cmd = 0;
	unsigned int i;

	if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IGB_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		buffer_info = &tx_ring->buffer_info[i];
		context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IGB_TX_FLAGS_VLAN)
			info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);

		info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			info |= skb_network_header_len(skb);

		context_desc->vlan_macip_lens = cpu_to_le32(info);

		tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			__be16 protocol;

			if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
				const struct vlan_ethhdr *vhdr =
					(const struct vlan_ethhdr *)skb->data;

				protocol = vhdr->h_vlan_encapsulated_proto;
			} else {
				protocol = skb->protocol;
			}

			switch (protocol) {
			case cpu_to_be16(ETH_P_IP):
				tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			case cpu_to_be16(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
				else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
					tu_cmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
				break;
			default:
				if (unlikely(net_ratelimit()))
					dev_warn(&pdev->dev,
						 "partial checksum but proto=%x!\n",
						 skb->protocol);
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
		context_desc->seqnum_seed = 0;
		if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
			context_desc->mss_l4len_idx =
				cpu_to_le32(tx_ring->reg_idx << 4);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->dma = 0;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}

#define IGB_MAX_TXD_PWR	16
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)

static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
				 unsigned int first)
{
	struct igb_buffer *buffer_info;
	struct pci_dev *pdev = tx_ring->pdev;
	unsigned int len = skb_headlen(skb);
	unsigned int count = 0, i;
	unsigned int f;

	i = tx_ring->next_to_use;

	buffer_info = &tx_ring->buffer_info[i];
	BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
	buffer_info->length = len;
	/* set time_stamp *before* dma to help avoid a possible race */
	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;
	buffer_info->dma = pci_map_single(pdev, skb->data, len,
					  PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, buffer_info->dma))
		goto dma_error;

	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
		struct skb_frag_struct *frag;

		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;

		frag = &skb_shinfo(skb)->frags[f];
		len = frag->size;

		buffer_info = &tx_ring->buffer_info[i];
		BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
		buffer_info->length = len;
		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;
		buffer_info->mapped_as_page = true;
		buffer_info->dma = pci_map_page(pdev,
						frag->page,
						frag->page_offset,
						len,
						PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, buffer_info->dma))
			goto dma_error;
	}

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[first].next_to_watch = i;

	return ++count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed buffer_info mapping */
	buffer_info->dma = 0;
	buffer_info->time_stamp = 0;
	buffer_info->length = 0;
	buffer_info->next_to_watch = 0;
	buffer_info->mapped_as_page = false;

	/* clear timestamp and dma mappings for remaining portion of packet;
	 * note: count and i are unsigned, so walk backwards with an explicit
	 * wrap instead of testing "count >= 0" or "i < 0", which would never
	 * terminate/trigger for unsigned types */
	while (count--) {
		if (i == 0)
			i = tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
	}

	return 0;
}
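
/*
 * Editorial example (not in the original source) of the unwind path above:
 * for a linear head plus three page frags where the third frag's
 * pci_map_page() fails, count == 3 on entry to dma_error.  The failed
 * slot's bookkeeping is cleared first, then the loop walks i back three
 * slots and unmaps frag 1, frag 0 and the linear head in turn, leaving the
 * ring exactly as it was before the call.
 */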

static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
				    int tx_flags, int count, u32 paylen,
				    u8 hdr_len)
{
	union e1000_adv_tx_desc *tx_desc;
	struct igb_buffer *buffer_info;
	u32 olinfo_status = 0, cmd_type_len;
	unsigned int i = tx_ring->next_to_use;

	cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
			E1000_ADVTXD_DCMD_DEXT);

	if (tx_flags & IGB_TX_FLAGS_VLAN)
		cmd_type_len |= E1000_ADVTXD_DCMD_VLE;

	if (tx_flags & IGB_TX_FLAGS_TSTAMP)
		cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;

	if (tx_flags & IGB_TX_FLAGS_TSO) {
		cmd_type_len |= E1000_ADVTXD_DCMD_TSE;

		/* insert tcp checksum */
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;

		/* insert ip checksum */
		if (tx_flags & IGB_TX_FLAGS_IPV4)
			olinfo_status |= E1000_TXD_POPTS_IXSM << 8;

	} else if (tx_flags & IGB_TX_FLAGS_CSUM) {
		olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
	}

	if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
	    (tx_flags & (IGB_TX_FLAGS_CSUM |
			 IGB_TX_FLAGS_TSO |
			 IGB_TX_FLAGS_VLAN)))
		olinfo_status |= tx_ring->reg_idx << 4;

	olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);

	do {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		count--;
		i++;
		if (i == tx_ring->count)
			i = 0;
	} while (count > 0);

	tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64). */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, tx_ring->tail);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems */
	mmiowb();
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available. */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);
	tx_ring->tx_stats.restart_queue++;
	return 0;
}

static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
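
/*
 * Editorial note (not in the original source): the smp_mb() above pairs
 * with the smp_mb() in igb_clean_tx_irq().  Without it, "producer stops the
 * queue / consumer frees descriptors and checks __netif_subqueue_stopped()"
 * could interleave so that neither side wakes the queue; re-checking
 * igb_desc_unused() after the barrier closes that window.
 */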

netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
				    struct igb_ring *tx_ring)
{
	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int tso = 0, count;
	union skb_shared_tx *shtx = skb_tx(skb);

	/* need: 1 descriptor per page,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for skb->data,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time */
	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	if (unlikely(shtx->hardware)) {
		shtx->in_progress = 1;
		tx_flags |= IGB_TX_FLAGS_TSTAMP;
	}

	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IGB_TX_FLAGS_IPV4;

	first = tx_ring->next_to_use;
	if (skb_is_gso(skb)) {
		tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);

		if (tso < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	if (tso)
		tx_flags |= IGB_TX_FLAGS_TSO;
	else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IGB_TX_FLAGS_CSUM;

	/*
	 * count reflects descriptors mapped; if 0 then a mapping error
	 * has occurred and we need to rewind the descriptor queue
	 */
	count = igb_tx_map_adv(tx_ring, skb, first);
	if (!count) {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
		return NETDEV_TX_OK;
	}

	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);

	return NETDEV_TX_OK;
}
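
/*
 * Editorial example (not in the original source) of the descriptor budget
 * used above: a non-TSO skb with 3 page frags asks igb_maybe_stop_tx() for
 * nr_frags + 4 = 7 descriptors: 3 for the frags, 1 for skb->data, 1 for a
 * possible context descriptor, and a 2 descriptor gap so the tail never
 * touches the head.
 */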

static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring;
	int r_idx = 0;

	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
	tx_ring = adapter->multi_tx_table[r_idx];

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow.  Right now, performance is impacted slightly negatively
	 * if using multiple tx queues.  If the stack breaks away from a
	 * single qdisc implementation, we can look at this again. */
	return igb_xmit_frame_ring_adv(skb, tx_ring);
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type == e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;
	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *igb_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	u32 rx_buffer_len, i;

	if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		dev_err(&pdev->dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
		dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
		return -EINVAL;
	}

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 */

	if (max_frame <= IGB_RXBUFFER_1024)
		rx_buffer_len = IGB_RXBUFFER_1024;
	else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
		rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else
		rx_buffer_len = IGB_RXBUFFER_128;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
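
/*
 * Editorial example (not in the original source): a standard MTU of 1500
 * gives max_frame = 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518, which
 * fits MAXIMUM_ETHERNET_VLAN_SIZE, so every RX queue gets 1522 byte
 * buffers.  A jumbo MTU of 9000 instead selects IGB_RXBUFFER_128: only
 * headers land in the small buffer and the payload spills into pages
 * (see igb_get_hlen() further below).
 */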

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/

void igb_update_stats(struct igb_adapter *adapter)
{
	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 rnbc;
	u16 phy_tmp;
	int i;
	u64 bytes, packets;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/*
	 * Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_rx_queues; i++) {
		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
		adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
		net_stats->rx_fifo_errors += rqdpc_tmp;
		bytes += adapter->rx_ring[i].rx_stats.bytes;
		packets += adapter->rx_ring[i].rx_stats.packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		bytes += adapter->tx_ring[i].tx_stats.bytes;
		packets += adapter->tx_ring[i].tx_stats.packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	adapter->stats.mpc += rd32(E1000_MPC);
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	rnbc = rd32(E1000_RNBC);
	adapter->stats.rnbc += rnbc;
	net_stats->rx_fifo_errors += rnbc;
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	/* used for adaptive IFS */
	hw->mac.tx_packet_delta = rd32(E1000_TPT);
	adapter->stats.tpt += hw->mac.tx_packet_delta;
	hw->mac.collision_delta = rd32(E1000_COLC);
	adapter->stats.colc += hw->mac.collision_delta;

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	adapter->stats.rxerrc += rd32(E1000_RXERRC);
	adapter->stats.tncrs += rd32(E1000_TNCRS);
	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Phy Stats */
	if (hw->phy.media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}
	}

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
}

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (adapter->vfs_allocated_count)
		wr32(E1000_IMS, E1000_IMS_LSC |
				E1000_IMS_VMMB |
				E1000_IMS_DOUTSYNC);
	else
		wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (q_vector->itr_shift)
		itr_val |= itr_val << q_vector->itr_shift;
	else
		itr_val |= 0x8000000;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx_ring) {
		int q = q_vector->tx_ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx_ring) {
		int q = q_vector->rx_ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		q_vector->cpu = -1;
		igb_update_dca(q_vector);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear (not set) any stale promiscuous flags before reprocessing */
	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/*
		 * if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* extract the vid *before* zeroing the entry,
			 * otherwise the VFTA is always cleared for vid 0 */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}

static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg, i;

	/* The vlvf table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return -1;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return -1;

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));
		if ((reg & E1000_VLVF_VLANID_ENABLE) &&
		    vid == (reg & E1000_VLVF_VLANID_MASK))
			break;
	}

	if (add) {
		if (i == E1000_VLVF_ARRAY_SIZE) {
			/* Did not find a matching VLAN ID entry that was
			 * enabled.  Search for a free filter entry, i.e.
			 * one without the enable bit set
			 */
			for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
				reg = rd32(E1000_VLVF(i));
				if (!(reg & E1000_VLVF_VLANID_ENABLE))
					break;
			}
		}
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* Found an enabled/available entry */
			reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

			/* if !enabled we need to set this up in vfta */
			if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
				/* add VID to filter table */
				igb_vfta_set(hw, vid, true);
				reg |= E1000_VLVF_VLANID_ENABLE;
			}
			reg &= ~E1000_VLVF_VLANID_MASK;
			reg |= vid;
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				/* grow the max receive size by 4 bytes to
				 * make room for the VLAN tag */
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size += 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}

			adapter->vf_data[vf].vlans_enabled++;
			return 0;
		}
	} else {
		if (i < E1000_VLVF_ARRAY_SIZE) {
			/* remove vf from the pool */
			reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
			/* if pool is empty then remove entry from vfta */
			if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
				reg = 0;
				igb_vfta_set(hw, vid, false);
			}
			wr32(E1000_VLVF(i), reg);

			/* do not modify RLPML for PF devices */
			if (vf >= adapter->vfs_allocated_count)
				return 0;

			adapter->vf_data[vf].vlans_enabled--;
			if (!adapter->vf_data[vf].vlans_enabled) {
				u32 size;
				reg = rd32(E1000_VMOLR(vf));
				size = reg & E1000_VMOLR_RLPML_MASK;
				size -= 4;
				reg &= ~E1000_VMOLR_RLPML_MASK;
				reg |= size;
				wr32(E1000_VMOLR(vf), reg);
			}
			return 0;
		}
	}
	return -1;
}
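
/*
 * Editorial example (not in the original source): each VLVF entry holds a
 * VLAN id plus one pool-select bit per pool.  Adding vid 100 for vf 0 on a
 * free entry sets
 *   reg = E1000_VLVF_VLANID_ENABLE | 100 |
 *         (1 << (E1000_VLVF_POOLSEL_SHIFT + 0));
 * removing it clears just that pool bit, and only when the POOLSEL field
 * drains to zero is the vid also dropped from the VFTA.
 */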

static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);

	return igb_vlvf_set(adapter, vid, add, vf);
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	/* clear all flags */
	adapter->vf_data[vf].flags = 0;
	adapter->vf_data[vf].last_nack = jiffies;

	/* reset offloads to defaults */
	igb_set_vmolr(adapter, vf);

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* generate a new mac address as we were hotplug removed/added */
	random_ether_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | (1 << vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | (1 << vf));

	adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
	memcpy(addr, vf_mac, 6);
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	unsigned char *addr = (unsigned char *)&msg[1];
	int err = -1;

	if (is_valid_ether_addr(addr))
		err = igb_set_vf_mac(adapter, vf, addr);

	return err;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);

	if (retval) {
		/* if receive failed revoke the VF's CTS status and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		return;

	/*
	 * until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */

	if (msgbuf[0] == E1000_VF_RESET) {
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			return;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = igb_set_vf_vlan(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	igb_write_mbx(hw, msgbuf, 1, vf);
}

static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* The UTA table only exists on 82576 hardware and newer */
	if (hw->mac.type < e1000_82576)
		return;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = 0; i < hw->mac.uta_reg_count; i++)
		array_wr32(E1000_UTA, i, ~0);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write */
	u32 icr = rd32(E1000_ICR);
	if (!icr)
		return IRQ_NONE;  /* Not our interrupt */

	igb_write_itr(q_vector);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
		if (!adapter->msix_entries)
			igb_set_itr(adapter);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	int tx_clean_complete = 1, work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx_ring)
		tx_clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx_ring)
		igb_clean_rx_irq_adv(q_vector, &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If not enough Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		igb_ring_irq_enable(q_vector);
	}

	return work_done;
}

/**
 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @shhwtstamps: timestamp structure to update
 * @regval: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions
 */
static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
				   struct skb_shared_hwtstamps *shhwtstamps,
				   u64 regval)
{
	u64 ns;

	/*
	 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
	 * 24 to match clock shift we setup earlier.
	 */
	if (adapter->hw.mac.type == e1000_82580)
		regval <<= IGB_82580_TSYNC_SHIFT;

	ns = timecounter_cyc2time(&adapter->clock, regval);
	timecompare_update(&adapter->compare, ns);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
	shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
}

/**
 * igb_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: pointer to q_vector containing needed info
 * @skb: packet that was just sent
 *
 * If we were asked to do hardware stamping and such a time stamp is
 * available, then it must have been for this skb here because we only
 * allow one such packet into the queue.
 */
static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	union skb_shared_tx *shtx = skb_tx(skb);
	struct e1000_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval;

	/* if skb does not support hw timestamp or TX stamp not valid exit */
	if (likely(!shtx->hardware) ||
	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
		return;

	regval = rd32(E1000_TXSTMPL);
	regval |= (u64)rd32(E1000_TXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
	skb_tstamp_tx(skb, &shhwtstamps);
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx_ring;
	struct net_device *netdev = tx_ring->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	union e1000_adv_tx_desc *tx_desc, *eop_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i, eop, count = 0;
	bool cleaned = false;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		for (cleaned = false; !cleaned; count++) {
			tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);
			skb = buffer_info->skb;

			if (skb) {
				unsigned int segs, bytecount;
				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;

				igb_tx_hwtstamp(q_vector, skb);
			}

			igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	if (unlikely(count &&
		     netif_carrier_ok(netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			tx_ring->tx_stats.restart_queue++;
		}
	}

	if (tx_ring->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		tx_ring->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(&tx_ring->pdev->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%x>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				readl(tx_ring->head),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_ring->buffer_info[eop].time_stamp,
				eop,
				jiffies,
				eop_desc->wb.status);
			netif_stop_subqueue(netdev, tx_ring->queue_index);
		}
	}
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	return (count < tx_ring->count);
}
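
/*
 * Editorial note (not in the original source) on the cleanup loop above:
 * next_to_watch stored in the first buffer of each packet points at that
 * packet's last descriptor (eop).  The outer loop only advances while the
 * eop descriptor has DD set, so packets are always reclaimed whole; the
 * inner loop frees every buffer up to and including eop before re-reading
 * the next packet's watch index.
 */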

/**
 * igb_receive_skb - helper function to handle rx indications
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void igb_receive_skb(struct igb_q_vector *q_vector,
			    struct sk_buff *skb,
			    u16 vlan_tag)
{
	struct igb_adapter *adapter = q_vector->adapter;

	if (vlan_tag)
		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
				 vlan_tag, skb);
	else
		napi_gro_receive(&q_vector->napi, skb);
}

static inline void igb_rx_checksum_adv(struct igb_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb->ip_summed = CHECKSUM_NONE;

	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
	    (status_err & E1000_RXD_STAT_IXSM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (status_err &
	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c) -- so only
		 * count a checksum error when it is not that errata case
		 */
		if (!((skb->len == 60) &&
		      (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)))
			ring->rx_stats.csum_err++;

		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
}

static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
				   struct sk_buff *skb)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	u64 regval;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
		return;
	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	regval = rd32(E1000_RXSTMPL);
	regval |= (u64)rd32(E1000_RXSTMPH) << 32;

	igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
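
/*
 * Illustrative sketch (not part of the driver): the 64-bit SYSTIM value
 * above is assembled from two 32-bit register reads, low half first.
 * E.g. lo = 0x89abcdef, hi = 0x01234567 yields 0x0123456789abcdef.
 */
#if 0 /* illustrative only, not built */
static u64 systim_from_halves(u32 lo, u32 hi)
{
	return (u64)lo | ((u64)hi << 32);
}
#endif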

static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
			       union e1000_adv_rx_desc *rx_desc)
{
	/* HW will not DMA in data larger than the given buffer, even if it
	 * parses the (NFS, of course) header to be larger. In that case, it
	 * fills the header buffer and spills the rest into the page.
	 */
	u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
		    E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
	if (hlen > rx_ring->rx_buffer_len)
		hlen = rx_ring->rx_buffer_len;
	return hlen;
}
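
/*
 * Worked example (assuming E1000_RXDADV_HDRBUFLEN_MASK = 0x7FE0 and a
 * shift of 5, as defined in the 82575 headers): a written-back hdr_info
 * of 0x0140 decodes to a header length of (0x0140 & 0x7FE0) >> 5 = 10
 * bytes; verify the constants against the local e1000 headers.
 */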

static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
				 int *work_done, int budget)
{
	struct igb_ring *rx_ring = q_vector->rx_ring;
	struct net_device *netdev = rx_ring->netdev;
	struct pci_dev *pdev = rx_ring->pdev;
	union e1000_adv_rx_desc *rx_desc, *next_rxd;
	struct igb_buffer *buffer_info, *next_buffer;
	struct sk_buff *skb;
	bool cleaned = false;
	int cleaned_count = 0;
	int current_node = numa_node_id();
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int i;
	u32 staterr;
	u16 length;
	u16 vlan_tag;

	i = rx_ring->next_to_clean;
	buffer_info = &rx_ring->buffer_info[i];
	rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= budget)
			break;
		(*work_done)++;

		skb = buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		buffer_info->skb = NULL;

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		next_buffer = &rx_ring->buffer_info[i];

		length = le16_to_cpu(rx_desc->wb.upper.length);
		cleaned = true;
		cleaned_count++;

		if (buffer_info->dma) {
			pci_unmap_single(pdev, buffer_info->dma,
					 rx_ring->rx_buffer_len,
					 PCI_DMA_FROMDEVICE);
			buffer_info->dma = 0;
			if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
				skb_put(skb, length);
				goto send_up;
			}
			skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
		}

		if (length) {
			pci_unmap_page(pdev, buffer_info->page_dma,
				       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
			buffer_info->page_dma = 0;

			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
					   buffer_info->page,
					   buffer_info->page_offset,
					   length);

			/* recycle the half page only if we hold the sole
			 * reference and the page is local to this node */
			if ((page_count(buffer_info->page) != 1) ||
			    (page_to_nid(buffer_info->page) != current_node))
				buffer_info->page = NULL;
			else
				get_page(buffer_info->page);

			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		}

		if (!(staterr & E1000_RXD_STAT_EOP)) {
			buffer_info->skb = next_buffer->skb;
			buffer_info->dma = next_buffer->dma;
			next_buffer->skb = skb;
			next_buffer->dma = 0;
			goto next_desc;
		}
send_up:
		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		igb_rx_hwtstamp(q_vector, staterr, skb);
		total_bytes += skb->len;
		total_packets++;

		igb_rx_checksum_adv(rx_ring, staterr, skb);

		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, rx_ring->queue_index);

		vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
			    le16_to_cpu(rx_desc->wb.upper.vlan) : 0);

		igb_receive_skb(q_vector, skb, vlan_tag);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = igb_desc_unused(rx_ring);

	if (cleaned_count)
		igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);

	rx_ring->total_packets += total_packets;
	rx_ring->total_bytes += total_bytes;
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	return cleaned;
}
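
/*
 * Illustrative sketch (not part of the driver): the half-page recycling
 * test used in the receive loop above, isolated. The helper name is
 * hypothetical.
 */
#if 0 /* illustrative only, not built */
static bool rx_half_page_reusable(struct page *page, int current_node)
{
	/* reuse only if we hold the sole reference and the page is local */
	return (page_count(page) == 1) &&
	       (page_to_nid(page) == current_node);
}
#endif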

/**
 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
 * @rx_ring: address of the receive ring structure to repopulate
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = rx_ring->netdev;
	union e1000_adv_rx_desc *rx_desc;
	struct igb_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	int bufsz;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	bufsz = rx_ring->rx_buffer_len;

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);

		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
			if (!buffer_info->page) {
				buffer_info->page = netdev_alloc_page(netdev);
				if (!buffer_info->page) {
					rx_ring->rx_stats.alloc_failed++;
					goto no_buffers;
				}
				buffer_info->page_offset = 0;
			} else {
				buffer_info->page_offset ^= PAGE_SIZE / 2;
			}
			buffer_info->page_dma =
				pci_map_page(rx_ring->pdev, buffer_info->page,
					     buffer_info->page_offset,
					     PAGE_SIZE / 2,
					     PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->page_dma)) {
				buffer_info->page_dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}

		skb = buffer_info->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			if (!skb) {
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}

			buffer_info->skb = skb;
		}
		if (!buffer_info->dma) {
			buffer_info->dma = pci_map_single(rx_ring->pdev,
							  skb->data,
							  bufsz,
							  PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rx_ring->pdev,
						  buffer_info->dma)) {
				buffer_info->dma = 0;
				rx_ring->rx_stats.alloc_failed++;
				goto no_buffers;
			}
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (bufsz < IGB_RXBUFFER_1024) {
			rx_desc->read.pkt_addr =
				cpu_to_le64(buffer_info->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
			rx_desc->read.hdr_addr = 0;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i == 0)
			i = (rx_ring->count - 1);
		else
			i--;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
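
/*
 * Illustrative note (not part of the driver): the tail update above is
 * the usual producer publish pattern; condensed, the required ordering
 * is the three steps below.
 */
#if 0 /* illustrative only, not built */
	rx_desc->read.pkt_addr = cpu_to_le64(dma); /* 1. fill descriptor  */
	wmb();                                     /* 2. order the writes */
	writel(i, rx_ring->tail);                  /* 3. publish to NIC   */
#endif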

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to the ioctl interface request
 * @cmd: ioctl command
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
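
/*
 * Illustrative sketch (user space, not part of the driver): exercising
 * the SIOCGMIIREG path above to read the PHY's Basic Mode Status
 * Register (MII_BMSR). Compiled separately; "eth0" is a placeholder
 * interface name.
 */
#if 0 /* illustrative only, not built with the driver */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0) {	/* fills mii->phy_id */
		mii->reg_num = MII_BMSR;
		if (ioctl(fd, SIOCGMIIREG, &ifr) == 0)
			printf("BMSR = 0x%04x\n", mii->val_out);
	}
	close(fd);
	return 0;
}
#endif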

/**
 * igb_hwtstamp_ioctl - control hardware time stamping
 * @netdev: network interface device structure
 * @ifr: pointer to the ioctl interface request
 * @cmd: ioctl command
 *
 * Outgoing time stamping can be enabled and disabled. Play nice and
 * disable it when requested, although it shouldn't cause any overhead
 * when no packet needs it. At most one packet in the queue may be
 * marked for time stamping, otherwise it would be impossible to tell
 * for sure to which packet the hardware time stamp belongs.
 *
 * Incoming time stamping has to be configured via the hardware
 * filters. Not all combinations are supported, in particular event
 * type has to be specified. Matching the kind of event packet is
 * not supported, with the exception of "all V2 events regardless of
 * layer 2 or 4".
 **/
static int igb_hwtstamp_ioctl(struct net_device *netdev,
			      struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct hwtstamp_config config;
	u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
	u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
	u32 tsync_rx_cfg = 0;
	bool is_l4 = false;
	bool is_l2 = false;
	u32 regval;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		tsync_tx_ctl = 0;
		/* fall through */
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		tsync_rx_ctl = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_ALL:
		/*
		 * register TSYNCRXCFG must be set, therefore it is not
		 * possible to time stamp both Sync and Delay_Req messages
		 * => fall back to time stamping all packets
		 */
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
		is_l4 = true;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
		tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
		is_l2 = true;
		is_l4 = true;
		config.rx_filter = HWTSTAMP_FILTER_SOME;
		break;
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		is_l2 = true;
		break;
	default:
		return -ERANGE;
	}

	/* 82575 cannot time stamp; reject anything but "everything off" */
	if (hw->mac.type == e1000_82575) {
		if (tsync_rx_ctl | tsync_tx_ctl)
			return -EINVAL;
		return 0;
	}

	/* enable/disable TX */
	regval = rd32(E1000_TSYNCTXCTL);
	regval &= ~E1000_TSYNCTXCTL_ENABLED;
	regval |= tsync_tx_ctl;
	wr32(E1000_TSYNCTXCTL, regval);

	/* enable/disable RX */
	regval = rd32(E1000_TSYNCRXCTL);
	regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
	regval |= tsync_rx_ctl;
	wr32(E1000_TSYNCRXCTL, regval);

	/* define which PTP packets are time stamped */
	wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);

	/* define ethertype filter for timestamped packets */
	if (is_l2)
		wr32(E1000_ETQF(3),
		     (E1000_ETQF_FILTER_ENABLE | /* enable filter */
		      E1000_ETQF_1588 |          /* enable timestamping */
		      ETH_P_1588));              /* 1588 eth protocol type */
	else
		wr32(E1000_ETQF(3), 0);

#define PTP_PORT 319
	/* L4 Queue Filter[3]: filter by destination port and protocol */
	if (is_l4) {
		u32 ftqf = (IPPROTO_UDP /* UDP */
			| E1000_FTQF_VF_BP /* VF not compared */
			| E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
			| E1000_FTQF_MASK); /* mask all inputs */
		ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */

		wr32(E1000_IMIR(3), htons(PTP_PORT));
		wr32(E1000_IMIREXT(3),
		     (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
		if (hw->mac.type == e1000_82576) {
			/* enable source port check */
			wr32(E1000_SPQF(3), htons(PTP_PORT));
			ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
		}
		wr32(E1000_FTQF(3), ftqf);
	} else {
		wr32(E1000_FTQF(3), E1000_FTQF_MASK);
	}
	wrfl();

	adapter->hwtstamp_config = config;

	/* clear TX/RX time stamp registers, just to be sure */
	regval = rd32(E1000_TXSTMPH);
	regval = rd32(E1000_RXSTMPH);

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
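
/*
 * Illustrative sketch (user space, not part of the driver): requesting
 * hardware time stamping through the SIOCSHWTSTAMP path above, in the
 * style of Documentation/networking/timestamping. Compiled separately;
 * "eth0" and the helper name are placeholders.
 */
#if 0 /* illustrative only, not built with the driver */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(int fd /* any socket */)
{
	struct ifreq ifr;
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;                 /* stamp outgoing PTP */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; /* stamp V2 events */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* on return, cfg holds what the driver actually enabled */
	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif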

/**
 * igb_ioctl - entry point for interface ioctls
 * @netdev: network interface device structure
 * @ifr: pointer to the ioctl interface request
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCSHWTSTAMP:
		return igb_hwtstamp_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_read_config_word(adapter->pdev, cap_offset + reg, value);

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;
	u16 cap_offset;

	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (!cap_offset)
		return -E1000_ERR_CONFIG;

	pci_write_config_word(adapter->pdev, cap_offset + reg, *value);

	return 0;
}
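
/*
 * Illustrative note (not part of the driver): "reg" above is an offset
 * within the PCI Express capability, so a caller would read, say, the
 * Device Control register like this (PCI_EXP_DEVCTL comes from
 * linux/pci_regs.h).
 */
#if 0 /* illustrative only, not built */
	u16 devctl;
	if (!igb_read_pcie_cap_reg(hw, PCI_EXP_DEVCTL, &devctl))
		dev_dbg(&adapter->pdev->dev, "DevCtl = 0x%04x\n", devctl);
#endif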

static void igb_vlan_rx_register(struct net_device *netdev,
				 struct vlan_group *grp)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;

	igb_irq_disable(adapter);
	adapter->vlgrp = grp;

	if (grp) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_rlpml_set(adapter);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);
}

static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* attempt to add filter to vlvf array */
	igb_vlvf_set(adapter, vid, true, pf_id);

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	igb_vfta_set(hw, vid, true);
}

static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;
	s32 err;

	igb_irq_disable(adapter);
	vlan_group_set_device(adapter->vlgrp, vid, NULL);

	if (!test_bit(__IGB_DOWN, &adapter->state))
		igb_irq_enable(adapter);

	/* remove vlan from VLVF table array */
	err = igb_vlvf_set(adapter, vid, false, pf_id);

	/* if vid was not present in VLVF just remove it from table */
	if (err)
		igb_vfta_set(hw, vid, false);
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);

	if (adapter->vlgrp) {
		u16 vid;
		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
			if (!vlan_group_get_device(adapter->vlgrp, vid))
				continue;
			igb_vlan_rx_add_vid(adapter->netdev, vid);
		}
	}
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;

	switch (spddplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
		return -EINVAL;
	}
	return 0;
}
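
/*
 * Illustrative note (not part of the driver): spddplx encodes the
 * requested mode as speed plus duplex (ethtool constants), so callers
 * end up making calls like the one below.
 */
#if 0 /* illustrative only, not built */
	/* force 100 Mb/s full duplex: SPEED_100 + DUPLEX_FULL = 100 + 1 */
	err = igb_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
#endif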

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev))
		igb_close(netdev);

	igb_clear_interrupt_scheme(adapter);

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
		#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_shutdown_serdes_link_82575(hw);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int igb_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err; /* signed: pci_enable_device_mem and igb_open return -errno */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* e1000_power_up_phy(adapter); */

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	wr32(E1000_WUS, ~0);

	if (netif_running(netdev)) {
		err = igb_open(netdev);
		if (err)
			return err;
	}

	netif_device_attach(netdev);

	return 0;
}
#endif

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!adapter->msix_entries) {
		struct igb_q_vector *q_vector = adapter->q_vector[0];
		igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
		return;
	}

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];
		wr32(E1000_EIMC, q_vector->eims_value);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igb_resume routine.
 */
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
			"failed 0x%0x\n", err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);
}

static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
			     u8 qsel)
{
	u32 rar_low, rar_high;
	struct e1000_hw *hw = &adapter->hw;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* Indicate to hardware the Address is Valid. */
	rar_high |= E1000_RAH_AV;

	if (hw->mac.type == e1000_82575)
		rar_high |= E1000_RAH_POOL_1 * qsel;
	else
		rar_high |= E1000_RAH_POOL_1 << qsel;

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
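
/*
 * Worked example: for MAC address 00:1b:21:aa:bb:cc the packing above
 * produces rar_low = 0xaa211b00 and rar_high = 0x0000ccbb, before the
 * valid bit and pool-select bits are OR'd in.
 */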

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first, so a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);

	memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);

	igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);

	return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	/* replication is not supported for 82575 */
	if (hw->mac.type == e1000_82575)
		return;

	/* enable replication vlan tag stripping */
	reg = rd32(E1000_RPLOLR);
	reg |= E1000_RPLOLR_STRVLAN;
	wr32(E1000_RPLOLR, reg);

	/* notify HW that the MAC is adding vlan tags */
	reg = rd32(E1000_DTXCTL);
	reg |= E1000_DTXCTL_VLAN_ADDED;
	wr32(E1000_DTXCTL, reg);

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}

/* igb_main.c */