igb: Change Tx cleanup loop to do/while instead of for
drivers/net/ethernet/intel/igb/igb_main.c
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"

#define MAJ 4
#define MIN 0
#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
		"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);

void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *);
static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos);
static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
#endif

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int igb_suspend(struct device *);
#endif
static int igb_resume(struct device *);
#ifdef CONFIG_PM_RUNTIME
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
#endif
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs = 0;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
                 "per physical function");
#endif /* CONFIG_PCI_IOV */

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

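/*
 * Illustrative usage (not from the original source): both module parameters
 * above are set at load time, e.g.
 *   modprobe igb max_vfs=2 debug=3
 * where debug selects the NETIF_MSG_* message level described in the
 * MODULE_PARM_DESC string.
 */
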
struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/*
 * igb_regdump - register printout routine
 */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}

/*
 * igb_dump - Print registers, tx-rings and rx-rings
 */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      "
			"last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)buffer_info->dma,
			buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63     46 45   40 39  38 36  35 32 31 24           15        0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
			"[bi->dma       ] leng ntw timestamp        "
			"bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX"
				" %04X  %p %016llX %p%s\n", i,
				le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)buffer_info->dma,
				buffer_info->length,
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					buffer_info->length, true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum   Ident  |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX -------"
					"--------- %p%s\n", "RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX"
					" %p%s\n", "R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->skb) {
					print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1, buffer_info->skb->data,
						  IGB_RX_HDR_LEN, true);
					print_hex_dump(KERN_INFO, "",
						  DUMP_PREFIX_ADDRESS,
						  16, 1,
						  page_address(buffer_info->page) +
						  buffer_info->page_offset,
						  PAGE_SIZE/2, true);
				}
			}
		}
	}

exit:
	return;
}

/**
 * igb_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
				                               Q_IDX_82576(i);
		}
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

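/*
 * Illustrative example (not from the original source): with VFs enabled on
 * an 82576 and rss_queues = 4, Q_IDX_82576() interleaves the PF rings into
 * the queue space the VF pairs leave free:
 *   rx_ring[0]->reg_idx = rbase_offset + 0
 *   rx_ring[1]->reg_idx = rbase_offset + 8
 *   rx_ring[2]->reg_idx = rbase_offset + 1
 *   rx_ring[3]->reg_idx = rbase_offset + 9
 */
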
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		kfree(adapter->tx_ring[i]);
		adapter->tx_ring[i] = NULL;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		kfree(adapter->rx_ring[i]);
		adapter->rx_ring[i] = NULL;
	}
	adapter->num_rx_queues = 0;
	adapter->num_tx_queues = 0;
}

/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->tx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
		adapter->tx_ring[i] = ring;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
		if (!ring)
			goto err;
		ring->count = adapter->rx_ring_count;
		ring->queue_index = i;
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;
		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/*
		 * On i350, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		adapter->rx_ring[i] = ring;
	}

	igb_cache_ring_register(adapter);

	return 0;

err:
	igb_free_queues(adapter);

	return -ENOMEM;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

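/*
 * Illustrative example (not from the original source): on an 82576, mapping
 * MSI-X vector 3 to Rx queue 10 uses IVAR row (10 & 0x7) = 2 and column
 * offset ((10 & 0x8) << 1) = 16, so igb_write_ivar(hw, 3, 2, 16) clears
 * bits 23:16 of IVAR(2) and writes (3 | E1000_IVAR_VALID) into them.
 */
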
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		   bitmask for the EICR/EIMS/EIMC registers.  To assign one
		   or more queues to a vector, we write the appropriate bits
		   into the MSIXBM register for that vector. */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!adapter->msix_entries && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/*
		 * 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows.  The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/*
		 * On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major.  So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = 1 << msix_vector;
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++,
		           E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = 1 << vector;
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int i, err = 0, vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
	                  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto out;
	vector++;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
			        q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
			        q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
		                  igb_msix_ring, 0, q_vector->name,
		                  q_vector);
		if (err)
			goto out;
		vector++;
	}

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
		adapter->q_vector[v_idx] = NULL;
		if (!q_vector)
			continue;
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
	adapter->num_q_vectors = 0;
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 *
 * This function resets the device so that it has 0 rx queues, tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_queues(adapter);
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every rx queue */
	numvecs = adapter->num_rx_queues;

	/* if tx handler is separate add 1 for every tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);

	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced queue counts. */
	rtnl_lock();
	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	rtnl_unlock();
	return err;
}

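/*
 * Illustrative example (not from the original source): with rss_queues = 4,
 * no VFs, and IGB_FLAG_QUEUE_PAIRS set, the function above requests
 * 4 queue vectors + 1 link vector = 5 MSI-X entries; when Tx handlers are
 * not paired with Rx, the request grows to 4 + 4 + 1 = 9 entries.
 */
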
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	struct igb_q_vector *q_vector;
	struct e1000_hw *hw = &adapter->hw;
	int v_idx;

	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
		q_vector = kzalloc(sizeof(struct igb_q_vector),
				   GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
		q_vector->itr_val = IGB_START_ITR;
		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
		adapter->q_vector[v_idx] = q_vector;
	}

	return 0;

err_out:
	igb_free_q_vectors(adapter);
	return -ENOMEM;
}

static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->rx.ring = adapter->rx_ring[ring_idx];
	q_vector->rx.ring->q_vector = q_vector;
	q_vector->rx.count++;
	q_vector->itr_val = adapter->rx_itr_setting;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
				      int ring_idx, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	q_vector->tx.ring = adapter->tx_ring[ring_idx];
	q_vector->tx.ring->q_vector = q_vector;
	q_vector->tx.count++;
	q_vector->itr_val = adapter->tx_itr_setting;
	q_vector->tx.work_limit = adapter->tx_work_limit;
	if (q_vector->itr_val && q_vector->itr_val <= 3)
		q_vector->itr_val = IGB_START_ITR;
}

/**
 * igb_map_ring_to_vector - maps allocated queues to vectors
 *
 * This function maps the recently allocated queues to vectors.
 **/
static int igb_map_ring_to_vector(struct igb_adapter *adapter)
{
	int i;
	int v_idx = 0;

	if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
	    (adapter->num_q_vectors < adapter->num_tx_queues))
		return -ENOMEM;

	if (adapter->num_q_vectors >=
	    (adapter->num_rx_queues + adapter->num_tx_queues)) {
		for (i = 0; i < adapter->num_rx_queues; i++)
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		for (i = 0; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	} else {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			if (i < adapter->num_tx_queues)
				igb_map_tx_ring_to_vector(adapter, i, v_idx);
			igb_map_rx_ring_to_vector(adapter, i, v_idx++);
		}
		for (; i < adapter->num_tx_queues; i++)
			igb_map_tx_ring_to_vector(adapter, i, v_idx++);
	}
	return 0;
}

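/*
 * Illustrative example (not from the original source): with 4 Rx and 4 Tx
 * queues but only 4 q_vectors (queue pairs), igb_map_ring_to_vector() above
 * pairs Tx ring i and Rx ring i on q_vector[i]; with 8 q_vectors available,
 * each ring gets a dedicated vector instead (Rx on vectors 0-3, Tx on 4-7).
 */
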
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	err = igb_set_interrupt_capability(adapter);
	if (err)
		return err;

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	err = igb_alloc_queues(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	err = igb_map_ring_to_vector(adapter);
	if (err) {
		dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
		goto err_map_queues;
	}

	return 0;
err_map_queues:
	igb_free_queues(adapter);
err_alloc_queues:
	igb_free_q_vectors(adapter);
err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_clear_interrupt_scheme(adapter);
		if (!pci_enable_msi(pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_tx_queues = 1;
		adapter->num_rx_queues = 1;
		adapter->num_q_vectors = 1;
		err = igb_alloc_q_vectors(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for vectors\n");
			goto request_done;
		}
		err = igb_alloc_queues(adapter);
		if (err) {
			dev_err(&pdev->dev,
			        "Unable to allocate memory for queues\n");
			igb_free_q_vectors(adapter);
			goto request_done;
		}
		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/*
	 * we need to be careful when disabling interrupts.  The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->msix_entries) {
		u32 regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, old_vid, false);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->msix_entries)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);
		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;
}

void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_disable(&(adapter->q_vector[i]->napi));

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netif_carrier_off(netdev);

	/* record the stats before reset*/
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter, &adapter->stats64);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA

	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

1583void igb_reset(struct igb_adapter *adapter)
1584{
090b1795 1585 struct pci_dev *pdev = adapter->pdev;
9d5c8243 1586 struct e1000_hw *hw = &adapter->hw;
2d064c06
AD
1587 struct e1000_mac_info *mac = &hw->mac;
1588 struct e1000_fc_info *fc = &hw->fc;
9d5c8243
AK
1589 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
1590 u16 hwm;
1591
1592 /* Repartition Pba for greater than 9k mtu
1593 * To take effect CTRL.RST is required.
1594 */
fa4dfae0 1595 switch (mac->type) {
d2ba2ed8 1596 case e1000_i350:
55cac248
AD
1597 case e1000_82580:
1598 pba = rd32(E1000_RXPBS);
1599 pba = igb_rxpbs_adjust_82580(pba);
1600 break;
fa4dfae0 1601 case e1000_82576:
d249be54
AD
1602 pba = rd32(E1000_RXPBS);
1603 pba &= E1000_RXPBS_SIZE_MASK_82576;
fa4dfae0
AD
1604 break;
1605 case e1000_82575:
f96a8a0b
CW
1606 case e1000_i210:
1607 case e1000_i211:
fa4dfae0
AD
1608 default:
1609 pba = E1000_PBA_34K;
1610 break;
2d064c06 1611 }
9d5c8243 1612
2d064c06
AD
1613 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
1614 (mac->type < e1000_82576)) {
9d5c8243
AK
1615 /* adjust PBA for jumbo frames */
1616 wr32(E1000_PBA, pba);
1617
1618 /* To maintain wire speed transmits, the Tx FIFO should be
1619 * large enough to accommodate two full transmit packets,
1620 * rounded up to the next 1KB and expressed in KB. Likewise,
1621 * the Rx FIFO should be large enough to accommodate at least
1622 * one full receive packet and is similarly rounded up and
1623 * expressed in KB. */
1624 pba = rd32(E1000_PBA);
1625 /* upper 16 bits has Tx packet buffer allocation size in KB */
1626 tx_space = pba >> 16;
1627 /* lower 16 bits has Rx packet buffer allocation size in KB */
1628 pba &= 0xffff;
1629 /* the Tx FIFO also stores 16 bytes of information about the Tx packet,
1630 * but doesn't include the Ethernet FCS because hardware appends it */
1631 min_tx_space = (adapter->max_frame_size +
85e8d004 1632 sizeof(union e1000_adv_tx_desc) -
9d5c8243
AK
1633 ETH_FCS_LEN) * 2;
1634 min_tx_space = ALIGN(min_tx_space, 1024);
1635 min_tx_space >>= 10;
1636 /* software strips receive CRC, so leave room for it */
1637 min_rx_space = adapter->max_frame_size;
1638 min_rx_space = ALIGN(min_rx_space, 1024);
1639 min_rx_space >>= 10;
1640
1641 /* If current Tx allocation is less than the min Tx FIFO size,
1642 * and the min Tx FIFO size is less than the current Rx FIFO
1643 * allocation, take space away from current Rx allocation */
1644 if (tx_space < min_tx_space &&
1645 ((min_tx_space - tx_space) < pba)) {
1646 pba = pba - (min_tx_space - tx_space);
1647
1648 /* if short on rx space, rx wins and must trump tx
1649 * adjustment */
1650 if (pba < min_rx_space)
1651 pba = min_rx_space;
1652 }
2d064c06 1653 wr32(E1000_PBA, pba);
9d5c8243 1654 }
9d5c8243
AK
1655
1656 /* flow control settings */
1657 /* The high water mark must be low enough to fit one full frame
1658 * (or the size used for early receive) above it in the Rx FIFO.
1659 * Set it to the lower of:
1660 * - 90% of the Rx FIFO size, or
1661 * - the full Rx FIFO size minus one full frame */
1662 hwm = min(((pba << 10) * 9 / 10),
2d064c06 1663 ((pba << 10) - 2 * adapter->max_frame_size));
9d5c8243 1664
d405ea3e
AD
1665 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
1666 fc->low_water = fc->high_water - 16;
9d5c8243
AK
1667 fc->pause_time = 0xFFFF;
1668 fc->send_xon = 1;
0cce119a 1669 fc->current_mode = fc->requested_mode;
9d5c8243 1670
4ae196df
AD
1671 /* disable receive for all VFs and wait one second */
1672 if (adapter->vfs_allocated_count) {
1673 int i;
1674 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
8fa7e0f7 1675 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
4ae196df
AD
1676
1677 /* ping all the active vfs to let them know we are going down */
f2ca0dbe 1678 igb_ping_all_vfs(adapter);
4ae196df
AD
1679
1680 /* disable transmits and receives */
1681 wr32(E1000_VFRE, 0);
1682 wr32(E1000_VFTE, 0);
1683 }
1684
9d5c8243 1685 /* Allow time for pending master requests to run */
330a6d6a 1686 hw->mac.ops.reset_hw(hw);
9d5c8243
AK
1687 wr32(E1000_WUC, 0);
1688
330a6d6a 1689 if (hw->mac.ops.init_hw(hw))
090b1795 1690 dev_err(&pdev->dev, "Hardware Error\n");
831ec0b4 1691
a27416bb
MV
1692 /*
1693 * Flow control settings reset on hardware reset, so guarantee flow
1694 * control is off when forcing speed.
1695 */
1696 if (!hw->mac.autoneg)
1697 igb_force_mac_fc(hw);
1698
b6e0c419 1699 igb_init_dmac(adapter, pba);
88a268c1
NN
1700 if (!netif_running(adapter->netdev))
1701 igb_power_down_link(adapter);
1702
9d5c8243
AK
1703 igb_update_mng_vlan(adapter);
1704
1705 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1706 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1707
1f6e8178
MV
1708#ifdef CONFIG_IGB_PTP
1709 /* Re-enable PTP, where applicable. */
1710 igb_ptp_reset(adapter);
1711#endif /* CONFIG_IGB_PTP */
1712
330a6d6a 1713 igb_get_phy_info(hw);
9d5c8243
AK
1714}
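/* A minimal sketch (not part of the driver): the jumbo-frame PBA
 * repartition and flow-control high-water-mark logic in igb_reset()
 * above boils down to the arithmetic below.  The example_* helpers are
 * hypothetical and only illustrate the calculation; pba and tx_space
 * are in KB, max_frame_size in bytes, as in the code above.
 */
#if 0	/* illustrative example only, never compiled */
static u32 example_repartition_pba(u32 pba, u32 tx_space, u32 max_frame_size)
{
	/* room for two full frames plus per-packet descriptor overhead,
	 * rounded up to the next 1KB and expressed in KB */
	u32 min_tx_space = (max_frame_size +
			    sizeof(union e1000_adv_tx_desc) -
			    ETH_FCS_LEN) * 2;
	u32 min_rx_space = ALIGN(max_frame_size, 1024) >> 10;

	min_tx_space = ALIGN(min_tx_space, 1024) >> 10;

	/* take Rx space for Tx if needed, but never drop below one Rx frame */
	if (tx_space < min_tx_space && (min_tx_space - tx_space) < pba) {
		pba -= (min_tx_space - tx_space);
		if (pba < min_rx_space)
			pba = min_rx_space;
	}
	return pba;
}

static u16 example_high_water(u32 pba, u32 max_frame_size)
{
	/* lower of 90% of the Rx FIFO or the FIFO less two full frames,
	 * masked to the 16-byte granularity the hardware expects */
	u32 hwm = min(((pba << 10) * 9 / 10),
		      ((pba << 10) - 2 * max_frame_size));

	return hwm & 0xFFF0;
}
#endif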
1715
c8f44aff
MM
1716static netdev_features_t igb_fix_features(struct net_device *netdev,
1717 netdev_features_t features)
b2cb09b1
JP
1718{
1719 /*
1720 * Since there is no support for separate rx/tx vlan accel
1721 * enable/disable, make sure the Tx flag is always in the same state as the Rx flag.
1722 */
1723 if (features & NETIF_F_HW_VLAN_RX)
1724 features |= NETIF_F_HW_VLAN_TX;
1725 else
1726 features &= ~NETIF_F_HW_VLAN_TX;
1727
1728 return features;
1729}
1730
c8f44aff
MM
1731static int igb_set_features(struct net_device *netdev,
1732 netdev_features_t features)
ac52caa3 1733{
c8f44aff 1734 netdev_features_t changed = netdev->features ^ features;
89eaefb6 1735 struct igb_adapter *adapter = netdev_priv(netdev);
ac52caa3 1736
b2cb09b1
JP
1737 if (changed & NETIF_F_HW_VLAN_RX)
1738 igb_vlan_mode(netdev, features);
1739
89eaefb6
BG
1740 if (!(changed & NETIF_F_RXALL))
1741 return 0;
1742
1743 netdev->features = features;
1744
1745 if (netif_running(netdev))
1746 igb_reinit_locked(adapter);
1747 else
1748 igb_reset(adapter);
1749
ac52caa3
MM
1750 return 0;
1751}
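/* A minimal sketch (not part of the driver): igb_set_features() above
 * uses XOR to detect which feature bits actually toggled, so a request
 * that changes neither VLAN RX stripping nor RXALL returns without
 * touching the hardware.  example_feature_toggled() is a hypothetical
 * helper that only illustrates the bit test.
 */
#if 0	/* illustrative example only, never compiled */
static bool example_feature_toggled(netdev_features_t old_features,
				    netdev_features_t new_features,
				    netdev_features_t mask)
{
	/* a bit is set in (old ^ new) only if it differs between the two */
	return !!((old_features ^ new_features) & mask);
}
#endif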
1752
2e5c6922 1753static const struct net_device_ops igb_netdev_ops = {
559e9c49 1754 .ndo_open = igb_open,
2e5c6922 1755 .ndo_stop = igb_close,
cd392f5c 1756 .ndo_start_xmit = igb_xmit_frame,
12dcd86b 1757 .ndo_get_stats64 = igb_get_stats64,
ff41f8dc 1758 .ndo_set_rx_mode = igb_set_rx_mode,
2e5c6922
SH
1759 .ndo_set_mac_address = igb_set_mac,
1760 .ndo_change_mtu = igb_change_mtu,
1761 .ndo_do_ioctl = igb_ioctl,
1762 .ndo_tx_timeout = igb_tx_timeout,
1763 .ndo_validate_addr = eth_validate_addr,
2e5c6922
SH
1764 .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
1765 .ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
8151d294
WM
1766 .ndo_set_vf_mac = igb_ndo_set_vf_mac,
1767 .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
1768 .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
1769 .ndo_get_vf_config = igb_ndo_get_vf_config,
2e5c6922
SH
1770#ifdef CONFIG_NET_POLL_CONTROLLER
1771 .ndo_poll_controller = igb_netpoll,
1772#endif
b2cb09b1
JP
1773 .ndo_fix_features = igb_fix_features,
1774 .ndo_set_features = igb_set_features,
2e5c6922
SH
1775};
1776
d67974f0
CW
1777/**
1778 * igb_set_fw_version - Configure version string for ethtool
1779 * @adapter: adapter struct
1780 *
1781 **/
1782void igb_set_fw_version(struct igb_adapter *adapter)
1783{
1784 struct e1000_hw *hw = &adapter->hw;
1785 u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
1786 u16 major, build, patch, fw_version;
1787 u32 etrack_id;
1788
1789 hw->nvm.ops.read(hw, 5, 1, &fw_version);
1790 if (adapter->hw.mac.type != e1000_i211) {
1791 hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
1792 hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
1793 etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
1794
1795 /* combo image version needs to be found */
1796 hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
1797 if ((comb_offset != 0x0) &&
1798 (comb_offset != IGB_NVM_VER_INVALID)) {
1799 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
1800 + 1), 1, &comb_verh);
1801 hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
1802 1, &comb_verl);
1803
1804 /* Only display the Option ROM if it exists and is valid */
1805 if ((comb_verh && comb_verl) &&
1806 ((comb_verh != IGB_NVM_VER_INVALID) &&
1807 (comb_verl != IGB_NVM_VER_INVALID))) {
1808 major = comb_verl >> IGB_COMB_VER_SHFT;
1809 build = (comb_verl << IGB_COMB_VER_SHFT) |
1810 (comb_verh >> IGB_COMB_VER_SHFT);
1811 patch = comb_verh & IGB_COMB_VER_MASK;
1812 snprintf(adapter->fw_version,
1813 sizeof(adapter->fw_version),
1814 "%d.%d%d, 0x%08x, %d.%d.%d",
1815 (fw_version & IGB_MAJOR_MASK) >>
1816 IGB_MAJOR_SHIFT,
1817 (fw_version & IGB_MINOR_MASK) >>
1818 IGB_MINOR_SHIFT,
1819 (fw_version & IGB_BUILD_MASK),
1820 etrack_id, major, build, patch);
1821 goto out;
1822 }
1823 }
1824 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1825 "%d.%d%d, 0x%08x",
1826 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1827 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1828 (fw_version & IGB_BUILD_MASK), etrack_id);
1829 } else {
1830 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
1831 "%d.%d%d",
1832 (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
1833 (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
1834 (fw_version & IGB_BUILD_MASK));
1835 }
1836out:
1837 return;
1838}
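/* A minimal sketch (not part of the driver): the version string built in
 * igb_set_fw_version() above is three bit-fields unpacked from the single
 * NVM word read at offset 5.  example_unpack_fw_version() is hypothetical
 * and simply reuses the same mask/shift macros as the code above.
 */
#if 0	/* illustrative example only, never compiled */
static void example_unpack_fw_version(u16 fw_version, u16 *major,
				      u16 *minor, u16 *build)
{
	*major = (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT;
	*minor = (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT;
	*build = fw_version & IGB_BUILD_MASK;
}
#endif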
1839
9d5c8243
AK
1840/**
1841 * igb_probe - Device Initialization Routine
1842 * @pdev: PCI device information struct
1843 * @ent: entry in igb_pci_tbl
1844 *
1845 * Returns 0 on success, negative on failure
1846 *
1847 * igb_probe initializes an adapter identified by a pci_dev structure.
1848 * The OS initialization, configuring of the adapter private structure,
1849 * and a hardware reset occur.
1850 **/
1851static int __devinit igb_probe(struct pci_dev *pdev,
1852 const struct pci_device_id *ent)
1853{
1854 struct net_device *netdev;
1855 struct igb_adapter *adapter;
1856 struct e1000_hw *hw;
4337e993 1857 u16 eeprom_data = 0;
9835fd73 1858 s32 ret_val;
4337e993 1859 static int global_quad_port_a; /* global quad port a indication */
9d5c8243
AK
1860 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1861 unsigned long mmio_start, mmio_len;
2d6a5e95 1862 int err, pci_using_dac;
9d5c8243 1863 u16 eeprom_apme_mask = IGB_EEPROM_APME;
9835fd73 1864 u8 part_str[E1000_PBANUM_LENGTH];
9d5c8243 1865
bded64a7
AG
1866 /* Catch broken hardware that put the wrong VF device ID in
1867 * the PCIe SR-IOV capability.
1868 */
1869 if (pdev->is_virtfn) {
1870 WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
f96a8a0b 1871 pci_name(pdev), pdev->vendor, pdev->device);
bded64a7
AG
1872 return -EINVAL;
1873 }
1874
aed5dec3 1875 err = pci_enable_device_mem(pdev);
9d5c8243
AK
1876 if (err)
1877 return err;
1878
1879 pci_using_dac = 0;
59d71989 1880 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243 1881 if (!err) {
59d71989 1882 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
9d5c8243
AK
1883 if (!err)
1884 pci_using_dac = 1;
1885 } else {
59d71989 1886 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243 1887 if (err) {
59d71989 1888 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
9d5c8243
AK
1889 if (err) {
1890 dev_err(&pdev->dev, "No usable DMA "
1891 "configuration, aborting\n");
1892 goto err_dma;
1893 }
1894 }
1895 }
1896
aed5dec3
AD
1897 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
1898 IORESOURCE_MEM),
1899 igb_driver_name);
9d5c8243
AK
1900 if (err)
1901 goto err_pci_reg;
1902
19d5afd4 1903 pci_enable_pcie_error_reporting(pdev);
40a914fa 1904
9d5c8243 1905 pci_set_master(pdev);
c682fc23 1906 pci_save_state(pdev);
9d5c8243
AK
1907
1908 err = -ENOMEM;
1bfaf07b 1909 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
1cc3bd87 1910 IGB_MAX_TX_QUEUES);
9d5c8243
AK
1911 if (!netdev)
1912 goto err_alloc_etherdev;
1913
1914 SET_NETDEV_DEV(netdev, &pdev->dev);
1915
1916 pci_set_drvdata(pdev, netdev);
1917 adapter = netdev_priv(netdev);
1918 adapter->netdev = netdev;
1919 adapter->pdev = pdev;
1920 hw = &adapter->hw;
1921 hw->back = adapter;
b3f4d599 1922 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
9d5c8243
AK
1923
1924 mmio_start = pci_resource_start(pdev, 0);
1925 mmio_len = pci_resource_len(pdev, 0);
1926
1927 err = -EIO;
28b0759c
AD
1928 hw->hw_addr = ioremap(mmio_start, mmio_len);
1929 if (!hw->hw_addr)
9d5c8243
AK
1930 goto err_ioremap;
1931
2e5c6922 1932 netdev->netdev_ops = &igb_netdev_ops;
9d5c8243 1933 igb_set_ethtool_ops(netdev);
9d5c8243 1934 netdev->watchdog_timeo = 5 * HZ;
9d5c8243
AK
1935
1936 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1937
1938 netdev->mem_start = mmio_start;
1939 netdev->mem_end = mmio_start + mmio_len;
1940
9d5c8243
AK
1941 /* PCI config space info */
1942 hw->vendor_id = pdev->vendor;
1943 hw->device_id = pdev->device;
1944 hw->revision_id = pdev->revision;
1945 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1946 hw->subsystem_device_id = pdev->subsystem_device;
1947
9d5c8243
AK
1948 /* Copy the default MAC, PHY and NVM function pointers */
1949 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1950 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1951 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1952 /* Initialize skew-specific constants */
1953 err = ei->get_invariants(hw);
1954 if (err)
450c87c8 1955 goto err_sw_init;
9d5c8243 1956
450c87c8 1957 /* setup the private structure */
9d5c8243
AK
1958 err = igb_sw_init(adapter);
1959 if (err)
1960 goto err_sw_init;
1961
1962 igb_get_bus_info_pcie(hw);
1963
1964 hw->phy.autoneg_wait_to_complete = false;
9d5c8243
AK
1965
1966 /* Copper options */
1967 if (hw->phy.media_type == e1000_media_type_copper) {
1968 hw->phy.mdix = AUTO_ALL_MODES;
1969 hw->phy.disable_polarity_correction = false;
1970 hw->phy.ms_type = e1000_ms_hw_default;
1971 }
1972
1973 if (igb_check_reset_block(hw))
1974 dev_info(&pdev->dev,
1975 "PHY reset is blocked due to SOL/IDER session.\n");
1976
077887c3
AD
1977 /*
1978 * features is initialized to 0 in allocation; it might have bits
1979 * set by igb_sw_init, so we should use an OR instead of an
1980 * assignment.
1981 */
1982 netdev->features |= NETIF_F_SG |
1983 NETIF_F_IP_CSUM |
1984 NETIF_F_IPV6_CSUM |
1985 NETIF_F_TSO |
1986 NETIF_F_TSO6 |
1987 NETIF_F_RXHASH |
1988 NETIF_F_RXCSUM |
1989 NETIF_F_HW_VLAN_RX |
1990 NETIF_F_HW_VLAN_TX;
1991
1992 /* copy netdev features into list of user selectable features */
1993 netdev->hw_features |= netdev->features;
89eaefb6 1994 netdev->hw_features |= NETIF_F_RXALL;
077887c3
AD
1995
1996 /* set this bit last since it cannot be part of hw_features */
1997 netdev->features |= NETIF_F_HW_VLAN_FILTER;
1998
1999 netdev->vlan_features |= NETIF_F_TSO |
2000 NETIF_F_TSO6 |
2001 NETIF_F_IP_CSUM |
2002 NETIF_F_IPV6_CSUM |
2003 NETIF_F_SG;
48f29ffc 2004
6b8f0922
BG
2005 netdev->priv_flags |= IFF_SUPP_NOFCS;
2006
7b872a55 2007 if (pci_using_dac) {
9d5c8243 2008 netdev->features |= NETIF_F_HIGHDMA;
7b872a55
YZ
2009 netdev->vlan_features |= NETIF_F_HIGHDMA;
2010 }
9d5c8243 2011
ac52caa3
MM
2012 if (hw->mac.type >= e1000_82576) {
2013 netdev->hw_features |= NETIF_F_SCTP_CSUM;
b9473560 2014 netdev->features |= NETIF_F_SCTP_CSUM;
ac52caa3 2015 }
b9473560 2016
01789349
JP
2017 netdev->priv_flags |= IFF_UNICAST_FLT;
2018
330a6d6a 2019 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
9d5c8243
AK
2020
2021 /* before reading the NVM, reset the controller to put the device in a
2022 * known good starting state */
2023 hw->mac.ops.reset_hw(hw);
2024
f96a8a0b
CW
2025 /*
2026 * make sure the NVM is good; i211 parts have special NVM that
2027 * doesn't contain a checksum
2028 */
2029 if (hw->mac.type != e1000_i211) {
2030 if (hw->nvm.ops.validate(hw) < 0) {
2031 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
2032 err = -EIO;
2033 goto err_eeprom;
2034 }
9d5c8243
AK
2035 }
2036
2037 /* copy the MAC address out of the NVM */
2038 if (hw->mac.ops.read_mac_addr(hw))
2039 dev_err(&pdev->dev, "NVM Read Error\n");
2040
2041 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2042 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
2043
2044 if (!is_valid_ether_addr(netdev->perm_addr)) {
2045 dev_err(&pdev->dev, "Invalid MAC Address\n");
2046 err = -EIO;
2047 goto err_eeprom;
2048 }
2049
d67974f0
CW
2050 /* get firmware version for ethtool -i */
2051 igb_set_fw_version(adapter);
2052
c061b18d 2053 setup_timer(&adapter->watchdog_timer, igb_watchdog,
0e340485 2054 (unsigned long) adapter);
c061b18d 2055 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
0e340485 2056 (unsigned long) adapter);
9d5c8243
AK
2057
2058 INIT_WORK(&adapter->reset_task, igb_reset_task);
2059 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
2060
450c87c8 2061 /* Initialize link properties that are user-changeable */
9d5c8243
AK
2062 adapter->fc_autoneg = true;
2063 hw->mac.autoneg = true;
2064 hw->phy.autoneg_advertised = 0x2f;
2065
0cce119a
AD
2066 hw->fc.requested_mode = e1000_fc_default;
2067 hw->fc.current_mode = e1000_fc_default;
9d5c8243 2068
9d5c8243
AK
2069 igb_validate_mdi_setting(hw);
2070
9d5c8243
AK
2071 /* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
2072 * enable the ACPI Magic Packet filter
2073 */
2074
a2cf8b6c 2075 if (hw->bus.func == 0)
312c75ae 2076 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
6d337dce 2077 else if (hw->mac.type >= e1000_82580)
55cac248
AD
2078 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2079 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2080 &eeprom_data);
a2cf8b6c
AD
2081 else if (hw->bus.func == 1)
2082 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
9d5c8243
AK
2083
2084 if (eeprom_data & eeprom_apme_mask)
2085 adapter->eeprom_wol |= E1000_WUFC_MAG;
2086
2087 /* now that we have the eeprom settings, apply the special cases where
2088 * the eeprom may be wrong or the board simply won't support wake on
2089 * lan on a particular port */
2090 switch (pdev->device) {
2091 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2092 adapter->eeprom_wol = 0;
2093 break;
2094 case E1000_DEV_ID_82575EB_FIBER_SERDES:
2d064c06
AD
2095 case E1000_DEV_ID_82576_FIBER:
2096 case E1000_DEV_ID_82576_SERDES:
9d5c8243
AK
2097 /* Wake events are only supported on port A for dual fiber
2098 * regardless of eeprom setting */
2099 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2100 adapter->eeprom_wol = 0;
2101 break;
c8ea5ea9 2102 case E1000_DEV_ID_82576_QUAD_COPPER:
d5aa2252 2103 case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
c8ea5ea9
AD
2104 /* if quad port adapter, disable WoL on all but port A */
2105 if (global_quad_port_a != 0)
2106 adapter->eeprom_wol = 0;
2107 else
2108 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
2109 /* Reset for multiple quad port adapters */
2110 if (++global_quad_port_a == 4)
2111 global_quad_port_a = 0;
2112 break;
9d5c8243
AK
2113 }
2114
2115 /* initialize the wol settings based on the eeprom settings */
2116 adapter->wol = adapter->eeprom_wol;
e1b86d84 2117 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
9d5c8243
AK
2118
2119 /* reset the hardware with the new settings */
2120 igb_reset(adapter);
2121
2122 /* let the f/w know that the h/w is now under the control of the
2123 * driver. */
2124 igb_get_hw_control(adapter);
2125
9d5c8243
AK
2126 strcpy(netdev->name, "eth%d");
2127 err = register_netdev(netdev);
2128 if (err)
2129 goto err_register;
2130
b168dfc5
JB
2131 /* carrier off reporting is important to ethtool even BEFORE open */
2132 netif_carrier_off(netdev);
2133
421e02f0 2134#ifdef CONFIG_IGB_DCA
bbd98fe4 2135 if (dca_add_requester(&pdev->dev) == 0) {
7dfc16fa 2136 adapter->flags |= IGB_FLAG_DCA_ENABLED;
fe4506b6 2137 dev_info(&pdev->dev, "DCA enabled\n");
fe4506b6
JC
2138 igb_setup_dca(adapter);
2139 }
fe4506b6 2140
38c845c7 2141#endif
3c89f6d0 2142
7ebae817 2143#ifdef CONFIG_IGB_PTP
673b8b70 2144 /* do hw tstamp init after resetting */
7ebae817 2145 igb_ptp_init(adapter);
3c89f6d0 2146#endif /* CONFIG_IGB_PTP */
673b8b70 2147
9d5c8243
AK
2148 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2149 /* print bus type/speed/width info */
7c510e4b 2150 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
9d5c8243 2151 netdev->name,
559e9c49 2152 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
ff846f52 2153 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
559e9c49 2154 "unknown"),
59c3de89
AD
2155 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2156 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2157 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
2158 "unknown"),
7c510e4b 2159 netdev->dev_addr);
9d5c8243 2160
9835fd73
CW
2161 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2162 if (ret_val)
2163 strcpy(part_str, "Unknown");
2164 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
9d5c8243
AK
2165 dev_info(&pdev->dev,
2166 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2167 adapter->msix_entries ? "MSI-X" :
7dfc16fa 2168 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
9d5c8243 2169 adapter->num_rx_queues, adapter->num_tx_queues);
09b068d4
CW
2170 switch (hw->mac.type) {
2171 case e1000_i350:
f96a8a0b
CW
2172 case e1000_i210:
2173 case e1000_i211:
09b068d4
CW
2174 igb_set_eee_i350(hw);
2175 break;
2176 default:
2177 break;
2178 }
749ab2cd
YZ
2179
2180 pm_runtime_put_noidle(&pdev->dev);
9d5c8243
AK
2181 return 0;
2182
2183err_register:
2184 igb_release_hw_control(adapter);
2185err_eeprom:
2186 if (!igb_check_reset_block(hw))
f5f4cf08 2187 igb_reset_phy(hw);
9d5c8243
AK
2188
2189 if (hw->flash_address)
2190 iounmap(hw->flash_address);
9d5c8243 2191err_sw_init:
047e0030 2192 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
2193 iounmap(hw->hw_addr);
2194err_ioremap:
2195 free_netdev(netdev);
2196err_alloc_etherdev:
559e9c49
AD
2197 pci_release_selected_regions(pdev,
2198 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243
AK
2199err_pci_reg:
2200err_dma:
2201 pci_disable_device(pdev);
2202 return err;
2203}
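/* A minimal sketch (not part of the driver): igb_probe() above follows the
 * usual kernel "goto unwind" idiom -- every failure path jumps to a label
 * that undoes only the steps that already succeeded, in reverse order.
 * example_probe_unwind() is hypothetical and shows the shape of that idiom
 * reduced to two resources.
 */
#if 0	/* illustrative example only, never compiled */
static int example_probe_unwind(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	err = pci_request_selected_regions(pdev,
					   pci_select_bars(pdev, IORESOURCE_MEM),
					   "example");
	if (err)
		goto err_pci_reg;

	/* ... further setup, each step adding its own error label ... */
	return 0;

err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
#endif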
2204
2205/**
2206 * igb_remove - Device Removal Routine
2207 * @pdev: PCI device information struct
2208 *
2209 * igb_remove is called by the PCI subsystem to alert the driver
2210 * that it should release a PCI device. This could be caused by a
2211 * Hot-Plug event, or because the driver is going to be removed from
2212 * memory.
2213 **/
2214static void __devexit igb_remove(struct pci_dev *pdev)
2215{
2216 struct net_device *netdev = pci_get_drvdata(pdev);
2217 struct igb_adapter *adapter = netdev_priv(netdev);
fe4506b6 2218 struct e1000_hw *hw = &adapter->hw;
9d5c8243 2219
749ab2cd 2220 pm_runtime_get_noresume(&pdev->dev);
7ebae817 2221#ifdef CONFIG_IGB_PTP
a79f4f88 2222 igb_ptp_stop(adapter);
3c89f6d0 2223#endif /* CONFIG_IGB_PTP */
749ab2cd 2224
760141a5
TH
2225 /*
2226 * The watchdog timer may be rescheduled, so explicitly
2227 * disable watchdog from being rescheduled.
2228 */
9d5c8243
AK
2229 set_bit(__IGB_DOWN, &adapter->state);
2230 del_timer_sync(&adapter->watchdog_timer);
2231 del_timer_sync(&adapter->phy_info_timer);
2232
760141a5
TH
2233 cancel_work_sync(&adapter->reset_task);
2234 cancel_work_sync(&adapter->watchdog_task);
9d5c8243 2235
421e02f0 2236#ifdef CONFIG_IGB_DCA
7dfc16fa 2237 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
fe4506b6
JC
2238 dev_info(&pdev->dev, "DCA disabled\n");
2239 dca_remove_requester(&pdev->dev);
7dfc16fa 2240 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
cbd347ad 2241 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
fe4506b6
JC
2242 }
2243#endif
2244
9d5c8243
AK
2245 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2246 * would have already happened in close and is redundant. */
2247 igb_release_hw_control(adapter);
2248
2249 unregister_netdev(netdev);
2250
047e0030 2251 igb_clear_interrupt_scheme(adapter);
9d5c8243 2252
37680117
AD
2253#ifdef CONFIG_PCI_IOV
2254 /* reclaim resources allocated to VFs */
2255 if (adapter->vf_data) {
2256 /* disable iov and allow time for transactions to clear */
f557147c
SA
2257 if (igb_vfs_are_assigned(adapter)) {
2258 dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
2259 } else {
0224d663
GR
2260 pci_disable_sriov(pdev);
2261 msleep(500);
0224d663 2262 }
37680117
AD
2263
2264 kfree(adapter->vf_data);
2265 adapter->vf_data = NULL;
2266 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
945a5151 2267 wrfl();
37680117
AD
2268 msleep(100);
2269 dev_info(&pdev->dev, "IOV Disabled\n");
2270 }
2271#endif
559e9c49 2272
28b0759c
AD
2273 iounmap(hw->hw_addr);
2274 if (hw->flash_address)
2275 iounmap(hw->flash_address);
559e9c49
AD
2276 pci_release_selected_regions(pdev,
2277 pci_select_bars(pdev, IORESOURCE_MEM));
9d5c8243 2278
1128c756 2279 kfree(adapter->shadow_vfta);
9d5c8243
AK
2280 free_netdev(netdev);
2281
19d5afd4 2282 pci_disable_pcie_error_reporting(pdev);
40a914fa 2283
9d5c8243
AK
2284 pci_disable_device(pdev);
2285}
2286
a6b623e0
AD
2287/**
2288 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2289 * @adapter: board private structure to initialize
2290 *
2291 * This function initializes the VF-specific data storage and then attempts to
2292 * allocate the VFs. The reason for this ordering is that it is much
2293 * more expensive time-wise to disable SR-IOV than it is to allocate and free
2294 * the memory for the VFs.
2295 **/
2296static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
2297{
2298#ifdef CONFIG_PCI_IOV
2299 struct pci_dev *pdev = adapter->pdev;
f96a8a0b 2300 struct e1000_hw *hw = &adapter->hw;
f557147c 2301 int old_vfs = pci_num_vf(adapter->pdev);
0224d663 2302 int i;
a6b623e0 2303
f96a8a0b
CW
2304 /* Virtualization features not supported on i210 family. */
2305 if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
2306 return;
2307
0224d663
GR
2308 if (old_vfs) {
2309 dev_info(&pdev->dev, "%d pre-allocated VFs found - override "
2310 "max_vfs setting of %d\n", old_vfs, max_vfs);
2311 adapter->vfs_allocated_count = old_vfs;
a6b623e0
AD
2312 }
2313
0224d663
GR
2314 if (!adapter->vfs_allocated_count)
2315 return;
2316
2317 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
2318 sizeof(struct vf_data_storage), GFP_KERNEL);
f96a8a0b 2319
0224d663
GR
2320 /* if allocation failed then we do not support SR-IOV */
2321 if (!adapter->vf_data) {
a6b623e0 2322 adapter->vfs_allocated_count = 0;
0224d663
GR
2323 dev_err(&pdev->dev, "Unable to allocate memory for VF "
2324 "Data Storage\n");
2325 goto out;
a6b623e0 2326 }
0224d663
GR
2327
2328 if (!old_vfs) {
2329 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count))
2330 goto err_out;
2331 }
2332 dev_info(&pdev->dev, "%d VFs allocated\n",
2333 adapter->vfs_allocated_count);
2334 for (i = 0; i < adapter->vfs_allocated_count; i++)
2335 igb_vf_configure(adapter, i);
2336
2337 /* DMA Coalescing is not supported in IOV mode. */
2338 adapter->flags &= ~IGB_FLAG_DMAC;
2339 goto out;
2340err_out:
2341 kfree(adapter->vf_data);
2342 adapter->vf_data = NULL;
2343 adapter->vfs_allocated_count = 0;
2344out:
2345 return;
a6b623e0
AD
2346#endif /* CONFIG_PCI_IOV */
2347}
2348
9d5c8243
AK
2349/**
2350 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2351 * @adapter: board private structure to initialize
2352 *
2353 * igb_sw_init initializes the Adapter private data structure.
2354 * Fields are initialized based on PCI device information and
2355 * OS network device settings (MTU size).
2356 **/
2357static int __devinit igb_sw_init(struct igb_adapter *adapter)
2358{
2359 struct e1000_hw *hw = &adapter->hw;
2360 struct net_device *netdev = adapter->netdev;
2361 struct pci_dev *pdev = adapter->pdev;
374a542d 2362 u32 max_rss_queues;
9d5c8243
AK
2363
2364 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
2365
13fde97a 2366 /* set default ring sizes */
68fd9910
AD
2367 adapter->tx_ring_count = IGB_DEFAULT_TXD;
2368 adapter->rx_ring_count = IGB_DEFAULT_RXD;
13fde97a
AD
2369
2370 /* set default ITR values */
4fc82adf
AD
2371 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
2372 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
2373
13fde97a
AD
2374 /* set default work limits */
2375 adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
2376
153285f9
AD
2377 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
2378 VLAN_HLEN;
9d5c8243
AK
2379 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2380
12dcd86b 2381 spin_lock_init(&adapter->stats64_lock);
a6b623e0 2382#ifdef CONFIG_PCI_IOV
6b78bb1d
CW
2383 switch (hw->mac.type) {
2384 case e1000_82576:
2385 case e1000_i350:
9b082d73
SA
2386 if (max_vfs > 7) {
2387 dev_warn(&pdev->dev,
2388 "Maximum of 7 VFs per PF, using max\n");
2389 adapter->vfs_allocated_count = 7;
2390 } else
2391 adapter->vfs_allocated_count = max_vfs;
6b78bb1d
CW
2392 break;
2393 default:
2394 break;
2395 }
a6b623e0 2396#endif /* CONFIG_PCI_IOV */
374a542d
MV
2397
2398 /* Determine the maximum number of RSS queues supported. */
f96a8a0b 2399 switch (hw->mac.type) {
374a542d
MV
2400 case e1000_i211:
2401 max_rss_queues = IGB_MAX_RX_QUEUES_I211;
2402 break;
2403 case e1000_82575:
f96a8a0b 2404 case e1000_i210:
374a542d
MV
2405 max_rss_queues = IGB_MAX_RX_QUEUES_82575;
2406 break;
2407 case e1000_i350:
2408 /* I350 cannot do RSS and SR-IOV at the same time */
2409 if (!!adapter->vfs_allocated_count) {
2410 max_rss_queues = 1;
2411 break;
2412 }
2413 /* fall through */
2414 case e1000_82576:
2415 if (!!adapter->vfs_allocated_count) {
2416 max_rss_queues = 2;
2417 break;
2418 }
2419 /* fall through */
2420 case e1000_82580:
2421 default:
2422 max_rss_queues = IGB_MAX_RX_QUEUES;
f96a8a0b 2423 break;
374a542d
MV
2424 }
2425
2426 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2427
2428 /* Determine if we need to pair queues. */
2429 switch (hw->mac.type) {
2430 case e1000_82575:
f96a8a0b 2431 case e1000_i211:
374a542d 2432 /* Device supports enough interrupts without queue pairing. */
f96a8a0b 2433 break;
374a542d
MV
2434 case e1000_82576:
2435 /*
2436 * If VFs are going to be allocated with RSS queues then we
2437 * should pair the queues in order to conserve interrupts due
2438 * to limited supply.
2439 */
2440 if ((adapter->rss_queues > 1) &&
2441 (adapter->vfs_allocated_count > 6))
2442 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
2443 /* fall through */
2444 case e1000_82580:
2445 case e1000_i350:
2446 case e1000_i210:
f96a8a0b 2447 default:
374a542d
MV
2448 /*
2449 * If rss_queues > half of max_rss_queues, pair the queues in
2450 * order to conserve interrupts due to limited supply.
2451 */
2452 if (adapter->rss_queues > (max_rss_queues / 2))
2453 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
f96a8a0b
CW
2454 break;
2455 }
a99955fc 2456
1128c756
CW
2457 /* Setup and initialize a copy of the hw vlan table array */
2458 adapter->shadow_vfta = kzalloc(sizeof(u32) *
2459 E1000_VLAN_FILTER_TBL_SIZE,
2460 GFP_ATOMIC);
2461
a6b623e0 2462 /* This call may decrease the number of queues */
047e0030 2463 if (igb_init_interrupt_scheme(adapter)) {
9d5c8243
AK
2464 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
2465 return -ENOMEM;
2466 }
2467
a6b623e0
AD
2468 igb_probe_vfs(adapter);
2469
9d5c8243
AK
2470 /* Explicitly disable IRQ since the NIC can be in any state. */
2471 igb_irq_disable(adapter);
2472
f96a8a0b 2473 if (hw->mac.type >= e1000_i350)
831ec0b4
CW
2474 adapter->flags &= ~IGB_FLAG_DMAC;
2475
9d5c8243
AK
2476 set_bit(__IGB_DOWN, &adapter->state);
2477 return 0;
2478}
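/* A minimal sketch (not part of the driver): for the common case (neither
 * 82575 nor i211) the queue-pairing decision in igb_sw_init() above reduces
 * to "pair Tx/Rx queues on one vector once more than half of the part's
 * interrupt budget would otherwise be used".  example_needs_queue_pairs()
 * is a hypothetical helper illustrating that rule.
 */
#if 0	/* illustrative example only, never compiled */
static bool example_needs_queue_pairs(u32 rss_queues, u32 max_rss_queues)
{
	/* pairing conserves MSI-X vectors once the queue count passes
	 * half of the maximum the MAC supports */
	return rss_queues > (max_rss_queues / 2);
}
#endif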
2479
2480/**
2481 * igb_open - Called when a network interface is made active
2482 * @netdev: network interface device structure
2483 *
2484 * Returns 0 on success, negative value on failure
2485 *
2486 * The open entry point is called when a network interface is made
2487 * active by the system (IFF_UP). At this point all resources needed
2488 * for transmit and receive operations are allocated, the interrupt
2489 * handler is registered with the OS, the watchdog timer is started,
2490 * and the stack is notified that the interface is ready.
2491 **/
749ab2cd 2492static int __igb_open(struct net_device *netdev, bool resuming)
9d5c8243
AK
2493{
2494 struct igb_adapter *adapter = netdev_priv(netdev);
2495 struct e1000_hw *hw = &adapter->hw;
749ab2cd 2496 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2497 int err;
2498 int i;
2499
2500 /* disallow open during test */
749ab2cd
YZ
2501 if (test_bit(__IGB_TESTING, &adapter->state)) {
2502 WARN_ON(resuming);
9d5c8243 2503 return -EBUSY;
749ab2cd
YZ
2504 }
2505
2506 if (!resuming)
2507 pm_runtime_get_sync(&pdev->dev);
9d5c8243 2508
b168dfc5
JB
2509 netif_carrier_off(netdev);
2510
9d5c8243
AK
2511 /* allocate transmit descriptors */
2512 err = igb_setup_all_tx_resources(adapter);
2513 if (err)
2514 goto err_setup_tx;
2515
2516 /* allocate receive descriptors */
2517 err = igb_setup_all_rx_resources(adapter);
2518 if (err)
2519 goto err_setup_rx;
2520
88a268c1 2521 igb_power_up_link(adapter);
9d5c8243 2522
9d5c8243
AK
2523 /* before we allocate an interrupt, we must be ready to handle it.
2524 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2525 * as soon as we call pci_request_irq, so we have to set up our
2526 * clean_rx handler before we do so. */
2527 igb_configure(adapter);
2528
2529 err = igb_request_irq(adapter);
2530 if (err)
2531 goto err_req_irq;
2532
2533 /* From here on the code is the same as igb_up() */
2534 clear_bit(__IGB_DOWN, &adapter->state);
2535
0d1ae7f4
AD
2536 for (i = 0; i < adapter->num_q_vectors; i++)
2537 napi_enable(&(adapter->q_vector[i]->napi));
9d5c8243
AK
2538
2539 /* Clear any pending interrupts. */
2540 rd32(E1000_ICR);
844290e5
PW
2541
2542 igb_irq_enable(adapter);
2543
d4960307
AD
2544 /* notify VFs that reset has been completed */
2545 if (adapter->vfs_allocated_count) {
2546 u32 reg_data = rd32(E1000_CTRL_EXT);
2547 reg_data |= E1000_CTRL_EXT_PFRSTD;
2548 wr32(E1000_CTRL_EXT, reg_data);
2549 }
2550
d55b53ff
JK
2551 netif_tx_start_all_queues(netdev);
2552
749ab2cd
YZ
2553 if (!resuming)
2554 pm_runtime_put(&pdev->dev);
2555
25568a53
AD
2556 /* start the watchdog. */
2557 hw->mac.get_link_status = 1;
2558 schedule_work(&adapter->watchdog_task);
9d5c8243
AK
2559
2560 return 0;
2561
2562err_req_irq:
2563 igb_release_hw_control(adapter);
88a268c1 2564 igb_power_down_link(adapter);
9d5c8243
AK
2565 igb_free_all_rx_resources(adapter);
2566err_setup_rx:
2567 igb_free_all_tx_resources(adapter);
2568err_setup_tx:
2569 igb_reset(adapter);
749ab2cd
YZ
2570 if (!resuming)
2571 pm_runtime_put(&pdev->dev);
9d5c8243
AK
2572
2573 return err;
2574}
2575
749ab2cd
YZ
2576static int igb_open(struct net_device *netdev)
2577{
2578 return __igb_open(netdev, false);
2579}
2580
9d5c8243
AK
2581/**
2582 * igb_close - Disables a network interface
2583 * @netdev: network interface device structure
2584 *
2585 * Returns 0, this is not allowed to fail
2586 *
2587 * The close entry point is called when an interface is de-activated
2588 * by the OS. The hardware is still under the driver's control, but
2589 * needs to be disabled. A global MAC reset is issued to stop the
2590 * hardware, and all transmit and receive resources are freed.
2591 **/
749ab2cd 2592static int __igb_close(struct net_device *netdev, bool suspending)
9d5c8243
AK
2593{
2594 struct igb_adapter *adapter = netdev_priv(netdev);
749ab2cd 2595 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2596
2597 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
9d5c8243 2598
749ab2cd
YZ
2599 if (!suspending)
2600 pm_runtime_get_sync(&pdev->dev);
2601
2602 igb_down(adapter);
9d5c8243
AK
2603 igb_free_irq(adapter);
2604
2605 igb_free_all_tx_resources(adapter);
2606 igb_free_all_rx_resources(adapter);
2607
749ab2cd
YZ
2608 if (!suspending)
2609 pm_runtime_put_sync(&pdev->dev);
9d5c8243
AK
2610 return 0;
2611}
2612
749ab2cd
YZ
2613static int igb_close(struct net_device *netdev)
2614{
2615 return __igb_close(netdev, false);
2616}
2617
9d5c8243
AK
2618/**
2619 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
9d5c8243
AK
2620 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2621 *
2622 * Return 0 on success, negative on failure
2623 **/
80785298 2624int igb_setup_tx_resources(struct igb_ring *tx_ring)
9d5c8243 2625{
59d71989 2626 struct device *dev = tx_ring->dev;
9d5c8243
AK
2627 int size;
2628
06034649 2629 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
f33005a6
AD
2630
2631 tx_ring->tx_buffer_info = vzalloc(size);
06034649 2632 if (!tx_ring->tx_buffer_info)
9d5c8243 2633 goto err;
9d5c8243
AK
2634
2635 /* round up to nearest 4K */
85e8d004 2636 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
9d5c8243
AK
2637 tx_ring->size = ALIGN(tx_ring->size, 4096);
2638
59d71989
AD
2639 tx_ring->desc = dma_alloc_coherent(dev,
2640 tx_ring->size,
2641 &tx_ring->dma,
2642 GFP_KERNEL);
9d5c8243
AK
2643 if (!tx_ring->desc)
2644 goto err;
2645
9d5c8243
AK
2646 tx_ring->next_to_use = 0;
2647 tx_ring->next_to_clean = 0;
81c2fc22 2648
9d5c8243
AK
2649 return 0;
2650
2651err:
06034649 2652 vfree(tx_ring->tx_buffer_info);
f33005a6
AD
2653 tx_ring->tx_buffer_info = NULL;
2654 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
9d5c8243
AK
2655 return -ENOMEM;
2656}
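/* A minimal sketch (not part of the driver): the allocation above sizes the
 * descriptor DMA region as count descriptors rounded up to a whole 4K page.
 * Assuming the usual 16-byte advanced Tx descriptor, 256 descriptors fill
 * exactly 4096 bytes, while 320 would round up to 8192.
 * example_ring_bytes() is a hypothetical helper showing the calculation.
 */
#if 0	/* illustrative example only, never compiled */
static u32 example_ring_bytes(u16 count)
{
	u32 size = count * sizeof(union e1000_adv_tx_desc);

	return ALIGN(size, 4096);
}
#endif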
2657
2658/**
2659 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2660 * (Descriptors) for all queues
2661 * @adapter: board private structure
2662 *
2663 * Return 0 on success, negative on failure
2664 **/
2665static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2666{
439705e1 2667 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2668 int i, err = 0;
2669
2670 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 2671 err = igb_setup_tx_resources(adapter->tx_ring[i]);
9d5c8243 2672 if (err) {
439705e1 2673 dev_err(&pdev->dev,
9d5c8243
AK
2674 "Allocation for Tx Queue %u failed\n", i);
2675 for (i--; i >= 0; i--)
3025a446 2676 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
2677 break;
2678 }
2679 }
2680
2681 return err;
2682}
2683
2684/**
85b430b4
AD
2685 * igb_setup_tctl - configure the transmit control registers
2686 * @adapter: Board private structure
9d5c8243 2687 **/
d7ee5b3a 2688void igb_setup_tctl(struct igb_adapter *adapter)
9d5c8243 2689{
9d5c8243
AK
2690 struct e1000_hw *hw = &adapter->hw;
2691 u32 tctl;
9d5c8243 2692
85b430b4
AD
2693 /* disable queue 0 which is enabled by default on 82575 and 82576 */
2694 wr32(E1000_TXDCTL(0), 0);
9d5c8243
AK
2695
2696 /* Program the Transmit Control Register */
9d5c8243
AK
2697 tctl = rd32(E1000_TCTL);
2698 tctl &= ~E1000_TCTL_CT;
2699 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2700 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
2701
2702 igb_config_collision_dist(hw);
2703
9d5c8243
AK
2704 /* Enable transmits */
2705 tctl |= E1000_TCTL_EN;
2706
2707 wr32(E1000_TCTL, tctl);
2708}
2709
85b430b4
AD
2710/**
2711 * igb_configure_tx_ring - Configure transmit ring after Reset
2712 * @adapter: board private structure
2713 * @ring: tx ring to configure
2714 *
2715 * Configure a transmit ring after a reset.
2716 **/
d7ee5b3a
AD
2717void igb_configure_tx_ring(struct igb_adapter *adapter,
2718 struct igb_ring *ring)
85b430b4
AD
2719{
2720 struct e1000_hw *hw = &adapter->hw;
a74420e0 2721 u32 txdctl = 0;
85b430b4
AD
2722 u64 tdba = ring->dma;
2723 int reg_idx = ring->reg_idx;
2724
2725 /* disable the queue */
a74420e0 2726 wr32(E1000_TXDCTL(reg_idx), 0);
85b430b4
AD
2727 wrfl();
2728 mdelay(10);
2729
2730 wr32(E1000_TDLEN(reg_idx),
2731 ring->count * sizeof(union e1000_adv_tx_desc));
2732 wr32(E1000_TDBAL(reg_idx),
2733 tdba & 0x00000000ffffffffULL);
2734 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2735
fce99e34 2736 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
a74420e0 2737 wr32(E1000_TDH(reg_idx), 0);
fce99e34 2738 writel(0, ring->tail);
85b430b4
AD
2739
2740 txdctl |= IGB_TX_PTHRESH;
2741 txdctl |= IGB_TX_HTHRESH << 8;
2742 txdctl |= IGB_TX_WTHRESH << 16;
2743
2744 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2745 wr32(E1000_TXDCTL(reg_idx), txdctl);
2746}
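/* A minimal sketch (not part of the driver): TXDCTL above packs the three
 * descriptor thresholds into one register value -- prefetch threshold in the
 * low bits, host threshold shifted by 8, write-back threshold shifted by 16 --
 * before setting the queue-enable bit.  example_pack_txdctl() is hypothetical.
 */
#if 0	/* illustrative example only, never compiled */
static u32 example_pack_txdctl(u8 pthresh, u8 hthresh, u8 wthresh)
{
	u32 txdctl = 0;

	txdctl |= pthresh;		/* descriptor prefetch threshold */
	txdctl |= hthresh << 8;		/* host threshold */
	txdctl |= wthresh << 16;	/* write-back threshold */

	return txdctl | E1000_TXDCTL_QUEUE_ENABLE;
}
#endif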
2747
2748/**
2749 * igb_configure_tx - Configure transmit Unit after Reset
2750 * @adapter: board private structure
2751 *
2752 * Configure the Tx unit of the MAC after a reset.
2753 **/
2754static void igb_configure_tx(struct igb_adapter *adapter)
2755{
2756 int i;
2757
2758 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 2759 igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
85b430b4
AD
2760}
2761
9d5c8243
AK
2762/**
2763 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
9d5c8243
AK
2764 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2765 *
2766 * Returns 0 on success, negative on failure
2767 **/
80785298 2768int igb_setup_rx_resources(struct igb_ring *rx_ring)
9d5c8243 2769{
59d71989 2770 struct device *dev = rx_ring->dev;
f33005a6 2771 int size;
9d5c8243 2772
06034649 2773 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
f33005a6
AD
2774
2775 rx_ring->rx_buffer_info = vzalloc(size);
06034649 2776 if (!rx_ring->rx_buffer_info)
9d5c8243 2777 goto err;
9d5c8243 2778
9d5c8243
AK
2779
2780 /* Round up to nearest 4K */
f33005a6 2781 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
9d5c8243
AK
2782 rx_ring->size = ALIGN(rx_ring->size, 4096);
2783
59d71989
AD
2784 rx_ring->desc = dma_alloc_coherent(dev,
2785 rx_ring->size,
2786 &rx_ring->dma,
2787 GFP_KERNEL);
9d5c8243
AK
2788 if (!rx_ring->desc)
2789 goto err;
2790
2791 rx_ring->next_to_clean = 0;
2792 rx_ring->next_to_use = 0;
9d5c8243 2793
9d5c8243
AK
2794 return 0;
2795
2796err:
06034649
AD
2797 vfree(rx_ring->rx_buffer_info);
2798 rx_ring->rx_buffer_info = NULL;
f33005a6 2799 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
9d5c8243
AK
2800 return -ENOMEM;
2801}
2802
2803/**
2804 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
2805 * (Descriptors) for all queues
2806 * @adapter: board private structure
2807 *
2808 * Return 0 on success, negative on failure
2809 **/
2810static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2811{
439705e1 2812 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
2813 int i, err = 0;
2814
2815 for (i = 0; i < adapter->num_rx_queues; i++) {
3025a446 2816 err = igb_setup_rx_resources(adapter->rx_ring[i]);
9d5c8243 2817 if (err) {
439705e1 2818 dev_err(&pdev->dev,
9d5c8243
AK
2819 "Allocation for Rx Queue %u failed\n", i);
2820 for (i--; i >= 0; i--)
3025a446 2821 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
2822 break;
2823 }
2824 }
2825
2826 return err;
2827}
2828
06cf2666
AD
2829/**
2830 * igb_setup_mrqc - configure the multiple receive queue control registers
2831 * @adapter: Board private structure
2832 **/
2833static void igb_setup_mrqc(struct igb_adapter *adapter)
2834{
2835 struct e1000_hw *hw = &adapter->hw;
2836 u32 mrqc, rxcsum;
2837 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2838 union e1000_reta {
2839 u32 dword;
2840 u8 bytes[4];
2841 } reta;
2842 static const u8 rsshash[40] = {
2843 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2844 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2845 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2846 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2847
2848 /* Fill out hash function seeds */
2849 for (j = 0; j < 10; j++) {
2850 u32 rsskey = rsshash[(j * 4)];
2851 rsskey |= rsshash[(j * 4) + 1] << 8;
2852 rsskey |= rsshash[(j * 4) + 2] << 16;
2853 rsskey |= rsshash[(j * 4) + 3] << 24;
2854 array_wr32(E1000_RSSRK(0), j, rsskey);
2855 }
2856
a99955fc 2857 num_rx_queues = adapter->rss_queues;
06cf2666
AD
2858
2859 if (adapter->vfs_allocated_count) {
2860 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2861 switch (hw->mac.type) {
d2ba2ed8 2862 case e1000_i350:
55cac248
AD
2863 case e1000_82580:
2864 num_rx_queues = 1;
2865 shift = 0;
2866 break;
06cf2666
AD
2867 case e1000_82576:
2868 shift = 3;
2869 num_rx_queues = 2;
2870 break;
2871 case e1000_82575:
2872 shift = 2;
2873 shift2 = 6;
2874 default:
2875 break;
2876 }
2877 } else {
2878 if (hw->mac.type == e1000_82575)
2879 shift = 6;
2880 }
2881
2882 for (j = 0; j < (32 * 4); j++) {
2883 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2884 if (shift2)
2885 reta.bytes[j & 3] |= num_rx_queues << shift2;
2886 if ((j & 3) == 3)
2887 wr32(E1000_RETA(j >> 2), reta.dword);
2888 }
2889
2890 /*
2891 * Disable raw packet checksumming so that RSS hash is placed in
2892 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2893 * offloads as they are enabled by default
2894 */
2895 rxcsum = rd32(E1000_RXCSUM);
2896 rxcsum |= E1000_RXCSUM_PCSD;
2897
2898 if (adapter->hw.mac.type >= e1000_82576)
2899 /* Enable Receive Checksum Offload for SCTP */
2900 rxcsum |= E1000_RXCSUM_CRCOFL;
2901
2902 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2903 wr32(E1000_RXCSUM, rxcsum);
f96a8a0b
CW
2904 /*
2905 * Generate RSS hash based on TCP port numbers and/or
2906 * IPv4/v6 src and dst addresses since UDP cannot be
2907 * hashed reliably due to IP fragmentation
2908 */
2909
2910 mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
2911 E1000_MRQC_RSS_FIELD_IPV4_TCP |
2912 E1000_MRQC_RSS_FIELD_IPV6 |
2913 E1000_MRQC_RSS_FIELD_IPV6_TCP |
2914 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
06cf2666
AD
2915
2916 /* If VMDq is enabled then we set the appropriate mode for that, else
2917 * we default to RSS so that an RSS hash is calculated per packet even
2918 * if we are only using one queue */
2919 if (adapter->vfs_allocated_count) {
2920 if (hw->mac.type > e1000_82575) {
2921 /* Set the default pool for the PF's first queue */
2922 u32 vtctl = rd32(E1000_VT_CTL);
2923 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2924 E1000_VT_CTL_DISABLE_DEF_POOL);
2925 vtctl |= adapter->vfs_allocated_count <<
2926 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2927 wr32(E1000_VT_CTL, vtctl);
2928 }
a99955fc 2929 if (adapter->rss_queues > 1)
f96a8a0b 2930 mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
06cf2666 2931 else
f96a8a0b 2932 mrqc |= E1000_MRQC_ENABLE_VMDQ;
06cf2666 2933 } else {
f96a8a0b
CW
2934 if (hw->mac.type != e1000_i211)
2935 mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
06cf2666
AD
2936 }
2937 igb_vmm_control(adapter);
2938
06cf2666
AD
2939 wr32(E1000_MRQC, mrqc);
2940}
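/* A minimal sketch (not part of the driver): the RETA loop in
 * igb_setup_mrqc() above spreads the 128 redirection-table entries
 * round-robin over the active Rx queues and shifts each queue index into
 * the bit position the MAC expects.  example_reta_pattern() is a
 * hypothetical helper; e.g. with num_rx_queues = 2 and shift = 3 (82576
 * with VMDq) it produces the repeating byte pattern 0x00, 0x08, 0x00, ...
 */
#if 0	/* illustrative example only, never compiled */
static void example_reta_pattern(u8 table[128], u32 num_rx_queues, u32 shift)
{
	u32 j;

	for (j = 0; j < 128; j++)
		table[j] = (j % num_rx_queues) << shift;
}
#endif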
2941
9d5c8243
AK
2942/**
2943 * igb_setup_rctl - configure the receive control registers
2944 * @adapter: Board private structure
2945 **/
d7ee5b3a 2946void igb_setup_rctl(struct igb_adapter *adapter)
9d5c8243
AK
2947{
2948 struct e1000_hw *hw = &adapter->hw;
2949 u32 rctl;
9d5c8243
AK
2950
2951 rctl = rd32(E1000_RCTL);
2952
2953 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
69d728ba 2954 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
9d5c8243 2955
69d728ba 2956 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
28b0759c 2957 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
9d5c8243 2958
87cb7e8c
AK
2959 /*
2960 * enable stripping of CRC. It's unlikely this will break BMC
2961 * redirection as it did with e1000. Newer features require
2962 * that the HW strips the CRC.
73cd78f1 2963 */
87cb7e8c 2964 rctl |= E1000_RCTL_SECRC;
9d5c8243 2965
559e9c49 2966 /* disable store bad packets and clear size bits. */
ec54d7d6 2967 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
9d5c8243 2968
6ec43fe6
AD
2969 /* enable LPE to prevent packets larger than max_frame_size */
2970 rctl |= E1000_RCTL_LPE;
9d5c8243 2971
952f72a8
AD
2972 /* disable queue 0 to prevent tail write w/o re-config */
2973 wr32(E1000_RXDCTL(0), 0);
9d5c8243 2974
e1739522
AD
2975 /* Attention!!! For SR-IOV PF driver operations you must enable
2976 * queue drop for all VF and PF queues to prevent head of line blocking
2977 * if an untrusted VF does not provide descriptors to hardware.
2978 */
2979 if (adapter->vfs_allocated_count) {
e1739522
AD
2980 /* set all queue drop enable bits */
2981 wr32(E1000_QDE, ALL_QUEUES);
e1739522
AD
2982 }
2983
89eaefb6
BG
2984 /* This is useful for sniffing bad packets. */
2985 if (adapter->netdev->features & NETIF_F_RXALL) {
2986 /* UPE and MPE will be handled by normal PROMISC logic
2987 * in e1000e_set_rx_mode */
2988 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
2989 E1000_RCTL_BAM | /* RX All Bcast Pkts */
2990 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
2991
2992 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
2993 E1000_RCTL_DPF | /* Allow filtered pause */
2994 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
2995 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
2996 * and that breaks VLANs.
2997 */
2998 }
2999
9d5c8243
AK
3000 wr32(E1000_RCTL, rctl);
3001}
3002
7d5753f0
AD
3003static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3004 int vfn)
3005{
3006 struct e1000_hw *hw = &adapter->hw;
3007 u32 vmolr;
3008
3009 /* if it isn't the PF, check to see if VFs are enabled and
3010 * increase the size to support VLAN tags */
3011 if (vfn < adapter->vfs_allocated_count &&
3012 adapter->vf_data[vfn].vlans_enabled)
3013 size += VLAN_TAG_SIZE;
3014
3015 vmolr = rd32(E1000_VMOLR(vfn));
3016 vmolr &= ~E1000_VMOLR_RLPML_MASK;
3017 vmolr |= size | E1000_VMOLR_LPE;
3018 wr32(E1000_VMOLR(vfn), vmolr);
3019
3020 return 0;
3021}
3022
e1739522
AD
3023/**
3024 * igb_rlpml_set - set maximum receive packet size
3025 * @adapter: board private structure
3026 *
3027 * Configure maximum receivable packet size.
3028 **/
3029static void igb_rlpml_set(struct igb_adapter *adapter)
3030{
153285f9 3031 u32 max_frame_size = adapter->max_frame_size;
e1739522
AD
3032 struct e1000_hw *hw = &adapter->hw;
3033 u16 pf_id = adapter->vfs_allocated_count;
3034
e1739522
AD
3035 if (pf_id) {
3036 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
153285f9
AD
3037 /*
3038 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3039 * to our max jumbo frame size, in case we need to enable
3040 * jumbo frames on one of the rings later.
3041 * This will not pass over-length frames into the default
3042 * queue because it's gated by the VMOLR.RLPML.
3043 */
7d5753f0 3044 max_frame_size = MAX_JUMBO_FRAME_SIZE;
e1739522
AD
3045 }
3046
3047 wr32(E1000_RLPML, max_frame_size);
3048}
3049
8151d294
WM
3050static inline void igb_set_vmolr(struct igb_adapter *adapter,
3051 int vfn, bool aupe)
7d5753f0
AD
3052{
3053 struct e1000_hw *hw = &adapter->hw;
3054 u32 vmolr;
3055
3056 /*
3057 * This register exists only on 82576 and newer so if we are older then
3058 * we should exit and do nothing
3059 */
3060 if (hw->mac.type < e1000_82576)
3061 return;
3062
3063 vmolr = rd32(E1000_VMOLR(vfn));
8151d294
WM
3064 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3065 if (aupe)
3066 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3067 else
3068 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
7d5753f0
AD
3069
3070 /* clear all bits that might not be set */
3071 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
3072
a99955fc 3073 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
7d5753f0
AD
3074 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3075 /*
3076 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3077 * multicast packets
3078 */
3079 if (vfn <= adapter->vfs_allocated_count)
3080 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3081
3082 wr32(E1000_VMOLR(vfn), vmolr);
3083}
3084
85b430b4
AD
3085/**
3086 * igb_configure_rx_ring - Configure a receive ring after Reset
3087 * @adapter: board private structure
3088 * @ring: receive ring to be configured
3089 *
3090 * Configure the Rx unit of the MAC after a reset.
3091 **/
d7ee5b3a
AD
3092void igb_configure_rx_ring(struct igb_adapter *adapter,
3093 struct igb_ring *ring)
85b430b4
AD
3094{
3095 struct e1000_hw *hw = &adapter->hw;
3096 u64 rdba = ring->dma;
3097 int reg_idx = ring->reg_idx;
a74420e0 3098 u32 srrctl = 0, rxdctl = 0;
85b430b4
AD
3099
3100 /* disable the queue */
a74420e0 3101 wr32(E1000_RXDCTL(reg_idx), 0);
85b430b4
AD
3102
3103 /* Set DMA base address registers */
3104 wr32(E1000_RDBAL(reg_idx),
3105 rdba & 0x00000000ffffffffULL);
3106 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3107 wr32(E1000_RDLEN(reg_idx),
3108 ring->count * sizeof(union e1000_adv_rx_desc));
3109
3110 /* initialize head and tail */
fce99e34 3111 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
a74420e0 3112 wr32(E1000_RDH(reg_idx), 0);
fce99e34 3113 writel(0, ring->tail);
85b430b4 3114
952f72a8 3115 /* set descriptor configuration */
44390ca6 3116 srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
952f72a8 3117#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
44390ca6 3118 srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3119#else
44390ca6 3120 srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
952f72a8 3121#endif
44390ca6 3122 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
3c89f6d0 3123#ifdef CONFIG_IGB_PTP
06218a8d 3124 if (hw->mac.type >= e1000_82580)
757b77e2 3125 srrctl |= E1000_SRRCTL_TIMESTAMP;
3c89f6d0 3126#endif /* CONFIG_IGB_PTP */
e6bdb6fe
NN
3127 /* Only set Drop Enable if we are supporting multiple queues */
3128 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
3129 srrctl |= E1000_SRRCTL_DROP_EN;
952f72a8
AD
3130
3131 wr32(E1000_SRRCTL(reg_idx), srrctl);
3132
7d5753f0 3133 /* set filtering for VMDQ pools */
8151d294 3134 igb_set_vmolr(adapter, reg_idx & 0x7, true);
7d5753f0 3135
85b430b4
AD
3136 rxdctl |= IGB_RX_PTHRESH;
3137 rxdctl |= IGB_RX_HTHRESH << 8;
3138 rxdctl |= IGB_RX_WTHRESH << 16;
a74420e0
AD
3139
3140 /* enable receive descriptor fetching */
3141 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
85b430b4
AD
3142 wr32(E1000_RXDCTL(reg_idx), rxdctl);
3143}
3144
9d5c8243
AK
3145/**
3146 * igb_configure_rx - Configure receive Unit after Reset
3147 * @adapter: board private structure
3148 *
3149 * Configure the Rx unit of the MAC after a reset.
3150 **/
3151static void igb_configure_rx(struct igb_adapter *adapter)
3152{
9107584e 3153 int i;
9d5c8243 3154
68d480c4
AD
3155 /* set UTA to appropriate mode */
3156 igb_set_uta(adapter);
3157
26ad9178
AD
3158 /* set the correct pool for the PF default MAC address in entry 0 */
3159 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3160 adapter->vfs_allocated_count);
3161
06cf2666
AD
3162 /* Set up the HW Rx Head and Tail Descriptor Pointers and
3163 * the Base and Length of the Rx Descriptor Ring */
3164 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3165 igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
9d5c8243
AK
3166}
3167
3168/**
3169 * igb_free_tx_resources - Free Tx Resources per Queue
9d5c8243
AK
3170 * @tx_ring: Tx descriptor ring for a specific queue
3171 *
3172 * Free all transmit software resources
3173 **/
68fd9910 3174void igb_free_tx_resources(struct igb_ring *tx_ring)
9d5c8243 3175{
3b644cf6 3176 igb_clean_tx_ring(tx_ring);
9d5c8243 3177
06034649
AD
3178 vfree(tx_ring->tx_buffer_info);
3179 tx_ring->tx_buffer_info = NULL;
9d5c8243 3180
439705e1
AD
3181 /* if not set, then don't free */
3182 if (!tx_ring->desc)
3183 return;
3184
59d71989
AD
3185 dma_free_coherent(tx_ring->dev, tx_ring->size,
3186 tx_ring->desc, tx_ring->dma);
9d5c8243
AK
3187
3188 tx_ring->desc = NULL;
3189}
3190
3191/**
3192 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3193 * @adapter: board private structure
3194 *
3195 * Free all transmit software resources
3196 **/
3197static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3198{
3199 int i;
3200
3201 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3202 igb_free_tx_resources(adapter->tx_ring[i]);
9d5c8243
AK
3203}
3204
ebe42d16
AD
3205void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3206 struct igb_tx_buffer *tx_buffer)
3207{
3208 if (tx_buffer->skb) {
3209 dev_kfree_skb_any(tx_buffer->skb);
3210 if (tx_buffer->dma)
3211 dma_unmap_single(ring->dev,
3212 tx_buffer->dma,
3213 tx_buffer->length,
3214 DMA_TO_DEVICE);
3215 } else if (tx_buffer->dma) {
3216 dma_unmap_page(ring->dev,
3217 tx_buffer->dma,
3218 tx_buffer->length,
3219 DMA_TO_DEVICE);
3220 }
3221 tx_buffer->next_to_watch = NULL;
3222 tx_buffer->skb = NULL;
3223 tx_buffer->dma = 0;
3224 /* buffer_info must be completely set up in the transmit path */
9d5c8243
AK
3225}
3226
3227/**
3228 * igb_clean_tx_ring - Free Tx Buffers
9d5c8243
AK
3229 * @tx_ring: ring to be cleaned
3230 **/
3b644cf6 3231static void igb_clean_tx_ring(struct igb_ring *tx_ring)
9d5c8243 3232{
06034649 3233 struct igb_tx_buffer *buffer_info;
9d5c8243 3234 unsigned long size;
6ad4edfc 3235 u16 i;
9d5c8243 3236
06034649 3237 if (!tx_ring->tx_buffer_info)
9d5c8243
AK
3238 return;
3239 /* Free all the Tx ring sk_buffs */
3240
3241 for (i = 0; i < tx_ring->count; i++) {
06034649 3242 buffer_info = &tx_ring->tx_buffer_info[i];
80785298 3243 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
9d5c8243
AK
3244 }
3245
dad8a3b3
JF
3246 netdev_tx_reset_queue(txring_txq(tx_ring));
3247
06034649
AD
3248 size = sizeof(struct igb_tx_buffer) * tx_ring->count;
3249 memset(tx_ring->tx_buffer_info, 0, size);
9d5c8243
AK
3250
3251 /* Zero out the descriptor ring */
9d5c8243
AK
3252 memset(tx_ring->desc, 0, tx_ring->size);
3253
3254 tx_ring->next_to_use = 0;
3255 tx_ring->next_to_clean = 0;
9d5c8243
AK
3256}
3257
3258/**
3259 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3260 * @adapter: board private structure
3261 **/
3262static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3263{
3264 int i;
3265
3266 for (i = 0; i < adapter->num_tx_queues; i++)
3025a446 3267 igb_clean_tx_ring(adapter->tx_ring[i]);
9d5c8243
AK
3268}
3269
3270/**
3271 * igb_free_rx_resources - Free Rx Resources
9d5c8243
AK
3272 * @rx_ring: ring to clean the resources from
3273 *
3274 * Free all receive software resources
3275 **/
68fd9910 3276void igb_free_rx_resources(struct igb_ring *rx_ring)
9d5c8243 3277{
3b644cf6 3278 igb_clean_rx_ring(rx_ring);
9d5c8243 3279
06034649
AD
3280 vfree(rx_ring->rx_buffer_info);
3281 rx_ring->rx_buffer_info = NULL;
9d5c8243 3282
439705e1
AD
3283 /* if not set, then don't free */
3284 if (!rx_ring->desc)
3285 return;
3286
59d71989
AD
3287 dma_free_coherent(rx_ring->dev, rx_ring->size,
3288 rx_ring->desc, rx_ring->dma);
9d5c8243
AK
3289
3290 rx_ring->desc = NULL;
3291}
3292
3293/**
3294 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3295 * @adapter: board private structure
3296 *
3297 * Free all receive software resources
3298 **/
3299static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3300{
3301 int i;
3302
3303 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3304 igb_free_rx_resources(adapter->rx_ring[i]);
9d5c8243
AK
3305}
3306
3307/**
3308 * igb_clean_rx_ring - Free Rx Buffers per Queue
9d5c8243
AK
3309 * @rx_ring: ring to free buffers from
3310 **/
3b644cf6 3311static void igb_clean_rx_ring(struct igb_ring *rx_ring)
9d5c8243 3312{
9d5c8243 3313 unsigned long size;
c023cd88 3314 u16 i;
9d5c8243 3315
06034649 3316 if (!rx_ring->rx_buffer_info)
9d5c8243 3317 return;
439705e1 3318
9d5c8243
AK
3319 /* Free all the Rx ring sk_buffs */
3320 for (i = 0; i < rx_ring->count; i++) {
06034649 3321 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
9d5c8243 3322 if (buffer_info->dma) {
59d71989 3323 dma_unmap_single(rx_ring->dev,
80785298 3324 buffer_info->dma,
44390ca6 3325 IGB_RX_HDR_LEN,
59d71989 3326 DMA_FROM_DEVICE);
9d5c8243
AK
3327 buffer_info->dma = 0;
3328 }
3329
3330 if (buffer_info->skb) {
3331 dev_kfree_skb(buffer_info->skb);
3332 buffer_info->skb = NULL;
3333 }
6ec43fe6 3334 if (buffer_info->page_dma) {
59d71989 3335 dma_unmap_page(rx_ring->dev,
80785298 3336 buffer_info->page_dma,
6ec43fe6 3337 PAGE_SIZE / 2,
59d71989 3338 DMA_FROM_DEVICE);
6ec43fe6
AD
3339 buffer_info->page_dma = 0;
3340 }
9d5c8243 3341 if (buffer_info->page) {
9d5c8243
AK
3342 put_page(buffer_info->page);
3343 buffer_info->page = NULL;
bf36c1a0 3344 buffer_info->page_offset = 0;
9d5c8243
AK
3345 }
3346 }
3347
06034649
AD
3348 size = sizeof(struct igb_rx_buffer) * rx_ring->count;
3349 memset(rx_ring->rx_buffer_info, 0, size);
9d5c8243
AK
3350
3351 /* Zero out the descriptor ring */
3352 memset(rx_ring->desc, 0, rx_ring->size);
3353
3354 rx_ring->next_to_clean = 0;
3355 rx_ring->next_to_use = 0;
9d5c8243
AK
3356}
3357
3358/**
3359 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3360 * @adapter: board private structure
3361 **/
3362static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3363{
3364 int i;
3365
3366 for (i = 0; i < adapter->num_rx_queues; i++)
3025a446 3367 igb_clean_rx_ring(adapter->rx_ring[i]);
9d5c8243
AK
3368}
3369
3370/**
3371 * igb_set_mac - Change the Ethernet Address of the NIC
3372 * @netdev: network interface device structure
3373 * @p: pointer to an address structure
3374 *
3375 * Returns 0 on success, negative on failure
3376 **/
3377static int igb_set_mac(struct net_device *netdev, void *p)
3378{
3379 struct igb_adapter *adapter = netdev_priv(netdev);
28b0759c 3380 struct e1000_hw *hw = &adapter->hw;
9d5c8243
AK
3381 struct sockaddr *addr = p;
3382
3383 if (!is_valid_ether_addr(addr->sa_data))
3384 return -EADDRNOTAVAIL;
3385
3386 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
28b0759c 3387 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
9d5c8243 3388
26ad9178
AD
3389 /* set the correct pool for the new PF MAC address in entry 0 */
3390 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3391 adapter->vfs_allocated_count);
e1739522 3392
9d5c8243
AK
3393 return 0;
3394}
3395
3396/**
68d480c4 3397 * igb_write_mc_addr_list - write multicast addresses to MTA
9d5c8243
AK
3398 * @netdev: network interface device structure
3399 *
68d480c4
AD
3400 * Writes multicast address list to the MTA hash table.
3401 * Returns: -ENOMEM on failure
3402 * 0 on no addresses written
3403 * X on writing X addresses to MTA
9d5c8243 3404 **/
68d480c4 3405static int igb_write_mc_addr_list(struct net_device *netdev)
9d5c8243
AK
3406{
3407 struct igb_adapter *adapter = netdev_priv(netdev);
3408 struct e1000_hw *hw = &adapter->hw;
22bedad3 3409 struct netdev_hw_addr *ha;
68d480c4 3410 u8 *mta_list;
9d5c8243
AK
3411 int i;
3412
4cd24eaf 3413 if (netdev_mc_empty(netdev)) {
68d480c4
AD
3414 /* nothing to program, so clear mc list */
3415 igb_update_mc_addr_list(hw, NULL, 0);
3416 igb_restore_vf_multicasts(adapter);
3417 return 0;
3418 }
9d5c8243 3419
4cd24eaf 3420 mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
68d480c4
AD
3421 if (!mta_list)
3422 return -ENOMEM;
ff41f8dc 3423
68d480c4 3424 /* The shared function expects a packed array of only addresses. */
48e2f183 3425 i = 0;
22bedad3
JP
3426 netdev_for_each_mc_addr(ha, netdev)
3427 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
68d480c4 3428
68d480c4
AD
3429 igb_update_mc_addr_list(hw, mta_list, i);
3430 kfree(mta_list);
3431
4cd24eaf 3432 return netdev_mc_count(netdev);
68d480c4
AD
3433}
3434
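igb_write_mc_addr_list() hands the shared code a single packed array of 6-byte addresses rather than a list of netdev_hw_addr entries. A minimal user-space sketch of that packing step; the function and variable names here are illustrative, not the driver's:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Pack a list of MAC addresses into one contiguous buffer, mirroring the
 * netdev_for_each_mc_addr() copy loop above; returns the address count. */
static int pack_mc_list(const unsigned char addrs[][ETH_ALEN], int count,
			unsigned char *mta_list)
{
	int i;

	for (i = 0; i < count; i++)
		memcpy(mta_list + i * ETH_ALEN, addrs[i], ETH_ALEN);

	return count;
}

int main(void)
{
	const unsigned char addrs[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	unsigned char mta_list[2 * ETH_ALEN];

	printf("packed %d addresses\n", pack_mc_list(addrs, 2, mta_list));
	return 0;
}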
3435/**
3436 * igb_write_uc_addr_list - write unicast addresses to RAR table
3437 * @netdev: network interface device structure
3438 *
3439 * Writes unicast address list to the RAR table.
3440 * Returns: -ENOMEM on failure/insufficient address space
3441 * 0 on no addresses written
3442 * X on writing X addresses to the RAR table
3443 **/
3444static int igb_write_uc_addr_list(struct net_device *netdev)
3445{
3446 struct igb_adapter *adapter = netdev_priv(netdev);
3447 struct e1000_hw *hw = &adapter->hw;
3448 unsigned int vfn = adapter->vfs_allocated_count;
3449 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
3450 int count = 0;
3451
3452 /* return ENOMEM indicating insufficient memory for addresses */
32e7bfc4 3453 if (netdev_uc_count(netdev) > rar_entries)
68d480c4 3454 return -ENOMEM;
9d5c8243 3455
32e7bfc4 3456 if (!netdev_uc_empty(netdev) && rar_entries) {
ff41f8dc 3457 struct netdev_hw_addr *ha;
32e7bfc4
JP
3458
3459 netdev_for_each_uc_addr(ha, netdev) {
ff41f8dc
AD
3460 if (!rar_entries)
3461 break;
26ad9178
AD
3462 igb_rar_set_qsel(adapter, ha->addr,
3463 rar_entries--,
68d480c4
AD
3464 vfn);
3465 count++;
ff41f8dc
AD
3466 }
3467 }
3468 /* write the addresses in reverse order to avoid write combining */
3469 for (; rar_entries > 0 ; rar_entries--) {
3470 wr32(E1000_RAH(rar_entries), 0);
3471 wr32(E1000_RAL(rar_entries), 0);
3472 }
3473 wrfl();
3474
68d480c4
AD
3475 return count;
3476}
3477
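igb_write_uc_addr_list() only has the receive-address registers left over after the PF and each VF claim one, so its budget is rar_entry_count - (vfs_allocated_count + 1). A small arithmetic sketch of that budget; the entry count of 24 is an illustrative assumption, not a value read from the hardware here:

#include <stdio.h>

/* How many RAR slots remain for secondary unicast addresses. */
static unsigned int uc_rar_budget(unsigned int rar_entry_count,
				  unsigned int vfs_allocated_count)
{
	return rar_entry_count - (vfs_allocated_count + 1);
}

int main(void)
{
	/* e.g. 24 total entries, 7 VFs: 24 - (7 + 1) = 16 left for the list */
	printf("%u\n", uc_rar_budget(24, 7));
	return 0;
}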
3478/**
3479 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3480 * @netdev: network interface device structure
3481 *
3482 * The set_rx_mode entry point is called whenever the unicast or multicast
3483 * address lists or the network interface flags are updated. This routine is
3484 * responsible for configuring the hardware for proper unicast, multicast,
3485 * promiscuous mode, and all-multi behavior.
3486 **/
3487static void igb_set_rx_mode(struct net_device *netdev)
3488{
3489 struct igb_adapter *adapter = netdev_priv(netdev);
3490 struct e1000_hw *hw = &adapter->hw;
3491 unsigned int vfn = adapter->vfs_allocated_count;
3492 u32 rctl, vmolr = 0;
3493 int count;
3494
3495 /* Check for Promiscuous and All Multicast modes */
3496 rctl = rd32(E1000_RCTL);
3497
3498 /* clear the affected bits */
3499 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
3500
3501 if (netdev->flags & IFF_PROMISC) {
3502 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3503 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
3504 } else {
3505 if (netdev->flags & IFF_ALLMULTI) {
3506 rctl |= E1000_RCTL_MPE;
3507 vmolr |= E1000_VMOLR_MPME;
3508 } else {
3509 /*
3510 * Write addresses to the MTA; if the attempt fails
25985edc 3511 * then we should just turn on promiscuous mode so
68d480c4
AD
3512 * that we can at least receive multicast traffic
3513 */
3514 count = igb_write_mc_addr_list(netdev);
3515 if (count < 0) {
3516 rctl |= E1000_RCTL_MPE;
3517 vmolr |= E1000_VMOLR_MPME;
3518 } else if (count) {
3519 vmolr |= E1000_VMOLR_ROMPE;
3520 }
3521 }
3522 /*
3523 * Write addresses to available RAR registers; if there is not
3524 * sufficient space to store all the addresses then enable
25985edc 3525 * unicast promiscuous mode
68d480c4
AD
3526 */
3527 count = igb_write_uc_addr_list(netdev);
3528 if (count < 0) {
3529 rctl |= E1000_RCTL_UPE;
3530 vmolr |= E1000_VMOLR_ROPE;
3531 }
3532 rctl |= E1000_RCTL_VFE;
28fc06f5 3533 }
68d480c4 3534 wr32(E1000_RCTL, rctl);
28fc06f5 3535
68d480c4
AD
3536 /*
3537 * In order to support SR-IOV and eventually VMDq it is necessary to set
3538 * the VMOLR to enable the appropriate modes. Without this workaround
3539 * we will have issues with VLAN tag stripping not being done for frames
3540 * that are only arriving because we are the default pool
3541 */
f96a8a0b 3542 if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
28fc06f5 3543 return;
9d5c8243 3544
68d480c4
AD
3545 vmolr |= rd32(E1000_VMOLR(vfn)) &
3546 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3547 wr32(E1000_VMOLR(vfn), vmolr);
28fc06f5 3548 igb_restore_vf_multicasts(adapter);
9d5c8243
AK
3549}
3550
13800469
GR
3551static void igb_check_wvbr(struct igb_adapter *adapter)
3552{
3553 struct e1000_hw *hw = &adapter->hw;
3554 u32 wvbr = 0;
3555
3556 switch (hw->mac.type) {
3557 case e1000_82576:
3558 case e1000_i350:
3559 if (!(wvbr = rd32(E1000_WVBR)))
3560 return;
3561 break;
3562 default:
3563 break;
3564 }
3565
3566 adapter->wvbr |= wvbr;
3567}
3568
3569#define IGB_STAGGERED_QUEUE_OFFSET 8
3570
3571static void igb_spoof_check(struct igb_adapter *adapter)
3572{
3573 int j;
3574
3575 if (!adapter->wvbr)
3576 return;
3577
3578 for (j = 0; j < adapter->vfs_allocated_count; j++) {
3579 if (adapter->wvbr & (1 << j) ||
3580 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3581 dev_warn(&adapter->pdev->dev,
3582 "Spoof event(s) detected on VF %d\n", j);
3583 adapter->wvbr &=
3584 ~((1 << j) |
3585 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3586 }
3587 }
3588}
3589
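Each VF owns two bits in the WVBR snapshot: bit j and bit j + IGB_STAGGERED_QUEUE_OFFSET (8), one per queue of its pair. A stand-alone sketch of the same test-and-clear logic used in igb_spoof_check() above, written for illustration only:

#include <stdio.h>

#define IGB_STAGGERED_QUEUE_OFFSET 8

/* Returns nonzero and clears the VF's bits if a spoof event was latched. */
static int vf_spoofed(unsigned int *wvbr, int vf)
{
	unsigned int mask = (1u << vf) |
			    (1u << (vf + IGB_STAGGERED_QUEUE_OFFSET));

	if (!(*wvbr & mask))
		return 0;

	*wvbr &= ~mask;
	return 1;
}

int main(void)
{
	unsigned int wvbr = 1u << 10;	/* bit 10 => VF 2, staggered queue */

	printf("VF2 spoofed: %d\n", vf_spoofed(&wvbr, 2));	/* prints 1 */
	return 0;
}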
9d5c8243
AK
3590/* Need to wait a few seconds after link up to get diagnostic information from
3591 * the phy */
3592static void igb_update_phy_info(unsigned long data)
3593{
3594 struct igb_adapter *adapter = (struct igb_adapter *) data;
f5f4cf08 3595 igb_get_phy_info(&adapter->hw);
9d5c8243
AK
3596}
3597
4d6b725e
AD
3598/**
3599 * igb_has_link - check shared code for link and determine up/down
3600 * @adapter: pointer to driver private info
3601 **/
3145535a 3602bool igb_has_link(struct igb_adapter *adapter)
4d6b725e
AD
3603{
3604 struct e1000_hw *hw = &adapter->hw;
3605 bool link_active = false;
3606 s32 ret_val = 0;
3607
3608 /* get_link_status is set on LSC (link status) interrupt or
3609 * rx sequence error interrupt. get_link_status will stay
3610 * true until e1000_check_for_link establishes link
3611 * for copper adapters ONLY
3612 */
3613 switch (hw->phy.media_type) {
3614 case e1000_media_type_copper:
3615 if (hw->mac.get_link_status) {
3616 ret_val = hw->mac.ops.check_for_link(hw);
3617 link_active = !hw->mac.get_link_status;
3618 } else {
3619 link_active = true;
3620 }
3621 break;
4d6b725e
AD
3622 case e1000_media_type_internal_serdes:
3623 ret_val = hw->mac.ops.check_for_link(hw);
3624 link_active = hw->mac.serdes_has_link;
3625 break;
3626 default:
3627 case e1000_media_type_unknown:
3628 break;
3629 }
3630
3631 return link_active;
3632}
3633
563988dc
SA
3634static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3635{
3636 bool ret = false;
3637 u32 ctrl_ext, thstat;
3638
f96a8a0b 3639 /* check for thermal sensor event on i350 copper only */
563988dc
SA
3640 if (hw->mac.type == e1000_i350) {
3641 thstat = rd32(E1000_THSTAT);
3642 ctrl_ext = rd32(E1000_CTRL_EXT);
3643
3644 if ((hw->phy.media_type == e1000_media_type_copper) &&
3645 !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
3646 ret = !!(thstat & event);
3647 }
3648 }
3649
3650 return ret;
3651}
3652
9d5c8243
AK
3653/**
3654 * igb_watchdog - Timer Call-back
3655 * @data: pointer to adapter cast into an unsigned long
3656 **/
3657static void igb_watchdog(unsigned long data)
3658{
3659 struct igb_adapter *adapter = (struct igb_adapter *)data;
3660 /* Do the rest outside of interrupt context */
3661 schedule_work(&adapter->watchdog_task);
3662}
3663
3664static void igb_watchdog_task(struct work_struct *work)
3665{
3666 struct igb_adapter *adapter = container_of(work,
559e9c49
AD
3667 struct igb_adapter,
3668 watchdog_task);
9d5c8243 3669 struct e1000_hw *hw = &adapter->hw;
9d5c8243 3670 struct net_device *netdev = adapter->netdev;
563988dc 3671 u32 link;
7a6ea550 3672 int i;
9d5c8243 3673
4d6b725e 3674 link = igb_has_link(adapter);
9d5c8243 3675 if (link) {
749ab2cd
YZ
3676 /* Cancel scheduled suspend requests. */
3677 pm_runtime_resume(netdev->dev.parent);
3678
9d5c8243
AK
3679 if (!netif_carrier_ok(netdev)) {
3680 u32 ctrl;
330a6d6a
AD
3681 hw->mac.ops.get_speed_and_duplex(hw,
3682 &adapter->link_speed,
3683 &adapter->link_duplex);
9d5c8243
AK
3684
3685 ctrl = rd32(E1000_CTRL);
527d47c1 3686 /* Link status message must follow this format */
876d2d6f
JK
3687 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
3688 "Duplex, Flow Control: %s\n",
559e9c49
AD
3689 netdev->name,
3690 adapter->link_speed,
3691 adapter->link_duplex == FULL_DUPLEX ?
876d2d6f
JK
3692 "Full" : "Half",
3693 (ctrl & E1000_CTRL_TFCE) &&
3694 (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
3695 (ctrl & E1000_CTRL_RFCE) ? "RX" :
3696 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
9d5c8243 3697
563988dc 3698 /* check for thermal sensor event */
876d2d6f
JK
3699 if (igb_thermal_sensor_event(hw,
3700 E1000_THSTAT_LINK_THROTTLE)) {
3701 netdev_info(netdev, "The network adapter link "
3702 "speed was downshifted because it "
3703 "overheated\n");
7ef5ed1c 3704 }
563988dc 3705
d07f3e37 3706 /* adjust timeout factor according to speed/duplex */
9d5c8243
AK
3707 adapter->tx_timeout_factor = 1;
3708 switch (adapter->link_speed) {
3709 case SPEED_10:
9d5c8243
AK
3710 adapter->tx_timeout_factor = 14;
3711 break;
3712 case SPEED_100:
9d5c8243
AK
3713 /* maybe add some timeout factor? */
3714 break;
3715 }
3716
3717 netif_carrier_on(netdev);
9d5c8243 3718
4ae196df 3719 igb_ping_all_vfs(adapter);
17dc566c 3720 igb_check_vf_rate_limit(adapter);
4ae196df 3721
4b1a9877 3722 /* link state has changed, schedule phy info update */
9d5c8243
AK
3723 if (!test_bit(__IGB_DOWN, &adapter->state))
3724 mod_timer(&adapter->phy_info_timer,
3725 round_jiffies(jiffies + 2 * HZ));
3726 }
3727 } else {
3728 if (netif_carrier_ok(netdev)) {
3729 adapter->link_speed = 0;
3730 adapter->link_duplex = 0;
563988dc
SA
3731
3732 /* check for thermal sensor event */
876d2d6f
JK
3733 if (igb_thermal_sensor_event(hw,
3734 E1000_THSTAT_PWR_DOWN)) {
3735 netdev_err(netdev, "The network adapter was "
3736 "stopped because it overheated\n");
7ef5ed1c 3737 }
563988dc 3738
527d47c1
AD
3739 /* Link status message must follow this format */
3740 printk(KERN_INFO "igb: %s NIC Link is Down\n",
3741 netdev->name);
9d5c8243 3742 netif_carrier_off(netdev);
4b1a9877 3743
4ae196df
AD
3744 igb_ping_all_vfs(adapter);
3745
4b1a9877 3746 /* link state has changed, schedule phy info update */
9d5c8243
AK
3747 if (!test_bit(__IGB_DOWN, &adapter->state))
3748 mod_timer(&adapter->phy_info_timer,
3749 round_jiffies(jiffies + 2 * HZ));
749ab2cd
YZ
3750
3751 pm_schedule_suspend(netdev->dev.parent,
3752 MSEC_PER_SEC * 5);
9d5c8243
AK
3753 }
3754 }
3755
12dcd86b
ED
3756 spin_lock(&adapter->stats64_lock);
3757 igb_update_stats(adapter, &adapter->stats64);
3758 spin_unlock(&adapter->stats64_lock);
9d5c8243 3759
dbabb065 3760 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 3761 struct igb_ring *tx_ring = adapter->tx_ring[i];
dbabb065 3762 if (!netif_carrier_ok(netdev)) {
9d5c8243
AK
3763 /* We've lost link, so the controller stops DMA,
3764 * but we've got queued Tx work that's never going
3765 * to get done, so reset controller to flush Tx.
3766 * (Do the reset outside of interrupt context). */
dbabb065
AD
3767 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3768 adapter->tx_timeout_count++;
3769 schedule_work(&adapter->reset_task);
3770 /* return immediately since reset is imminent */
3771 return;
3772 }
9d5c8243 3773 }
9d5c8243 3774
dbabb065 3775 /* Force detection of hung controller every watchdog period */
6d095fa8 3776 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
dbabb065 3777 }
f7ba205e 3778
9d5c8243 3779 /* Cause software interrupt to ensure rx ring is cleaned */
7a6ea550 3780 if (adapter->msix_entries) {
047e0030 3781 u32 eics = 0;
0d1ae7f4
AD
3782 for (i = 0; i < adapter->num_q_vectors; i++)
3783 eics |= adapter->q_vector[i]->eims_value;
7a6ea550
AD
3784 wr32(E1000_EICS, eics);
3785 } else {
3786 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3787 }
9d5c8243 3788
13800469
GR
3789 igb_spoof_check(adapter);
3790
9d5c8243
AK
3791 /* Reset the timer */
3792 if (!test_bit(__IGB_DOWN, &adapter->state))
3793 mod_timer(&adapter->watchdog_timer,
3794 round_jiffies(jiffies + 2 * HZ));
3795}
3796
3797enum latency_range {
3798 lowest_latency = 0,
3799 low_latency = 1,
3800 bulk_latency = 2,
3801 latency_invalid = 255
3802};
3803
6eb5a7f1
AD
3804/**
3805 * igb_update_ring_itr - update the dynamic ITR value based on packet size
3806 *
3807 * Stores a new ITR value based strictly on packet size. This
3808 * algorithm is less sophisticated than that used in igb_update_itr,
3809 * due to the difficulty of synchronizing statistics across multiple
eef35c2d 3810 * receive rings. The divisors and thresholds used by this function
6eb5a7f1
AD
3811 * were determined based on theoretical maximum wire speed and testing
3812 * data, in order to minimize response time while increasing bulk
3813 * throughput.
3814 * This functionality is controlled by the InterruptThrottleRate module
3815 * parameter (see igb_param.c)
3816 * NOTE: This function is called only when operating in a multiqueue
3817 * receive environment.
047e0030 3818 * @q_vector: pointer to q_vector
6eb5a7f1 3819 **/
047e0030 3820static void igb_update_ring_itr(struct igb_q_vector *q_vector)
9d5c8243 3821{
047e0030 3822 int new_val = q_vector->itr_val;
6eb5a7f1 3823 int avg_wire_size = 0;
047e0030 3824 struct igb_adapter *adapter = q_vector->adapter;
12dcd86b 3825 unsigned int packets;
9d5c8243 3826
6eb5a7f1
AD
3827 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3828 * ints/sec - ITR timer value of 120 ticks.
3829 */
3830 if (adapter->link_speed != SPEED_1000) {
0ba82994 3831 new_val = IGB_4K_ITR;
6eb5a7f1 3832 goto set_itr_val;
9d5c8243 3833 }
047e0030 3834
0ba82994
AD
3835 packets = q_vector->rx.total_packets;
3836 if (packets)
3837 avg_wire_size = q_vector->rx.total_bytes / packets;
047e0030 3838
0ba82994
AD
3839 packets = q_vector->tx.total_packets;
3840 if (packets)
3841 avg_wire_size = max_t(u32, avg_wire_size,
3842 q_vector->tx.total_bytes / packets);
047e0030
AD
3843
3844 /* if avg_wire_size isn't set no work was done */
3845 if (!avg_wire_size)
3846 goto clear_counts;
9d5c8243 3847
6eb5a7f1
AD
3848 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3849 avg_wire_size += 24;
3850
3851 /* Don't starve jumbo frames */
3852 avg_wire_size = min(avg_wire_size, 3000);
9d5c8243 3853
6eb5a7f1
AD
3854 /* Give a little boost to mid-size frames */
3855 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
3856 new_val = avg_wire_size / 3;
3857 else
3858 new_val = avg_wire_size / 2;
9d5c8243 3859
0ba82994
AD
3860 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3861 if (new_val < IGB_20K_ITR &&
3862 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3863 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3864 new_val = IGB_20K_ITR;
abe1c363 3865
6eb5a7f1 3866set_itr_val:
047e0030
AD
3867 if (new_val != q_vector->itr_val) {
3868 q_vector->itr_val = new_val;
3869 q_vector->set_itr = 1;
9d5c8243 3870 }
6eb5a7f1 3871clear_counts:
0ba82994
AD
3872 q_vector->rx.total_bytes = 0;
3873 q_vector->rx.total_packets = 0;
3874 q_vector->tx.total_bytes = 0;
3875 q_vector->tx.total_packets = 0;
9d5c8243
AK
3876}
3877
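The ring-based heuristic above reduces to a few integer steps: average wire bytes per packet, plus 24 for preamble/CRC/IPG, capped at 3000, then divided by 3 for mid-size frames and by 2 otherwise. A user-space sketch of that arithmetic, simplified to one traffic direction; the 196 floor is only an assumed stand-in for the driver's IGB_20K_ITR constant:

#include <stdio.h>

#define ITR_20K_FLOOR 196	/* assumed stand-in for IGB_20K_ITR */

static int ring_itr_from_traffic(unsigned int bytes, unsigned int packets,
				 int conservative)
{
	unsigned int avg_wire_size;
	int new_val;

	if (!packets)
		return -1;			/* no work done, keep old value */

	avg_wire_size = bytes / packets + 24;	/* CRC, preamble, gap */
	if (avg_wire_size > 3000)
		avg_wire_size = 3000;		/* don't starve jumbo frames */

	if (avg_wire_size > 300 && avg_wire_size < 1200)
		new_val = avg_wire_size / 3;	/* boost mid-size frames */
	else
		new_val = avg_wire_size / 2;

	if (conservative && new_val < ITR_20K_FLOOR)
		new_val = ITR_20K_FLOOR;	/* itr setting 3 floors at ~20K ints/s */

	return new_val;
}

int main(void)
{
	/* 64-byte frames: (64 + 24) / 2 = 44, floored in conservative mode */
	printf("%d\n", ring_itr_from_traffic(6400, 100, 1));
	return 0;
}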
3878/**
3879 * igb_update_itr - update the dynamic ITR value based on statistics
3880 * Stores a new ITR value based on packets and byte
3881 * counts during the last interrupt. The advantage of per interrupt
3882 * computation is faster updates and more accurate ITR for the current
3883 * traffic pattern. Constants in this function were computed
3884 * based on theoretical maximum wire speed and thresholds were set based
3885 * on testing data as well as attempting to minimize response time
3886 * while increasing bulk throughput.
3887 * this functionality is controlled by the InterruptThrottleRate module
3888 * parameter (see igb_param.c)
3889 * NOTE: These calculations are only valid when operating in a single-
3890 * queue environment.
0ba82994
AD
3891 * @q_vector: pointer to q_vector
3892 * @ring_container: ring info to update the itr for
9d5c8243 3893 **/
0ba82994
AD
3894static void igb_update_itr(struct igb_q_vector *q_vector,
3895 struct igb_ring_container *ring_container)
9d5c8243 3896{
0ba82994
AD
3897 unsigned int packets = ring_container->total_packets;
3898 unsigned int bytes = ring_container->total_bytes;
3899 u8 itrval = ring_container->itr;
9d5c8243 3900
0ba82994 3901 /* no packets, exit with status unchanged */
9d5c8243 3902 if (packets == 0)
0ba82994 3903 return;
9d5c8243 3904
0ba82994 3905 switch (itrval) {
9d5c8243
AK
3906 case lowest_latency:
3907 /* handle TSO and jumbo frames */
3908 if (bytes/packets > 8000)
0ba82994 3909 itrval = bulk_latency;
9d5c8243 3910 else if ((packets < 5) && (bytes > 512))
0ba82994 3911 itrval = low_latency;
9d5c8243
AK
3912 break;
3913 case low_latency: /* 50 usec aka 20000 ints/s */
3914 if (bytes > 10000) {
3915 /* this if handles the TSO accounting */
3916 if (bytes/packets > 8000) {
0ba82994 3917 itrval = bulk_latency;
9d5c8243 3918 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
0ba82994 3919 itrval = bulk_latency;
9d5c8243 3920 } else if ((packets > 35)) {
0ba82994 3921 itrval = lowest_latency;
9d5c8243
AK
3922 }
3923 } else if (bytes/packets > 2000) {
0ba82994 3924 itrval = bulk_latency;
9d5c8243 3925 } else if (packets <= 2 && bytes < 512) {
0ba82994 3926 itrval = lowest_latency;
9d5c8243
AK
3927 }
3928 break;
3929 case bulk_latency: /* 250 usec aka 4000 ints/s */
3930 if (bytes > 25000) {
3931 if (packets > 35)
0ba82994 3932 itrval = low_latency;
1e5c3d21 3933 } else if (bytes < 1500) {
0ba82994 3934 itrval = low_latency;
9d5c8243
AK
3935 }
3936 break;
3937 }
3938
0ba82994
AD
3939 /* clear work counters since we have the values we need */
3940 ring_container->total_bytes = 0;
3941 ring_container->total_packets = 0;
3942
3943 /* write updated itr to ring container */
3944 ring_container->itr = itrval;
9d5c8243
AK
3945}
3946
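As a concrete reading of the table above: while in low_latency, heavy byte counts carried by large packets push toward bulk_latency, while many small packets push back toward lowest_latency. A stand-alone sketch of just that branch, with enum names mirroring the ones defined earlier in this file:

#include <stdio.h>

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
};

/* Decision taken while currently in low_latency, following the
 * thresholds in the switch statement above. */
static enum latency_range next_from_low_latency(unsigned int packets,
						unsigned int bytes)
{
	if (packets == 0)
		return low_latency;		/* no traffic, no change */

	if (bytes > 10000) {
		if (bytes / packets > 8000)
			return bulk_latency;	/* TSO / jumbo traffic */
		if (packets < 10 || bytes / packets > 1200)
			return bulk_latency;
		if (packets > 35)
			return lowest_latency;	/* lots of small packets */
	} else if (bytes / packets > 2000) {
		return bulk_latency;
	} else if (packets <= 2 && bytes < 512) {
		return lowest_latency;
	}

	return low_latency;			/* otherwise stay put */
}

int main(void)
{
	/* 40 packets totalling 24000 bytes (600 B/pkt): packets > 35 wins */
	printf("%d\n", next_from_low_latency(40, 24000));	/* 0 == lowest_latency */
	return 0;
}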
0ba82994 3947static void igb_set_itr(struct igb_q_vector *q_vector)
9d5c8243 3948{
0ba82994 3949 struct igb_adapter *adapter = q_vector->adapter;
047e0030 3950 u32 new_itr = q_vector->itr_val;
0ba82994 3951 u8 current_itr = 0;
9d5c8243
AK
3952
3953 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3954 if (adapter->link_speed != SPEED_1000) {
3955 current_itr = 0;
0ba82994 3956 new_itr = IGB_4K_ITR;
9d5c8243
AK
3957 goto set_itr_now;
3958 }
3959
0ba82994
AD
3960 igb_update_itr(q_vector, &q_vector->tx);
3961 igb_update_itr(q_vector, &q_vector->rx);
9d5c8243 3962
0ba82994 3963 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
9d5c8243 3964
6eb5a7f1 3965 /* conservative mode (itr 3) eliminates the lowest_latency setting */
0ba82994
AD
3966 if (current_itr == lowest_latency &&
3967 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3968 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
6eb5a7f1
AD
3969 current_itr = low_latency;
3970
9d5c8243
AK
3971 switch (current_itr) {
3972 /* counts and packets in update_itr are dependent on these numbers */
3973 case lowest_latency:
0ba82994 3974 new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
9d5c8243
AK
3975 break;
3976 case low_latency:
0ba82994 3977 new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
9d5c8243
AK
3978 break;
3979 case bulk_latency:
0ba82994 3980 new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
9d5c8243
AK
3981 break;
3982 default:
3983 break;
3984 }
3985
3986set_itr_now:
047e0030 3987 if (new_itr != q_vector->itr_val) {
9d5c8243
AK
3988 /* this attempts to bias the interrupt rate towards Bulk
3989 * by adding intermediate steps when interrupt rate is
3990 * increasing */
047e0030
AD
3991 new_itr = new_itr > q_vector->itr_val ?
3992 max((new_itr * q_vector->itr_val) /
3993 (new_itr + (q_vector->itr_val >> 2)),
0ba82994 3994 new_itr) :
9d5c8243
AK
3995 new_itr;
3996 /* Don't write the value here; it resets the adapter's
3997 * internal timer, and causes us to delay far longer than
3998 * we should between interrupts. Instead, we write the ITR
3999 * value at the beginning of the next interrupt so the timing
4000 * ends up being correct.
4001 */
047e0030
AD
4002 q_vector->itr_val = new_itr;
4003 q_vector->set_itr = 1;
9d5c8243 4004 }
9d5c8243
AK
4005}
4006
c50b52a0
SH
4007static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
4008 u32 type_tucmd, u32 mss_l4len_idx)
7d13a7d0
AD
4009{
4010 struct e1000_adv_tx_context_desc *context_desc;
4011 u16 i = tx_ring->next_to_use;
4012
4013 context_desc = IGB_TX_CTXTDESC(tx_ring, i);
4014
4015 i++;
4016 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
4017
4018 /* set bits to identify this as an advanced context descriptor */
4019 type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
4020
4021 /* For 82575, context index must be unique per ring. */
866cff06 4022 if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
7d13a7d0
AD
4023 mss_l4len_idx |= tx_ring->reg_idx << 4;
4024
4025 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4026 context_desc->seqnum_seed = 0;
4027 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
4028 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4029}
4030
7af40ad9
AD
4031static int igb_tso(struct igb_ring *tx_ring,
4032 struct igb_tx_buffer *first,
4033 u8 *hdr_len)
9d5c8243 4034{
7af40ad9 4035 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4036 u32 vlan_macip_lens, type_tucmd;
4037 u32 mss_l4len_idx, l4len;
4038
4039 if (!skb_is_gso(skb))
4040 return 0;
9d5c8243
AK
4041
4042 if (skb_header_cloned(skb)) {
7af40ad9 4043 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
9d5c8243
AK
4044 if (err)
4045 return err;
4046 }
4047
7d13a7d0
AD
4048 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4049 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
9d5c8243 4050
7af40ad9 4051 if (first->protocol == __constant_htons(ETH_P_IP)) {
9d5c8243
AK
4052 struct iphdr *iph = ip_hdr(skb);
4053 iph->tot_len = 0;
4054 iph->check = 0;
4055 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4056 iph->daddr, 0,
4057 IPPROTO_TCP,
4058 0);
7d13a7d0 4059 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
7af40ad9
AD
4060 first->tx_flags |= IGB_TX_FLAGS_TSO |
4061 IGB_TX_FLAGS_CSUM |
4062 IGB_TX_FLAGS_IPV4;
8e1e8a47 4063 } else if (skb_is_gso_v6(skb)) {
9d5c8243
AK
4064 ipv6_hdr(skb)->payload_len = 0;
4065 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4066 &ipv6_hdr(skb)->daddr,
4067 0, IPPROTO_TCP, 0);
7af40ad9
AD
4068 first->tx_flags |= IGB_TX_FLAGS_TSO |
4069 IGB_TX_FLAGS_CSUM;
9d5c8243
AK
4070 }
4071
7af40ad9 4072 /* compute header lengths */
7d13a7d0
AD
4073 l4len = tcp_hdrlen(skb);
4074 *hdr_len = skb_transport_offset(skb) + l4len;
9d5c8243 4075
7af40ad9
AD
4076 /* update gso size and bytecount with header size */
4077 first->gso_segs = skb_shinfo(skb)->gso_segs;
4078 first->bytecount += (first->gso_segs - 1) * *hdr_len;
4079
9d5c8243 4080 /* MSS L4LEN IDX */
7d13a7d0
AD
4081 mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
4082 mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
9d5c8243 4083
7d13a7d0
AD
4084 /* VLAN MACLEN IPLEN */
4085 vlan_macip_lens = skb_network_header_len(skb);
4086 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4087 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4088
7d13a7d0 4089 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243 4090
7d13a7d0 4091 return 1;
9d5c8243
AK
4092}
4093
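The bytecount adjustment in igb_tso() charges the wire for every replicated header: with gso_segs segments, (gso_segs - 1) extra copies of hdr_len leave the NIC beyond skb->len. A hedged arithmetic sketch; the 66-byte header and 1448-byte MSS are illustrative values, not taken from the driver:

#include <stdio.h>

int main(void)
{
	unsigned int hdr_len = 66;	/* Ethernet + IPv4 + TCP w/ timestamps */
	unsigned int mss = 1448;
	unsigned int payload = 7000;
	unsigned int skb_len = hdr_len + payload;
	unsigned int gso_segs = (payload + mss - 1) / mss;		/* 5 */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;	/* 7330 */

	/* 5 segments, each re-carrying the 66-byte header:
	 * 7000 + 5 * 66 = 7330 bytes on the wire. */
	printf("segs=%u bytes_on_wire=%u\n", gso_segs, bytecount);
	return 0;
}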
7af40ad9 4094static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
9d5c8243 4095{
7af40ad9 4096 struct sk_buff *skb = first->skb;
7d13a7d0
AD
4097 u32 vlan_macip_lens = 0;
4098 u32 mss_l4len_idx = 0;
4099 u32 type_tucmd = 0;
9d5c8243 4100
7d13a7d0 4101 if (skb->ip_summed != CHECKSUM_PARTIAL) {
7af40ad9
AD
4102 if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
4103 return;
7d13a7d0
AD
4104 } else {
4105 u8 l4_hdr = 0;
7af40ad9 4106 switch (first->protocol) {
7d13a7d0
AD
4107 case __constant_htons(ETH_P_IP):
4108 vlan_macip_lens |= skb_network_header_len(skb);
4109 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4110 l4_hdr = ip_hdr(skb)->protocol;
4111 break;
4112 case __constant_htons(ETH_P_IPV6):
4113 vlan_macip_lens |= skb_network_header_len(skb);
4114 l4_hdr = ipv6_hdr(skb)->nexthdr;
4115 break;
4116 default:
4117 if (unlikely(net_ratelimit())) {
4118 dev_warn(tx_ring->dev,
4119 "partial checksum but proto=%x!\n",
7af40ad9 4120 first->protocol);
fa4a7ef3 4121 }
7d13a7d0
AD
4122 break;
4123 }
fa4a7ef3 4124
7d13a7d0
AD
4125 switch (l4_hdr) {
4126 case IPPROTO_TCP:
4127 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
4128 mss_l4len_idx = tcp_hdrlen(skb) <<
4129 E1000_ADVTXD_L4LEN_SHIFT;
4130 break;
4131 case IPPROTO_SCTP:
4132 type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP;
4133 mss_l4len_idx = sizeof(struct sctphdr) <<
4134 E1000_ADVTXD_L4LEN_SHIFT;
4135 break;
4136 case IPPROTO_UDP:
4137 mss_l4len_idx = sizeof(struct udphdr) <<
4138 E1000_ADVTXD_L4LEN_SHIFT;
4139 break;
4140 default:
4141 if (unlikely(net_ratelimit())) {
4142 dev_warn(tx_ring->dev,
4143 "partial checksum but l4 proto=%x!\n",
4144 l4_hdr);
44b0cda3 4145 }
7d13a7d0 4146 break;
9d5c8243 4147 }
7af40ad9
AD
4148
4149 /* update TX checksum flag */
4150 first->tx_flags |= IGB_TX_FLAGS_CSUM;
7d13a7d0 4151 }
9d5c8243 4152
7d13a7d0 4153 vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
7af40ad9 4154 vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
9d5c8243 4155
7d13a7d0 4156 igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
9d5c8243
AK
4157}
4158
e032afc8
AD
4159static __le32 igb_tx_cmd_type(u32 tx_flags)
4160{
4161 /* set type for advanced descriptor with frame checksum insertion */
4162 __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
4163 E1000_ADVTXD_DCMD_IFCS |
4164 E1000_ADVTXD_DCMD_DEXT);
4165
4166 /* set HW vlan bit if vlan is present */
4167 if (tx_flags & IGB_TX_FLAGS_VLAN)
4168 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
4169
3c89f6d0 4170#ifdef CONFIG_IGB_PTP
e032afc8 4171 /* set timestamp bit if present */
1f6e8178 4172 if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
e032afc8 4173 cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
3c89f6d0 4174#endif /* CONFIG_IGB_PTP */
e032afc8
AD
4175
4176 /* set segmentation bits for TSO */
4177 if (tx_flags & IGB_TX_FLAGS_TSO)
4178 cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
4179
4180 return cmd_type;
4181}
4182
7af40ad9
AD
4183static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
4184 union e1000_adv_tx_desc *tx_desc,
4185 u32 tx_flags, unsigned int paylen)
e032afc8
AD
4186{
4187 u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
4188
4189 /* 82575 requires a unique index per ring if any offload is enabled */
4190 if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
866cff06 4191 test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
e032afc8
AD
4192 olinfo_status |= tx_ring->reg_idx << 4;
4193
4194 /* insert L4 checksum */
4195 if (tx_flags & IGB_TX_FLAGS_CSUM) {
4196 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
4197
4198 /* insert IPv4 checksum */
4199 if (tx_flags & IGB_TX_FLAGS_IPV4)
4200 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
4201 }
4202
7af40ad9 4203 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
e032afc8
AD
4204}
4205
ebe42d16
AD
4206/*
4207 * The largest size we can write to the descriptor is 65535. In order to
4208 * maintain a power of two alignment we have to limit ourselves to 32K.
4209 */
4210#define IGB_MAX_TXD_PWR 15
7af40ad9 4211#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
9d5c8243 4212
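With IGB_MAX_TXD_PWR = 15 each data descriptor carries at most 32768 bytes, so igb_tx_map() below peels 32K slices off any mapped buffer larger than that. A quick sketch of how many descriptors one buffer consumes under this limit:

#include <stdio.h>

#define IGB_MAX_TXD_PWR		15
#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)	/* 32768 bytes */

/* Descriptors needed for a single DMA-mapped buffer of 'size' bytes. */
static unsigned int txd_needed(unsigned int size)
{
	return (size + IGB_MAX_DATA_PER_TXD - 1) / IGB_MAX_DATA_PER_TXD;
}

int main(void)
{
	printf("%u\n", txd_needed(1514));	/* 1: fits in one descriptor */
	printf("%u\n", txd_needed(45000));	/* 2: 32768 + 12232 */
	return 0;
}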
7af40ad9
AD
4213static void igb_tx_map(struct igb_ring *tx_ring,
4214 struct igb_tx_buffer *first,
ebe42d16 4215 const u8 hdr_len)
9d5c8243 4216{
7af40ad9 4217 struct sk_buff *skb = first->skb;
ebe42d16
AD
4218 struct igb_tx_buffer *tx_buffer_info;
4219 union e1000_adv_tx_desc *tx_desc;
4220 dma_addr_t dma;
4221 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
4222 unsigned int data_len = skb->data_len;
4223 unsigned int size = skb_headlen(skb);
4224 unsigned int paylen = skb->len - hdr_len;
4225 __le32 cmd_type;
7af40ad9 4226 u32 tx_flags = first->tx_flags;
ebe42d16 4227 u16 i = tx_ring->next_to_use;
ebe42d16
AD
4228
4229 tx_desc = IGB_TX_DESC(tx_ring, i);
4230
7af40ad9 4231 igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
ebe42d16
AD
4232 cmd_type = igb_tx_cmd_type(tx_flags);
4233
4234 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
4235 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33 4236 goto dma_error;
9d5c8243 4237
ebe42d16
AD
4238 /* record length, and DMA address */
4239 first->length = size;
4240 first->dma = dma;
ebe42d16
AD
4241 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4242
4243 for (;;) {
4244 while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
4245 tx_desc->read.cmd_type_len =
4246 cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
4247
4248 i++;
4249 tx_desc++;
4250 if (i == tx_ring->count) {
4251 tx_desc = IGB_TX_DESC(tx_ring, 0);
4252 i = 0;
4253 }
4254
4255 dma += IGB_MAX_DATA_PER_TXD;
4256 size -= IGB_MAX_DATA_PER_TXD;
4257
4258 tx_desc->read.olinfo_status = 0;
4259 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4260 }
4261
4262 if (likely(!data_len))
4263 break;
2bbfebe2 4264
ebe42d16 4265 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
9d5c8243 4266
65689fef 4267 i++;
ebe42d16
AD
4268 tx_desc++;
4269 if (i == tx_ring->count) {
4270 tx_desc = IGB_TX_DESC(tx_ring, 0);
65689fef 4271 i = 0;
ebe42d16 4272 }
65689fef 4273
9e903e08 4274 size = skb_frag_size(frag);
ebe42d16
AD
4275 data_len -= size;
4276
4277 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
4278 size, DMA_TO_DEVICE);
4279 if (dma_mapping_error(tx_ring->dev, dma))
6366ad33
AD
4280 goto dma_error;
4281
ebe42d16
AD
4282 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4283 tx_buffer_info->length = size;
4284 tx_buffer_info->dma = dma;
4285
4286 tx_desc->read.olinfo_status = 0;
4287 tx_desc->read.buffer_addr = cpu_to_le64(dma);
4288
4289 frag++;
9d5c8243
AK
4290 }
4291
bdbc0631
ED
4292 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
4293
ebe42d16
AD
4294 /* write last descriptor with RS and EOP bits */
4295 cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
6b8f0922
BG
4296 if (unlikely(skb->no_fcs))
4297 cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
ebe42d16 4298 tx_desc->read.cmd_type_len = cmd_type;
8542db05
AD
4299
4300 /* set the timestamp */
4301 first->time_stamp = jiffies;
4302
ebe42d16
AD
4303 /*
4304 * Force memory writes to complete before letting h/w know there
4305 * are new descriptors to fetch. (Only applicable for weak-ordered
4306 * memory model archs, such as IA-64).
4307 *
4308 * We also need this memory barrier to make certain all of the
4309 * status bits have been updated before next_to_watch is written.
4310 */
4311 wmb();
4312
8542db05 4313 /* set next_to_watch value indicating a packet is present */
ebe42d16 4314 first->next_to_watch = tx_desc;
9d5c8243 4315
ebe42d16
AD
4316 i++;
4317 if (i == tx_ring->count)
4318 i = 0;
6366ad33 4319
ebe42d16 4320 tx_ring->next_to_use = i;
6366ad33 4321
ebe42d16 4322 writel(i, tx_ring->tail);
6366ad33 4323
ebe42d16
AD
4324 /* we need this if more than one processor can write to our tail
4325 * at a time; it synchronizes IO on IA64/Altix systems */
4326 mmiowb();
4327
4328 return;
4329
4330dma_error:
4331 dev_err(tx_ring->dev, "TX DMA map failed\n");
4332
4333 /* clear dma mappings for failed tx_buffer_info map */
4334 for (;;) {
4335 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4336 igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
4337 if (tx_buffer_info == first)
4338 break;
a77ff709
NN
4339 if (i == 0)
4340 i = tx_ring->count;
6366ad33 4341 i--;
6366ad33
AD
4342 }
4343
9d5c8243 4344 tx_ring->next_to_use = i;
9d5c8243
AK
4345}
4346
6ad4edfc 4347static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4348{
e694e964
AD
4349 struct net_device *netdev = tx_ring->netdev;
4350
661086df 4351 netif_stop_subqueue(netdev, tx_ring->queue_index);
661086df 4352
9d5c8243
AK
4353 /* Herbert's original patch had:
4354 * smp_mb__after_netif_stop_queue();
4355 * but since that doesn't exist yet, just open code it. */
4356 smp_mb();
4357
4358 /* We need to check again in a case another CPU has just
4359 * made room available. */
c493ea45 4360 if (igb_desc_unused(tx_ring) < size)
9d5c8243
AK
4361 return -EBUSY;
4362
4363 /* A reprieve! */
661086df 4364 netif_wake_subqueue(netdev, tx_ring->queue_index);
12dcd86b
ED
4365
4366 u64_stats_update_begin(&tx_ring->tx_syncp2);
4367 tx_ring->tx_stats.restart_queue2++;
4368 u64_stats_update_end(&tx_ring->tx_syncp2);
4369
9d5c8243
AK
4370 return 0;
4371}
4372
6ad4edfc 4373static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
9d5c8243 4374{
c493ea45 4375 if (igb_desc_unused(tx_ring) >= size)
9d5c8243 4376 return 0;
e694e964 4377 return __igb_maybe_stop_tx(tx_ring, size);
9d5c8243
AK
4378}
4379
cd392f5c
AD
4380netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
4381 struct igb_ring *tx_ring)
9d5c8243 4382{
1f6e8178
MV
4383#ifdef CONFIG_IGB_PTP
4384 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
4385#endif /* CONFIG_IGB_PTP */
8542db05 4386 struct igb_tx_buffer *first;
ebe42d16 4387 int tso;
91d4ee33 4388 u32 tx_flags = 0;
31f6adbb 4389 __be16 protocol = vlan_get_protocol(skb);
91d4ee33 4390 u8 hdr_len = 0;
9d5c8243 4391
9d5c8243
AK
4392 /* need: 1 descriptor per page,
4393 * + 2 desc gap to keep tail from touching head,
4394 * + 1 desc for skb->data,
4395 * + 1 desc for context descriptor,
4396 * otherwise try next time */
e694e964 4397 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
9d5c8243 4398 /* this is a hard error */
9d5c8243
AK
4399 return NETDEV_TX_BUSY;
4400 }
33af6bcc 4401
7af40ad9
AD
4402 /* record the location of the first descriptor for this packet */
4403 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
4404 first->skb = skb;
4405 first->bytecount = skb->len;
4406 first->gso_segs = 1;
4407
3c89f6d0 4408#ifdef CONFIG_IGB_PTP
1f6e8178
MV
4409 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4410 !(adapter->ptp_tx_skb))) {
2244d07b 4411 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
33af6bcc 4412 tx_flags |= IGB_TX_FLAGS_TSTAMP;
1f6e8178
MV
4413
4414 adapter->ptp_tx_skb = skb_get(skb);
4415 if (adapter->hw.mac.type == e1000_82576)
4416 schedule_work(&adapter->ptp_tx_work);
33af6bcc 4417 }
3c89f6d0 4418#endif /* CONFIG_IGB_PTP */
9d5c8243 4419
eab6d18d 4420 if (vlan_tx_tag_present(skb)) {
9d5c8243
AK
4421 tx_flags |= IGB_TX_FLAGS_VLAN;
4422 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
4423 }
4424
7af40ad9
AD
4425 /* record initial flags and protocol */
4426 first->tx_flags = tx_flags;
4427 first->protocol = protocol;
cdfd01fc 4428
7af40ad9
AD
4429 tso = igb_tso(tx_ring, first, &hdr_len);
4430 if (tso < 0)
7d13a7d0 4431 goto out_drop;
7af40ad9
AD
4432 else if (!tso)
4433 igb_tx_csum(tx_ring, first);
9d5c8243 4434
7af40ad9 4435 igb_tx_map(tx_ring, first, hdr_len);
85ad76b2
AD
4436
4437 /* Make sure there is space in the ring for the next send. */
e694e964 4438 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
85ad76b2 4439
9d5c8243 4440 return NETDEV_TX_OK;
7d13a7d0
AD
4441
4442out_drop:
7af40ad9
AD
4443 igb_unmap_and_free_tx_resource(tx_ring, first);
4444
7d13a7d0 4445 return NETDEV_TX_OK;
9d5c8243
AK
4446}
4447
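The early igb_maybe_stop_tx() call in igb_xmit_frame_ring() reserves the worst case spelled out in its comment: one descriptor per fragment, one for skb->data, one context descriptor, and a two-descriptor gap so the tail never touches the head. A small sketch of that count, ignoring any extra 32K splits:

#include <stdio.h>

/* Worst-case descriptors reserved before queueing a frame,
 * matching the "nr_frags + 4" check above. */
static unsigned int txd_reservation(unsigned int nr_frags)
{
	return nr_frags		/* one per page fragment */
	       + 1		/* skb->data (linear part) */
	       + 1		/* context descriptor */
	       + 2;		/* gap between tail and head */
}

int main(void)
{
	printf("%u\n", txd_reservation(3));	/* 7 descriptors reserved */
	return 0;
}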
1cc3bd87
AD
4448static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
4449 struct sk_buff *skb)
4450{
4451 unsigned int r_idx = skb->queue_mapping;
4452
4453 if (r_idx >= adapter->num_tx_queues)
4454 r_idx = r_idx % adapter->num_tx_queues;
4455
4456 return adapter->tx_ring[r_idx];
4457}
4458
cd392f5c
AD
4459static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4460 struct net_device *netdev)
9d5c8243
AK
4461{
4462 struct igb_adapter *adapter = netdev_priv(netdev);
b1a436c3
AD
4463
4464 if (test_bit(__IGB_DOWN, &adapter->state)) {
4465 dev_kfree_skb_any(skb);
4466 return NETDEV_TX_OK;
4467 }
4468
4469 if (skb->len <= 0) {
4470 dev_kfree_skb_any(skb);
4471 return NETDEV_TX_OK;
4472 }
4473
1cc3bd87
AD
4474 /*
4475 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4476 * in order to meet this minimum size requirement.
4477 */
4478 if (skb->len < 17) {
4479 if (skb_padto(skb, 17))
4480 return NETDEV_TX_OK;
4481 skb->len = 17;
4482 }
9d5c8243 4483
1cc3bd87 4484 return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
9d5c8243
AK
4485}
4486
4487/**
4488 * igb_tx_timeout - Respond to a Tx Hang
4489 * @netdev: network interface device structure
4490 **/
4491static void igb_tx_timeout(struct net_device *netdev)
4492{
4493 struct igb_adapter *adapter = netdev_priv(netdev);
4494 struct e1000_hw *hw = &adapter->hw;
4495
4496 /* Do the reset outside of interrupt context */
4497 adapter->tx_timeout_count++;
f7ba205e 4498
06218a8d 4499 if (hw->mac.type >= e1000_82580)
55cac248
AD
4500 hw->dev_spec._82575.global_device_reset = true;
4501
9d5c8243 4502 schedule_work(&adapter->reset_task);
265de409
AD
4503 wr32(E1000_EICS,
4504 (adapter->eims_enable_mask & ~adapter->eims_other));
9d5c8243
AK
4505}
4506
4507static void igb_reset_task(struct work_struct *work)
4508{
4509 struct igb_adapter *adapter;
4510 adapter = container_of(work, struct igb_adapter, reset_task);
4511
c97ec42a
TI
4512 igb_dump(adapter);
4513 netdev_err(adapter->netdev, "Reset adapter\n");
9d5c8243
AK
4514 igb_reinit_locked(adapter);
4515}
4516
4517/**
12dcd86b 4518 * igb_get_stats64 - Get System Network Statistics
9d5c8243 4519 * @netdev: network interface device structure
12dcd86b 4520 * @stats: rtnl_link_stats64 pointer
9d5c8243 4521 *
9d5c8243 4522 **/
12dcd86b
ED
4523static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4524 struct rtnl_link_stats64 *stats)
9d5c8243 4525{
12dcd86b
ED
4526 struct igb_adapter *adapter = netdev_priv(netdev);
4527
4528 spin_lock(&adapter->stats64_lock);
4529 igb_update_stats(adapter, &adapter->stats64);
4530 memcpy(stats, &adapter->stats64, sizeof(*stats));
4531 spin_unlock(&adapter->stats64_lock);
4532
4533 return stats;
9d5c8243
AK
4534}
4535
4536/**
4537 * igb_change_mtu - Change the Maximum Transfer Unit
4538 * @netdev: network interface device structure
4539 * @new_mtu: new value for maximum frame size
4540 *
4541 * Returns 0 on success, negative on failure
4542 **/
4543static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4544{
4545 struct igb_adapter *adapter = netdev_priv(netdev);
090b1795 4546 struct pci_dev *pdev = adapter->pdev;
153285f9 4547 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
9d5c8243 4548
c809d227 4549 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
090b1795 4550 dev_err(&pdev->dev, "Invalid MTU setting\n");
9d5c8243
AK
4551 return -EINVAL;
4552 }
4553
153285f9 4554#define MAX_STD_JUMBO_FRAME_SIZE 9238
9d5c8243 4555 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
090b1795 4556 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
9d5c8243
AK
4557 return -EINVAL;
4558 }
4559
4560 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
4561 msleep(1);
73cd78f1 4562
9d5c8243
AK
4563 /* igb_down has a dependency on max_frame_size */
4564 adapter->max_frame_size = max_frame;
559e9c49 4565
4c844851
AD
4566 if (netif_running(netdev))
4567 igb_down(adapter);
9d5c8243 4568
090b1795 4569 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
9d5c8243
AK
4570 netdev->mtu, new_mtu);
4571 netdev->mtu = new_mtu;
4572
4573 if (netif_running(netdev))
4574 igb_up(adapter);
4575 else
4576 igb_reset(adapter);
4577
4578 clear_bit(__IGB_RESETTING, &adapter->state);
4579
4580 return 0;
4581}
4582
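The MTU check above works in terms of the full frame: max_frame = new_mtu + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4), which is why a 9216-byte MTU lands exactly on MAX_STD_JUMBO_FRAME_SIZE (9238). A short sketch of that arithmetic using the standard header sizes:

#include <stdio.h>

#define ETH_HLEN			14
#define ETH_FCS_LEN			4
#define VLAN_HLEN			4
#define MAX_STD_JUMBO_FRAME_SIZE	9238

static int mtu_to_max_frame(int new_mtu)
{
	return new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

int main(void)
{
	printf("%d\n", mtu_to_max_frame(1500));	/* 1522: standard frame */
	printf("%d\n", mtu_to_max_frame(9216));	/* 9238: largest supported */
	return 0;
}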
4583/**
4584 * igb_update_stats - Update the board statistics counters
4585 * @adapter: board private structure
4586 **/
4587
12dcd86b
ED
4588void igb_update_stats(struct igb_adapter *adapter,
4589 struct rtnl_link_stats64 *net_stats)
9d5c8243
AK
4590{
4591 struct e1000_hw *hw = &adapter->hw;
4592 struct pci_dev *pdev = adapter->pdev;
fa3d9a6d 4593 u32 reg, mpc;
9d5c8243 4594 u16 phy_tmp;
3f9c0164
AD
4595 int i;
4596 u64 bytes, packets;
12dcd86b
ED
4597 unsigned int start;
4598 u64 _bytes, _packets;
9d5c8243
AK
4599
4600#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4601
4602 /*
4603 * Prevent stats update while adapter is being reset, or if the pci
4604 * connection is down.
4605 */
4606 if (adapter->link_speed == 0)
4607 return;
4608 if (pci_channel_offline(pdev))
4609 return;
4610
3f9c0164
AD
4611 bytes = 0;
4612 packets = 0;
4613 for (i = 0; i < adapter->num_rx_queues; i++) {
ae1c07a6 4614 u32 rqdpc = rd32(E1000_RQDPC(i));
3025a446 4615 struct igb_ring *ring = adapter->rx_ring[i];
12dcd86b 4616
ae1c07a6
AD
4617 if (rqdpc) {
4618 ring->rx_stats.drops += rqdpc;
4619 net_stats->rx_fifo_errors += rqdpc;
4620 }
12dcd86b
ED
4621
4622 do {
4623 start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
4624 _bytes = ring->rx_stats.bytes;
4625 _packets = ring->rx_stats.packets;
4626 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
4627 bytes += _bytes;
4628 packets += _packets;
3f9c0164
AD
4629 }
4630
128e45eb
AD
4631 net_stats->rx_bytes = bytes;
4632 net_stats->rx_packets = packets;
3f9c0164
AD
4633
4634 bytes = 0;
4635 packets = 0;
4636 for (i = 0; i < adapter->num_tx_queues; i++) {
3025a446 4637 struct igb_ring *ring = adapter->tx_ring[i];
12dcd86b
ED
4638 do {
4639 start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
4640 _bytes = ring->tx_stats.bytes;
4641 _packets = ring->tx_stats.packets;
4642 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
4643 bytes += _bytes;
4644 packets += _packets;
3f9c0164 4645 }
128e45eb
AD
4646 net_stats->tx_bytes = bytes;
4647 net_stats->tx_packets = packets;
3f9c0164
AD
4648
4649 /* read stats registers */
9d5c8243
AK
4650 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
4651 adapter->stats.gprc += rd32(E1000_GPRC);
4652 adapter->stats.gorc += rd32(E1000_GORCL);
4653 rd32(E1000_GORCH); /* clear GORCL */
4654 adapter->stats.bprc += rd32(E1000_BPRC);
4655 adapter->stats.mprc += rd32(E1000_MPRC);
4656 adapter->stats.roc += rd32(E1000_ROC);
4657
4658 adapter->stats.prc64 += rd32(E1000_PRC64);
4659 adapter->stats.prc127 += rd32(E1000_PRC127);
4660 adapter->stats.prc255 += rd32(E1000_PRC255);
4661 adapter->stats.prc511 += rd32(E1000_PRC511);
4662 adapter->stats.prc1023 += rd32(E1000_PRC1023);
4663 adapter->stats.prc1522 += rd32(E1000_PRC1522);
4664 adapter->stats.symerrs += rd32(E1000_SYMERRS);
4665 adapter->stats.sec += rd32(E1000_SEC);
4666
fa3d9a6d
MW
4667 mpc = rd32(E1000_MPC);
4668 adapter->stats.mpc += mpc;
4669 net_stats->rx_fifo_errors += mpc;
9d5c8243
AK
4670 adapter->stats.scc += rd32(E1000_SCC);
4671 adapter->stats.ecol += rd32(E1000_ECOL);
4672 adapter->stats.mcc += rd32(E1000_MCC);
4673 adapter->stats.latecol += rd32(E1000_LATECOL);
4674 adapter->stats.dc += rd32(E1000_DC);
4675 adapter->stats.rlec += rd32(E1000_RLEC);
4676 adapter->stats.xonrxc += rd32(E1000_XONRXC);
4677 adapter->stats.xontxc += rd32(E1000_XONTXC);
4678 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
4679 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
4680 adapter->stats.fcruc += rd32(E1000_FCRUC);
4681 adapter->stats.gptc += rd32(E1000_GPTC);
4682 adapter->stats.gotc += rd32(E1000_GOTCL);
4683 rd32(E1000_GOTCH); /* clear GOTCL */
fa3d9a6d 4684 adapter->stats.rnbc += rd32(E1000_RNBC);
9d5c8243
AK
4685 adapter->stats.ruc += rd32(E1000_RUC);
4686 adapter->stats.rfc += rd32(E1000_RFC);
4687 adapter->stats.rjc += rd32(E1000_RJC);
4688 adapter->stats.tor += rd32(E1000_TORH);
4689 adapter->stats.tot += rd32(E1000_TOTH);
4690 adapter->stats.tpr += rd32(E1000_TPR);
4691
4692 adapter->stats.ptc64 += rd32(E1000_PTC64);
4693 adapter->stats.ptc127 += rd32(E1000_PTC127);
4694 adapter->stats.ptc255 += rd32(E1000_PTC255);
4695 adapter->stats.ptc511 += rd32(E1000_PTC511);
4696 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
4697 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
4698
4699 adapter->stats.mptc += rd32(E1000_MPTC);
4700 adapter->stats.bptc += rd32(E1000_BPTC);
4701
2d0b0f69
NN
4702 adapter->stats.tpt += rd32(E1000_TPT);
4703 adapter->stats.colc += rd32(E1000_COLC);
9d5c8243
AK
4704
4705 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
43915c7c
NN
4706 /* read internal phy specific stats */
4707 reg = rd32(E1000_CTRL_EXT);
4708 if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
4709 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3dbdf969
CW
4710
4711 /* this stat has invalid values on i210/i211 */
4712 if ((hw->mac.type != e1000_i210) &&
4713 (hw->mac.type != e1000_i211))
4714 adapter->stats.tncrs += rd32(E1000_TNCRS);
43915c7c
NN
4715 }
4716
9d5c8243
AK
4717 adapter->stats.tsctc += rd32(E1000_TSCTC);
4718 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
4719
4720 adapter->stats.iac += rd32(E1000_IAC);
4721 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
4722 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
4723 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
4724 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
4725 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
4726 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
4727 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
4728 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
4729
4730 /* Fill out the OS statistics structure */
128e45eb
AD
4731 net_stats->multicast = adapter->stats.mprc;
4732 net_stats->collisions = adapter->stats.colc;
9d5c8243
AK
4733
4734 /* Rx Errors */
4735
4736 /* RLEC on some newer hardware can be incorrect so build
8c0ab70a 4737 * our own version based on RUC and ROC */
128e45eb 4738 net_stats->rx_errors = adapter->stats.rxerrc +
9d5c8243
AK
4739 adapter->stats.crcerrs + adapter->stats.algnerrc +
4740 adapter->stats.ruc + adapter->stats.roc +
4741 adapter->stats.cexterr;
128e45eb
AD
4742 net_stats->rx_length_errors = adapter->stats.ruc +
4743 adapter->stats.roc;
4744 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4745 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4746 net_stats->rx_missed_errors = adapter->stats.mpc;
9d5c8243
AK
4747
4748 /* Tx Errors */
128e45eb
AD
4749 net_stats->tx_errors = adapter->stats.ecol +
4750 adapter->stats.latecol;
4751 net_stats->tx_aborted_errors = adapter->stats.ecol;
4752 net_stats->tx_window_errors = adapter->stats.latecol;
4753 net_stats->tx_carrier_errors = adapter->stats.tncrs;
9d5c8243
AK
4754
4755 /* Tx Dropped needs to be maintained elsewhere */
4756
4757 /* Phy Stats */
4758 if (hw->phy.media_type == e1000_media_type_copper) {
4759 if ((adapter->link_speed == SPEED_1000) &&
73cd78f1 4760 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
9d5c8243
AK
4761 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
4762 adapter->phy_stats.idle_errors += phy_tmp;
4763 }
4764 }
4765
4766 /* Management Stats */
4767 adapter->stats.mgptc += rd32(E1000_MGTPTC);
4768 adapter->stats.mgprc += rd32(E1000_MGTPRC);
4769 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
0a915b95
CW
4770
4771 /* OS2BMC Stats */
4772 reg = rd32(E1000_MANC);
4773 if (reg & E1000_MANC_EN_BMC2OS) {
4774 adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
4775 adapter->stats.o2bspc += rd32(E1000_O2BSPC);
4776 adapter->stats.b2ospc += rd32(E1000_B2OSPC);
4777 adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
4778 }
9d5c8243
AK
4779}
4780
9d5c8243
AK
4781static irqreturn_t igb_msix_other(int irq, void *data)
4782{
047e0030 4783 struct igb_adapter *adapter = data;
9d5c8243 4784 struct e1000_hw *hw = &adapter->hw;
844290e5 4785 u32 icr = rd32(E1000_ICR);
844290e5 4786 /* reading ICR causes bit 31 of EICR to be cleared */
dda0e083 4787
7f081d40
AD
4788 if (icr & E1000_ICR_DRSTA)
4789 schedule_work(&adapter->reset_task);
4790
047e0030 4791 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
4792 /* HW is reporting DMA is out of sync */
4793 adapter->stats.doosync++;
13800469
GR
4794 /* The DMA Out of Sync is also indication of a spoof event
4795 * in IOV mode. Check the Wrong VM Behavior register to
4796 * see if it is really a spoof event. */
4797 igb_check_wvbr(adapter);
dda0e083 4798 }
eebbbdba 4799
4ae196df
AD
4800 /* Check for a mailbox event */
4801 if (icr & E1000_ICR_VMMB)
4802 igb_msg_task(adapter);
4803
4804 if (icr & E1000_ICR_LSC) {
4805 hw->mac.get_link_status = 1;
4806 /* guard against interrupt when we're going down */
4807 if (!test_bit(__IGB_DOWN, &adapter->state))
4808 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4809 }
4810
1f6e8178
MV
4811#ifdef CONFIG_IGB_PTP
4812 if (icr & E1000_ICR_TS) {
4813 u32 tsicr = rd32(E1000_TSICR);
4814
4815 if (tsicr & E1000_TSICR_TXTS) {
4816 /* acknowledge the interrupt */
4817 wr32(E1000_TSICR, E1000_TSICR_TXTS);
4818 /* retrieve hardware timestamp */
4819 schedule_work(&adapter->ptp_tx_work);
4820 }
4821 }
4822#endif /* CONFIG_IGB_PTP */
4823
844290e5 4824 wr32(E1000_EIMS, adapter->eims_other);
9d5c8243
AK
4825
4826 return IRQ_HANDLED;
4827}
4828
047e0030 4829static void igb_write_itr(struct igb_q_vector *q_vector)
9d5c8243 4830{
26b39276 4831 struct igb_adapter *adapter = q_vector->adapter;
047e0030 4832 u32 itr_val = q_vector->itr_val & 0x7FFC;
9d5c8243 4833
047e0030
AD
4834 if (!q_vector->set_itr)
4835 return;
73cd78f1 4836
047e0030
AD
4837 if (!itr_val)
4838 itr_val = 0x4;
661086df 4839
26b39276
AD
4840 if (adapter->hw.mac.type == e1000_82575)
4841 itr_val |= itr_val << 16;
661086df 4842 else
0ba82994 4843 itr_val |= E1000_EITR_CNT_IGNR;
661086df 4844
047e0030
AD
4845 writel(itr_val, q_vector->itr_register);
4846 q_vector->set_itr = 0;
6eb5a7f1
AD
4847}
4848
047e0030 4849static irqreturn_t igb_msix_ring(int irq, void *data)
9d5c8243 4850{
047e0030 4851 struct igb_q_vector *q_vector = data;
9d5c8243 4852
047e0030
AD
4853 /* Write the ITR value calculated from the previous interrupt. */
4854 igb_write_itr(q_vector);
9d5c8243 4855
047e0030 4856 napi_schedule(&q_vector->napi);
844290e5 4857
	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring) {
		int q = q_vector->tx.ring->reg_idx;
		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
			dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_TXCTRL_CPUID_SHIFT;
		}
		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
	}
	if (q_vector->rx.ring) {
		int q = q_vector->rx.ring->reg_idx;
		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
		if (hw->mac.type == e1000_82575) {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
		} else {
			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
			dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
				      E1000_DCA_RXCTRL_CPUID_SHIFT;
		}
		dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
		dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
		wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
	}
	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */
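
/*
 * igb_update_dca() above is a read-modify-write of a per-queue control
 * register: clear the CPUID field, insert the DCA tag for the current CPU,
 * and keep the enable bits set.  The sketch below shows only that generic
 * field-update pattern; 'mask', 'shift' and 'enable' are placeholders for
 * the E1000_DCA_*CTRL_* constants, it is not driver code.
 */
#if 0
static u32 sketch_update_dca_field(u32 reg, u32 tag, u32 mask, u32 shift,
				   u32 enable)
{
	reg &= ~mask;			/* drop the previous CPU's tag */
	reg |= (tag << shift) & mask;	/* insert the tag for this CPU */
	reg |= enable;			/* descriptor DCA stays enabled */
	return reg;
}
#endif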

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_random_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	return 0;
}

static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct pci_dev *vfdev;
	int dev_id;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		dev_id = IGB_82576_VF_DEV_ID;
		break;
	case e1000_i350:
		dev_id = IGB_I350_VF_DEV_ID;
		break;
	default:
		return false;
	}

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
	while (vfdev) {
		/* if we don't own it we don't care */
		if (vfdev->is_virtfn && vfdev->physfn == pdev) {
			/* if it is assigned we cannot release it */
			if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
				return true;
		}

		vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
	}

	return false;
}

#endif
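
/*
 * igb_vfs_are_assigned() relies on the pci_get_device() iterator semantics:
 * passing the previously returned device continues the search from it and
 * releases its reference, so the loop visits every matching VF exactly once.
 * Hedged sketch of the same walk, here just counting VFs that belong to this
 * PF; not driver code, and dev_id is assumed to be one of the VF device IDs
 * used above.
 */
#if 0
static int sketch_count_own_vfs(struct pci_dev *pdev, int dev_id)
{
	struct pci_dev *vfdev = NULL;
	int count = 0;

	/* pci_get_device() drops the previous reference and takes a new one */
	while ((vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev))) {
		if (vfdev->is_virtfn && vfdev->physfn == pdev)
			count++;
	}

	return count;
}
#endif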
4ae196df
AD
5015static void igb_ping_all_vfs(struct igb_adapter *adapter)
5016{
5017 struct e1000_hw *hw = &adapter->hw;
5018 u32 ping;
5019 int i;
5020
5021 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
5022 ping = E1000_PF_CONTROL_MSG;
f2ca0dbe 5023 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
4ae196df
AD
5024 ping |= E1000_VT_MSGTYPE_CTS;
5025 igb_write_mbx(hw, &ping, 1, i);
5026 }
5027}
5028
7d5753f0
AD
5029static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5030{
5031 struct e1000_hw *hw = &adapter->hw;
5032 u32 vmolr = rd32(E1000_VMOLR(vf));
5033 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5034
d85b9004 5035 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
7d5753f0
AD
5036 IGB_VF_FLAG_MULTI_PROMISC);
5037 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5038
5039 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
5040 vmolr |= E1000_VMOLR_MPME;
d85b9004 5041 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
7d5753f0
AD
5042 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5043 } else {
5044 /*
5045 * if we have hashes and we are clearing a multicast promisc
5046 * flag we need to write the hashes to the MTA as this step
5047 * was previously skipped
5048 */
5049 if (vf_data->num_vf_mc_hashes > 30) {
5050 vmolr |= E1000_VMOLR_MPME;
5051 } else if (vf_data->num_vf_mc_hashes) {
5052 int j;
5053 vmolr |= E1000_VMOLR_ROMPE;
5054 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5055 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5056 }
5057 }
5058
5059 wr32(E1000_VMOLR(vf), vmolr);
5060
5061 /* there are flags left unprocessed, likely not supported */
5062 if (*msgbuf & E1000_VT_MSGINFO_MASK)
5063 return -EINVAL;
5064
5065 return 0;
5066
5067}
5068
4ae196df
AD
5069static int igb_set_vf_multicasts(struct igb_adapter *adapter,
5070 u32 *msgbuf, u32 vf)
5071{
5072 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5073 u16 *hash_list = (u16 *)&msgbuf[1];
5074 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5075 int i;
5076
7d5753f0 5077 /* salt away the number of multicast addresses assigned
4ae196df
AD
5078 * to this VF for later use to restore when the PF multi cast
5079 * list changes
5080 */
5081 vf_data->num_vf_mc_hashes = n;
5082
7d5753f0
AD
5083 /* only up to 30 hash values supported */
5084 if (n > 30)
5085 n = 30;
5086
5087 /* store the hashes for later use */
4ae196df 5088 for (i = 0; i < n; i++)
a419aef8 5089 vf_data->vf_mc_hashes[i] = hash_list[i];
4ae196df
AD
5090
5091 /* Flush and reset the mta with the new values */
ff41f8dc 5092 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5093
5094 return 0;
5095}
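
/*
 * The parser above expects the hash count in the MSGINFO field of msgbuf[0]
 * and the 16-bit hash values packed starting at msgbuf[1].  The sketch below
 * only illustrates that layout from the sender's point of view; the real
 * message is built by the VF driver, not here, so treat this as a hedged
 * example rather than the actual mailbox code.
 */
#if 0
static void sketch_build_vf_mc_msg(u32 *msgbuf, const u16 *hashes, int n)
{
	u16 *hash_list = (u16 *)&msgbuf[1];
	int i;

	/* message type in the low word, hash count in the MSGINFO field */
	msgbuf[0] = E1000_VF_SET_MULTICAST | (n << E1000_VT_MSGINFO_SHIFT);

	/* 16-bit hash values are packed back to back after word 0 */
	for (i = 0; i < n; i++)
		hash_list[i] = hashes[i];
}
#endif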
5096
5097static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
5098{
5099 struct e1000_hw *hw = &adapter->hw;
5100 struct vf_data_storage *vf_data;
5101 int i, j;
5102
5103 for (i = 0; i < adapter->vfs_allocated_count; i++) {
7d5753f0
AD
5104 u32 vmolr = rd32(E1000_VMOLR(i));
5105 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5106
4ae196df 5107 vf_data = &adapter->vf_data[i];
7d5753f0
AD
5108
5109 if ((vf_data->num_vf_mc_hashes > 30) ||
5110 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
5111 vmolr |= E1000_VMOLR_MPME;
5112 } else if (vf_data->num_vf_mc_hashes) {
5113 vmolr |= E1000_VMOLR_ROMPE;
5114 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
5115 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
5116 }
5117 wr32(E1000_VMOLR(i), vmolr);
4ae196df
AD
5118 }
5119}
5120
static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, reg, vid;
	int i;

	pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);

	/* Find the vlan filter for this id */
	for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
		reg = rd32(E1000_VLVF(i));

		/* remove the vf from the pool */
		reg &= ~pool_mask;

		/* if pool is empty then remove entry from vfta */
		if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
		    (reg & E1000_VLVF_VLANID_ENABLE)) {
			/* capture the VLAN id before clearing the entry */
			vid = reg & E1000_VLVF_VLANID_MASK;
			reg = 0;
			igb_vfta_set(hw, vid, false);
		}

		wr32(E1000_VLVF(i), reg);
	}

	adapter->vf_data[vf].vlans_enabled = 0;
}
5149
5150static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
5151{
5152 struct e1000_hw *hw = &adapter->hw;
5153 u32 reg, i;
5154
51466239
AD
5155 /* The vlvf table only exists on 82576 hardware and newer */
5156 if (hw->mac.type < e1000_82576)
5157 return -1;
5158
5159 /* we only need to do this if VMDq is enabled */
4ae196df
AD
5160 if (!adapter->vfs_allocated_count)
5161 return -1;
5162
5163 /* Find the vlan filter for this id */
5164 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5165 reg = rd32(E1000_VLVF(i));
5166 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
5167 vid == (reg & E1000_VLVF_VLANID_MASK))
5168 break;
5169 }
5170
5171 if (add) {
5172 if (i == E1000_VLVF_ARRAY_SIZE) {
5173 /* Did not find a matching VLAN ID entry that was
5174 * enabled. Search for a free filter entry, i.e.
5175 * one without the enable bit set
5176 */
5177 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
5178 reg = rd32(E1000_VLVF(i));
5179 if (!(reg & E1000_VLVF_VLANID_ENABLE))
5180 break;
5181 }
5182 }
5183 if (i < E1000_VLVF_ARRAY_SIZE) {
5184 /* Found an enabled/available entry */
5185 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
5186
5187 /* if !enabled we need to set this up in vfta */
5188 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
51466239
AD
5189 /* add VID to filter table */
5190 igb_vfta_set(hw, vid, true);
4ae196df
AD
5191 reg |= E1000_VLVF_VLANID_ENABLE;
5192 }
cad6d05f
AD
5193 reg &= ~E1000_VLVF_VLANID_MASK;
5194 reg |= vid;
4ae196df 5195 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5196
5197 /* do not modify RLPML for PF devices */
5198 if (vf >= adapter->vfs_allocated_count)
5199 return 0;
5200
5201 if (!adapter->vf_data[vf].vlans_enabled) {
5202 u32 size;
5203 reg = rd32(E1000_VMOLR(vf));
5204 size = reg & E1000_VMOLR_RLPML_MASK;
5205 size += 4;
5206 reg &= ~E1000_VMOLR_RLPML_MASK;
5207 reg |= size;
5208 wr32(E1000_VMOLR(vf), reg);
5209 }
ae641bdc 5210
51466239 5211 adapter->vf_data[vf].vlans_enabled++;
4ae196df
AD
5212 }
5213 } else {
5214 if (i < E1000_VLVF_ARRAY_SIZE) {
5215 /* remove vf from the pool */
5216 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
5217 /* if pool is empty then remove entry from vfta */
5218 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
5219 reg = 0;
5220 igb_vfta_set(hw, vid, false);
5221 }
5222 wr32(E1000_VLVF(i), reg);
ae641bdc
AD
5223
5224 /* do not modify RLPML for PF devices */
5225 if (vf >= adapter->vfs_allocated_count)
5226 return 0;
5227
5228 adapter->vf_data[vf].vlans_enabled--;
5229 if (!adapter->vf_data[vf].vlans_enabled) {
5230 u32 size;
5231 reg = rd32(E1000_VMOLR(vf));
5232 size = reg & E1000_VMOLR_RLPML_MASK;
5233 size -= 4;
5234 reg &= ~E1000_VMOLR_RLPML_MASK;
5235 reg |= size;
5236 wr32(E1000_VMOLR(vf), reg);
5237 }
4ae196df
AD
5238 }
5239 }
8151d294
WM
5240 return 0;
5241}
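
/*
 * Worked example for the VLVF bookkeeping above: each VLVF entry carries a
 * pool-select bitmap, one bit per pool (PF plus VFs) starting at
 * E1000_VLVF_POOLSEL_SHIFT, so adding VF 2 to a filter sets
 * 1 << (E1000_VLVF_POOLSEL_SHIFT + 2); the entry is only torn down once the
 * whole POOLSEL field is clear.  The RLPML adjustment mirrors this: the
 * first VLAN enabled on a VF grows its max receive packet length by 4 bytes
 * (one 802.1Q tag) and removing the last VLAN shrinks it again.
 */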
5242
5243static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
5244{
5245 struct e1000_hw *hw = &adapter->hw;
5246
5247 if (vid)
5248 wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
5249 else
5250 wr32(E1000_VMVIR(vf), 0);
5251}
5252
5253static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5254 int vf, u16 vlan, u8 qos)
5255{
5256 int err = 0;
5257 struct igb_adapter *adapter = netdev_priv(netdev);
5258
5259 if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
5260 return -EINVAL;
5261 if (vlan || qos) {
5262 err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
5263 if (err)
5264 goto out;
5265 igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
5266 igb_set_vmolr(adapter, vf, !vlan);
5267 adapter->vf_data[vf].pf_vlan = vlan;
5268 adapter->vf_data[vf].pf_qos = qos;
5269 dev_info(&adapter->pdev->dev,
5270 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5271 if (test_bit(__IGB_DOWN, &adapter->state)) {
5272 dev_warn(&adapter->pdev->dev,
5273 "The VF VLAN has been set,"
5274 " but the PF device is not up.\n");
5275 dev_warn(&adapter->pdev->dev,
5276 "Bring the PF device up before"
5277 " attempting to use the VF device.\n");
5278 }
5279 } else {
5280 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5281 false, vf);
5282 igb_set_vmvir(adapter, vlan, vf);
5283 igb_set_vmolr(adapter, vf, true);
5284 adapter->vf_data[vf].pf_vlan = 0;
5285 adapter->vf_data[vf].pf_qos = 0;
5286 }
5287out:
5288 return err;
4ae196df
AD
5289}
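
/*
 * Worked example for the VMVIR programming above: with vlan = 100 and
 * qos = 5 the default tag passed to igb_set_vmvir() is
 * 100 | (5 << VLAN_PRIO_SHIFT) = 100 | (5 << 13) = 0xA064, i.e. VLAN ID 100
 * with priority 5 in the PCP bits, and E1000_VMVIR_VLANA_DEFAULT marks it
 * as the port default tag for that VF.
 */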
5290
5291static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5292{
5293 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
5294 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
5295
5296 return igb_vlvf_set(adapter, vid, add, vf);
5297}
5298
f2ca0dbe 5299static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4ae196df 5300{
8fa7e0f7
GR
5301 /* clear flags - except flag that indicates PF has set the MAC */
5302 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
f2ca0dbe 5303 adapter->vf_data[vf].last_nack = jiffies;
4ae196df
AD
5304
5305 /* reset offloads to defaults */
8151d294 5306 igb_set_vmolr(adapter, vf, true);
4ae196df
AD
5307
5308 /* reset vlans for device */
5309 igb_clear_vf_vfta(adapter, vf);
8151d294
WM
5310 if (adapter->vf_data[vf].pf_vlan)
5311 igb_ndo_set_vf_vlan(adapter->netdev, vf,
5312 adapter->vf_data[vf].pf_vlan,
5313 adapter->vf_data[vf].pf_qos);
5314 else
5315 igb_clear_vf_vfta(adapter, vf);
4ae196df
AD
5316
5317 /* reset multicast table array for vf */
5318 adapter->vf_data[vf].num_vf_mc_hashes = 0;
5319
5320 /* Flush and reset the mta with the new values */
ff41f8dc 5321 igb_set_rx_mode(adapter->netdev);
4ae196df
AD
5322}
5323
f2ca0dbe
AD
5324static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
5325{
5326 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
5327
5328 /* generate a new mac address as we were hotplug removed/added */
8151d294 5329 if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
7efd26d0 5330 eth_random_addr(vf_mac);
f2ca0dbe
AD
5331
5332 /* process remaining reset events */
5333 igb_vf_reset(adapter, vf);
5334}
5335
5336static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4ae196df
AD
5337{
5338 struct e1000_hw *hw = &adapter->hw;
5339 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
ff41f8dc 5340 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df
AD
5341 u32 reg, msgbuf[3];
5342 u8 *addr = (u8 *)(&msgbuf[1]);
5343
5344 /* process all the same items cleared in a function level reset */
f2ca0dbe 5345 igb_vf_reset(adapter, vf);
4ae196df
AD
5346
5347 /* set vf mac address */
26ad9178 5348 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4ae196df
AD
5349
5350 /* enable transmit and receive for vf */
5351 reg = rd32(E1000_VFTE);
5352 wr32(E1000_VFTE, reg | (1 << vf));
5353 reg = rd32(E1000_VFRE);
5354 wr32(E1000_VFRE, reg | (1 << vf));
5355
8fa7e0f7 5356 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
4ae196df
AD
5357
5358 /* reply to reset with ack and vf mac address */
5359 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
5360 memcpy(addr, vf_mac, 6);
5361 igb_write_mbx(hw, msgbuf, 3, vf);
5362}
5363
5364static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5365{
de42edde
GR
5366 /*
5367 * The VF MAC Address is stored in a packed array of bytes
5368 * starting at the second 32 bit word of the msg array
5369 */
f2ca0dbe
AD
5370 unsigned char *addr = (char *)&msg[1];
5371 int err = -1;
4ae196df 5372
f2ca0dbe
AD
5373 if (is_valid_ether_addr(addr))
5374 err = igb_set_vf_mac(adapter, vf, addr);
4ae196df 5375
f2ca0dbe 5376 return err;
4ae196df
AD
5377}
5378
5379static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
5380{
5381 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5382 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5383 u32 msg = E1000_VT_MSGTYPE_NACK;
5384
5385 /* if device isn't clear to send it shouldn't be reading either */
f2ca0dbe
AD
5386 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
5387 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4ae196df 5388 igb_write_mbx(hw, &msg, 1, vf);
f2ca0dbe 5389 vf_data->last_nack = jiffies;
4ae196df
AD
5390 }
5391}
5392
f2ca0dbe 5393static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4ae196df 5394{
f2ca0dbe
AD
5395 struct pci_dev *pdev = adapter->pdev;
5396 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4ae196df 5397 struct e1000_hw *hw = &adapter->hw;
f2ca0dbe 5398 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4ae196df
AD
5399 s32 retval;
5400
f2ca0dbe 5401 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4ae196df 5402
fef45f4c
AD
5403 if (retval) {
5404 /* if receive failed revoke VF CTS stats and restart init */
f2ca0dbe 5405 dev_err(&pdev->dev, "Error receiving message from VF\n");
fef45f4c
AD
5406 vf_data->flags &= ~IGB_VF_FLAG_CTS;
5407 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5408 return;
5409 goto out;
5410 }
4ae196df
AD
5411
5412 /* this is a message we already processed, do nothing */
5413 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
f2ca0dbe 5414 return;
4ae196df
AD
5415
5416 /*
5417 * until the vf completes a reset it should not be
5418 * allowed to start any configuration.
5419 */
5420
5421 if (msgbuf[0] == E1000_VF_RESET) {
5422 igb_vf_reset_msg(adapter, vf);
f2ca0dbe 5423 return;
4ae196df
AD
5424 }
5425
f2ca0dbe 5426 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
fef45f4c
AD
5427 if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
5428 return;
5429 retval = -1;
5430 goto out;
4ae196df
AD
5431 }
5432
5433 switch ((msgbuf[0] & 0xFFFF)) {
5434 case E1000_VF_SET_MAC_ADDR:
a6b5ea35
GR
5435 retval = -EINVAL;
5436 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5437 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5438 else
5439 dev_warn(&pdev->dev,
5440 "VF %d attempted to override administratively "
5441 "set MAC address\nReload the VF driver to "
5442 "resume operations\n", vf);
4ae196df 5443 break;
7d5753f0
AD
5444 case E1000_VF_SET_PROMISC:
5445 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
5446 break;
4ae196df
AD
5447 case E1000_VF_SET_MULTICAST:
5448 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
5449 break;
5450 case E1000_VF_SET_LPE:
5451 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5452 break;
5453 case E1000_VF_SET_VLAN:
a6b5ea35
GR
5454 retval = -1;
5455 if (vf_data->pf_vlan)
5456 dev_warn(&pdev->dev,
5457 "VF %d attempted to override administratively "
5458 "set VLAN tag\nReload the VF driver to "
5459 "resume operations\n", vf);
8151d294
WM
5460 else
5461 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4ae196df
AD
5462 break;
5463 default:
090b1795 5464 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4ae196df
AD
5465 retval = -1;
5466 break;
5467 }
5468
fef45f4c
AD
5469 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
5470out:
4ae196df
AD
5471 /* notify the VF of the results of what it sent us */
5472 if (retval)
5473 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
5474 else
5475 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
5476
4ae196df 5477 igb_write_mbx(hw, msgbuf, 1, vf);
f2ca0dbe 5478}
4ae196df 5479
f2ca0dbe
AD
5480static void igb_msg_task(struct igb_adapter *adapter)
5481{
5482 struct e1000_hw *hw = &adapter->hw;
5483 u32 vf;
5484
5485 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
5486 /* process any reset requests */
5487 if (!igb_check_for_rst(hw, vf))
5488 igb_vf_reset_event(adapter, vf);
5489
5490 /* process any messages pending */
5491 if (!igb_check_for_msg(hw, vf))
5492 igb_rcv_msg_from_vf(adapter, vf);
5493
5494 /* process any acks */
5495 if (!igb_check_for_ack(hw, vf))
5496 igb_rcv_ack_from_vf(adapter, vf);
5497 }
4ae196df
AD
5498}
5499
68d480c4
AD
5500/**
5501 * igb_set_uta - Set unicast filter table address
5502 * @adapter: board private structure
5503 *
5504 * The unicast table address is a register array of 32-bit registers.
5505 * The table is meant to be used in a way similar to how the MTA is used
5506 * however due to certain limitations in the hardware it is necessary to
25985edc
LDM
5507 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
5508 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
68d480c4
AD
5509 **/
5510static void igb_set_uta(struct igb_adapter *adapter)
5511{
5512 struct e1000_hw *hw = &adapter->hw;
5513 int i;
5514
5515 /* The UTA table only exists on 82576 hardware and newer */
5516 if (hw->mac.type < e1000_82576)
5517 return;
5518
5519 /* we only need to do this if VMDq is enabled */
5520 if (!adapter->vfs_allocated_count)
5521 return;
5522
5523 for (i = 0; i < hw->mac.uta_reg_count; i++)
5524 array_wr32(E1000_UTA, i, ~0);
5525}
5526
9d5c8243
AK
5527/**
5528 * igb_intr_msi - Interrupt Handler
5529 * @irq: interrupt number
5530 * @data: pointer to a network interface device structure
5531 **/
5532static irqreturn_t igb_intr_msi(int irq, void *data)
5533{
047e0030
AD
5534 struct igb_adapter *adapter = data;
5535 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5536 struct e1000_hw *hw = &adapter->hw;
5537 /* read ICR disables interrupts using IAM */
5538 u32 icr = rd32(E1000_ICR);
5539
047e0030 5540 igb_write_itr(q_vector);
9d5c8243 5541
7f081d40
AD
5542 if (icr & E1000_ICR_DRSTA)
5543 schedule_work(&adapter->reset_task);
5544
047e0030 5545 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5546 /* HW is reporting DMA is out of sync */
5547 adapter->stats.doosync++;
5548 }
5549
9d5c8243
AK
5550 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5551 hw->mac.get_link_status = 1;
5552 if (!test_bit(__IGB_DOWN, &adapter->state))
5553 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5554 }
5555
1f6e8178
MV
5556#ifdef CONFIG_IGB_PTP
5557 if (icr & E1000_ICR_TS) {
5558 u32 tsicr = rd32(E1000_TSICR);
5559
5560 if (tsicr & E1000_TSICR_TXTS) {
5561 /* acknowledge the interrupt */
5562 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5563 /* retrieve hardware timestamp */
5564 schedule_work(&adapter->ptp_tx_work);
5565 }
5566 }
5567#endif /* CONFIG_IGB_PTP */
5568
047e0030 5569 napi_schedule(&q_vector->napi);
9d5c8243
AK
5570
5571 return IRQ_HANDLED;
5572}
5573
5574/**
4a3c6433 5575 * igb_intr - Legacy Interrupt Handler
9d5c8243
AK
5576 * @irq: interrupt number
5577 * @data: pointer to a network interface device structure
5578 **/
5579static irqreturn_t igb_intr(int irq, void *data)
5580{
047e0030
AD
5581 struct igb_adapter *adapter = data;
5582 struct igb_q_vector *q_vector = adapter->q_vector[0];
9d5c8243
AK
5583 struct e1000_hw *hw = &adapter->hw;
5584 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5585 * need for the IMC write */
5586 u32 icr = rd32(E1000_ICR);
9d5c8243
AK
5587
5588 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5589 * not set, then the adapter didn't send an interrupt */
5590 if (!(icr & E1000_ICR_INT_ASSERTED))
5591 return IRQ_NONE;
5592
0ba82994
AD
5593 igb_write_itr(q_vector);
5594
7f081d40
AD
5595 if (icr & E1000_ICR_DRSTA)
5596 schedule_work(&adapter->reset_task);
5597
047e0030 5598 if (icr & E1000_ICR_DOUTSYNC) {
dda0e083
AD
5599 /* HW is reporting DMA is out of sync */
5600 adapter->stats.doosync++;
5601 }
5602
9d5c8243
AK
5603 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
5604 hw->mac.get_link_status = 1;
5605 /* guard against interrupt when we're going down */
5606 if (!test_bit(__IGB_DOWN, &adapter->state))
5607 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5608 }
5609
1f6e8178
MV
5610#ifdef CONFIG_IGB_PTP
5611 if (icr & E1000_ICR_TS) {
5612 u32 tsicr = rd32(E1000_TSICR);
5613
5614 if (tsicr & E1000_TSICR_TXTS) {
5615 /* acknowledge the interrupt */
5616 wr32(E1000_TSICR, E1000_TSICR_TXTS);
5617 /* retrieve hardware timestamp */
5618 schedule_work(&adapter->ptp_tx_work);
5619 }
5620 }
5621#endif /* CONFIG_IGB_PTP */
5622
047e0030 5623 napi_schedule(&q_vector->napi);
9d5c8243
AK
5624
5625 return IRQ_HANDLED;
5626}
5627
static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->msix_entries)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector);

	if (q_vector->rx.ring)
		clean_complete &= igb_clean_rx_irq(q_vector, budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete(napi);
	igb_ring_irq_enable(q_vector);

	return 0;
}

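/*
 * Minimal sketch of the NAPI contract igb_poll() follows: Tx is always
 * cleaned, Rx is cleaned against 'budget', the full budget is returned
 * while work remains so the core keeps polling, and the queue interrupt is
 * only re-armed after napi_complete().  clean_tx(), clean_rx() and
 * reenable_queue_interrupt() are placeholders; this is not driver code.
 */
#if 0
static int sketch_poll(struct napi_struct *napi, int budget)
{
	bool clean_complete = clean_tx();

	clean_complete &= clean_rx(budget);

	if (!clean_complete)
		return budget;		/* stay in polling mode */

	napi_complete(napi);		/* leave polling mode ... */
	reenable_queue_interrupt();	/* ... and re-arm the interrupt */
	return 0;
}
#endif
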
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		dev_kfree_skb_any(tx_buffer->skb);
		tx_buffer->skb = NULL;

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 tx_buffer->dma,
				 tx_buffer->length,
				 DMA_TO_DEVICE);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer->dma = 0;

			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (tx_buffer->dma) {
				dma_unmap_page(tx_ring->dev,
					       tx_buffer->dma,
					       tx_buffer->length,
					       DMA_TO_DEVICE);
			}
		}

		/* clear last DMA location */
		tx_buffer->dma = 0;

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
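
/*
 * The do/while loop above consumes at most q_vector->tx.work_limit
 * descriptors per call and keeps the ring index biased by -count, so the
 * wrap-around test is simply "did i reach zero".  Standalone sketch of the
 * same indexing trick over a plain array; 'struct entry', entry_is_done()
 * and clean_entry() are placeholders, this is not driver code.
 */
#if 0
static unsigned int sketch_clean_ring(struct entry *ring, unsigned int count,
				      unsigned int next_to_clean,
				      unsigned int budget)
{
	struct entry *e = &ring[next_to_clean];
	unsigned int i = next_to_clean - count;	/* biased negative */

	do {
		if (!entry_is_done(e))
			break;

		clean_entry(e);

		e++;
		i++;
		if (unlikely(!i)) {	/* real index just passed 'count' */
			i -= count;	/* re-bias */
			e = ring;	/* wrap the pointer to the start */
		}

		budget--;
	} while (likely(budget));

	return i + count;		/* un-bias: the new next_to_clean */
}
#endif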

static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/*
		 * work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets, (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}
5892
077887c3
AD
5893static inline void igb_rx_hash(struct igb_ring *ring,
5894 union e1000_adv_rx_desc *rx_desc,
5895 struct sk_buff *skb)
5896{
5897 if (ring->netdev->features & NETIF_F_RXHASH)
5898 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5899}
5900
8be10e91
AD
5901static void igb_rx_vlan(struct igb_ring *ring,
5902 union e1000_adv_rx_desc *rx_desc,
5903 struct sk_buff *skb)
5904{
5905 if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
5906 u16 vid;
5907 if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
5908 test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
5909 vid = be16_to_cpu(rx_desc->wb.upper.vlan);
5910 else
5911 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
5912
5913 __vlan_hwaccel_put_tag(skb, vid);
5914 }
5915}
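
/*
 * Byte-order example for the VLAN handling above: if the two bytes of
 * rx_desc->wb.upper.vlan are 0x00 0x64 in memory, le16_to_cpu() yields
 * 0x6400 (25600) while be16_to_cpu() yields 0x0064 (100).  Loopback frames
 * on adapters flagged with IGB_RING_FLAG_RX_LB_VLAN_BSWAP report the tag
 * byte-swapped, hence the extra test before choosing the conversion.
 */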
5916
44390ca6 5917static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
2d94d8ab
AD
5918{
5919 /* HW will not DMA in data larger than the given buffer, even if it
5920 * parses the (NFS, of course) header to be larger. In that case, it
5921 * fills the header buffer and spills the rest into the page.
5922 */
5923 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
5924 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
44390ca6
AD
5925 if (hlen > IGB_RX_HDR_LEN)
5926 hlen = IGB_RX_HDR_LEN;
2d94d8ab
AD
5927 return hlen;
5928}
5929
cd392f5c 5930static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
9d5c8243 5931{
0ba82994 5932 struct igb_ring *rx_ring = q_vector->rx.ring;
16eb8815
AD
5933 union e1000_adv_rx_desc *rx_desc;
5934 const int current_node = numa_node_id();
9d5c8243 5935 unsigned int total_bytes = 0, total_packets = 0;
16eb8815
AD
5936 u16 cleaned_count = igb_desc_unused(rx_ring);
5937 u16 i = rx_ring->next_to_clean;
9d5c8243 5938
60136906 5939 rx_desc = IGB_RX_DESC(rx_ring, i);
9d5c8243 5940
3ceb90fd 5941 while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
06034649 5942 struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
16eb8815
AD
5943 struct sk_buff *skb = buffer_info->skb;
5944 union e1000_adv_rx_desc *next_rxd;
9d5c8243 5945
69d3ca53 5946 buffer_info->skb = NULL;
16eb8815 5947 prefetch(skb->data);
69d3ca53
AD
5948
5949 i++;
5950 if (i == rx_ring->count)
5951 i = 0;
42d0781a 5952
60136906 5953 next_rxd = IGB_RX_DESC(rx_ring, i);
69d3ca53 5954 prefetch(next_rxd);
9d5c8243 5955
16eb8815
AD
5956 /*
5957 * This memory barrier is needed to keep us from reading
5958 * any other fields out of the rx_desc until we know the
5959 * RXD_STAT_DD bit is set
5960 */
5961 rmb();
9d5c8243 5962
16eb8815
AD
5963 if (!skb_is_nonlinear(skb)) {
5964 __skb_put(skb, igb_get_hlen(rx_desc));
5965 dma_unmap_single(rx_ring->dev, buffer_info->dma,
44390ca6 5966 IGB_RX_HDR_LEN,
59d71989 5967 DMA_FROM_DEVICE);
91615f76 5968 buffer_info->dma = 0;
bf36c1a0
AD
5969 }
5970
16eb8815
AD
5971 if (rx_desc->wb.upper.length) {
5972 u16 length = le16_to_cpu(rx_desc->wb.upper.length);
bf36c1a0 5973
aa913403 5974 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
bf36c1a0
AD
5975 buffer_info->page,
5976 buffer_info->page_offset,
5977 length);
5978
16eb8815
AD
5979 skb->len += length;
5980 skb->data_len += length;
95b9c1df 5981 skb->truesize += PAGE_SIZE / 2;
16eb8815 5982
d1eff350
AD
5983 if ((page_count(buffer_info->page) != 1) ||
5984 (page_to_nid(buffer_info->page) != current_node))
bf36c1a0
AD
5985 buffer_info->page = NULL;
5986 else
5987 get_page(buffer_info->page);
9d5c8243 5988
16eb8815
AD
5989 dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
5990 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5991 buffer_info->page_dma = 0;
9d5c8243 5992 }
9d5c8243 5993
3ceb90fd 5994 if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
06034649
AD
5995 struct igb_rx_buffer *next_buffer;
5996 next_buffer = &rx_ring->rx_buffer_info[i];
b2d56536
AD
5997 buffer_info->skb = next_buffer->skb;
5998 buffer_info->dma = next_buffer->dma;
5999 next_buffer->skb = skb;
6000 next_buffer->dma = 0;
bf36c1a0
AD
6001 goto next_desc;
6002 }
44390ca6 6003
89eaefb6
BG
6004 if (unlikely((igb_test_staterr(rx_desc,
6005 E1000_RXDEXT_ERR_FRAME_ERR_MASK))
6006 && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
16eb8815 6007 dev_kfree_skb_any(skb);
9d5c8243
AK
6008 goto next_desc;
6009 }
9d5c8243 6010
7ebae817 6011#ifdef CONFIG_IGB_PTP
a79f4f88 6012 igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
3c89f6d0 6013#endif /* CONFIG_IGB_PTP */
077887c3 6014 igb_rx_hash(rx_ring, rx_desc, skb);
3ceb90fd 6015 igb_rx_checksum(rx_ring, rx_desc, skb);
8be10e91 6016 igb_rx_vlan(rx_ring, rx_desc, skb);
3ceb90fd
AD
6017
6018 total_bytes += skb->len;
6019 total_packets++;
6020
6021 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
6022
b2cb09b1 6023 napi_gro_receive(&q_vector->napi, skb);
9d5c8243 6024
16eb8815 6025 budget--;
9d5c8243 6026next_desc:
16eb8815
AD
6027 if (!budget)
6028 break;
6029
6030 cleaned_count++;
9d5c8243
AK
6031 /* return some buffers to hardware, one at a time is too slow */
6032 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
cd392f5c 6033 igb_alloc_rx_buffers(rx_ring, cleaned_count);
9d5c8243
AK
6034 cleaned_count = 0;
6035 }
6036
6037 /* use prefetched values */
6038 rx_desc = next_rxd;
9d5c8243 6039 }
bf36c1a0 6040
9d5c8243 6041 rx_ring->next_to_clean = i;
12dcd86b 6042 u64_stats_update_begin(&rx_ring->rx_syncp);
9d5c8243
AK
6043 rx_ring->rx_stats.packets += total_packets;
6044 rx_ring->rx_stats.bytes += total_bytes;
12dcd86b 6045 u64_stats_update_end(&rx_ring->rx_syncp);
0ba82994
AD
6046 q_vector->rx.total_packets += total_packets;
6047 q_vector->rx.total_bytes += total_bytes;
c023cd88
AD
6048
6049 if (cleaned_count)
cd392f5c 6050 igb_alloc_rx_buffers(rx_ring, cleaned_count);
c023cd88 6051
16eb8815 6052 return !!budget;
9d5c8243
AK
6053}
6054
c023cd88 6055static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
06034649 6056 struct igb_rx_buffer *bi)
c023cd88
AD
6057{
6058 struct sk_buff *skb = bi->skb;
6059 dma_addr_t dma = bi->dma;
6060
6061 if (dma)
6062 return true;
6063
6064 if (likely(!skb)) {
6065 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
6066 IGB_RX_HDR_LEN);
6067 bi->skb = skb;
6068 if (!skb) {
6069 rx_ring->rx_stats.alloc_failed++;
6070 return false;
6071 }
6072
6073 /* initialize skb for ring */
6074 skb_record_rx_queue(skb, rx_ring->queue_index);
6075 }
6076
6077 dma = dma_map_single(rx_ring->dev, skb->data,
6078 IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
6079
6080 if (dma_mapping_error(rx_ring->dev, dma)) {
6081 rx_ring->rx_stats.alloc_failed++;
6082 return false;
6083 }
6084
6085 bi->dma = dma;
6086 return true;
6087}
6088
6089static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
06034649 6090 struct igb_rx_buffer *bi)
c023cd88
AD
6091{
6092 struct page *page = bi->page;
6093 dma_addr_t page_dma = bi->page_dma;
6094 unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
6095
6096 if (page_dma)
6097 return true;
6098
6099 if (!page) {
0614002b 6100 page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
c023cd88
AD
6101 bi->page = page;
6102 if (unlikely(!page)) {
6103 rx_ring->rx_stats.alloc_failed++;
6104 return false;
6105 }
6106 }
6107
6108 page_dma = dma_map_page(rx_ring->dev, page,
6109 page_offset, PAGE_SIZE / 2,
6110 DMA_FROM_DEVICE);
6111
6112 if (dma_mapping_error(rx_ring->dev, page_dma)) {
6113 rx_ring->rx_stats.alloc_failed++;
6114 return false;
6115 }
6116
6117 bi->page_dma = page_dma;
6118 bi->page_offset = page_offset;
6119 return true;
6120}
6121
/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	while (cleaned_count--) {
		if (!igb_alloc_mapped_skb(rx_ring, bi))
			break;

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info. */
		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);

		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the hdr_addr for the next_to_use descriptor */
		rx_desc->read.hdr_addr = 0;
	}

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64). */
		wmb();
		writel(i, rx_ring->tail);
	}
}
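
/*
 * The tail update above follows the usual producer pattern for descriptor
 * rings: publish all descriptor writes first, then a single tail-register
 * write tells hardware how far it may fetch.  The wmb() keeps weakly-ordered
 * CPUs from letting the tail write overtake the descriptor writes.  Sketch
 * of the pattern (fill_descriptors() is a placeholder):
 *
 *	fill_descriptors(ring, n);
 *	wmb();				// descriptors visible before tail
 *	writel(new_tail, ring->tail);	// hand ownership to hardware
 */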
6175
6176/**
6177 * igb_mii_ioctl -
6178 * @netdev:
6179 * @ifreq:
6180 * @cmd:
6181 **/
6182static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6183{
6184 struct igb_adapter *adapter = netdev_priv(netdev);
6185 struct mii_ioctl_data *data = if_mii(ifr);
6186
6187 if (adapter->hw.phy.media_type != e1000_media_type_copper)
6188 return -EOPNOTSUPP;
6189
6190 switch (cmd) {
6191 case SIOCGMIIPHY:
6192 data->phy_id = adapter->hw.phy.addr;
6193 break;
6194 case SIOCGMIIREG:
f5f4cf08
AD
6195 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
6196 &data->val_out))
9d5c8243
AK
6197 return -EIO;
6198 break;
6199 case SIOCSMIIREG:
6200 default:
6201 return -EOPNOTSUPP;
6202 }
6203 return 0;
6204}
6205
6206/**
6207 * igb_ioctl -
6208 * @netdev:
6209 * @ifreq:
6210 * @cmd:
6211 **/
6212static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6213{
6214 switch (cmd) {
6215 case SIOCGMIIPHY:
6216 case SIOCGMIIREG:
6217 case SIOCSMIIREG:
6218 return igb_mii_ioctl(netdev, ifr, cmd);
3c89f6d0 6219#ifdef CONFIG_IGB_PTP
c6cb090b 6220 case SIOCSHWTSTAMP:
a79f4f88 6221 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
3c89f6d0 6222#endif /* CONFIG_IGB_PTP */
9d5c8243
AK
6223 default:
6224 return -EOPNOTSUPP;
6225 }
6226}
6227
009bc06e
AD
6228s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6229{
6230 struct igb_adapter *adapter = hw->back;
6231 u16 cap_offset;
6232
bdaae04c 6233 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6234 if (!cap_offset)
6235 return -E1000_ERR_CONFIG;
6236
6237 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
6238
6239 return 0;
6240}
6241
6242s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
6243{
6244 struct igb_adapter *adapter = hw->back;
6245 u16 cap_offset;
6246
bdaae04c 6247 cap_offset = adapter->pdev->pcie_cap;
009bc06e
AD
6248 if (!cap_offset)
6249 return -E1000_ERR_CONFIG;
6250
6251 pci_write_config_word(adapter->pdev, cap_offset + reg, *value);
6252
6253 return 0;
6254}
6255
c8f44aff 6256static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
9d5c8243
AK
6257{
6258 struct igb_adapter *adapter = netdev_priv(netdev);
6259 struct e1000_hw *hw = &adapter->hw;
6260 u32 ctrl, rctl;
5faf030c 6261 bool enable = !!(features & NETIF_F_HW_VLAN_RX);
9d5c8243 6262
5faf030c 6263 if (enable) {
9d5c8243
AK
6264 /* enable VLAN tag insert/strip */
6265 ctrl = rd32(E1000_CTRL);
6266 ctrl |= E1000_CTRL_VME;
6267 wr32(E1000_CTRL, ctrl);
6268
51466239 6269 /* Disable CFI check */
9d5c8243 6270 rctl = rd32(E1000_RCTL);
9d5c8243
AK
6271 rctl &= ~E1000_RCTL_CFIEN;
6272 wr32(E1000_RCTL, rctl);
9d5c8243
AK
6273 } else {
6274 /* disable VLAN tag insert/strip */
6275 ctrl = rd32(E1000_CTRL);
6276 ctrl &= ~E1000_CTRL_VME;
6277 wr32(E1000_CTRL, ctrl);
9d5c8243
AK
6278 }
6279
e1739522 6280 igb_rlpml_set(adapter);
9d5c8243
AK
6281}
6282
8e586137 6283static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
9d5c8243
AK
6284{
6285 struct igb_adapter *adapter = netdev_priv(netdev);
6286 struct e1000_hw *hw = &adapter->hw;
4ae196df 6287 int pf_id = adapter->vfs_allocated_count;
9d5c8243 6288
51466239
AD
6289 /* attempt to add filter to vlvf array */
6290 igb_vlvf_set(adapter, vid, true, pf_id);
4ae196df 6291
51466239
AD
6292 /* add the filter since PF can receive vlans w/o entry in vlvf */
6293 igb_vfta_set(hw, vid, true);
b2cb09b1
JP
6294
6295 set_bit(vid, adapter->active_vlans);
8e586137
JP
6296
6297 return 0;
9d5c8243
AK
6298}
6299
8e586137 6300static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
9d5c8243
AK
6301{
6302 struct igb_adapter *adapter = netdev_priv(netdev);
6303 struct e1000_hw *hw = &adapter->hw;
4ae196df 6304 int pf_id = adapter->vfs_allocated_count;
51466239 6305 s32 err;
9d5c8243 6306
51466239
AD
6307 /* remove vlan from VLVF table array */
6308 err = igb_vlvf_set(adapter, vid, false, pf_id);
9d5c8243 6309
51466239
AD
6310 /* if vid was not present in VLVF just remove it from table */
6311 if (err)
4ae196df 6312 igb_vfta_set(hw, vid, false);
b2cb09b1
JP
6313
6314 clear_bit(vid, adapter->active_vlans);
8e586137
JP
6315
6316 return 0;
9d5c8243
AK
6317}
6318
6319static void igb_restore_vlan(struct igb_adapter *adapter)
6320{
b2cb09b1 6321 u16 vid;
9d5c8243 6322
5faf030c
AD
6323 igb_vlan_mode(adapter->netdev, adapter->netdev->features);
6324
b2cb09b1
JP
6325 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
6326 igb_vlan_rx_add_vid(adapter->netdev, vid);
9d5c8243
AK
6327}
6328
14ad2513 6329int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
9d5c8243 6330{
090b1795 6331 struct pci_dev *pdev = adapter->pdev;
9d5c8243
AK
6332 struct e1000_mac_info *mac = &adapter->hw.mac;
6333
6334 mac->autoneg = 0;
6335
14ad2513
DD
6336 /* Make sure dplx is at most 1 bit and lsb of speed is not set
6337 * for the switch() below to work */
6338 if ((spd & 1) || (dplx & ~1))
6339 goto err_inval;
6340
	/* Fiber NICs only allow 1000 Mbps Full duplex */
	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;
cd2638a8 6346
14ad2513 6347 switch (spd + dplx) {
9d5c8243
AK
6348 case SPEED_10 + DUPLEX_HALF:
6349 mac->forced_speed_duplex = ADVERTISE_10_HALF;
6350 break;
6351 case SPEED_10 + DUPLEX_FULL:
6352 mac->forced_speed_duplex = ADVERTISE_10_FULL;
6353 break;
6354 case SPEED_100 + DUPLEX_HALF:
6355 mac->forced_speed_duplex = ADVERTISE_100_HALF;
6356 break;
6357 case SPEED_100 + DUPLEX_FULL:
6358 mac->forced_speed_duplex = ADVERTISE_100_FULL;
6359 break;
6360 case SPEED_1000 + DUPLEX_FULL:
6361 mac->autoneg = 1;
6362 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
6363 break;
6364 case SPEED_1000 + DUPLEX_HALF: /* not supported */
6365 default:
14ad2513 6366 goto err_inval;
9d5c8243 6367 }
8376dad0
JB
6368
6369 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
6370 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6371
9d5c8243 6372 return 0;
14ad2513
DD
6373
6374err_inval:
6375 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
6376 return -EINVAL;
9d5c8243
AK
6377}
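
/*
 * The validation above works because SPEED_10/SPEED_100/SPEED_1000 are the
 * literal values 10, 100 and 1000 (all even) and DUPLEX_HALF/DUPLEX_FULL
 * are 0 and 1.  Rejecting (spd & 1) and (dplx & ~1) therefore makes the sum
 * unambiguous: e.g. 100 + DUPLEX_FULL = 101 can only mean 100 Mbps full
 * duplex, so the switch statement can key on spd + dplx.
 */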
6378
749ab2cd
YZ
6379static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
6380 bool runtime)
9d5c8243
AK
6381{
6382 struct net_device *netdev = pci_get_drvdata(pdev);
6383 struct igb_adapter *adapter = netdev_priv(netdev);
6384 struct e1000_hw *hw = &adapter->hw;
2d064c06 6385 u32 ctrl, rctl, status;
749ab2cd 6386 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
9d5c8243
AK
6387#ifdef CONFIG_PM
6388 int retval = 0;
6389#endif
6390
6391 netif_device_detach(netdev);
6392
a88f10ec 6393 if (netif_running(netdev))
749ab2cd 6394 __igb_close(netdev, true);
a88f10ec 6395
047e0030 6396 igb_clear_interrupt_scheme(adapter);
9d5c8243
AK
6397
6398#ifdef CONFIG_PM
6399 retval = pci_save_state(pdev);
6400 if (retval)
6401 return retval;
6402#endif
6403
6404 status = rd32(E1000_STATUS);
6405 if (status & E1000_STATUS_LU)
6406 wufc &= ~E1000_WUFC_LNKC;
6407
6408 if (wufc) {
6409 igb_setup_rctl(adapter);
ff41f8dc 6410 igb_set_rx_mode(netdev);
9d5c8243
AK
6411
6412 /* turn on all-multi mode if wake on multicast is enabled */
6413 if (wufc & E1000_WUFC_MC) {
6414 rctl = rd32(E1000_RCTL);
6415 rctl |= E1000_RCTL_MPE;
6416 wr32(E1000_RCTL, rctl);
6417 }
6418
6419 ctrl = rd32(E1000_CTRL);
6420 /* advertise wake from D3Cold */
6421 #define E1000_CTRL_ADVD3WUC 0x00100000
6422 /* phy power management enable */
6423 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
6424 ctrl |= E1000_CTRL_ADVD3WUC;
6425 wr32(E1000_CTRL, ctrl);
6426
9d5c8243 6427 /* Allow time for pending master requests to run */
330a6d6a 6428 igb_disable_pcie_master(hw);
9d5c8243
AK
6429
6430 wr32(E1000_WUC, E1000_WUC_PME_EN);
6431 wr32(E1000_WUFC, wufc);
9d5c8243
AK
6432 } else {
6433 wr32(E1000_WUC, 0);
6434 wr32(E1000_WUFC, 0);
9d5c8243
AK
6435 }
6436
3fe7c4c9
RW
6437 *enable_wake = wufc || adapter->en_mng_pt;
6438 if (!*enable_wake)
88a268c1
NN
6439 igb_power_down_link(adapter);
6440 else
6441 igb_power_up_link(adapter);
9d5c8243
AK
6442
6443 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6444 * would have already happened in close and is redundant. */
6445 igb_release_hw_control(adapter);
6446
6447 pci_disable_device(pdev);
6448
9d5c8243
AK
6449 return 0;
6450}
6451
6452#ifdef CONFIG_PM
d9dd966d 6453#ifdef CONFIG_PM_SLEEP
749ab2cd 6454static int igb_suspend(struct device *dev)
3fe7c4c9
RW
6455{
6456 int retval;
6457 bool wake;
749ab2cd 6458 struct pci_dev *pdev = to_pci_dev(dev);
3fe7c4c9 6459
749ab2cd 6460 retval = __igb_shutdown(pdev, &wake, 0);
3fe7c4c9
RW
6461 if (retval)
6462 return retval;
6463
6464 if (wake) {
6465 pci_prepare_to_sleep(pdev);
6466 } else {
6467 pci_wake_from_d3(pdev, false);
6468 pci_set_power_state(pdev, PCI_D3hot);
6469 }
6470
6471 return 0;
6472}
d9dd966d 6473#endif /* CONFIG_PM_SLEEP */
3fe7c4c9 6474
749ab2cd 6475static int igb_resume(struct device *dev)
9d5c8243 6476{
749ab2cd 6477 struct pci_dev *pdev = to_pci_dev(dev);
9d5c8243
AK
6478 struct net_device *netdev = pci_get_drvdata(pdev);
6479 struct igb_adapter *adapter = netdev_priv(netdev);
6480 struct e1000_hw *hw = &adapter->hw;
6481 u32 err;
6482
6483 pci_set_power_state(pdev, PCI_D0);
6484 pci_restore_state(pdev);
b94f2d77 6485 pci_save_state(pdev);
42bfd33a 6486
aed5dec3 6487 err = pci_enable_device_mem(pdev);
9d5c8243
AK
6488 if (err) {
6489 dev_err(&pdev->dev,
6490 "igb: Cannot enable PCI device from suspend\n");
6491 return err;
6492 }
6493 pci_set_master(pdev);
6494
6495 pci_enable_wake(pdev, PCI_D3hot, 0);
6496 pci_enable_wake(pdev, PCI_D3cold, 0);
6497
cfb8c3aa 6498 if (igb_init_interrupt_scheme(adapter)) {
a88f10ec
AD
6499 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
6500 return -ENOMEM;
9d5c8243
AK
6501 }
6502
9d5c8243 6503 igb_reset(adapter);
a8564f03
AD
6504
6505 /* let the f/w know that the h/w is now under the control of the
6506 * driver. */
6507 igb_get_hw_control(adapter);
6508
9d5c8243
AK
6509 wr32(E1000_WUS, ~0);
6510
749ab2cd
YZ
6511 if (netdev->flags & IFF_UP) {
6512 err = __igb_open(netdev, true);
a88f10ec
AD
6513 if (err)
6514 return err;
6515 }
9d5c8243
AK
6516
6517 netif_device_attach(netdev);
749ab2cd
YZ
6518 return 0;
6519}
6520
6521#ifdef CONFIG_PM_RUNTIME
6522static int igb_runtime_idle(struct device *dev)
6523{
6524 struct pci_dev *pdev = to_pci_dev(dev);
6525 struct net_device *netdev = pci_get_drvdata(pdev);
6526 struct igb_adapter *adapter = netdev_priv(netdev);
6527
6528 if (!igb_has_link(adapter))
6529 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6530
6531 return -EBUSY;
6532}
6533
6534static int igb_runtime_suspend(struct device *dev)
6535{
6536 struct pci_dev *pdev = to_pci_dev(dev);
6537 int retval;
6538 bool wake;
6539
6540 retval = __igb_shutdown(pdev, &wake, 1);
6541 if (retval)
6542 return retval;
6543
6544 if (wake) {
6545 pci_prepare_to_sleep(pdev);
6546 } else {
6547 pci_wake_from_d3(pdev, false);
6548 pci_set_power_state(pdev, PCI_D3hot);
6549 }
9d5c8243 6550
9d5c8243
AK
6551 return 0;
6552}
749ab2cd
YZ
6553
6554static int igb_runtime_resume(struct device *dev)
6555{
6556 return igb_resume(dev);
6557}
6558#endif /* CONFIG_PM_RUNTIME */
9d5c8243
AK
6559#endif
6560
6561static void igb_shutdown(struct pci_dev *pdev)
6562{
3fe7c4c9
RW
6563 bool wake;
6564
749ab2cd 6565 __igb_shutdown(pdev, &wake, 0);
3fe7c4c9
RW
6566
6567 if (system_state == SYSTEM_POWER_OFF) {
6568 pci_wake_from_d3(pdev, wake);
6569 pci_set_power_state(pdev, PCI_D3hot);
6570 }
9d5c8243
AK
6571}
6572
6573#ifdef CONFIG_NET_POLL_CONTROLLER
6574/*
6575 * Polling 'interrupt' - used by things like netconsole to send skbs
6576 * without having to re-enable interrupts. It's not called while
6577 * the interrupt routine is executing.
6578 */
6579static void igb_netpoll(struct net_device *netdev)
6580{
6581 struct igb_adapter *adapter = netdev_priv(netdev);
eebbbdba 6582 struct e1000_hw *hw = &adapter->hw;
0d1ae7f4 6583 struct igb_q_vector *q_vector;
9d5c8243 6584 int i;
9d5c8243 6585
047e0030 6586 for (i = 0; i < adapter->num_q_vectors; i++) {
0d1ae7f4
AD
6587 q_vector = adapter->q_vector[i];
6588 if (adapter->msix_entries)
6589 wr32(E1000_EIMC, q_vector->eims_value);
6590 else
6591 igb_irq_disable(adapter);
047e0030 6592 napi_schedule(&q_vector->napi);
eebbbdba 6593 }
9d5c8243
AK
6594}
6595#endif /* CONFIG_NET_POLL_CONTROLLER */
6596
6597/**
6598 * igb_io_error_detected - called when PCI error is detected
6599 * @pdev: Pointer to PCI device
6600 * @state: The current pci connection state
6601 *
6602 * This function is called after a PCI bus error affecting
6603 * this device has been detected.
6604 */
6605static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
6606 pci_channel_state_t state)
6607{
6608 struct net_device *netdev = pci_get_drvdata(pdev);
6609 struct igb_adapter *adapter = netdev_priv(netdev);
6610
6611 netif_device_detach(netdev);
6612
59ed6eec
AD
6613 if (state == pci_channel_io_perm_failure)
6614 return PCI_ERS_RESULT_DISCONNECT;
6615
9d5c8243
AK
6616 if (netif_running(netdev))
6617 igb_down(adapter);
6618 pci_disable_device(pdev);
6619
	/* Request a slot reset. */
6621 return PCI_ERS_RESULT_NEED_RESET;
6622}
6623
6624/**
6625 * igb_io_slot_reset - called after the pci bus has been reset.
6626 * @pdev: Pointer to PCI device
6627 *
6628 * Restart the card from scratch, as if from a cold-boot. Implementation
6629 * resembles the first-half of the igb_resume routine.
6630 */
6631static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
6632{
6633 struct net_device *netdev = pci_get_drvdata(pdev);
6634 struct igb_adapter *adapter = netdev_priv(netdev);
6635 struct e1000_hw *hw = &adapter->hw;
40a914fa 6636 pci_ers_result_t result;
42bfd33a 6637 int err;
9d5c8243 6638
aed5dec3 6639 if (pci_enable_device_mem(pdev)) {
9d5c8243
AK
6640 dev_err(&pdev->dev,
6641 "Cannot re-enable PCI device after reset.\n");
40a914fa
AD
6642 result = PCI_ERS_RESULT_DISCONNECT;
6643 } else {
6644 pci_set_master(pdev);
6645 pci_restore_state(pdev);
b94f2d77 6646 pci_save_state(pdev);
9d5c8243 6647
40a914fa
AD
6648 pci_enable_wake(pdev, PCI_D3hot, 0);
6649 pci_enable_wake(pdev, PCI_D3cold, 0);
9d5c8243 6650
40a914fa
AD
6651 igb_reset(adapter);
6652 wr32(E1000_WUS, ~0);
6653 result = PCI_ERS_RESULT_RECOVERED;
6654 }
9d5c8243 6655
ea943d41
JK
6656 err = pci_cleanup_aer_uncorrect_error_status(pdev);
6657 if (err) {
6658 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
6659 "failed 0x%0x\n", err);
6660 /* non-fatal, continue */
6661 }
40a914fa
AD
6662
6663 return result;
9d5c8243
AK
6664}
6665
6666/**
6667 * igb_io_resume - called when traffic can start flowing again.
6668 * @pdev: Pointer to PCI device
6669 *
6670 * This callback is called when the error recovery driver tells us that
6671 * its OK to resume normal operation. Implementation resembles the
6672 * second-half of the igb_resume routine.
6673 */
6674static void igb_io_resume(struct pci_dev *pdev)
6675{
6676 struct net_device *netdev = pci_get_drvdata(pdev);
6677 struct igb_adapter *adapter = netdev_priv(netdev);
6678
9d5c8243
AK
6679 if (netif_running(netdev)) {
6680 if (igb_up(adapter)) {
6681 dev_err(&pdev->dev, "igb_up failed after reset\n");
6682 return;
6683 }
6684 }
6685
6686 netif_device_attach(netdev);
6687
6688 /* let the f/w know that the h/w is now under the control of the
6689 * driver. */
6690 igb_get_hw_control(adapter);
9d5c8243
AK
6691}
6692
26ad9178
AD
6693static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
6694 u8 qsel)
6695{
6696 u32 rar_low, rar_high;
6697 struct e1000_hw *hw = &adapter->hw;
6698
6699 /* HW expects these in little endian so we reverse the byte order
6700 * from network order (big endian) to little endian
6701 */
6702 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
6703 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
6704 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
6705
6706 /* Indicate to hardware the Address is Valid. */
6707 rar_high |= E1000_RAH_AV;
6708
6709 if (hw->mac.type == e1000_82575)
6710 rar_high |= E1000_RAH_POOL_1 * qsel;
6711 else
6712 rar_high |= E1000_RAH_POOL_1 << qsel;
6713
6714 wr32(E1000_RAL(index), rar_low);
6715 wrfl();
6716 wr32(E1000_RAH(index), rar_high);
6717 wrfl();
6718}
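
/*
 * Packing example for igb_rar_set_qsel() above: for the MAC address
 * 00:1b:21:aa:bb:cc the bytes go little-endian into the register pair, so
 * rar_low = 0xaa211b00 and rar_high = 0x0000ccbb before the E1000_RAH_AV
 * valid bit and the pool selection bits are OR'd in.
 */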
6719
4ae196df
AD
6720static int igb_set_vf_mac(struct igb_adapter *adapter,
6721 int vf, unsigned char *mac_addr)
6722{
6723 struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at end of receive addresses and move
	 * towards the first, as a result a collision should not be possible */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
4ae196df 6727
37680117 6728 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
4ae196df 6729
26ad9178 6730 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
6731
6732 return 0;
6733}
6734
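/* Reached from user space via, e.g., "ip link set <pf> vf <n> mac <addr>"
 * (illustrative invocation through the ndo_set_vf_mac hook).
 */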
6735static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
6736{
6737 struct igb_adapter *adapter = netdev_priv(netdev);
6738 if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
6739 return -EINVAL;
6740 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
6741 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
6742	dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
6743		 " change effective.\n");
6744 if (test_bit(__IGB_DOWN, &adapter->state)) {
6745 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
6746 " but the PF device is not up.\n");
6747 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
6748 " attempting to use the VF device.\n");
6749 }
6750 return igb_set_vf_mac(adapter, vf, mac);
6751}
6752
6753static int igb_link_mbps(int internal_link_speed)
6754{
6755 switch (internal_link_speed) {
6756 case SPEED_100:
6757 return 100;
6758 case SPEED_1000:
6759 return 1000;
6760 default:
6761 return 0;
6762 }
6763}
6764
6765static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
6766 int link_speed)
6767{
6768 int rf_dec, rf_int;
6769 u32 bcnrc_val;
6770
6771 if (tx_rate != 0) {
6772 /* Calculate the rate factor values to set */
6773 rf_int = link_speed / tx_rate;
6774 rf_dec = (link_speed - (rf_int * tx_rate));
6775 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate;
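		/* Illustrative example (values assumed, with
		 * E1000_RTTBCNRC_RF_INT_SHIFT taken as 14): link_speed =
		 * 1000 Mbps and tx_rate = 300 Mbps give rf_int = 3 and
		 * rf_dec = 100 * 16384 / 300 = 5461, i.e. a rate factor of
		 * about 3.33 in the hardware's fixed-point format.
		 */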
6776
6777 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
6778 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) &
6779 E1000_RTTBCNRC_RF_INT_MASK);
6780 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
6781 } else {
6782 bcnrc_val = 0;
6783 }
6784
6785 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
6786 /*
6787 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
6788 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
6789 */
6790 wr32(E1000_RTTBCNRM, 0x14);
6791 wr32(E1000_RTTBCNRC, bcnrc_val);
6792}
6793
6794static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
6795{
6796 int actual_link_speed, i;
6797 bool reset_rate = false;
6798
6799 /* VF TX rate limit was not set or not supported */
6800 if ((adapter->vf_rate_link_speed == 0) ||
6801 (adapter->hw.mac.type != e1000_82576))
6802 return;
6803
6804 actual_link_speed = igb_link_mbps(adapter->link_speed);
6805 if (actual_link_speed != adapter->vf_rate_link_speed) {
6806 reset_rate = true;
6807 adapter->vf_rate_link_speed = 0;
6808 dev_info(&adapter->pdev->dev,
6809 "Link speed has been changed. VF Transmit "
6810 "rate is disabled\n");
6811 }
6812
6813 for (i = 0; i < adapter->vfs_allocated_count; i++) {
6814 if (reset_rate)
6815 adapter->vf_data[i].tx_rate = 0;
6816
6817 igb_set_vf_rate_limit(&adapter->hw, i,
6818 adapter->vf_data[i].tx_rate,
6819 actual_link_speed);
6820 }
6821}
6822
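/* Reached from user space via, e.g., "ip link set <pf> vf <n> rate <Mbps>"
 * (illustrative invocation through the ndo_set_vf_tx_rate hook); a rate of
 * 0 removes the limit.
 */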
6823static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
6824{
6825 struct igb_adapter *adapter = netdev_priv(netdev);
6826 struct e1000_hw *hw = &adapter->hw;
6827 int actual_link_speed;
6828
6829 if (hw->mac.type != e1000_82576)
6830 return -EOPNOTSUPP;
6831
6832 actual_link_speed = igb_link_mbps(adapter->link_speed);
6833 if ((vf >= adapter->vfs_allocated_count) ||
6834 (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
6835 (tx_rate < 0) || (tx_rate > actual_link_speed))
6836 return -EINVAL;
6837
6838 adapter->vf_rate_link_speed = actual_link_speed;
6839 adapter->vf_data[vf].tx_rate = (u16)tx_rate;
6840 igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
6841
6842 return 0;
6843}
6844
6845static int igb_ndo_get_vf_config(struct net_device *netdev,
6846 int vf, struct ifla_vf_info *ivi)
6847{
6848 struct igb_adapter *adapter = netdev_priv(netdev);
6849 if (vf >= adapter->vfs_allocated_count)
6850 return -EINVAL;
6851 ivi->vf = vf;
6852 memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
17dc566c 6853 ivi->tx_rate = adapter->vf_data[vf].tx_rate;
6854 ivi->vlan = adapter->vf_data[vf].pf_vlan;
6855 ivi->qos = adapter->vf_data[vf].pf_qos;
6856 return 0;
6857}
6858
6859static void igb_vmm_control(struct igb_adapter *adapter)
6860{
6861 struct e1000_hw *hw = &adapter->hw;
10d8e907 6862 u32 reg;
4ae196df 6863
6864 switch (hw->mac.type) {
6865 case e1000_82575:
6866 case e1000_i210:
6867 case e1000_i211:
6868 default:
6869		/* replication is not supported for 82575, i210, or i211 */
4ae196df 6870 return;
6871 case e1000_82576:
6872 /* notify HW that the MAC is adding vlan tags */
6873 reg = rd32(E1000_DTXCTL);
6874 reg |= E1000_DTXCTL_VLAN_ADDED;
6875 wr32(E1000_DTXCTL, reg);
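		/* fall through - the 82576 also needs the VLAN tag stripping
		 * setup done for the 82580 below
		 */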
6876 case e1000_82580:
6877 /* enable replication vlan tag stripping */
6878 reg = rd32(E1000_RPLOLR);
6879 reg |= E1000_RPLOLR_STRVLAN;
6880 wr32(E1000_RPLOLR, reg);
6881 case e1000_i350:
6882 /* none of the above registers are supported by i350 */
6883 break;
6884 }
10d8e907 6885
6886 if (adapter->vfs_allocated_count) {
6887 igb_vmdq_set_loopback_pf(hw, true);
6888 igb_vmdq_set_replication_pf(hw, true);
6889 igb_vmdq_set_anti_spoofing_pf(hw, true,
6890 adapter->vfs_allocated_count);
6891 } else {
6892 igb_vmdq_set_loopback_pf(hw, false);
6893 igb_vmdq_set_replication_pf(hw, false);
6894 }
6895}
6896
6897static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
6898{
6899 struct e1000_hw *hw = &adapter->hw;
6900 u32 dmac_thr;
6901 u16 hwm;
6902
6903 if (hw->mac.type > e1000_82580) {
6904 if (adapter->flags & IGB_FLAG_DMAC) {
6905 u32 reg;
6906
6907 /* force threshold to 0. */
6908 wr32(E1000_DMCTXTH, 0);
6909
6910 /*
6911 * DMA Coalescing high water mark needs to be greater
6912 * than the Rx threshold. Set hwm to PBA - max frame
6913			 * size in 16B units, but never less than PBA - 6KB.
b6e0c419 6914 */
6915 hwm = 64 * pba - adapter->max_frame_size / 16;
6916 if (hwm < 64 * (pba - 6))
6917 hwm = 64 * (pba - 6);
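			/* Illustrative example (values assumed): pba = 34 (KB)
			 * and max_frame_size = 1522 give hwm = 64 * 34 -
			 * 1522 / 16 = 2081 16-byte units, above the
			 * 64 * (34 - 6) = 1792 floor, so it is kept as is.
			 */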
6918 reg = rd32(E1000_FCRTC);
6919 reg &= ~E1000_FCRTC_RTH_COAL_MASK;
6920 reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
6921 & E1000_FCRTC_RTH_COAL_MASK);
6922 wr32(E1000_FCRTC, reg);
6923
6924 /*
6925			 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
6926			 * frame size, but never less than PBA - 10KB.
6927 */
6928 dmac_thr = pba - adapter->max_frame_size / 512;
6929 if (dmac_thr < pba - 10)
6930 dmac_thr = pba - 10;
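			/* Illustrative example (values assumed): pba = 34 and
			 * max_frame_size = 1522 give dmac_thr = 34 -
			 * 1522 / 512 = 32, above the pba - 10 = 24 floor,
			 * so it is kept as is.
			 */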
6931 reg = rd32(E1000_DMACR);
6932 reg &= ~E1000_DMACR_DMACTHR_MASK;
6933 reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
6934 & E1000_DMACR_DMACTHR_MASK);
6935
6936			/* transition to L0s or L1 if available */
6937 reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
6938
6939			/* watchdog timer = +-1000 usec in 32 usec intervals */
6940 reg |= (1000 >> 5);
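			/* 1000 >> 5 = 31, i.e. roughly 1000 usec expressed in
			 * 32-usec units
			 */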
6941
6942 /* Disable BMC-to-OS Watchdog Enable */
6943 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
6944 wr32(E1000_DMACR, reg);
6945
6946 /*
6947 * no lower threshold to disable
6948			 * coalescing (smart FIFO) - UTRESH=0
6949 */
6950 wr32(E1000_DMCRTRH, 0);
6951
6952 reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
6953
6954 wr32(E1000_DMCTLX, reg);
6955
6956 /*
6957 * free space in tx packet buffer to wake from
6958			 * DMA coalescing
6959 */
6960 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
6961 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
6962
6963 /*
6964 * make low power state decision controlled
6965			 * by DMA coalescing
6966 */
6967 reg = rd32(E1000_PCIEMISC);
6968 reg &= ~E1000_PCIEMISC_LX_DECISION;
6969 wr32(E1000_PCIEMISC, reg);
6970 } /* endif adapter->dmac is not disabled */
6971 } else if (hw->mac.type == e1000_82580) {
6972 u32 reg = rd32(E1000_PCIEMISC);
6973 wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
6974 wr32(E1000_DMACR, 0);
6975 }
6976}
6977
9d5c8243 6978/* igb_main.c */