/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/


/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"

const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";

static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};

/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	 board_X540_vf},

	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);

static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}

/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
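		/*
		 * Each VTIVAR register carries the entries for two queues,
		 * 8 bits per cause: the even queue's Rx entry sits in bits
		 * 7:0 and its Tx entry in 15:8, the odd queue's in 23:16
		 * and 31:24.  The index math below selects one such entry.
		 */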
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}

static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(tx_ring->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(tx_ring->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
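/* the "+ 4" gives DESC_NEEDED conservative headroom beyond the page
 * fragments -- roughly the linear data plus a context descriptor */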

static void ixgbevf_tx_timeout(struct net_device *netdev);

/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		return true;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);

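	/*
	 * next_to_watch is the index of the descriptor that carries EOP
	 * for the packet starting at next_to_clean; once hardware sets
	 * its DD bit, every descriptor of that packet is done.
	 */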
	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(tx_ring,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

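	/* only wake the queue once there is room for at least two
	 * worst-case packets, to keep it from bouncing between the
	 * stopped and running states */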
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);
			++adapter->restart_queue;
		}
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	return count < tx_ring->count;
}

/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&q_vector->napi, skb);
}

/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @ring: structure containing ring specific data
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       struct ixgbevf_ring *ring,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}

/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to refill
 * @cleaned_count: number of buffers to allocate
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i = rx_ring->next_to_use;

	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
							rx_ring->rx_buf_len);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}
			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}

static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u32 qmask)
{
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}

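/**
 * ixgbevf_clean_rx_irq - Reclaim used descriptors and hand packets up
 * @q_vector: structure containing interrupt and ring information
 * @rx_ring: rx ring to clean
 * @budget: remaining NAPI budget for this pass, in packets
 **/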
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int budget)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		if (!budget)
			break;
		budget--;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		len = le16_to_cpu(rx_desc->wb.upper.length);
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			skb->next = next_buffer->skb;
			IXGBE_CB(skb->next)->prev = skb;
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* we should not be chaining buffers, if we did drop the skb */
		if (IXGBE_CB(skb)->prev) {
			do {
				struct sk_buff *this = skb;
				skb = IXGBE_CB(skb)->prev;
				dev_kfree_skb(this);
			} while (skb);
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, rx_ring->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	return !!budget;
}

/**
 * ixgbevf_poll - NAPI polling callback
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one ring associated with a
 * q_vector.
 **/
static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *ring;
	int per_ring_budget;
	bool clean_complete = true;

	ixgbevf_for_each_ring(ring, q_vector->tx)
		clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	if (q_vector->rx.count > 1)
		per_ring_budget = max(budget/q_vector->rx.count, 1);
	else
		per_ring_budget = budget;

	ixgbevf_for_each_ring(ring, q_vector->rx)
		clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
						       per_ring_budget);

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
	/* all work done, exit the polling mode */
	napi_complete(napi);
	if (adapter->rx_itr_setting & 1)
		ixgbevf_set_itr(q_vector);
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		ixgbevf_irq_enable_queues(adapter,
					  1 << q_vector->v_idx);

	return 0;
}

/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 */
static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	int v_idx = q_vector->v_idx;
	u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}

/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	int q_vectors, v_idx;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	adapter->eims_enable_mask = 0;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		struct ixgbevf_ring *ring;
		q_vector = adapter->q_vector[v_idx];

		ixgbevf_for_each_ring(ring, q_vector->rx)
			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);

		ixgbevf_for_each_ring(ring, q_vector->tx)
			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);

		if (q_vector->tx.ring && !q_vector->rx.ring) {
			/* tx only vector */
			if (adapter->tx_itr_setting == 1)
				q_vector->itr = IXGBE_10K_ITR;
			else
				q_vector->itr = adapter->tx_itr_setting;
		} else {
			/* rx or rx/tx vector */
			if (adapter->rx_itr_setting == 1)
				q_vector->itr = IXGBE_20K_ITR;
			else
				q_vector->itr = adapter->rx_itr_setting;
		}

		/* add q_vector eims value to global eims_enable_mask */
		adapter->eims_enable_mask |= 1 << v_idx;

		ixgbevf_write_eitr(q_vector);
	}

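	/* after the loop v_idx == q_vectors, so the last vector is left
	 * for the mailbox/link (other causes) interrupt */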
	ixgbevf_set_ivar(adapter, -1, 1, v_idx);
	/* setup eims_other and add value to global eims_enable_mask */
	adapter->eims_other = 1 << v_idx;
	adapter->eims_enable_mask |= adapter->eims_other;
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: structure containing interrupt and ring information
 * @ring_container: structure containing ring performance data
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
			       struct ixgbevf_ring_container *ring_container)
{
	int bytes = ring_container->total_bytes;
	int packets = ring_container->total_packets;
	u32 timepassed_us;
	u64 bytes_perint;
	u8 itr_setting = ring_container->itr;

	if (packets == 0)
		return;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = q_vector->itr >> 2;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > 10)
			itr_setting = low_latency;
		break;
	case low_latency:
		if (bytes_perint > 20)
			itr_setting = bulk_latency;
		else if (bytes_perint <= 10)
			itr_setting = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= 20)
			itr_setting = low_latency;
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itr_setting;
}

static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
	u32 new_itr = q_vector->itr;
	u8 current_itr;

	ixgbevf_update_itr(q_vector, &q_vector->tx);
	ixgbevf_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IXGBE_100K_ITR;
		break;
	case low_latency:
		new_itr = IXGBE_20K_ITR;
		break;
	case bulk_latency:
	default:
		new_itr = IXGBE_8K_ITR;
		break;
	}

	if (new_itr != q_vector->itr) {
		/* do an exponential smoothing */
		new_itr = (10 * new_itr * q_vector->itr) /
			  ((9 * new_itr) + q_vector->itr);

		/* save the algorithm value here */
		q_vector->itr = new_itr;

		ixgbevf_write_eitr(q_vector);
	}
}

static irqreturn_t ixgbevf_msix_other(int irq, void *data)
{
	struct ixgbevf_adapter *adapter = data;
	struct ixgbe_hw *hw = &adapter->hw;

	hw->mac.get_link_status = 1;

	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies);

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);

	return IRQ_HANDLED;
}


/**
 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->rx_ring[r_idx].next = q_vector->rx.ring;
	q_vector->rx.ring = &a->rx_ring[r_idx];
	q_vector->rx.count++;
}

static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	a->tx_ring[t_idx].next = q_vector->tx.ring;
	q_vector->tx.ring = &a->tx_ring[t_idx];
	q_vector->tx.count++;
}

/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}

/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	int vector, err;
	int ri = 0, ti = 0;

	for (vector = 0; vector < q_vectors; vector++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
		struct msix_entry *entry = &adapter->msix_entries[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "TxRx", ri++);
			ti++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "rx", ri++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", netdev->name, "tx", ti++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
				  q_vector->name, q_vector);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_other, 0, netdev->name, adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_other failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		free_irq(adapter->msix_entries[vector].vector,
			 adapter->q_vector[vector]);
	}
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}

static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;
		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
	}
}

/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}

static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;
	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, adapter);
	i--;

	for (; i >= 0; i--) {
		/* free only the irqs that were actually requested */
		if (!adapter->q_vector[i]->rx.ring &&
		    !adapter->q_vector[i]->tx.ring)
			continue;

		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}

/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

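	/* turn off auto-mask and auto-clear, then mask every cause via
	 * the interrupt mask clear register */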
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}

/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

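	/* arm auto-mask and auto-clear for all queue vectors before
	 * unmasking them; hardware then masks a vector when it fires,
	 * so the ISR only needs to schedule NAPI */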
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}

/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

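	/* DROP_EN lets hardware drop frames when this ring is out of
	 * descriptors rather than stalling receive; the buffer size
	 * written below is encoded in 1 KB units (BSIZEPKT field) */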
	srrctl = IXGBE_SRRCTL_DROP_EN;

	srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

	srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
		  IXGBE_SRRCTL_BSIZEPKT_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}

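/**
 * ixgbevf_set_rx_buffer_len - set rx buffer length for all rings
 * @adapter: board private structure
 *
 * Picks a receive buffer size to fit the current MTU and notifies the
 * PF of the largest frame the VF intends to receive.
 **/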
static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i;
	u16 rx_buf_len;

	/* notify the PF of our intent to use this size of frame */
	ixgbevf_rlpml_set_vf(hw, max_frame);

	/* PF will allow an extra 4 bytes past for vlan tagged frames */
	max_frame += VLAN_HLEN;

	/*
	 * Make best use of allocation by using all but 1K of a
	 * power of 2 allocation that will be used for skb->head.
	 */
	if ((hw->mac.type == ixgbe_mac_X540_vf) &&
	    (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
		rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	else if (max_frame <= IXGBEVF_RXBUFFER_3K)
		rx_buf_len = IXGBEVF_RXBUFFER_3K;
	else if (max_frame <= IXGBEVF_RXBUFFER_7K)
		rx_buf_len = IXGBEVF_RXBUFFER_7K;
	else if (max_frame <= IXGBEVF_RXBUFFER_15K)
		rx_buf_len = IXGBEVF_RXBUFFER_15K;
	else
		rx_buf_len = IXGBEVF_MAX_RXBUFFER;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;
}

/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j;
	u32 rdlen;

	/* PSRTYPE must be initialized in 82599 */
	IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);

	/* set_rx_buffer_len must be called before ring initialization */
	ixgbevf_set_rx_buffer_len(adapter);

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);

		ixgbevf_configure_srrctl(adapter, j);
	}
}

static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	if (!hw->mac.ops.set_vfta)
		return -EOPNOTSUPP;

	spin_lock(&adapter->mbx_lock);

	/* add VID to filter table */
	err = hw->mac.ops.set_vfta(hw, vid, 0, true);

	spin_unlock(&adapter->mbx_lock);

	/* translate error return types so error makes sense */
	if (err == IXGBE_ERR_MBX)
		return -EIO;

	if (err == IXGBE_ERR_INVALID_ARGUMENT)
		return -EACCES;

	set_bit(vid, adapter->active_vlans);

	return err;
}

static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err = -EOPNOTSUPP;

	spin_lock(&adapter->mbx_lock);

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		err = hw->mac.ops.set_vfta(hw, vid, 0, false);

	spin_unlock(&adapter->mbx_lock);

	clear_bit(vid, adapter->active_vlans);

	return err;
}

static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}

static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}

/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	spin_lock(&adapter->mbx_lock);

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);

	spin_unlock(&adapter->mbx_lock);
}

static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_enable(&q_vector->napi);
	}
}

static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		napi_disable(&q_vector->napi);
	}
}

static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring,
					 IXGBE_DESC_UNUSED(ring));
	}
}

#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

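	/* RXDCTL.ENABLE is latched by hardware; poll until it reads
	 * back as set before posting descriptors to the ring */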
	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}

static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}

static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}

static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int api[] = { ixgbe_mbox_api_10,
		      ixgbe_mbox_api_unknown };
	int err = 0, idx = 0;

	spin_lock(&adapter->mbx_lock);

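	/* walk the list from most- to least-preferred mailbox API
	 * version until the PF acks one; ixgbe_mbox_api_unknown
	 * terminates the list */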
	while (api[idx] != ixgbe_mbox_api_unknown) {
		err = ixgbevf_negotiate_api_version(hw, api[idx]);
		if (!err)
			break;
		idx++;
	}

	spin_unlock(&adapter->mbx_lock);
}

static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
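		/* WTHRESH occupies TXDCTL bits 22:16, hence the shift */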
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	spin_lock(&adapter->mbx_lock);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	spin_unlock(&adapter->mbx_lock);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	hw->mac.get_link_status = 1;
	mod_timer(&adapter->watchdog_timer, jiffies);
}

void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_negotiate_api(adapter);

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter);
}
1431
1432/**
1433 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1434 * @adapter: board private structure
1435 * @rx_ring: ring to free buffers from
1436 **/
1437static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1438 struct ixgbevf_ring *rx_ring)
1439{
1440 struct pci_dev *pdev = adapter->pdev;
1441 unsigned long size;
1442 unsigned int i;
1443
c0456c23
GR
1444 if (!rx_ring->rx_buffer_info)
1445 return;
92915f71 1446
c0456c23 1447 /* Free all the Rx ring sk_buffs */
92915f71
GR
1448 for (i = 0; i < rx_ring->count; i++) {
1449 struct ixgbevf_rx_buffer *rx_buffer_info;
1450
1451 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1452 if (rx_buffer_info->dma) {
2a1f8794 1453 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
92915f71 1454 rx_ring->rx_buf_len,
2a1f8794 1455 DMA_FROM_DEVICE);
92915f71
GR
1456 rx_buffer_info->dma = 0;
1457 }
1458 if (rx_buffer_info->skb) {
1459 struct sk_buff *skb = rx_buffer_info->skb;
1460 rx_buffer_info->skb = NULL;
1461 do {
1462 struct sk_buff *this = skb;
5c60f81a 1463 skb = IXGBE_CB(skb)->prev;
92915f71
GR
1464 dev_kfree_skb(this);
1465 } while (skb);
1466 }
92915f71
GR
1467 }
1468
1469 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1470 memset(rx_ring->rx_buffer_info, 0, size);
1471
1472 /* Zero out the descriptor ring */
1473 memset(rx_ring->desc, 0, rx_ring->size);
1474
1475 rx_ring->next_to_clean = 0;
1476 rx_ring->next_to_use = 0;
1477
1478 if (rx_ring->head)
1479 writel(0, adapter->hw.hw_addr + rx_ring->head);
1480 if (rx_ring->tail)
1481 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1482}
1483
1484/**
1485 * ixgbevf_clean_tx_ring - Free Tx Buffers
1486 * @adapter: board private structure
1487 * @tx_ring: ring to be cleaned
1488 **/
1489static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1490 struct ixgbevf_ring *tx_ring)
1491{
1492 struct ixgbevf_tx_buffer *tx_buffer_info;
1493 unsigned long size;
1494 unsigned int i;
1495
c0456c23
GR
1496 if (!tx_ring->tx_buffer_info)
1497 return;
1498
92915f71
GR
1499 /* Free all the Tx ring sk_buffs */
1500
1501 for (i = 0; i < tx_ring->count; i++) {
1502 tx_buffer_info = &tx_ring->tx_buffer_info[i];
70a10e25 1503 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
92915f71
GR
1504 }
1505
1506 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1507 memset(tx_ring->tx_buffer_info, 0, size);
1508
1509 memset(tx_ring->desc, 0, tx_ring->size);
1510
1511 tx_ring->next_to_use = 0;
1512 tx_ring->next_to_clean = 0;
1513
1514 if (tx_ring->head)
1515 writel(0, adapter->hw.hw_addr + tx_ring->head);
1516 if (tx_ring->tail)
1517 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1518}
1519
1520/**
1521 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1522 * @adapter: board private structure
1523 **/
1524static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1525{
1526 int i;
1527
1528 for (i = 0; i < adapter->num_rx_queues; i++)
1529 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1530}
1531
1532/**
1533 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1534 * @adapter: board private structure
1535 **/
1536static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1537{
1538 int i;
1539
1540 for (i = 0; i < adapter->num_tx_queues; i++)
1541 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1542}
1543
1544void ixgbevf_down(struct ixgbevf_adapter *adapter)
1545{
1546 struct net_device *netdev = adapter->netdev;
1547 struct ixgbe_hw *hw = &adapter->hw;
1548 u32 txdctl;
1549 int i, j;
1550
1551 /* signal that we are down to the interrupt handler */
1552 set_bit(__IXGBEVF_DOWN, &adapter->state);
1553 /* disable receives */
1554
1555 netif_tx_disable(netdev);
1556
1557 msleep(10);
1558
1559 netif_tx_stop_all_queues(netdev);
1560
1561 ixgbevf_irq_disable(adapter);
1562
1563 ixgbevf_napi_disable_all(adapter);
1564
1565 del_timer_sync(&adapter->watchdog_timer);
1566 /* can't call flush scheduled work here because it can deadlock
1567 * if linkwatch_event tries to acquire the rtnl_lock which we are
1568 * holding */
1569 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1570 msleep(1);
1571
1572 /* disable transmits in the hardware now that interrupts are off */
1573 for (i = 0; i < adapter->num_tx_queues; i++) {
1574 j = adapter->tx_ring[i].reg_idx;
1575 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1576 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1577 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1578 }
1579
1580 netif_carrier_off(netdev);
1581
1582 if (!pci_channel_offline(adapter->pdev))
1583 ixgbevf_reset(adapter);
1584
1585 ixgbevf_clean_all_tx_rings(adapter);
1586 ixgbevf_clean_all_rx_rings(adapter);
1587}
1588
1589void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1590{
1591 WARN_ON(in_interrupt());
c0456c23 1592
92915f71
GR
1593 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1594 msleep(1);
1595
c0456c23
GR
1596 /*
1597 * Check if PF is up before re-init. If not then skip until
1598 * later when the PF is up and ready to service requests from
1599 * the VF via mailbox. If the VF is up and running then the
1600 * watchdog task will continue to schedule reset tasks until
1601 * the PF is up and running.
1602 */
4b2cd27f
AD
1603 ixgbevf_down(adapter);
1604 ixgbevf_up(adapter);
92915f71
GR
1605
1606 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1607}
1608
1609void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1610{
1611 struct ixgbe_hw *hw = &adapter->hw;
1612 struct net_device *netdev = adapter->netdev;
1613
1c55ed76
AD
1614 spin_lock(&adapter->mbx_lock);
1615
92915f71
GR
1616 if (hw->mac.ops.reset_hw(hw))
1617 hw_dbg(hw, "PF still resetting\n");
1618 else
1619 hw->mac.ops.init_hw(hw);
1620
1c55ed76
AD
1621 spin_unlock(&adapter->mbx_lock);
1622
92915f71
GR
1623 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1624 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1625 netdev->addr_len);
1626 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1627 netdev->addr_len);
1628 }
1629}
1630
1631static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1632 int vectors)
1633{
1634 int err, vector_threshold;
1635
fa71ae27
AD
1636 /* We'll want at least 2 (vector_threshold):
1637 * 1) TxQ[0] + RxQ[0] handler
1638 * 2) Other (Link Status Change, etc.)
92915f71
GR
1639 */
1640 vector_threshold = MIN_MSIX_COUNT;
1641
1642 /* The more we get, the more we will assign to Tx/Rx Cleanup
1643 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1644 * Right now, we simply care about how many we'll get; we'll
1645 * set them up later while requesting irq's.
1646 */
1647 while (vectors >= vector_threshold) {
1648 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1649 vectors);
1650 if (!err) /* Success in acquiring all requested vectors. */
1651 break;
1652 else if (err < 0)
1653 vectors = 0; /* Nasty failure, quit now */
1654 else /* err == number of vectors we should try again with */
1655 vectors = err;
1656 }
1657
1658 if (vectors < vector_threshold) {
1659 /* Can't allocate enough MSI-X interrupts? Oh well.
1660 * This just means we'll go with either a single MSI
1661 * vector or fall back to legacy interrupts.
1662 */
1663 hw_dbg(&adapter->hw,
1664 "Unable to allocate MSI-X interrupts\n");
1665 kfree(adapter->msix_entries);
1666 adapter->msix_entries = NULL;
1667 } else {
1668 /*
1669 * Adjust for only the vectors we'll use, which is minimum
1670 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1671 * vectors we were allocated.
1672 */
1673 adapter->num_msix_vectors = vectors;
1674 }
1675}
1676
49ce9c2c
BH
1677/**
1678 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
92915f71
GR
1679 * @adapter: board private structure to initialize
1680 *
1681 * This is the top level queue allocation routine. The order here is very
1682 * important, starting with the "most" number of features turned on at once,
1683 * and ending with the smallest set of features. This way large combinations
1684 * can be allocated if they're turned on, and smaller combinations are the
1685 * fallthrough conditions.
1686 *
1687 **/
1688static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1689{
1690 /* Start with base case */
1691 adapter->num_rx_queues = 1;
1692 adapter->num_tx_queues = 1;
92915f71
GR
1693}
1694
1695/**
1696 * ixgbevf_alloc_queues - Allocate memory for all rings
1697 * @adapter: board private structure to initialize
1698 *
1699 * We allocate one ring per queue at run-time since we don't know the
1700 * number of queues at compile-time. The polling_netdev array is
1701 * intended for Multiqueue, but should work fine with a single queue.
1702 **/
1703static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1704{
1705 int i;
1706
1707 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1708 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1709 if (!adapter->tx_ring)
1710 goto err_tx_ring_allocation;
1711
1712 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1713 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1714 if (!adapter->rx_ring)
1715 goto err_rx_ring_allocation;
1716
1717 for (i = 0; i < adapter->num_tx_queues; i++) {
1718 adapter->tx_ring[i].count = adapter->tx_ring_count;
1719 adapter->tx_ring[i].queue_index = i;
1720 adapter->tx_ring[i].reg_idx = i;
1721 adapter->tx_ring[i].dev = &adapter->pdev->dev;
1722 adapter->tx_ring[i].netdev = adapter->netdev;
1723 }
1724
1725 for (i = 0; i < adapter->num_rx_queues; i++) {
1726 adapter->rx_ring[i].count = adapter->rx_ring_count;
1727 adapter->rx_ring[i].queue_index = i;
1728 adapter->rx_ring[i].reg_idx = i;
1729 adapter->rx_ring[i].dev = &adapter->pdev->dev;
1730 adapter->rx_ring[i].netdev = adapter->netdev;
1731 }
1732
1733 return 0;
1734
1735err_rx_ring_allocation:
1736 kfree(adapter->tx_ring);
1737err_tx_ring_allocation:
1738 return -ENOMEM;
1739}
1740
1741/**
1742 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1743 * @adapter: board private structure to initialize
1744 *
1745 * Attempt to configure the interrupts using the best available
1746 * capabilities of the hardware and the kernel.
1747 **/
1748static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1749{
1750 struct net_device *netdev = adapter->netdev;
1751 int err = 0;
1752 int vector, v_budget;
1753
1754 /*
1755 * It's easy to be greedy for MSI-X vectors, but it really
1756 * doesn't do us much good if we have a lot more vectors
1757 * than CPUs. So let's be conservative and only ask for
1758 * (roughly) the same number of vectors as there are CPUs.
1759 * The default is to use pairs of vectors.
1760 */
1761 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1762 v_budget = min_t(int, v_budget, num_online_cpus());
1763 v_budget += NON_Q_VECTORS;
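/* Illustration, assuming the single-queue default above: one Tx and one
 * Rx queue on a four-CPU system gives v_budget = max(1, 1) = 1, capped
 * at 4, plus NON_Q_VECTORS for the mailbox/other interrupt.
 */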
1764
1765 /* A failure in MSI-X entry allocation isn't fatal, but it does
1766 * mean we disable MSI-X capabilities of the adapter. */
1767 adapter->msix_entries = kcalloc(v_budget,
1768 sizeof(struct msix_entry), GFP_KERNEL);
1769 if (!adapter->msix_entries) {
1770 err = -ENOMEM;
1771 goto out;
1772 }
1773
1774 for (vector = 0; vector < v_budget; vector++)
1775 adapter->msix_entries[vector].entry = vector;
1776
1777 ixgbevf_acquire_msix_vectors(adapter, v_budget);
1778
1779 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
1780 if (err)
1781 goto out;
1782
1783 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
1784
1785out:
1786 return err;
1787}
1788
1789/**
1790 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1791 * @adapter: board private structure to initialize
1792 *
1793 * We allocate one q_vector per queue interrupt. If allocation fails we
1794 * return -ENOMEM.
1795 **/
1796static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1797{
1798 int q_idx, num_q_vectors;
1799 struct ixgbevf_q_vector *q_vector;
1800
1801 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1802
1803 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1804 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1805 if (!q_vector)
1806 goto err_out;
1807 q_vector->adapter = adapter;
1808 q_vector->v_idx = q_idx;
1809 netif_napi_add(adapter->netdev, &q_vector->napi,
1810 ixgbevf_poll, 64);
1811 adapter->q_vector[q_idx] = q_vector;
1812 }
1813
1814 return 0;
1815
1816err_out:
1817 while (q_idx) {
1818 q_idx--;
1819 q_vector = adapter->q_vector[q_idx];
1820 netif_napi_del(&q_vector->napi);
1821 kfree(q_vector);
1822 adapter->q_vector[q_idx] = NULL;
1823 }
1824 return -ENOMEM;
1825}
1826
1827/**
1828 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1829 * @adapter: board private structure to initialize
1830 *
1831 * This function frees the memory allocated to the q_vectors. In addition if
1832 * NAPI is enabled it will delete any references to the NAPI struct prior
1833 * to freeing the q_vector.
1834 **/
1835static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1836{
1837 int q_idx, num_q_vectors;
1838 int napi_vectors;
1839
1840 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1841 napi_vectors = adapter->num_rx_queues;
1842
1843 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1844 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1845
1846 adapter->q_vector[q_idx] = NULL;
1847 if (q_idx < napi_vectors)
1848 netif_napi_del(&q_vector->napi);
1849 kfree(q_vector);
1850 }
1851}
1852
1853/**
1854 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1855 * @adapter: board private structure
1856 *
1857 **/
1858static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1859{
1860 pci_disable_msix(adapter->pdev);
1861 kfree(adapter->msix_entries);
1862 adapter->msix_entries = NULL;
1863}
1864
1865/**
1866 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1867 * @adapter: board private structure to initialize
1868 *
1869 **/
1870static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1871{
1872 int err;
1873
1874 /* Number of supported queues */
1875 ixgbevf_set_num_queues(adapter);
1876
1877 err = ixgbevf_set_interrupt_capability(adapter);
1878 if (err) {
1879 hw_dbg(&adapter->hw,
1880 "Unable to setup interrupt capabilities\n");
1881 goto err_set_interrupt;
1882 }
1883
1884 err = ixgbevf_alloc_q_vectors(adapter);
1885 if (err) {
1886 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1887 "vectors\n");
1888 goto err_alloc_q_vectors;
1889 }
1890
1891 err = ixgbevf_alloc_queues(adapter);
1892 if (err) {
1893 pr_err("Unable to allocate memory for queues\n");
1894 goto err_alloc_queues;
1895 }
1896
1897 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1898 "Tx Queue count = %u\n",
1899 (adapter->num_rx_queues > 1) ? "Enabled" :
1900 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
1901
1902 set_bit(__IXGBEVF_DOWN, &adapter->state);
1903
1904 return 0;
1905err_alloc_queues:
1906 ixgbevf_free_q_vectors(adapter);
1907err_alloc_q_vectors:
1908 ixgbevf_reset_interrupt_capability(adapter);
1909err_set_interrupt:
1910 return err;
1911}
1912
1913/**
1914 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
1915 * @adapter: board private structure to clear interrupt scheme on
1916 *
1917 * We go through and clear interrupt specific resources and reset the structure
1918 * to pre-load conditions
1919 **/
1920static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
1921{
1922 adapter->num_tx_queues = 0;
1923 adapter->num_rx_queues = 0;
1924
1925 ixgbevf_free_q_vectors(adapter);
1926 ixgbevf_reset_interrupt_capability(adapter);
1927}
1928
1929/**
1930 * ixgbevf_sw_init - Initialize general software structures
1931 * (struct ixgbevf_adapter)
1932 * @adapter: board private structure to initialize
1933 *
1934 * ixgbevf_sw_init initializes the Adapter private data structure.
1935 * Fields are initialized based on PCI device information and
1936 * OS network device settings (MTU size).
1937 **/
1938static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1939{
1940 struct ixgbe_hw *hw = &adapter->hw;
1941 struct pci_dev *pdev = adapter->pdev;
1942 int err;
1943
1944 /* PCI config space info */
1945
1946 hw->vendor_id = pdev->vendor;
1947 hw->device_id = pdev->device;
1948 hw->revision_id = pdev->revision;
1949 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1950 hw->subsystem_device_id = pdev->subsystem_device;
1951
1952 hw->mbx.ops.init_params(hw);
1953 hw->mac.max_tx_queues = MAX_TX_QUEUES;
1954 hw->mac.max_rx_queues = MAX_RX_QUEUES;
1955 err = hw->mac.ops.reset_hw(hw);
1956 if (err) {
1957 dev_info(&pdev->dev,
1958 "PF still in reset state, assigning new address\n");
1959 eth_hw_addr_random(adapter->netdev);
1960 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
1961 adapter->netdev->addr_len);
1962 } else {
1963 err = hw->mac.ops.init_hw(hw);
1964 if (err) {
1965 pr_err("init_shared_code failed: %d\n", err);
1966 goto out;
1967 }
1968 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
1969 adapter->netdev->addr_len);
1970 }
1971
1972 /* lock to protect mailbox accesses */
1973 spin_lock_init(&adapter->mbx_lock);
1974
1975 /* Enable dynamic interrupt throttling rates */
1976 adapter->rx_itr_setting = 1;
1977 adapter->tx_itr_setting = 1;
1978
1979 /* set default ring sizes */
1980 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
1981 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
1982
1983 set_bit(__IXGBEVF_DOWN, &adapter->state);
1984 return 0;
1985
1986out:
1987 return err;
1988}
1989
1990#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
1991 { \
1992 u32 current_counter = IXGBE_READ_REG(hw, reg); \
1993 if (current_counter < last_counter) \
1994 counter += 0x100000000LL; \
1995 last_counter = current_counter; \
1996 counter &= 0xFFFFFFFF00000000LL; \
1997 counter |= current_counter; \
1998 }
1999
2000#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2001 { \
2002 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2003 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2004 u64 current_counter = (current_counter_msb << 32) | \
2005 current_counter_lsb; \
2006 if (current_counter < last_counter) \
2007 counter += 0x1000000000LL; \
2008 last_counter = current_counter; \
2009 counter &= 0xFFFFFFF000000000LL; \
2010 counter |= current_counter; \
2011 }
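/* Worked example for the 36-bit case (values illustrative): if
 * last_counter was 0xFFFFFFFF0 and the registers now read 0x00000000A,
 * the raw 36-bit value has wrapped, so 0x1000000000 is added to the
 * running 64-bit counter before its low 36 bits are replaced with the
 * new reading.
 */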
2012/**
2013 * ixgbevf_update_stats - Update the board statistics counters.
2014 * @adapter: board private structure
2015 **/
2016void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2017{
2018 struct ixgbe_hw *hw = &adapter->hw;
2019
2020 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2021 adapter->stats.vfgprc);
2022 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2023 adapter->stats.vfgptc);
2024 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2025 adapter->stats.last_vfgorc,
2026 adapter->stats.vfgorc);
2027 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2028 adapter->stats.last_vfgotc,
2029 adapter->stats.vfgotc);
2030 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2031 adapter->stats.vfmprc);
2032}
2033
2034/**
2035 * ixgbevf_watchdog - Timer Call-back
2036 * @data: pointer to adapter cast into an unsigned long
2037 **/
2038static void ixgbevf_watchdog(unsigned long data)
2039{
2040 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2041 struct ixgbe_hw *hw = &adapter->hw;
2042 u32 eics = 0;
2043 int i;
2044
2045 /*
2046 * Do the watchdog outside of interrupt context due to the lovely
2047 * delays that some of the newer hardware requires
2048 */
2049
2050 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2051 goto watchdog_short_circuit;
2052
2053 /* get one bit for every active tx/rx interrupt vector */
2054 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2055 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2056 if (qv->rx.ring || qv->tx.ring)
2057 eics |= 1 << i;
2058 }
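/* Note: setting a vector's bit in VTEICS fires a software-triggered
 * interrupt on that vector, so every ring that is actually in use gets
 * serviced even when the hardware has gone quiet.
 */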
2059
2060 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
2061
2062watchdog_short_circuit:
2063 schedule_work(&adapter->watchdog_task);
2064}
2065
2066/**
2067 * ixgbevf_tx_timeout - Respond to a Tx Hang
2068 * @netdev: network interface device structure
2069 **/
2070static void ixgbevf_tx_timeout(struct net_device *netdev)
2071{
2072 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2073
2074 /* Do the reset outside of interrupt context */
2075 schedule_work(&adapter->reset_task);
2076}
2077
2078static void ixgbevf_reset_task(struct work_struct *work)
2079{
2080 struct ixgbevf_adapter *adapter;
2081 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2082
2083 /* If we're already down or resetting, just bail */
2084 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2085 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2086 return;
2087
2088 adapter->tx_timeout_count++;
2089
2090 ixgbevf_reinit_locked(adapter);
2091}
2092
2093/**
2094 * ixgbevf_watchdog_task - worker thread to bring link up
2095 * @work: pointer to work_struct containing our data
2096 **/
2097static void ixgbevf_watchdog_task(struct work_struct *work)
2098{
2099 struct ixgbevf_adapter *adapter = container_of(work,
2100 struct ixgbevf_adapter,
2101 watchdog_task);
2102 struct net_device *netdev = adapter->netdev;
2103 struct ixgbe_hw *hw = &adapter->hw;
2104 u32 link_speed = adapter->link_speed;
2105 bool link_up = adapter->link_up;
2106
2107 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2108
2109 /*
2110 * Always check the link on the watchdog because we have
2111 * no LSC interrupt
2112 */
2113 if (hw->mac.ops.check_link) {
2114 s32 need_reset;
2115
2116 spin_lock(&adapter->mbx_lock);
2117
2118 need_reset = hw->mac.ops.check_link(hw, &link_speed,
2119 &link_up, false);
2120
2121 spin_unlock(&adapter->mbx_lock);
2122
2123 if (need_reset) {
2124 adapter->link_up = link_up;
2125 adapter->link_speed = link_speed;
2126 netif_carrier_off(netdev);
2127 netif_tx_stop_all_queues(netdev);
2128 schedule_work(&adapter->reset_task);
2129 goto pf_has_reset;
2130 }
2131 } else {
2132 /* always assume link is up, if there is no
2133 * check_link function */
2134 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2135 link_up = true;
2136 }
2137 adapter->link_up = link_up;
2138 adapter->link_speed = link_speed;
2139
2140 if (link_up) {
2141 if (!netif_carrier_ok(netdev)) {
2142 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2143 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2144 10 : 1);
2145 netif_carrier_on(netdev);
2146 netif_tx_wake_all_queues(netdev);
2147 }
2148 } else {
2149 adapter->link_up = false;
2150 adapter->link_speed = 0;
2151 if (netif_carrier_ok(netdev)) {
2152 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2153 netif_carrier_off(netdev);
2154 netif_tx_stop_all_queues(netdev);
2155 }
2156 }
2157
2158 ixgbevf_update_stats(adapter);
2159
2160 pf_has_reset:
2161 /* Reset the timer */
2162 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2163 mod_timer(&adapter->watchdog_timer,
2164 round_jiffies(jiffies + (2 * HZ)));
2165
2166 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2167}
2168
2169/**
2170 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2171 * @adapter: board private structure
2172 * @tx_ring: Tx descriptor ring for a specific queue
2173 *
2174 * Free all transmit software resources
2175 **/
2176void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2177 struct ixgbevf_ring *tx_ring)
2178{
2179 struct pci_dev *pdev = adapter->pdev;
2180
2181 ixgbevf_clean_tx_ring(adapter, tx_ring);
2182
2183 vfree(tx_ring->tx_buffer_info);
2184 tx_ring->tx_buffer_info = NULL;
2185
2186 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2187 tx_ring->dma);
2188
2189 tx_ring->desc = NULL;
2190}
2191
2192/**
2193 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2194 * @adapter: board private structure
2195 *
2196 * Free all transmit software resources
2197 **/
2198static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2199{
2200 int i;
2201
2202 for (i = 0; i < adapter->num_tx_queues; i++)
2203 if (adapter->tx_ring[i].desc)
2204 ixgbevf_free_tx_resources(adapter,
2205 &adapter->tx_ring[i]);
2206
2207}
2208
2209/**
2210 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2211 * @adapter: board private structure
2212 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2213 *
2214 * Return 0 on success, negative on failure
2215 **/
2216int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2217 struct ixgbevf_ring *tx_ring)
2218{
2219 struct pci_dev *pdev = adapter->pdev;
2220 int size;
2221
2222 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2223 tx_ring->tx_buffer_info = vzalloc(size);
2224 if (!tx_ring->tx_buffer_info)
2225 goto err;
2226
2227 /* round up to nearest 4K */
2228 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2229 tx_ring->size = ALIGN(tx_ring->size, 4096);
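/* For example, assuming the default of 1024 descriptors at 16 bytes per
 * union ixgbe_adv_tx_desc, this is 16 KB, already a multiple of 4 KB.
 */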
2230
2231 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2232 &tx_ring->dma, GFP_KERNEL);
2233 if (!tx_ring->desc)
2234 goto err;
2235
2236 tx_ring->next_to_use = 0;
2237 tx_ring->next_to_clean = 0;
2238 return 0;
2239
2240err:
2241 vfree(tx_ring->tx_buffer_info);
2242 tx_ring->tx_buffer_info = NULL;
2243 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2244 "descriptor ring\n");
2245 return -ENOMEM;
2246}
2247
2248/**
2249 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2250 * @adapter: board private structure
2251 *
2252 * If this function returns with an error, then it's possible one or
2253 * more of the rings is populated (while the rest are not). It is the
2254 * caller's duty to clean those orphaned rings.
2255 *
2256 * Return 0 on success, negative on failure
2257 **/
2258static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2259{
2260 int i, err = 0;
2261
2262 for (i = 0; i < adapter->num_tx_queues; i++) {
2263 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2264 if (!err)
2265 continue;
2266 hw_dbg(&adapter->hw,
2267 "Allocation for Tx Queue %u failed\n", i);
2268 break;
2269 }
2270
2271 return err;
2272}
2273
2274/**
2275 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2276 * @adapter: board private structure
2277 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2278 *
2279 * Returns 0 on success, negative on failure
2280 **/
2281int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2282 struct ixgbevf_ring *rx_ring)
2283{
2284 struct pci_dev *pdev = adapter->pdev;
2285 int size;
2286
2287 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2288 rx_ring->rx_buffer_info = vzalloc(size);
2289 if (!rx_ring->rx_buffer_info)
2290 goto alloc_failed;
2291
2292 /* Round up to nearest 4K */
2293 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2294 rx_ring->size = ALIGN(rx_ring->size, 4096);
2295
2296 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2297 &rx_ring->dma, GFP_KERNEL);
2298
2299 if (!rx_ring->desc) {
2300 hw_dbg(&adapter->hw,
2301 "Unable to allocate memory for "
2302 "the receive descriptor ring\n");
2303 vfree(rx_ring->rx_buffer_info);
2304 rx_ring->rx_buffer_info = NULL;
2305 goto alloc_failed;
2306 }
2307
2308 rx_ring->next_to_clean = 0;
2309 rx_ring->next_to_use = 0;
2310
2311 return 0;
2312alloc_failed:
2313 return -ENOMEM;
2314}
2315
2316/**
2317 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2318 * @adapter: board private structure
2319 *
2320 * If this function returns with an error, then it's possible one or
2321 * more of the rings is populated (while the rest are not). It is the
2322 * caller's duty to clean those orphaned rings.
2323 *
2324 * Return 0 on success, negative on failure
2325 **/
2326static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2327{
2328 int i, err = 0;
2329
2330 for (i = 0; i < adapter->num_rx_queues; i++) {
2331 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2332 if (!err)
2333 continue;
2334 hw_dbg(&adapter->hw,
2335 "Allocation for Rx Queue %u failed\n", i);
2336 break;
2337 }
2338 return err;
2339}
2340
2341/**
2342 * ixgbevf_free_rx_resources - Free Rx Resources
2343 * @adapter: board private structure
2344 * @rx_ring: ring to clean the resources from
2345 *
2346 * Free all receive software resources
2347 **/
2348void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2349 struct ixgbevf_ring *rx_ring)
2350{
2351 struct pci_dev *pdev = adapter->pdev;
2352
2353 ixgbevf_clean_rx_ring(adapter, rx_ring);
2354
2355 vfree(rx_ring->rx_buffer_info);
2356 rx_ring->rx_buffer_info = NULL;
2357
2358 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2359 rx_ring->dma);
2360
2361 rx_ring->desc = NULL;
2362}
2363
2364/**
2365 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2366 * @adapter: board private structure
2367 *
2368 * Free all receive software resources
2369 **/
2370static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2371{
2372 int i;
2373
2374 for (i = 0; i < adapter->num_rx_queues; i++)
2375 if (adapter->rx_ring[i].desc)
2376 ixgbevf_free_rx_resources(adapter,
2377 &adapter->rx_ring[i]);
2378}
2379
2380/**
2381 * ixgbevf_open - Called when a network interface is made active
2382 * @netdev: network interface device structure
2383 *
2384 * Returns 0 on success, negative value on failure
2385 *
2386 * The open entry point is called when a network interface is made
2387 * active by the system (IFF_UP). At this point all resources needed
2388 * for transmit and receive operations are allocated, the interrupt
2389 * handler is registered with the OS, the watchdog timer is started,
2390 * and the stack is notified that the interface is ready.
2391 **/
2392static int ixgbevf_open(struct net_device *netdev)
2393{
2394 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2395 struct ixgbe_hw *hw = &adapter->hw;
2396 int err;
2397
2398 /* disallow open during test */
2399 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2400 return -EBUSY;
2401
2402 if (hw->adapter_stopped) {
2403 ixgbevf_reset(adapter);
2404 /* if adapter is still stopped then PF isn't up and
2405 * the vf can't start. */
2406 if (hw->adapter_stopped) {
2407 err = IXGBE_ERR_MBX;
2408 pr_err("Unable to start - perhaps the PF Driver isn't "
2409 "up yet\n");
2410 goto err_setup_reset;
2411 }
2412 }
2413
2414 ixgbevf_negotiate_api(adapter);
2415
2416 /* allocate transmit descriptors */
2417 err = ixgbevf_setup_all_tx_resources(adapter);
2418 if (err)
2419 goto err_setup_tx;
2420
2421 /* allocate receive descriptors */
2422 err = ixgbevf_setup_all_rx_resources(adapter);
2423 if (err)
2424 goto err_setup_rx;
2425
2426 ixgbevf_configure(adapter);
2427
2428 /*
2429 * Map the Tx/Rx rings to the vectors we were allotted.
2430 * if request_irq will be called in this function map_rings
2431 * must be called *before* up_complete
2432 */
2433 ixgbevf_map_rings_to_vectors(adapter);
2434
2435 ixgbevf_up_complete(adapter);
2436
2437 /* clear any pending interrupts, may auto mask */
2438 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2439 err = ixgbevf_request_irq(adapter);
2440 if (err)
2441 goto err_req_irq;
2442
2443 ixgbevf_irq_enable(adapter);
2444
2445 return 0;
2446
2447err_req_irq:
2448 ixgbevf_down(adapter);
2449 ixgbevf_free_irq(adapter);
2450err_setup_rx:
2451 ixgbevf_free_all_rx_resources(adapter);
2452err_setup_tx:
2453 ixgbevf_free_all_tx_resources(adapter);
2454 ixgbevf_reset(adapter);
2455
2456err_setup_reset:
2457
2458 return err;
2459}
2460
2461/**
2462 * ixgbevf_close - Disables a network interface
2463 * @netdev: network interface device structure
2464 *
2465 * Returns 0, this is not allowed to fail
2466 *
2467 * The close entry point is called when an interface is de-activated
2468 * by the OS. The hardware is still under the drivers control, but
2469 * needs to be disabled. A global MAC reset is issued to stop the
2470 * hardware, and all transmit and receive resources are freed.
2471 **/
2472static int ixgbevf_close(struct net_device *netdev)
2473{
2474 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2475
2476 ixgbevf_down(adapter);
2477 ixgbevf_free_irq(adapter);
2478
2479 ixgbevf_free_all_tx_resources(adapter);
2480 ixgbevf_free_all_rx_resources(adapter);
2481
2482 return 0;
2483}
2484
2485static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
2486 u32 vlan_macip_lens, u32 type_tucmd,
2487 u32 mss_l4len_idx)
2488{
2489 struct ixgbe_adv_tx_context_desc *context_desc;
2490 u16 i = tx_ring->next_to_use;
2491
2492 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
2493
2494 i++;
2495 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2496
2497 /* set bits to identify this as an advanced context descriptor */
2498 type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2499
2500 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2501 context_desc->seqnum_seed = 0;
2502 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
2503 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2504}
2505
2506static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2507 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2508{
2509 u32 vlan_macip_lens, type_tucmd;
2510 u32 mss_l4len_idx, l4len;
2511
2512 if (!skb_is_gso(skb))
2513 return 0;
2514
2515 if (skb_header_cloned(skb)) {
2516 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2517 if (err)
2518 return err;
2519 }
2520
2521 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2522 type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
2523
2524 if (skb->protocol == htons(ETH_P_IP)) {
2525 struct iphdr *iph = ip_hdr(skb);
2526 iph->tot_len = 0;
2527 iph->check = 0;
2528 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2529 iph->daddr, 0,
2530 IPPROTO_TCP,
2531 0);
2532 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2533 } else if (skb_is_gso_v6(skb)) {
2534 ipv6_hdr(skb)->payload_len = 0;
2535 tcp_hdr(skb)->check =
2536 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2537 &ipv6_hdr(skb)->daddr,
2538 0, IPPROTO_TCP, 0);
2539 }
2540
2541 /* compute header lengths */
2542 l4len = tcp_hdrlen(skb);
2543 /* the full header length is the network headers plus the TCP header */
2544 *hdr_len = skb_transport_offset(skb) + l4len;
2545
2546 /* mss_l4len_id: use 1 as index for TSO */
2547 mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
2548 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
2549 mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
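/* Worked example: with l4len = 20, an MSS of 1448 and context index 1,
 * and assuming the usual shift values of 8, 16 and 4 for the
 * IXGBE_ADVTXD_{L4LEN,MSS,IDX}_SHIFT constants, mss_l4len_idx ends up
 * as (1448 << 16) | (20 << 8) | (1 << 4) = 0x05a81410.
 */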
2550
2551 /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
2552 vlan_macip_lens = skb_network_header_len(skb);
2553 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2554 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2555
2556 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2557 type_tucmd, mss_l4len_idx);
2558
2559 return 1;
2560}
2561
70a10e25 2562static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
92915f71
GR
2563 struct sk_buff *skb, u32 tx_flags)
2564{
2565
2566
2567
2568 u32 vlan_macip_lens = 0;
2569 u32 mss_l4len_idx = 0;
2570 u32 type_tucmd = 0;
2571
2572 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2573 u8 l4_hdr = 0;
2574 switch (skb->protocol) {
2575 case __constant_htons(ETH_P_IP):
2576 vlan_macip_lens |= skb_network_header_len(skb);
2577 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2578 l4_hdr = ip_hdr(skb)->protocol;
2579 break;
2580 case __constant_htons(ETH_P_IPV6):
2581 vlan_macip_lens |= skb_network_header_len(skb);
2582 l4_hdr = ipv6_hdr(skb)->nexthdr;
2583 break;
2584 default:
2585 if (unlikely(net_ratelimit())) {
2586 dev_warn(tx_ring->dev,
2587 "partial checksum but proto=%x!\n",
2588 skb->protocol);
2589 }
2590 break;
2591 }
2592
2593 switch (l4_hdr) {
2594 case IPPROTO_TCP:
2595 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2596 mss_l4len_idx = tcp_hdrlen(skb) <<
2597 IXGBE_ADVTXD_L4LEN_SHIFT;
2598 break;
2599 case IPPROTO_SCTP:
2600 type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
2601 mss_l4len_idx = sizeof(struct sctphdr) <<
2602 IXGBE_ADVTXD_L4LEN_SHIFT;
2603 break;
2604 case IPPROTO_UDP:
2605 mss_l4len_idx = sizeof(struct udphdr) <<
2606 IXGBE_ADVTXD_L4LEN_SHIFT;
2607 break;
2608 default:
2609 if (unlikely(net_ratelimit())) {
2610 dev_warn(tx_ring->dev,
2611 "partial checksum but l4 proto=%x!\n",
2612 l4_hdr);
2613 }
2614 break;
2615 }
2616 }
2617
2618 /* vlan_macip_lens: MACLEN, VLAN tag */
2619 vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
2620 vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
2621
2622 ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
2623 type_tucmd, mss_l4len_idx);
2624
2625 return (skb->ip_summed == CHECKSUM_PARTIAL);
2626}
2627
70a10e25 2628static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
92915f71
GR
2629 struct sk_buff *skb, u32 tx_flags,
2630 unsigned int first)
2631{
2632 struct ixgbevf_tx_buffer *tx_buffer_info;
2633 unsigned int len;
2634 unsigned int total = skb->len;
2635 unsigned int offset = 0, size;
2636 int count = 0;
2637 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2638 unsigned int f;
2639 int i;
2640
2641 i = tx_ring->next_to_use;
2642
2643 len = min(skb_headlen(skb), total);
2644 while (len) {
2645 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2646 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2647
2648 tx_buffer_info->length = size;
2649 tx_buffer_info->mapped_as_page = false;
2650 tx_buffer_info->dma = dma_map_single(tx_ring->dev,
2651 skb->data + offset,
2652 size, DMA_TO_DEVICE);
2653 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2654 goto dma_error;
2655 tx_buffer_info->next_to_watch = i;
2656
2657 len -= size;
2658 total -= size;
2659 offset += size;
2660 count++;
2661 i++;
2662 if (i == tx_ring->count)
2663 i = 0;
2664 }
2665
2666 for (f = 0; f < nr_frags; f++) {
2667 const struct skb_frag_struct *frag;
2668
2669 frag = &skb_shinfo(skb)->frags[f];
2670 len = min((unsigned int)skb_frag_size(frag), total);
2671 offset = 0;
2672
2673 while (len) {
2674 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2675 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2676
2677 tx_buffer_info->length = size;
2678 tx_buffer_info->dma =
2679 skb_frag_dma_map(tx_ring->dev, frag,
2680 offset, size, DMA_TO_DEVICE);
2681 tx_buffer_info->mapped_as_page = true;
2682 if (dma_mapping_error(tx_ring->dev,
2683 tx_buffer_info->dma))
2684 goto dma_error;
2685 tx_buffer_info->next_to_watch = i;
2686
2687 len -= size;
2688 total -= size;
2689 offset += size;
2690 count++;
2691 i++;
2692 if (i == tx_ring->count)
2693 i = 0;
2694 }
2695 if (total == 0)
2696 break;
2697 }
2698
2699 if (i == 0)
2700 i = tx_ring->count - 1;
2701 else
2702 i = i - 1;
2703 tx_ring->tx_buffer_info[i].skb = skb;
2704 tx_ring->tx_buffer_info[first].next_to_watch = i;
2705 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2706
2707 return count;
2708
2709dma_error:
2710 dev_err(tx_ring->dev, "TX DMA map failed\n");
2711
2712 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2713 tx_buffer_info->dma = 0;
2714 tx_buffer_info->next_to_watch = 0;
2715 count--;
2716
2717 /* clear timestamp and dma mappings for remaining portion of packet */
2718 while (count >= 0) {
2719 count--;
2720 i--;
2721 if (i < 0)
2722 i += tx_ring->count;
2723 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2724 ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
2725 }
2726
2727 return count;
2728}
2729
70a10e25 2730static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
92915f71
GR
2731 int count, u32 paylen, u8 hdr_len)
2732{
2733 union ixgbe_adv_tx_desc *tx_desc = NULL;
2734 struct ixgbevf_tx_buffer *tx_buffer_info;
2735 u32 olinfo_status = 0, cmd_type_len = 0;
2736 unsigned int i;
2737
2738 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2739
2740 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2741
2742 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2743
2744 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2745 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2746
2747 if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2748 olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
2749
2750 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2751 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2752
2753 /* use index 1 context for tso */
2754 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2755 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2756 olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
2757
2758 }
2759
2760 /*
2761 * Check Context must be set if Tx switch is enabled, which it
2762 * always is for case where virtual functions are running
2763 */
2764 olinfo_status |= IXGBE_ADVTXD_CC;
2765
2766 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2767
2768 i = tx_ring->next_to_use;
2769 while (count--) {
2770 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2771 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
2772 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2773 tx_desc->read.cmd_type_len =
2774 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2775 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2776 i++;
2777 if (i == tx_ring->count)
2778 i = 0;
2779 }
2780
2781 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2782
2783 tx_ring->next_to_use = i;
2784}
2785
fb40195c 2786static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71 2787{
fb40195c 2788 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
92915f71 2789
fb40195c 2790 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
2791 /* Herbert's original patch had:
2792 * smp_mb__after_netif_stop_queue();
2793 * but since that doesn't exist yet, just open code it. */
2794 smp_mb();
2795
2796 /* We need to check again in a case another CPU has just
2797 * made room available. */
2798 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2799 return -EBUSY;
2800
2801 /* A reprieve! - use start_queue because it doesn't call schedule */
2802 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
2803 ++adapter->restart_queue;
2804 return 0;
2805}
2806
fb40195c 2807static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
92915f71
GR
2808{
2809 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2810 return 0;
2811 return __ixgbevf_maybe_stop_tx(tx_ring, size);
2812}
2813
2814static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2815{
2816 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2817 struct ixgbevf_ring *tx_ring;
2818 unsigned int first;
2819 unsigned int tx_flags = 0;
2820 u8 hdr_len = 0;
2821 int r_idx = 0, tso;
2822 u16 count = TXD_USE_COUNT(skb_headlen(skb));
2823#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2824 unsigned short f;
2825#endif
2826
2827 tx_ring = &adapter->tx_ring[r_idx];
2828
2829 /*
2830 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2831 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2832 * + 2 desc gap to keep tail from touching head,
2833 * + 1 desc for context descriptor,
2834 * otherwise try next time
2835 */
2836#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2837 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2838 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2839#else
2840 count += skb_shinfo(skb)->nr_frags;
2841#endif
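/* Worked example, assuming 4K pages (so PAGE_SIZE does not exceed
 * IXGBE_MAX_DATA_PER_TXD): a packet with a 1400 byte linear area and
 * two page fragments needs count = 1 + 2 = 3, and the +3 in the check
 * below covers the context descriptor plus the two-descriptor gap
 * between tail and head.
 */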
2842 if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
2843 adapter->tx_busy++;
2844 return NETDEV_TX_BUSY;
2845 }
2846
2847 if (vlan_tx_tag_present(skb)) {
2848 tx_flags |= vlan_tx_tag_get(skb);
2849 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2850 tx_flags |= IXGBE_TX_FLAGS_VLAN;
2851 }
2852
2853 first = tx_ring->next_to_use;
2854
2855 if (skb->protocol == htons(ETH_P_IP))
2856 tx_flags |= IXGBE_TX_FLAGS_IPV4;
2857 tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
2858 if (tso < 0) {
2859 dev_kfree_skb_any(skb);
2860 return NETDEV_TX_OK;
2861 }
2862
2863 if (tso)
2864 tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
2865 else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
2866 tx_flags |= IXGBE_TX_FLAGS_CSUM;
2867
2868 ixgbevf_tx_queue(tx_ring, tx_flags,
2869 ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
2870 skb->len, hdr_len);
2871 /*
2872 * Force memory writes to complete before letting h/w
2873 * know there are new descriptors to fetch. (Only
2874 * applicable for weak-ordered memory model archs,
2875 * such as IA-64).
2876 */
2877 wmb();
2878
2879 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
2880
2881 ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
2882
2883 return NETDEV_TX_OK;
2884}
2885
2886/**
2887 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
2888 * @netdev: network interface device structure
2889 * @p: pointer to an address structure
2890 *
2891 * Returns 0 on success, negative on failure
2892 **/
2893static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2894{
2895 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2896 struct ixgbe_hw *hw = &adapter->hw;
2897 struct sockaddr *addr = p;
2898
2899 if (!is_valid_ether_addr(addr->sa_data))
2900 return -EADDRNOTAVAIL;
2901
2902 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2903 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2904
2905 spin_lock(&adapter->mbx_lock);
2906
2907 if (hw->mac.ops.set_rar)
2908 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2909
2910 spin_unlock(&adapter->mbx_lock);
2911
2912 return 0;
2913}
2914
2915/**
2916 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
2917 * @netdev: network interface device structure
2918 * @new_mtu: new value for maximum frame size
2919 *
2920 * Returns 0 on success, negative on failure
2921 **/
2922static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2923{
2924 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2925 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2926 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2927
2928 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2929 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
2930
2931 /* MTU < 68 is an error and causes problems on some kernels */
2932 if ((new_mtu < 68) || (max_frame > max_possible_frame))
2933 return -EINVAL;
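/* For example, new_mtu = 1500 gives max_frame = 1518, which passes for
 * both MAC types; a jumbo MTU such as 9000 only passes once the X540 VF
 * limit above is in effect.
 */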
2934
2935 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
2936 netdev->mtu, new_mtu);
2937 /* must set new MTU before calling down or up */
2938 netdev->mtu = new_mtu;
2939
2940 if (netif_running(netdev))
2941 ixgbevf_reinit_locked(adapter);
2942
2943 return 0;
2944}
2945
0ac1e8ce 2946static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
92915f71
GR
2947{
2948 struct net_device *netdev = pci_get_drvdata(pdev);
2949 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2950#ifdef CONFIG_PM
2951 int retval = 0;
2952#endif
2953
2954 netif_device_detach(netdev);
2955
2956 if (netif_running(netdev)) {
2957 rtnl_lock();
2958 ixgbevf_down(adapter);
2959 ixgbevf_free_irq(adapter);
2960 ixgbevf_free_all_tx_resources(adapter);
2961 ixgbevf_free_all_rx_resources(adapter);
2962 rtnl_unlock();
2963 }
2964
2965 ixgbevf_clear_interrupt_scheme(adapter);
2966
2967#ifdef CONFIG_PM
2968 retval = pci_save_state(pdev);
2969 if (retval)
2970 return retval;
2971
2972 #endif
2973 pci_disable_device(pdev);
2974
2975 return 0;
2976}
2977
2978#ifdef CONFIG_PM
2979static int ixgbevf_resume(struct pci_dev *pdev)
2980{
2981 struct net_device *netdev = pci_get_drvdata(pdev);
2982 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2983 u32 err;
2984
2985 pci_set_power_state(pdev, PCI_D0);
2986 pci_restore_state(pdev);
2987 /*
2988 * pci_restore_state clears dev->state_saved so call
2989 * pci_save_state to restore it.
2990 */
2991 pci_save_state(pdev);
2992
2993 err = pci_enable_device_mem(pdev);
2994 if (err) {
2995 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2996 return err;
2997 }
2998 pci_set_master(pdev);
2999
3000 rtnl_lock();
3001 err = ixgbevf_init_interrupt_scheme(adapter);
3002 rtnl_unlock();
3003 if (err) {
3004 dev_err(&pdev->dev, "Cannot initialize interrupts\n");
3005 return err;
3006 }
3007
3008 ixgbevf_reset(adapter);
3009
3010 if (netif_running(netdev)) {
3011 err = ixgbevf_open(netdev);
3012 if (err)
3013 return err;
3014 }
3015
3016 netif_device_attach(netdev);
3017
3018 return err;
3019}
3020
3021#endif /* CONFIG_PM */
3022static void ixgbevf_shutdown(struct pci_dev *pdev)
3023{
3024 ixgbevf_suspend(pdev, PMSG_SUSPEND);
3025}
3026
3027static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3028 struct rtnl_link_stats64 *stats)
3029{
3030 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3031 unsigned int start;
3032 u64 bytes, packets;
3033 const struct ixgbevf_ring *ring;
3034 int i;
3035
3036 ixgbevf_update_stats(adapter);
3037
3038 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3039
3040 for (i = 0; i < adapter->num_rx_queues; i++) {
3041 ring = &adapter->rx_ring[i];
3042 do {
3043 start = u64_stats_fetch_begin_bh(&ring->syncp);
3044 bytes = ring->total_bytes;
3045 packets = ring->total_packets;
3046 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3047 stats->rx_bytes += bytes;
3048 stats->rx_packets += packets;
3049 }
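/* The fetch_begin/fetch_retry pairs in the loop above (and the Tx loop
 * below) take a seqcount-style snapshot: if the ring's writer updated
 * the counters mid-read, the read is retried so bytes and packets
 * always come from the same update.
 */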
3050
3051 for (i = 0; i < adapter->num_tx_queues; i++) {
3052 ring = &adapter->tx_ring[i];
3053 do {
3054 start = u64_stats_fetch_begin_bh(&ring->syncp);
3055 bytes = ring->total_bytes;
3056 packets = ring->total_packets;
3057 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3058 stats->tx_bytes += bytes;
3059 stats->tx_packets += packets;
3060 }
3061
3062 return stats;
3063}
3064
0ac1e8ce 3065static const struct net_device_ops ixgbevf_netdev_ops = {
c12db769
SH
3066 .ndo_open = ixgbevf_open,
3067 .ndo_stop = ixgbevf_close,
3068 .ndo_start_xmit = ixgbevf_xmit_frame,
3069 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3070 .ndo_get_stats64 = ixgbevf_get_stats,
3071 .ndo_validate_addr = eth_validate_addr,
3072 .ndo_set_mac_address = ixgbevf_set_mac,
3073 .ndo_change_mtu = ixgbevf_change_mtu,
3074 .ndo_tx_timeout = ixgbevf_tx_timeout,
3075 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3076 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3077 };
3078
3079static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3080{
3081 dev->netdev_ops = &ixgbevf_netdev_ops;
3082 ixgbevf_set_ethtool_ops(dev);
3083 dev->watchdog_timeo = 5 * HZ;
3084}
3085
3086/**
3087 * ixgbevf_probe - Device Initialization Routine
3088 * @pdev: PCI device information struct
3089 * @ent: entry in ixgbevf_pci_tbl
3090 *
3091 * Returns 0 on success, negative on failure
3092 *
3093 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3094 * The OS initialization, configuring of the adapter private structure,
3095 * and a hardware reset occur.
3096 **/
3097static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3098 const struct pci_device_id *ent)
3099{
3100 struct net_device *netdev;
3101 struct ixgbevf_adapter *adapter = NULL;
3102 struct ixgbe_hw *hw = NULL;
3103 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3104 static int cards_found;
3105 int err, pci_using_dac;
3106
3107 err = pci_enable_device(pdev);
3108 if (err)
3109 return err;
3110
3111 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3112 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3113 pci_using_dac = 1;
3114 } else {
3115 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3116 if (err) {
3117 err = dma_set_coherent_mask(&pdev->dev,
3118 DMA_BIT_MASK(32));
3119 if (err) {
3120 dev_err(&pdev->dev, "No usable DMA "
3121 "configuration, aborting\n");
3122 goto err_dma;
3123 }
3124 }
3125 pci_using_dac = 0;
3126 }
3127
3128 err = pci_request_regions(pdev, ixgbevf_driver_name);
3129 if (err) {
3130 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3131 goto err_pci_reg;
3132 }
3133
3134 pci_set_master(pdev);
3135
3136 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3137 MAX_TX_QUEUES);
3138 if (!netdev) {
3139 err = -ENOMEM;
3140 goto err_alloc_etherdev;
3141 }
3142
3143 SET_NETDEV_DEV(netdev, &pdev->dev);
3144
3145 pci_set_drvdata(pdev, netdev);
3146 adapter = netdev_priv(netdev);
3147
3148 adapter->netdev = netdev;
3149 adapter->pdev = pdev;
3150 hw = &adapter->hw;
3151 hw->back = adapter;
3152 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3153
3154 /*
3155 * call save state here in standalone driver because it relies on
3156 * adapter struct to exist, and needs to call netdev_priv
3157 */
3158 pci_save_state(pdev);
3159
3160 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3161 pci_resource_len(pdev, 0));
3162 if (!hw->hw_addr) {
3163 err = -EIO;
3164 goto err_ioremap;
3165 }
3166
3167 ixgbevf_assign_netdev_ops(netdev);
3168
3169 adapter->bd_number = cards_found;
3170
3171 /* Setup hw api */
3172 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3173 hw->mac.type = ii->mac;
3174
3175 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3176 sizeof(struct ixgbe_mbx_operations));
3177
3178 /* setup the private structure */
3179 err = ixgbevf_sw_init(adapter);
3180 if (err)
3181 goto err_sw_init;
3182
3183 /* The HW MAC address was set and/or determined in sw_init */
3184 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3185
3186 if (!is_valid_ether_addr(netdev->dev_addr)) {
3187 pr_err("invalid MAC address\n");
3188 err = -EIO;
3189 goto err_sw_init;
3190 }
3191
3192 netdev->hw_features = NETIF_F_SG |
3193 NETIF_F_IP_CSUM |
3194 NETIF_F_IPV6_CSUM |
3195 NETIF_F_TSO |
3196 NETIF_F_TSO6 |
3197 NETIF_F_RXCSUM;
3198
3199 netdev->features = netdev->hw_features |
3200 NETIF_F_HW_VLAN_TX |
3201 NETIF_F_HW_VLAN_RX |
3202 NETIF_F_HW_VLAN_FILTER;
3203
3204 netdev->vlan_features |= NETIF_F_TSO;
3205 netdev->vlan_features |= NETIF_F_TSO6;
3206 netdev->vlan_features |= NETIF_F_IP_CSUM;
3207 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3208 netdev->vlan_features |= NETIF_F_SG;
3209
3210 if (pci_using_dac)
3211 netdev->features |= NETIF_F_HIGHDMA;
3212
3213 netdev->priv_flags |= IFF_UNICAST_FLT;
3214
3215 init_timer(&adapter->watchdog_timer);
3216 adapter->watchdog_timer.function = ixgbevf_watchdog;
3217 adapter->watchdog_timer.data = (unsigned long)adapter;
3218
3219 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3220 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3221
3222 err = ixgbevf_init_interrupt_scheme(adapter);
3223 if (err)
3224 goto err_sw_init;
3225
3226 /* pick up the PCI bus settings for reporting later */
3227 if (hw->mac.ops.get_bus_info)
3228 hw->mac.ops.get_bus_info(hw);
3229
3230 strcpy(netdev->name, "eth%d");
3231
3232 err = register_netdev(netdev);
3233 if (err)
3234 goto err_register;
3235
3236 netif_carrier_off(netdev);
3237
3238 ixgbevf_init_last_counter_stats(adapter);
3239
3240 /* print the MAC address */
3241 hw_dbg(hw, "%pM\n", netdev->dev_addr);
3242
3243 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3244
3245 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3246 cards_found++;
3247 return 0;
3248
3249err_register:
3250 ixgbevf_clear_interrupt_scheme(adapter);
3251err_sw_init:
3252 ixgbevf_reset_interrupt_capability(adapter);
3253 iounmap(hw->hw_addr);
3254err_ioremap:
3255 free_netdev(netdev);
3256err_alloc_etherdev:
3257 pci_release_regions(pdev);
3258err_pci_reg:
3259err_dma:
3260 pci_disable_device(pdev);
3261 return err;
3262}
3263
3264/**
3265 * ixgbevf_remove - Device Removal Routine
3266 * @pdev: PCI device information struct
3267 *
3268 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3269 * that it should release a PCI device. This could be caused by a
3270 * Hot-Plug event, or because the driver is going to be removed from
3271 * memory.
3272 **/
3273static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3274{
3275 struct net_device *netdev = pci_get_drvdata(pdev);
3276 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3277
3278 set_bit(__IXGBEVF_DOWN, &adapter->state);
3279
3280 del_timer_sync(&adapter->watchdog_timer);
3281
3282 cancel_work_sync(&adapter->reset_task);
3283 cancel_work_sync(&adapter->watchdog_task);
3284
3285 if (netdev->reg_state == NETREG_REGISTERED)
3286 unregister_netdev(netdev);
3287
3288 ixgbevf_clear_interrupt_scheme(adapter);
3289 ixgbevf_reset_interrupt_capability(adapter);
3290
3291 iounmap(adapter->hw.hw_addr);
3292 pci_release_regions(pdev);
3293
3294 hw_dbg(&adapter->hw, "Remove complete\n");
3295
3296 kfree(adapter->tx_ring);
3297 kfree(adapter->rx_ring);
3298
3299 free_netdev(netdev);
3300
3301 pci_disable_device(pdev);
3302}
3303
3304/**
3305 * ixgbevf_io_error_detected - called when PCI error is detected
3306 * @pdev: Pointer to PCI device
3307 * @state: The current pci connection state
3308 *
3309 * This function is called after a PCI bus error affecting
3310 * this device has been detected.
3311 */
3312static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
3313 pci_channel_state_t state)
3314{
3315 struct net_device *netdev = pci_get_drvdata(pdev);
3316 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3317
3318 netif_device_detach(netdev);
3319
3320 if (state == pci_channel_io_perm_failure)
3321 return PCI_ERS_RESULT_DISCONNECT;
3322
3323 if (netif_running(netdev))
3324 ixgbevf_down(adapter);
3325
3326 pci_disable_device(pdev);
3327
3328 /* Request a slot reset. */
3329 return PCI_ERS_RESULT_NEED_RESET;
3330}
3331
3332/**
3333 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
3334 * @pdev: Pointer to PCI device
3335 *
3336 * Restart the card from scratch, as if from a cold-boot. Implementation
3337 * resembles the first-half of the ixgbevf_resume routine.
3338 */
3339static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
3340{
3341 struct net_device *netdev = pci_get_drvdata(pdev);
3342 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3343
3344 if (pci_enable_device_mem(pdev)) {
3345 dev_err(&pdev->dev,
3346 "Cannot re-enable PCI device after reset.\n");
3347 return PCI_ERS_RESULT_DISCONNECT;
3348 }
3349
3350 pci_set_master(pdev);
3351
3352 ixgbevf_reset(adapter);
3353
3354 return PCI_ERS_RESULT_RECOVERED;
3355}
3356
3357/**
3358 * ixgbevf_io_resume - called when traffic can start flowing again.
3359 * @pdev: Pointer to PCI device
3360 *
3361 * This callback is called when the error recovery driver tells us that
3362 * it's OK to resume normal operation. Implementation resembles the
3363 * second-half of the ixgbevf_resume routine.
3364 */
3365static void ixgbevf_io_resume(struct pci_dev *pdev)
3366{
3367 struct net_device *netdev = pci_get_drvdata(pdev);
3368 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3369
3370 if (netif_running(netdev))
3371 ixgbevf_up(adapter);
3372
3373 netif_device_attach(netdev);
3374}
3375
3376/* PCI Error Recovery (ERS) */
3377 static const struct pci_error_handlers ixgbevf_err_handler = {
3378 .error_detected = ixgbevf_io_error_detected,
3379 .slot_reset = ixgbevf_io_slot_reset,
3380 .resume = ixgbevf_io_resume,
3381};
3382
3383static struct pci_driver ixgbevf_driver = {
3384 .name = ixgbevf_driver_name,
3385 .id_table = ixgbevf_pci_tbl,
3386 .probe = ixgbevf_probe,
3387 .remove = __devexit_p(ixgbevf_remove),
3388#ifdef CONFIG_PM
3389 /* Power Management Hooks */
3390 .suspend = ixgbevf_suspend,
3391 .resume = ixgbevf_resume,
3392#endif
3393 .shutdown = ixgbevf_shutdown,
3394 .err_handler = &ixgbevf_err_handler
3395};
3396
3397/**
3398 * ixgbevf_init_module - Driver Registration Routine
3399 *
3400 * ixgbevf_init_module is the first routine called when the driver is
3401 * loaded. All it does is register with the PCI subsystem.
3402 **/
3403static int __init ixgbevf_init_module(void)
3404{
3405 int ret;
3406 pr_info("%s - version %s\n", ixgbevf_driver_string,
3407 ixgbevf_driver_version);
3408
3409 pr_info("%s\n", ixgbevf_copyright);
3410
3411 ret = pci_register_driver(&ixgbevf_driver);
3412 return ret;
3413}
3414
3415module_init(ixgbevf_init_module);
3416
3417/**
3418 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3419 *
3420 * ixgbevf_exit_module is called just before the driver is removed
3421 * from memory.
3422 **/
3423static void __exit ixgbevf_exit_module(void)
3424{
3425 pci_unregister_driver(&ixgbevf_driver);
3426}
3427
3428#ifdef DEBUG
3429/**
3430 * ixgbevf_get_hw_dev_name - return device name string
3431 * used by hardware layer to print debugging information
3432 **/
3433char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3434{
3435 struct ixgbevf_adapter *adapter = hw->back;
3436 return adapter->netdev->name;
3437}
3438
3439#endif
3440module_exit(ixgbevf_exit_module);
3441
3442/* ixgbevf_main.c */