ixgbevf: fix VF untagging when 802.1 prio is set
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1/*******************************************************************************
2
3 Intel 82599 Virtual Function driver
5c47a2b6 4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28
29/******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31******************************************************************************/
32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
92915f71 35#include <linux/types.h>
dadcd65f 36#include <linux/bitops.h>
37#include <linux/module.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/vmalloc.h>
41#include <linux/string.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/tcp.h>
45#include <linux/ipv6.h>
5a0e3ad6 46#include <linux/slab.h>
47#include <net/checksum.h>
48#include <net/ip6_checksum.h>
49#include <linux/ethtool.h>
01789349 50#include <linux/if.h>
92915f71 51#include <linux/if_vlan.h>
70c71606 52#include <linux/prefetch.h>
53
54#include "ixgbevf.h"
55
3d8fe98f 56const char ixgbevf_driver_name[] = "ixgbevf";
92915f71 57static const char ixgbevf_driver_string[] =
422e05d1 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
92915f71 59
9cd9130d 60#define DRV_VERSION "2.6.0-k"
92915f71 61const char ixgbevf_driver_version[] = DRV_VERSION;
66c87bd5 62static char ixgbevf_copyright[] =
5c47a2b6 63 "Copyright (c) 2009 - 2012 Intel Corporation.";
64
65static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
66 [board_82599_vf] = &ixgbevf_82599_vf_info,
67 [board_X540_vf] = &ixgbevf_X540_vf_info,
68};
69
70/* ixgbevf_pci_tbl - PCI Device ID Table
71 *
72 * Wildcard entries (PCI_ANY_ID) should come last
73 * Last entry must be all 0s
74 *
75 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
76 * Class, Class Mask, private data (not used) }
77 */
78static struct pci_device_id ixgbevf_pci_tbl[] = {
79 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
80 board_82599_vf},
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
82 board_X540_vf},
83
84 /* required last entry */
85 {0, }
86};
87MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
88
89MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
90MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
91MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION);
93
b3f4d599 94#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
95static int debug = -1;
96module_param(debug, int, 0);
97MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
98
99/* forward decls */
fa71ae27 100static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
101
102static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
103 struct ixgbevf_ring *rx_ring,
104 u32 val)
105{
106 /*
107 * Force memory writes to complete before letting h/w
108 * know there are new descriptors to fetch. (Only
109 * applicable for weak-ordered memory model archs,
110 * such as IA-64).
111 */
112 wmb();
113 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
114}
115
49ce9c2c 116/**
65d676c8 117 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
118 * @adapter: pointer to adapter struct
119 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
120 * @queue: queue to map the corresponding interrupt to
121 * @msix_vector: the vector to map to the corresponding queue
122 *
123 */
124static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
125 u8 queue, u8 msix_vector)
126{
127 u32 ivar, index;
128 struct ixgbe_hw *hw = &adapter->hw;
129 if (direction == -1) {
130 /* other causes */
131 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
132 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
133 ivar &= ~0xFF;
134 ivar |= msix_vector;
135 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
136 } else {
137 /* tx or rx causes */
138 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
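 /*
  * Each VTIVAR register packs four 8-bit entries, Rx and Tx causes
  * for a pair of queues: (16 * (queue & 1)) selects the low or high
  * half and (8 * direction) selects Rx vs Tx.
  * E.g. queue 3, Tx maps to VTIVAR(1), bits 31:24.
  */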
139 index = ((16 * (queue & 1)) + (8 * direction));
140 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
141 ivar &= ~(0xFF << index);
142 ivar |= (msix_vector << index);
143 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
144 }
145}
146
147static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
148 struct ixgbevf_tx_buffer
149 *tx_buffer_info)
150{
151 if (tx_buffer_info->dma) {
152 if (tx_buffer_info->mapped_as_page)
2a1f8794 153 dma_unmap_page(&adapter->pdev->dev,
154 tx_buffer_info->dma,
155 tx_buffer_info->length,
2a1f8794 156 DMA_TO_DEVICE);
92915f71 157 else
2a1f8794 158 dma_unmap_single(&adapter->pdev->dev,
159 tx_buffer_info->dma,
160 tx_buffer_info->length,
2a1f8794 161 DMA_TO_DEVICE);
162 tx_buffer_info->dma = 0;
163 }
164 if (tx_buffer_info->skb) {
165 dev_kfree_skb_any(tx_buffer_info->skb);
166 tx_buffer_info->skb = NULL;
167 }
168 tx_buffer_info->time_stamp = 0;
169 /* tx_buffer_info must be completely set up in the transmit path */
170}
171
172#define IXGBE_MAX_TXD_PWR 14
173#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
174
175/* Tx Descriptors needed, worst case */
176#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
177#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
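 /*
  * IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16384 bytes, so e.g. a 30 KB
  * fragment costs TXD_USE_COUNT(30720) = 2 descriptors; the "+ 4" in
  * DESC_NEEDED leaves headroom on top of the worst-case fragment count
  * (e.g. for a context descriptor).
  */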
178
179static void ixgbevf_tx_timeout(struct net_device *netdev);
180
181/**
182 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 183 * @q_vector: structure containing interrupt and ring information
184 * @tx_ring: tx ring to clean
185 **/
fa71ae27 186static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
187 struct ixgbevf_ring *tx_ring)
188{
fa71ae27 189 struct ixgbevf_adapter *adapter = q_vector->adapter;
92915f71 190 struct net_device *netdev = adapter->netdev;
191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
192 struct ixgbevf_tx_buffer *tx_buffer_info;
193 unsigned int i, eop, count = 0;
194 unsigned int total_bytes = 0, total_packets = 0;
195
196 i = tx_ring->next_to_clean;
197 eop = tx_ring->tx_buffer_info[i].next_to_watch;
908421f6 198 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
199
200 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
fa71ae27 201 (count < tx_ring->count)) {
92915f71 202 bool cleaned = false;
2d0bb1c1 203 rmb(); /* read buffer_info after eop_desc */
204 /* eop could change between read and DD-check */
205 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
206 goto cont_loop;
207 for ( ; !cleaned; count++) {
208 struct sk_buff *skb;
908421f6 209 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
210 tx_buffer_info = &tx_ring->tx_buffer_info[i];
211 cleaned = (i == eop);
212 skb = tx_buffer_info->skb;
213
214 if (cleaned && skb) {
215 unsigned int segs, bytecount;
216
217 /* gso_segs is currently only valid for tcp */
218 segs = skb_shinfo(skb)->gso_segs ?: 1;
219 /* multiply data chunks by size of headers */
220 bytecount = ((segs - 1) * skb_headlen(skb)) +
221 skb->len;
222 total_packets += segs;
223 total_bytes += bytecount;
224 }
225
226 ixgbevf_unmap_and_free_tx_resource(adapter,
227 tx_buffer_info);
228
229 tx_desc->wb.status = 0;
230
231 i++;
232 if (i == tx_ring->count)
233 i = 0;
234 }
235
98b9e48f 236cont_loop:
92915f71 237 eop = tx_ring->tx_buffer_info[i].next_to_watch;
908421f6 238 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
239 }
240
241 tx_ring->next_to_clean = i;
242
243#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
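 /* only wake the queue once two worst-case frames' worth of descriptors are free */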
244 if (unlikely(count && netif_carrier_ok(netdev) &&
245 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
246 /* Make sure that anybody stopping the queue after this
247 * sees the new next_to_clean.
248 */
249 smp_mb();
250 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
251 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
252 netif_wake_subqueue(netdev, tx_ring->queue_index);
253 ++adapter->restart_queue;
254 }
255 }
256
4197aa7b 257 u64_stats_update_begin(&tx_ring->syncp);
258 tx_ring->total_bytes += total_bytes;
259 tx_ring->total_packets += total_packets;
4197aa7b 260 u64_stats_update_end(&tx_ring->syncp);
92915f71 261
fa71ae27 262 return count < tx_ring->count;
263}
264
265/**
266 * ixgbevf_receive_skb - Send a completed packet up the stack
267 * @q_vector: structure containing interrupt and ring information
268 * @skb: packet to send up
269 * @status: hardware indication of status of receive
270 * @rx_ring: rx descriptor ring (for a specific queue) to setup
271 * @rx_desc: rx descriptor
272 **/
273static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
274 struct sk_buff *skb, u8 status,
275 struct ixgbevf_ring *ring,
276 union ixgbe_adv_rx_desc *rx_desc)
277{
278 struct ixgbevf_adapter *adapter = q_vector->adapter;
279 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
dd1ed3b7 280 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
92915f71 281
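 /*
  * Only the 12-bit VID (tag & VLAN_VID_MASK) indexes active_vlans,
  * so the lookup stays correct when 802.1p priority bits are set.
  */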
5d9a533b 282 if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
dadcd65f 283 __vlan_hwaccel_put_tag(skb, tag);
dadcd65f 284
77d5dfca 285 napi_gro_receive(&q_vector->napi, skb);
286}
287
288/**
289 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
290 * @adapter: address of board private structure
291 * @status_err: hardware indication of status of receive
292 * @skb: skb currently being received and modified
293 **/
294static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
295 u32 status_err, struct sk_buff *skb)
296{
bc8acf2c 297 skb_checksum_none_assert(skb);
298
299 /* Rx csum disabled */
525a940c 300 if (!(adapter->netdev->features & NETIF_F_RXCSUM))
301 return;
302
303 /* if IP and error */
304 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
305 (status_err & IXGBE_RXDADV_ERR_IPE)) {
306 adapter->hw_csum_rx_error++;
307 return;
308 }
309
310 if (!(status_err & IXGBE_RXD_STAT_L4CS))
311 return;
312
313 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
314 adapter->hw_csum_rx_error++;
315 return;
316 }
317
318 /* It must be a TCP or UDP packet with a valid checksum */
319 skb->ip_summed = CHECKSUM_UNNECESSARY;
320 adapter->hw_csum_rx_good++;
321}
322
323/**
324 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
325 * @adapter: address of board private structure
326 **/
327static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
328 struct ixgbevf_ring *rx_ring,
329 int cleaned_count)
330{
331 struct pci_dev *pdev = adapter->pdev;
332 union ixgbe_adv_rx_desc *rx_desc;
333 struct ixgbevf_rx_buffer *bi;
334 struct sk_buff *skb;
335 unsigned int i;
336 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
337
338 i = rx_ring->next_to_use;
339 bi = &rx_ring->rx_buffer_info[i];
340
341 while (cleaned_count--) {
908421f6 342 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
343 skb = bi->skb;
344 if (!skb) {
345 skb = netdev_alloc_skb(adapter->netdev,
346 bufsz);
347
348 if (!skb) {
349 adapter->alloc_rx_buff_failed++;
350 goto no_buffers;
351 }
352
353 /*
354 * Make buffer alignment 2 beyond a 16 byte boundary
355 * this will result in a 16 byte aligned IP header after
356 * the 14 byte MAC header is removed
357 */
358 skb_reserve(skb, NET_IP_ALIGN);
359
360 bi->skb = skb;
361 }
362 if (!bi->dma) {
2a1f8794 363 bi->dma = dma_map_single(&pdev->dev, skb->data,
92915f71 364 rx_ring->rx_buf_len,
2a1f8794 365 DMA_FROM_DEVICE);
92915f71 366 }
77d5dfca 367 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
368
369 i++;
370 if (i == rx_ring->count)
371 i = 0;
372 bi = &rx_ring->rx_buffer_info[i];
373 }
374
375no_buffers:
376 if (rx_ring->next_to_use != i) {
377 rx_ring->next_to_use = i;
378 if (i-- == 0)
379 i = (rx_ring->count - 1);
380
381 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
382 }
383}
384
385static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
5f3600eb 386 u32 qmask)
92915f71 387{
388 struct ixgbe_hw *hw = &adapter->hw;
389
5f3600eb 390 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
391}
392
393static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
394 struct ixgbevf_ring *rx_ring,
fa71ae27 395 int budget)
396{
397 struct ixgbevf_adapter *adapter = q_vector->adapter;
398 struct pci_dev *pdev = adapter->pdev;
399 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
400 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
401 struct sk_buff *skb;
402 unsigned int i;
403 u32 len, staterr;
404 int cleaned_count = 0;
405 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
406
407 i = rx_ring->next_to_clean;
908421f6 408 rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
409 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
410 rx_buffer_info = &rx_ring->rx_buffer_info[i];
411
412 while (staterr & IXGBE_RXD_STAT_DD) {
fa71ae27 413 if (!budget)
92915f71 414 break;
fa71ae27 415 budget--;
92915f71 416
2d0bb1c1 417 rmb(); /* read descriptor and rx_buffer_info after status DD */
77d5dfca 418 len = le16_to_cpu(rx_desc->wb.upper.length);
419 skb = rx_buffer_info->skb;
420 prefetch(skb->data - NET_IP_ALIGN);
421 rx_buffer_info->skb = NULL;
422
423 if (rx_buffer_info->dma) {
2a1f8794 424 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
92915f71 425 rx_ring->rx_buf_len,
2a1f8794 426 DMA_FROM_DEVICE);
427 rx_buffer_info->dma = 0;
428 skb_put(skb, len);
429 }
430
431 i++;
432 if (i == rx_ring->count)
433 i = 0;
434
908421f6 435 next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
436 prefetch(next_rxd);
437 cleaned_count++;
438
439 next_buffer = &rx_ring->rx_buffer_info[i];
440
441 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
442 skb->next = next_buffer->skb;
443 skb->next->prev = skb;
444 adapter->non_eop_descs++;
445 goto next_desc;
446 }
447
448 /* ERR_MASK will only have valid bits if EOP set */
449 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
450 dev_kfree_skb_irq(skb);
451 goto next_desc;
452 }
453
454 ixgbevf_rx_checksum(adapter, staterr, skb);
455
456 /* probably a little skewed due to removing CRC */
457 total_rx_bytes += skb->len;
458 total_rx_packets++;
459
460 /*
461 * Work around issue of some types of VM to VM loop back
462 * packets not getting split correctly
463 */
464 if (staterr & IXGBE_RXD_STAT_LB) {
e743d313 465 u32 header_fixup_len = skb_headlen(skb);
466 if (header_fixup_len < 14)
467 skb_push(skb, header_fixup_len);
468 }
469 skb->protocol = eth_type_trans(skb, adapter->netdev);
470
471 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
472
473next_desc:
474 rx_desc->wb.upper.status_error = 0;
475
476 /* return some buffers to hardware, one at a time is too slow */
477 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
478 ixgbevf_alloc_rx_buffers(adapter, rx_ring,
479 cleaned_count);
480 cleaned_count = 0;
481 }
482
483 /* use prefetched values */
484 rx_desc = next_rxd;
485 rx_buffer_info = &rx_ring->rx_buffer_info[i];
486
487 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
488 }
489
490 rx_ring->next_to_clean = i;
491 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
492
493 if (cleaned_count)
494 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
495
4197aa7b 496 u64_stats_update_begin(&rx_ring->syncp);
497 rx_ring->total_packets += total_rx_packets;
498 rx_ring->total_bytes += total_rx_bytes;
4197aa7b 499 u64_stats_update_end(&rx_ring->syncp);
92915f71 500
fa71ae27 501 return !!budget;
502}
503
504/**
 505 * ixgbevf_poll - NAPI polling callback
506 * @napi: napi struct with our devices info in it
507 * @budget: amount of work driver is allowed to do this pass, in packets
508 *
 509 * This function will clean one or more rings associated with a
510 * q_vector.
511 **/
fa71ae27 512static int ixgbevf_poll(struct napi_struct *napi, int budget)
513{
514 struct ixgbevf_q_vector *q_vector =
515 container_of(napi, struct ixgbevf_q_vector, napi);
516 struct ixgbevf_adapter *adapter = q_vector->adapter;
517 struct ixgbevf_ring *ring;
518 int per_ring_budget;
519 bool clean_complete = true;
520
521 ixgbevf_for_each_ring(ring, q_vector->tx)
522 clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
523
524 /* attempt to distribute budget to each queue fairly, but don't allow
525 * the budget to go below 1 because we'll exit polling */
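 /* e.g. a 64-packet budget shared by three Rx rings allows each ring
  * to clean up to 64 / 3 = 21 packets per poll */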
526 if (q_vector->rx.count > 1)
527 per_ring_budget = max(budget/q_vector->rx.count, 1);
528 else
529 per_ring_budget = budget;
530
531 ixgbevf_for_each_ring(ring, q_vector->rx)
532 clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
533 per_ring_budget);
534
535 /* If all work not completed, return budget and keep polling */
536 if (!clean_complete)
537 return budget;
538 /* all work done, exit the polling mode */
539 napi_complete(napi);
540 if (adapter->rx_itr_setting & 1)
541 ixgbevf_set_itr(q_vector);
542 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
543 ixgbevf_irq_enable_queues(adapter,
544 1 << q_vector->v_idx);
92915f71 545
fa71ae27 546 return 0;
547}
548
549
550/**
551 * ixgbevf_configure_msix - Configure MSI-X hardware
552 * @adapter: board private structure
553 *
554 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
555 * interrupts.
556 **/
557static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
558{
559 struct ixgbevf_q_vector *q_vector;
6b43c446 560 int q_vectors, v_idx;
561
562 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5f3600eb 563 adapter->eims_enable_mask = 0;
564
565 /*
566 * Populate the IVAR table and set the ITR values to the
567 * corresponding register.
568 */
569 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
6b43c446 570 struct ixgbevf_ring *ring;
92915f71 571 q_vector = adapter->q_vector[v_idx];
572
573 ixgbevf_for_each_ring(ring, q_vector->rx)
574 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
575
576 ixgbevf_for_each_ring(ring, q_vector->tx)
577 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
92915f71 578
579 if (q_vector->tx.ring && !q_vector->rx.ring) {
580 /* tx only vector */
581 if (adapter->tx_itr_setting == 1)
582 q_vector->itr = IXGBE_10K_ITR;
583 else
584 q_vector->itr = adapter->tx_itr_setting;
585 } else {
586 /* rx or rx/tx vector */
587 if (adapter->rx_itr_setting == 1)
588 q_vector->itr = IXGBE_20K_ITR;
589 else
590 q_vector->itr = adapter->rx_itr_setting;
591 }
592
593 /* add q_vector eims value to global eims_enable_mask */
594 adapter->eims_enable_mask |= 1 << v_idx;
92915f71 595
5f3600eb 596 ixgbevf_write_eitr(q_vector);
597 }
598
599 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
600 /* setup eims_other and add value to global eims_enable_mask */
601 adapter->eims_other = 1 << v_idx;
602 adapter->eims_enable_mask |= adapter->eims_other;
603}
604
605enum latency_range {
606 lowest_latency = 0,
607 low_latency = 1,
608 bulk_latency = 2,
609 latency_invalid = 255
610};
611
612/**
613 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
614 * @q_vector: structure containing interrupt and ring information
615 * @ring_container: structure containing ring performance data
616 *
617 * Stores a new ITR value based on packets and byte
618 * counts during the last interrupt. The advantage of per interrupt
619 * computation is faster updates and more accurate ITR for the current
620 * traffic pattern. Constants in this function were computed
621 * based on theoretical maximum wire speed and thresholds were set based
622 * on testing data as well as attempting to minimize response time
623 * while increasing bulk throughput.
624 **/
625static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
626 struct ixgbevf_ring_container *ring_container)
92915f71 627{
628 int bytes = ring_container->total_bytes;
629 int packets = ring_container->total_packets;
630 u32 timepassed_us;
631 u64 bytes_perint;
5f3600eb 632 u8 itr_setting = ring_container->itr;
633
634 if (packets == 0)
5f3600eb 635 return;
636
637 /* simple throttlerate management
638 * 0-20MB/s lowest (100000 ints/s)
639 * 20-100MB/s low (20000 ints/s)
640 * 100-1249MB/s bulk (8000 ints/s)
641 */
642 /* what was last interrupt timeslice? */
5f3600eb 643 timepassed_us = q_vector->itr >> 2;
644 bytes_perint = bytes / timepassed_us; /* bytes/usec */
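 /* bytes/usec is roughly MB/s, so the 10 and 20 cut-offs below
  * correspond to roughly 10 MB/s and 20 MB/s */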
645
646 switch (itr_setting) {
647 case lowest_latency:
e2c28ce7 648 if (bytes_perint > 10)
5f3600eb 649 itr_setting = low_latency;
650 break;
651 case low_latency:
e2c28ce7 652 if (bytes_perint > 20)
5f3600eb 653 itr_setting = bulk_latency;
e2c28ce7 654 else if (bytes_perint <= 10)
5f3600eb 655 itr_setting = lowest_latency;
656 break;
657 case bulk_latency:
e2c28ce7 658 if (bytes_perint <= 20)
5f3600eb 659 itr_setting = low_latency;
660 break;
661 }
662
663 /* clear work counters since we have the values we need */
664 ring_container->total_bytes = 0;
665 ring_container->total_packets = 0;
666
667 /* write updated itr to ring container */
668 ring_container->itr = itr_setting;
669}
670
671/**
672 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
5f3600eb 673 * @q_vector: structure containing interrupt and ring information
674 *
675 * This function is made to be called by ethtool and by the driver
676 * when it needs to update VTEITR registers at runtime. Hardware
677 * specific quirks/differences are taken care of here.
678 */
5f3600eb 679void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
92915f71 680{
5f3600eb 681 struct ixgbevf_adapter *adapter = q_vector->adapter;
92915f71 682 struct ixgbe_hw *hw = &adapter->hw;
683 int v_idx = q_vector->v_idx;
684 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
685
686 /*
687 * set the WDIS bit to not clear the timer bits and cause an
688 * immediate assertion of the interrupt
689 */
690 itr_reg |= IXGBE_EITR_CNT_WDIS;
691
692 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
693}
694
fa71ae27 695static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
92915f71 696{
697 u32 new_itr = q_vector->itr;
698 u8 current_itr;
92915f71 699
700 ixgbevf_update_itr(q_vector, &q_vector->tx);
701 ixgbevf_update_itr(q_vector, &q_vector->rx);
92915f71 702
6b43c446 703 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
704
705 switch (current_itr) {
706 /* counts and packets in update_itr are dependent on these numbers */
707 case lowest_latency:
5f3600eb 708 new_itr = IXGBE_100K_ITR;
709 break;
710 case low_latency:
5f3600eb 711 new_itr = IXGBE_20K_ITR;
712 break;
713 case bulk_latency:
714 default:
5f3600eb 715 new_itr = IXGBE_8K_ITR;
716 break;
717 }
718
5f3600eb 719 if (new_itr != q_vector->itr) {
92915f71 720 /* do an exponential smoothing */
721 new_itr = (10 * new_itr * q_vector->itr) /
722 ((9 * new_itr) + q_vector->itr);
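 /* e.g. an old itr of 200 moving toward a target of 500 gives
  * (10 * 500 * 200) / ((9 * 500) + 200) = 212, so the value only
  * creeps toward the new rate */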
723
724 /* save the algorithm value here */
725 q_vector->itr = new_itr;
726
727 ixgbevf_write_eitr(q_vector);
92915f71 728 }
729}
730
731static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
732{
fa71ae27 733 struct ixgbevf_adapter *adapter = data;
92915f71 734 struct ixgbe_hw *hw = &adapter->hw;
a9ee25a2 735 u32 msg;
375b27cf 736 bool got_ack = false;
92915f71 737
738 if (!hw->mbx.ops.check_for_ack(hw))
739 got_ack = true;
08259594 740
741 if (!hw->mbx.ops.check_for_msg(hw)) {
742 hw->mbx.ops.read(hw, &msg, 1);
a9ee25a2 743
744 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
745 mod_timer(&adapter->watchdog_timer,
746 round_jiffies(jiffies + 1));
a9ee25a2 747
748 if (msg & IXGBE_VT_MSGTYPE_NACK)
749 pr_warn("Last Request of type %2.2x to PF Nacked\n",
750 msg & 0xFF);
751 /*
752 * Restore the PFSTS bit in case someone is polling for a
753 * return message from the PF
754 */
755 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
756 }
757
758 /*
759 * checking for the ack clears the PFACK bit. Place
760 * it back in the v2p_mailbox cache so that anyone
761 * polling for an ack will not miss it
762 */
763 if (got_ack)
764 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
3a2c4033 765
766 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
767
768 return IRQ_HANDLED;
769}
770
771
772/**
fa71ae27 773 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
774 * @irq: unused
775 * @data: pointer to our q_vector struct for this interrupt vector
776 **/
fa71ae27 777static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
778{
779 struct ixgbevf_q_vector *q_vector = data;
92915f71 780
5f3600eb 781 /* EIAM disabled interrupts (on this vector) for us */
782 if (q_vector->rx.ring || q_vector->tx.ring)
783 napi_schedule(&q_vector->napi);
784
785 return IRQ_HANDLED;
786}
787
788static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
789 int r_idx)
790{
791 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
792
793 a->rx_ring[r_idx].next = q_vector->rx.ring;
794 q_vector->rx.ring = &a->rx_ring[r_idx];
795 q_vector->rx.count++;
796}
797
798static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
799 int t_idx)
800{
801 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
802
803 a->tx_ring[t_idx].next = q_vector->tx.ring;
804 q_vector->tx.ring = &a->tx_ring[t_idx];
805 q_vector->tx.count++;
806}
807
808/**
809 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
810 * @adapter: board private structure to initialize
811 *
812 * This function maps descriptor rings to the queue-specific vectors
813 * we were allotted through the MSI-X enabling code. Ideally, we'd have
814 * one vector per ring/queue, but on a constrained vector budget, we
815 * group the rings as "efficiently" as possible. You would add new
816 * mapping configurations in here.
817 **/
818static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
819{
820 int q_vectors;
821 int v_start = 0;
822 int rxr_idx = 0, txr_idx = 0;
823 int rxr_remaining = adapter->num_rx_queues;
824 int txr_remaining = adapter->num_tx_queues;
825 int i, j;
826 int rqpv, tqpv;
827 int err = 0;
828
829 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
830
831 /*
832 * The ideal configuration...
833 * We have enough vectors to map one per queue.
834 */
835 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
836 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
837 map_vector_to_rxq(adapter, v_start, rxr_idx);
838
839 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
840 map_vector_to_txq(adapter, v_start, txr_idx);
841 goto out;
842 }
843
844 /*
845 * If we don't have enough vectors for a 1-to-1
846 * mapping, we'll have to group them so there are
847 * multiple queues per vector.
848 */
849 /* Re-adjusting *qpv takes care of the remainder. */
850 for (i = v_start; i < q_vectors; i++) {
851 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
852 for (j = 0; j < rqpv; j++) {
853 map_vector_to_rxq(adapter, i, rxr_idx);
854 rxr_idx++;
855 rxr_remaining--;
856 }
857 }
858 for (i = v_start; i < q_vectors; i++) {
859 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
860 for (j = 0; j < tqpv; j++) {
861 map_vector_to_txq(adapter, i, txr_idx);
862 txr_idx++;
863 txr_remaining--;
864 }
865 }
866
867out:
868 return err;
869}
870
871/**
872 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
873 * @adapter: board private structure
874 *
875 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
876 * interrupts from the kernel.
877 **/
878static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
879{
880 struct net_device *netdev = adapter->netdev;
881 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
882 int vector, err;
883 int ri = 0, ti = 0;
884
92915f71 885 for (vector = 0; vector < q_vectors; vector++) {
886 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
887 struct msix_entry *entry = &adapter->msix_entries[vector];
888
889 if (q_vector->tx.ring && q_vector->rx.ring) {
890 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
891 "%s-%s-%d", netdev->name, "TxRx", ri++);
892 ti++;
893 } else if (q_vector->rx.ring) {
894 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
895 "%s-%s-%d", netdev->name, "rx", ri++);
896 } else if (q_vector->tx.ring) {
897 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
898 "%s-%s-%d", netdev->name, "tx", ti++);
899 } else {
900 /* skip this unused q_vector */
901 continue;
902 }
903 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
904 q_vector->name, q_vector);
905 if (err) {
906 hw_dbg(&adapter->hw,
907 "request_irq failed for MSIX interrupt "
908 "Error: %d\n", err);
909 goto free_queue_irqs;
910 }
911 }
912
92915f71 913 err = request_irq(adapter->msix_entries[vector].vector,
fa71ae27 914 &ixgbevf_msix_mbx, 0, netdev->name, adapter);
915 if (err) {
916 hw_dbg(&adapter->hw,
917 "request_irq for msix_mbx failed: %d\n", err);
918 goto free_queue_irqs;
919 }
920
921 return 0;
922
923free_queue_irqs:
924 while (vector) {
925 vector--;
926 free_irq(adapter->msix_entries[vector].vector,
927 adapter->q_vector[vector]);
928 }
929 pci_disable_msix(adapter->pdev);
930 kfree(adapter->msix_entries);
931 adapter->msix_entries = NULL;
932 return err;
933}
934
935static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
936{
937 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
938
939 for (i = 0; i < q_vectors; i++) {
940 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
941 q_vector->rx.ring = NULL;
942 q_vector->tx.ring = NULL;
943 q_vector->rx.count = 0;
944 q_vector->tx.count = 0;
945 }
946}
947
948/**
949 * ixgbevf_request_irq - initialize interrupts
950 * @adapter: board private structure
951 *
952 * Attempts to configure interrupts using the best available
953 * capabilities of the hardware and kernel.
954 **/
955static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
956{
957 int err = 0;
958
959 err = ixgbevf_request_msix_irqs(adapter);
960
961 if (err)
962 hw_dbg(&adapter->hw,
963 "request_irq failed, Error %d\n", err);
964
965 return err;
966}
967
968static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
969{
970 int i, q_vectors;
971
972 q_vectors = adapter->num_msix_vectors;
973 i = q_vectors - 1;
974
fa71ae27 975 free_irq(adapter->msix_entries[i].vector, adapter);
92915f71
GR
976 i--;
977
978 for (; i >= 0; i--) {
979 /* free only the irqs that were actually requested */
980 if (!adapter->q_vector[i]->rx.ring &&
981 !adapter->q_vector[i]->tx.ring)
982 continue;
983
984 free_irq(adapter->msix_entries[i].vector,
985 adapter->q_vector[i]);
986 }
987
988 ixgbevf_reset_q_vectors(adapter);
989}
990
991/**
992 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
993 * @adapter: board private structure
994 **/
995static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
996{
92915f71 997 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 998 int i;
92915f71 999
5f3600eb 1000 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
92915f71 1001 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
5f3600eb 1002 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
1003
1004 IXGBE_WRITE_FLUSH(hw);
1005
1006 for (i = 0; i < adapter->num_msix_vectors; i++)
1007 synchronize_irq(adapter->msix_entries[i].vector);
1008}
1009
1010/**
1011 * ixgbevf_irq_enable - Enable default interrupt generation settings
1012 * @adapter: board private structure
1013 **/
5f3600eb 1014static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
1015{
1016 struct ixgbe_hw *hw = &adapter->hw;
92915f71 1017
1018 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
1019 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
1020 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
1021}
1022
1023/**
1024 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1025 * @adapter: board private structure
1026 *
1027 * Configure the Tx unit of the MAC after a reset.
1028 **/
1029static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1030{
1031 u64 tdba;
1032 struct ixgbe_hw *hw = &adapter->hw;
1033 u32 i, j, tdlen, txctrl;
1034
1035 /* Setup the HW Tx Head and Tail descriptor pointers */
1036 for (i = 0; i < adapter->num_tx_queues; i++) {
1037 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1038 j = ring->reg_idx;
1039 tdba = ring->dma;
1040 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1041 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1042 (tdba & DMA_BIT_MASK(32)));
1043 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1044 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1045 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1046 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1047 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1048 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1049 /* Disable Tx Head Writeback RO bit, since this hoses
1050 * bookkeeping if things aren't delivered in order.
1051 */
1052 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1053 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1054 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1055 }
1056}
1057
1058#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1059
1060static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1061{
1062 struct ixgbevf_ring *rx_ring;
1063 struct ixgbe_hw *hw = &adapter->hw;
1064 u32 srrctl;
1065
1066 rx_ring = &adapter->rx_ring[index];
1067
1068 srrctl = IXGBE_SRRCTL_DROP_EN;
1069
77d5dfca 1070 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
92915f71 1071
1072 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1073 srrctl |= IXGBEVF_RXBUFFER_2048 >>
1074 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1075 else
1076 srrctl |= rx_ring->rx_buf_len >>
1077 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1078 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1079}
1080
1081/**
1082 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1083 * @adapter: board private structure
1084 *
1085 * Configure the Rx unit of the MAC after a reset.
1086 **/
1087static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1088{
1089 u64 rdba;
1090 struct ixgbe_hw *hw = &adapter->hw;
1091 struct net_device *netdev = adapter->netdev;
1092 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1093 int i, j;
1094 u32 rdlen;
1095 int rx_buf_len;
1096
1097 /* PSRTYPE must be initialized in 82599 */
1098 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1099 if (netdev->mtu <= ETH_DATA_LEN)
1100 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1101 else
1102 rx_buf_len = ALIGN(max_frame, 1024);
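 /* e.g. a 9000-byte MTU gives max_frame = 9018, rounded up to a
  * 9216-byte receive buffer */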
1103
1104 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1105 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1106 * the Base and Length of the Rx Descriptor Ring */
1107 for (i = 0; i < adapter->num_rx_queues; i++) {
1108 rdba = adapter->rx_ring[i].dma;
1109 j = adapter->rx_ring[i].reg_idx;
1110 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1111 (rdba & DMA_BIT_MASK(32)));
1112 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1113 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1114 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1115 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1116 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1117 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1118 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1119
1120 ixgbevf_configure_srrctl(adapter, j);
1121 }
1122}
1123
8e586137 1124static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1125{
1126 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1127 struct ixgbe_hw *hw = &adapter->hw;
1128
1129 /* add VID to filter table */
1130 if (hw->mac.ops.set_vfta)
1131 hw->mac.ops.set_vfta(hw, vid, 0, true);
dadcd65f 1132 set_bit(vid, adapter->active_vlans);
1133
1134 return 0;
1135}
1136
8e586137 1137static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1138{
1139 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1140 struct ixgbe_hw *hw = &adapter->hw;
1141
1142 /* remove VID from filter table */
1143 if (hw->mac.ops.set_vfta)
1144 hw->mac.ops.set_vfta(hw, vid, 0, false);
dadcd65f 1145 clear_bit(vid, adapter->active_vlans);
1146
1147 return 0;
1148}
1149
1150static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1151{
dadcd65f 1152 u16 vid;
92915f71 1153
1154 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1155 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1156}
1157
1158static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1159{
1160 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1161 struct ixgbe_hw *hw = &adapter->hw;
1162 int count = 0;
1163
1164 if ((netdev_uc_count(netdev)) > 10) {
dbd9636e 1165 pr_err("Too many unicast filters - No Space\n");
1166 return -ENOSPC;
1167 }
1168
1169 if (!netdev_uc_empty(netdev)) {
1170 struct netdev_hw_addr *ha;
1171 netdev_for_each_uc_addr(ha, netdev) {
1172 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1173 udelay(200);
1174 }
1175 } else {
1176 /*
1177 * If the list is empty then send message to PF driver to
1178 * clear all macvlans on this VF.
1179 */
1180 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1181 }
1182
1183 return count;
1184}
1185
1186/**
1187 * ixgbevf_set_rx_mode - Multicast set
1188 * @netdev: network interface device structure
1189 *
1190 * The set_rx_method entry point is called whenever the multicast address
1191 * list or the network interface flags are updated. This routine is
1192 * responsible for configuring the hardware for proper multicast mode.
1193 **/
1194static void ixgbevf_set_rx_mode(struct net_device *netdev)
1195{
1196 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1197 struct ixgbe_hw *hw = &adapter->hw;
1198
1199 /* reprogram multicast list */
92915f71 1200 if (hw->mac.ops.update_mc_addr_list)
5c58c47a 1201 hw->mac.ops.update_mc_addr_list(hw, netdev);
1202
1203 ixgbevf_write_uc_addr_list(netdev);
1204}
1205
1206static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1207{
1208 int q_idx;
1209 struct ixgbevf_q_vector *q_vector;
1210 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1211
1212 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
92915f71 1213 q_vector = adapter->q_vector[q_idx];
fa71ae27 1214 napi_enable(&q_vector->napi);
1215 }
1216}
1217
1218static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1219{
1220 int q_idx;
1221 struct ixgbevf_q_vector *q_vector;
1222 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1223
1224 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1225 q_vector = adapter->q_vector[q_idx];
1226 napi_disable(&q_vector->napi);
1227 }
1228}
1229
1230static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1231{
1232 struct net_device *netdev = adapter->netdev;
1233 int i;
1234
1235 ixgbevf_set_rx_mode(netdev);
1236
1237 ixgbevf_restore_vlan(adapter);
1238
1239 ixgbevf_configure_tx(adapter);
1240 ixgbevf_configure_rx(adapter);
1241 for (i = 0; i < adapter->num_rx_queues; i++) {
1242 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1243 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
1244 ring->next_to_use = ring->count - 1;
1245 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
1246 }
1247}
1248
1249#define IXGBE_MAX_RX_DESC_POLL 10
1250static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1251 int rxr)
1252{
1253 struct ixgbe_hw *hw = &adapter->hw;
1254 int j = adapter->rx_ring[rxr].reg_idx;
1255 int k;
1256
1257 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1258 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1259 break;
1260 else
1261 msleep(1);
1262 }
1263 if (k >= IXGBE_MAX_RX_DESC_POLL) {
1264 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1265 "not set within the polling period\n", rxr);
1266 }
1267
1268 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1269 (adapter->rx_ring[rxr].count - 1));
1270}
1271
1272static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1273{
1274 /* Only save pre-reset stats if there are some */
1275 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1276 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1277 adapter->stats.base_vfgprc;
1278 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1279 adapter->stats.base_vfgptc;
1280 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1281 adapter->stats.base_vfgorc;
1282 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1283 adapter->stats.base_vfgotc;
1284 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1285 adapter->stats.base_vfmprc;
1286 }
1287}
1288
1289static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1290{
1291 struct ixgbe_hw *hw = &adapter->hw;
1292
1293 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1294 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1295 adapter->stats.last_vfgorc |=
1296 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1297 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1298 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1299 adapter->stats.last_vfgotc |=
1300 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1301 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1302
1303 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1304 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1305 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1306 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1307 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1308}
1309
795180d8 1310static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1311{
1312 struct net_device *netdev = adapter->netdev;
1313 struct ixgbe_hw *hw = &adapter->hw;
1314 int i, j = 0;
1315 int num_rx_rings = adapter->num_rx_queues;
1316 u32 txdctl, rxdctl;
795180d8 1317 u32 msg[2];
1318
1319 for (i = 0; i < adapter->num_tx_queues; i++) {
1320 j = adapter->tx_ring[i].reg_idx;
1321 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1322 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
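 /* the WTHRESH field starts at bit 16 of TXDCTL, so (8 << 16) sets
  * WTHRESH to 8 */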
1323 txdctl |= (8 << 16);
1324 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1325 }
1326
1327 for (i = 0; i < adapter->num_tx_queues; i++) {
1328 j = adapter->tx_ring[i].reg_idx;
1329 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1330 txdctl |= IXGBE_TXDCTL_ENABLE;
1331 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1332 }
1333
1334 for (i = 0; i < num_rx_rings; i++) {
1335 j = adapter->rx_ring[i].reg_idx;
1336 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
dadcd65f 1337 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1338 if (hw->mac.type == ixgbe_mac_X540_vf) {
1339 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1340 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1341 IXGBE_RXDCTL_RLPML_EN);
1342 }
1343 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1344 ixgbevf_rx_desc_queue_enable(adapter, i);
1345 }
1346
1347 ixgbevf_configure_msix(adapter);
1348
1349 if (hw->mac.ops.set_rar) {
1350 if (is_valid_ether_addr(hw->mac.addr))
1351 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1352 else
1353 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1354 }
1355
1356 msg[0] = IXGBE_VF_SET_LPE;
1357 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1358 hw->mbx.ops.write_posted(hw, msg, 2);
1359
1360 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1361 ixgbevf_napi_enable_all(adapter);
1362
1363 /* enable transmits */
1364 netif_tx_start_all_queues(netdev);
1365
1366 ixgbevf_save_reset_stats(adapter);
1367 ixgbevf_init_last_counter_stats(adapter);
1368
92915f71 1369 mod_timer(&adapter->watchdog_timer, jiffies);
1370}
1371
795180d8 1372void ixgbevf_up(struct ixgbevf_adapter *adapter)
92915f71 1373{
1374 struct ixgbe_hw *hw = &adapter->hw;
1375
1376 ixgbevf_configure(adapter);
1377
795180d8 1378 ixgbevf_up_complete(adapter);
1379
1380 /* clear any pending interrupts, may auto mask */
1381 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1382
5f3600eb 1383 ixgbevf_irq_enable(adapter);
1384}
1385
1386/**
1387 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1388 * @adapter: board private structure
1389 * @rx_ring: ring to free buffers from
1390 **/
1391static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1392 struct ixgbevf_ring *rx_ring)
1393{
1394 struct pci_dev *pdev = adapter->pdev;
1395 unsigned long size;
1396 unsigned int i;
1397
1398 if (!rx_ring->rx_buffer_info)
1399 return;
92915f71 1400
c0456c23 1401 /* Free all the Rx ring sk_buffs */
1402 for (i = 0; i < rx_ring->count; i++) {
1403 struct ixgbevf_rx_buffer *rx_buffer_info;
1404
1405 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1406 if (rx_buffer_info->dma) {
2a1f8794 1407 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
92915f71 1408 rx_ring->rx_buf_len,
2a1f8794 1409 DMA_FROM_DEVICE);
1410 rx_buffer_info->dma = 0;
1411 }
1412 if (rx_buffer_info->skb) {
1413 struct sk_buff *skb = rx_buffer_info->skb;
1414 rx_buffer_info->skb = NULL;
1415 do {
1416 struct sk_buff *this = skb;
1417 skb = skb->prev;
1418 dev_kfree_skb(this);
1419 } while (skb);
1420 }
1421 }
1422
1423 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1424 memset(rx_ring->rx_buffer_info, 0, size);
1425
1426 /* Zero out the descriptor ring */
1427 memset(rx_ring->desc, 0, rx_ring->size);
1428
1429 rx_ring->next_to_clean = 0;
1430 rx_ring->next_to_use = 0;
1431
1432 if (rx_ring->head)
1433 writel(0, adapter->hw.hw_addr + rx_ring->head);
1434 if (rx_ring->tail)
1435 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1436}
1437
1438/**
1439 * ixgbevf_clean_tx_ring - Free Tx Buffers
1440 * @adapter: board private structure
1441 * @tx_ring: ring to be cleaned
1442 **/
1443static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1444 struct ixgbevf_ring *tx_ring)
1445{
1446 struct ixgbevf_tx_buffer *tx_buffer_info;
1447 unsigned long size;
1448 unsigned int i;
1449
1450 if (!tx_ring->tx_buffer_info)
1451 return;
1452
1453 /* Free all the Tx ring sk_buffs */
1454
1455 for (i = 0; i < tx_ring->count; i++) {
1456 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1457 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1458 }
1459
1460 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1461 memset(tx_ring->tx_buffer_info, 0, size);
1462
1463 memset(tx_ring->desc, 0, tx_ring->size);
1464
1465 tx_ring->next_to_use = 0;
1466 tx_ring->next_to_clean = 0;
1467
1468 if (tx_ring->head)
1469 writel(0, adapter->hw.hw_addr + tx_ring->head);
1470 if (tx_ring->tail)
1471 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1472}
1473
1474/**
1475 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1476 * @adapter: board private structure
1477 **/
1478static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1479{
1480 int i;
1481
1482 for (i = 0; i < adapter->num_rx_queues; i++)
1483 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1484}
1485
1486/**
1487 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1488 * @adapter: board private structure
1489 **/
1490static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1491{
1492 int i;
1493
1494 for (i = 0; i < adapter->num_tx_queues; i++)
1495 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1496}
1497
1498void ixgbevf_down(struct ixgbevf_adapter *adapter)
1499{
1500 struct net_device *netdev = adapter->netdev;
1501 struct ixgbe_hw *hw = &adapter->hw;
1502 u32 txdctl;
1503 int i, j;
1504
1505 /* signal that we are down to the interrupt handler */
1506 set_bit(__IXGBEVF_DOWN, &adapter->state);
1507 /* disable receives */
1508
1509 netif_tx_disable(netdev);
1510
1511 msleep(10);
1512
1513 netif_tx_stop_all_queues(netdev);
1514
1515 ixgbevf_irq_disable(adapter);
1516
1517 ixgbevf_napi_disable_all(adapter);
1518
1519 del_timer_sync(&adapter->watchdog_timer);
1520 /* can't call flush scheduled work here because it can deadlock
1521 * if linkwatch_event tries to acquire the rtnl_lock which we are
1522 * holding */
1523 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1524 msleep(1);
1525
1526 /* disable transmits in the hardware now that interrupts are off */
1527 for (i = 0; i < adapter->num_tx_queues; i++) {
1528 j = adapter->tx_ring[i].reg_idx;
1529 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1530 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1531 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1532 }
1533
1534 netif_carrier_off(netdev);
1535
1536 if (!pci_channel_offline(adapter->pdev))
1537 ixgbevf_reset(adapter);
1538
1539 ixgbevf_clean_all_tx_rings(adapter);
1540 ixgbevf_clean_all_rx_rings(adapter);
1541}
1542
1543void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1544{
1545 struct ixgbe_hw *hw = &adapter->hw;
1546
92915f71 1547 WARN_ON(in_interrupt());
c0456c23 1548
1549 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1550 msleep(1);
1551
1552 /*
1553 * Check if PF is up before re-init. If not then skip until
1554 * later when the PF is up and ready to service requests from
1555 * the VF via mailbox. If the VF is up and running then the
1556 * watchdog task will continue to schedule reset tasks until
1557 * the PF is up and running.
1558 */
1559 if (!hw->mac.ops.reset_hw(hw)) {
1560 ixgbevf_down(adapter);
1561 ixgbevf_up(adapter);
1562 }
1563
1564 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1565}
1566
1567void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1568{
1569 struct ixgbe_hw *hw = &adapter->hw;
1570 struct net_device *netdev = adapter->netdev;
1571
1572 if (hw->mac.ops.reset_hw(hw))
1573 hw_dbg(hw, "PF still resetting\n");
1574 else
1575 hw->mac.ops.init_hw(hw);
1576
1577 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1578 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1579 netdev->addr_len);
1580 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1581 netdev->addr_len);
1582 }
1583}
1584
1585static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1586 int vectors)
1587{
1588 int err, vector_threshold;
1589
1590 /* We'll want at least 2 (vector_threshold):
1591 * 1) TxQ[0] + RxQ[0] handler
1592 * 2) Other (Link Status Change, etc.)
1593 */
1594 vector_threshold = MIN_MSIX_COUNT;
1595
1596 /* The more we get, the more we will assign to Tx/Rx Cleanup
1597 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1598 * Right now, we simply care about how many we'll get; we'll
1599 * set them up later while requesting irq's.
1600 */
1601 while (vectors >= vector_threshold) {
1602 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1603 vectors);
1604 if (!err) /* Success in acquiring all requested vectors. */
1605 break;
1606 else if (err < 0)
1607 vectors = 0; /* Nasty failure, quit now */
1608 else /* err == number of vectors we should try again with */
1609 vectors = err;
1610 }
1611
1612 if (vectors < vector_threshold) {
1613 /* Can't allocate enough MSI-X interrupts? Oh well.
1614 * This just means we'll go with either a single MSI
1615 * vector or fall back to legacy interrupts.
1616 */
1617 hw_dbg(&adapter->hw,
1618 "Unable to allocate MSI-X interrupts\n");
1619 kfree(adapter->msix_entries);
1620 adapter->msix_entries = NULL;
1621 } else {
1622 /*
1623 * Adjust for only the vectors we'll use, which is minimum
1624 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1625 * vectors we were allocated.
1626 */
1627 adapter->num_msix_vectors = vectors;
1628 }
1629}
1630
1631/**
1632 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
1633 * @adapter: board private structure to initialize
1634 *
1635 * This is the top level queue allocation routine. The order here is very
1636 * important, starting with the "most" number of features turned on at once,
1637 * and ending with the smallest set of features. This way large combinations
1638 * can be allocated if they're turned on, and smaller combinations are the
1639 * fallthrough conditions.
1640 *
1641 **/
1642static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1643{
1644 /* Start with base case */
1645 adapter->num_rx_queues = 1;
1646 adapter->num_tx_queues = 1;
1647}
1648
1649/**
1650 * ixgbevf_alloc_queues - Allocate memory for all rings
1651 * @adapter: board private structure to initialize
1652 *
1653 * We allocate one ring per queue at run-time since we don't know the
1654 * number of queues at compile-time. The polling_netdev array is
1655 * intended for Multiqueue, but should work fine with a single queue.
1656 **/
1657static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1658{
1659 int i;
1660
1661 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1662 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1663 if (!adapter->tx_ring)
1664 goto err_tx_ring_allocation;
1665
1666 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1667 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1668 if (!adapter->rx_ring)
1669 goto err_rx_ring_allocation;
1670
1671 for (i = 0; i < adapter->num_tx_queues; i++) {
1672 adapter->tx_ring[i].count = adapter->tx_ring_count;
1673 adapter->tx_ring[i].queue_index = i;
1674 adapter->tx_ring[i].reg_idx = i;
1675 }
1676
1677 for (i = 0; i < adapter->num_rx_queues; i++) {
1678 adapter->rx_ring[i].count = adapter->rx_ring_count;
1679 adapter->rx_ring[i].queue_index = i;
1680 adapter->rx_ring[i].reg_idx = i;
1681 }
1682
1683 return 0;
1684
1685err_rx_ring_allocation:
1686 kfree(adapter->tx_ring);
1687err_tx_ring_allocation:
1688 return -ENOMEM;
1689}
1690
1691/**
1692 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
1693 * @adapter: board private structure to initialize
1694 *
1695 * Attempt to configure the interrupts using the best available
1696 * capabilities of the hardware and the kernel.
1697 **/
1698static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
1699{
1700 int err = 0;
1701 int vector, v_budget;
1702
1703 /*
1704 * It's easy to be greedy for MSI-X vectors, but it really
1705 * doesn't do us much good if we have a lot more vectors
1706 * than CPU's. So let's be conservative and only ask for
1707 * (roughly) the same number of vectors as there are CPU's.
1708 * The default is to use pairs of vectors.
92915f71 1709 */
1710 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
1711 v_budget = min_t(int, v_budget, num_online_cpus());
1712 v_budget += NON_Q_VECTORS;
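 /* e.g. a single Rx/Tx queue pair ends up requesting one queue vector
  * plus the non-queue (mailbox) vector(s), in line with the minimum of
  * two MSI-X vectors described in ixgbevf_acquire_msix_vectors() */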
1713
1714 /* A failure in MSI-X entry allocation isn't fatal, but it does
1715 * mean we disable MSI-X capabilities of the adapter. */
1716 adapter->msix_entries = kcalloc(v_budget,
1717 sizeof(struct msix_entry), GFP_KERNEL);
1718 if (!adapter->msix_entries) {
1719 err = -ENOMEM;
1720 goto out;
1721 }
1722
1723 for (vector = 0; vector < v_budget; vector++)
1724 adapter->msix_entries[vector].entry = vector;
1725
1726 ixgbevf_acquire_msix_vectors(adapter, v_budget);
1727
1728out:
1729 return err;
1730}
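/* Worked example (illustrative, assuming NON_Q_VECTORS is 1 for the
 * mailbox/other-cause vector): with one Rx queue, one Tx queue and an
 * 8-CPU host,
 *
 *	v_budget = max(1, 1)           = 1
 *	v_budget = min_t(int, 1, 8)    = 1
 *	v_budget += NON_Q_VECTORS      = 2
 *
 * so two msix_entry slots are allocated and handed to
 * ixgbevf_acquire_msix_vectors(), which trims num_msix_vectors down to
 * whatever the PCI core actually grants.
 */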
1731
1732/**
1733 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
1734 * @adapter: board private structure to initialize
1735 *
1736 * We allocate one q_vector per queue interrupt. If allocation fails we
1737 * return -ENOMEM.
1738 **/
1739static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
1740{
1741 int q_idx, num_q_vectors;
1742 struct ixgbevf_q_vector *q_vector;
92915f71
GR
1743
1744 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
92915f71
GR
1745
1746 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1747 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
1748 if (!q_vector)
1749 goto err_out;
1750 q_vector->adapter = adapter;
1751 q_vector->v_idx = q_idx;
fa71ae27
AD
1752 netif_napi_add(adapter->netdev, &q_vector->napi,
1753 ixgbevf_poll, 64);
92915f71
GR
1754 adapter->q_vector[q_idx] = q_vector;
1755 }
1756
1757 return 0;
1758
1759err_out:
1760 while (q_idx) {
1761 q_idx--;
1762 q_vector = adapter->q_vector[q_idx];
1763 netif_napi_del(&q_vector->napi);
1764 kfree(q_vector);
1765 adapter->q_vector[q_idx] = NULL;
1766 }
1767 return -ENOMEM;
1768}
1769
1770/**
1771 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
1772 * @adapter: board private structure to initialize
1773 *
1774 * This function frees the memory allocated to the q_vectors. In addition, if
1775 * NAPI is enabled, it will delete any references to the NAPI struct prior
1776 * to freeing the q_vector.
1777 **/
1778static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
1779{
1780 int q_idx, num_q_vectors;
1781 int napi_vectors;
1782
1783 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1784 napi_vectors = adapter->num_rx_queues;
1785
1786 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
1787 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
1788
1789 adapter->q_vector[q_idx] = NULL;
1790 if (q_idx < napi_vectors)
1791 netif_napi_del(&q_vector->napi);
1792 kfree(q_vector);
1793 }
1794}
1795
1796/**
1797 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
1798 * @adapter: board private structure
1799 *
1800 **/
1801static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
1802{
1803 pci_disable_msix(adapter->pdev);
1804 kfree(adapter->msix_entries);
1805 adapter->msix_entries = NULL;
92915f71
GR
1806}
1807
1808/**
1809 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
1810 * @adapter: board private structure to initialize
1811 *
1812 **/
1813static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
1814{
1815 int err;
1816
1817 /* Number of supported queues */
1818 ixgbevf_set_num_queues(adapter);
1819
1820 err = ixgbevf_set_interrupt_capability(adapter);
1821 if (err) {
1822 hw_dbg(&adapter->hw,
1823 "Unable to setup interrupt capabilities\n");
1824 goto err_set_interrupt;
1825 }
1826
1827 err = ixgbevf_alloc_q_vectors(adapter);
1828 if (err) {
1829 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
1830 "vectors\n");
1831 goto err_alloc_q_vectors;
1832 }
1833
1834 err = ixgbevf_alloc_queues(adapter);
1835 if (err) {
dbd9636e 1836 pr_err("Unable to allocate memory for queues\n");
92915f71
GR
1837 goto err_alloc_queues;
1838 }
1839
1840 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
1841 "Tx Queue count = %u\n",
1842 (adapter->num_rx_queues > 1) ? "Enabled" :
1843 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
1844
1845 set_bit(__IXGBEVF_DOWN, &adapter->state);
1846
1847 return 0;
1848err_alloc_queues:
1849 ixgbevf_free_q_vectors(adapter);
1850err_alloc_q_vectors:
1851 ixgbevf_reset_interrupt_capability(adapter);
1852err_set_interrupt:
1853 return err;
1854}
1855
1856/**
1857 * ixgbevf_sw_init - Initialize general software structures
1858 * (struct ixgbevf_adapter)
1859 * @adapter: board private structure to initialize
1860 *
1861 * ixgbevf_sw_init initializes the Adapter private data structure.
1862 * Fields are initialized based on PCI device information and
1863 * OS network device settings (MTU size).
1864 **/
1865static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
1866{
1867 struct ixgbe_hw *hw = &adapter->hw;
1868 struct pci_dev *pdev = adapter->pdev;
1869 int err;
1870
1871 /* PCI config space info */
1872
1873 hw->vendor_id = pdev->vendor;
1874 hw->device_id = pdev->device;
ff938e43 1875 hw->revision_id = pdev->revision;
92915f71
GR
1876 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1877 hw->subsystem_device_id = pdev->subsystem_device;
1878
1879 hw->mbx.ops.init_params(hw);
1880 hw->mac.max_tx_queues = MAX_TX_QUEUES;
1881 hw->mac.max_rx_queues = MAX_RX_QUEUES;
1882 err = hw->mac.ops.reset_hw(hw);
1883 if (err) {
1884 dev_info(&pdev->dev,
1885 "PF still in reset state, assigning new address\n");
1a0d6ae5
DK
1886 eth_hw_addr_random(adapter->netdev);
1887 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
1888 adapter->netdev->addr_len);
92915f71
GR
1889 } else {
1890 err = hw->mac.ops.init_hw(hw);
1891 if (err) {
dbd9636e 1892 pr_err("init_hw failed: %d\n", err);
1893 goto out;
1894 }
1a0d6ae5
DK
1895 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
1896 adapter->netdev->addr_len);
92915f71
GR
1897 }
1898
1899 /* Enable dynamic interrupt throttling rates */
5f3600eb
AD
1900 adapter->rx_itr_setting = 1;
1901 adapter->tx_itr_setting = 1;
92915f71 1902
92915f71
GR
1903 /* set default ring sizes */
1904 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
1905 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
1906
92915f71 1907 set_bit(__IXGBEVF_DOWN, &adapter->state);
1a0d6ae5 1908 return 0;
92915f71
GR
1909
1910out:
1911 return err;
1912}
1913
92915f71
GR
1914#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
1915 { \
1916 u32 current_counter = IXGBE_READ_REG(hw, reg); \
1917 if (current_counter < last_counter) \
1918 counter += 0x100000000LL; \
1919 last_counter = current_counter; \
1920 counter &= 0xFFFFFFFF00000000LL; \
1921 counter |= current_counter; \
1922 }
1923
1924#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
1925 { \
1926 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
1927 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
1928 u64 current_counter = (current_counter_msb << 32) | \
1929 current_counter_lsb; \
1930 if (current_counter < last_counter) \
1931 counter += 0x1000000000LL; \
1932 last_counter = current_counter; \
1933 counter &= 0xFFFFFFF000000000LL; \
1934 counter |= current_counter; \
1935 }
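/* Worked example (illustrative): the VF octet counters are only 36 bits
 * wide in hardware, so the macro above splices each new reading into a
 * 64-bit software counter and handles wrap. If last_counter was
 * 0xFFFFFFFF0 (just below 2^36) and the next reading is 0x10, then
 * current_counter < last_counter, so 2^36 (0x1000000000) is added; masking
 * with 0xFFFFFFF000000000 keeps the accumulated high bits and OR-ing in
 * the new reading yields 0x1000000010, so the software counter keeps
 * increasing monotonically across the hardware wrap.
 */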
1936/**
1937 * ixgbevf_update_stats - Update the board statistics counters.
1938 * @adapter: board private structure
1939 **/
1940void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
1941{
1942 struct ixgbe_hw *hw = &adapter->hw;
1943
1944 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
1945 adapter->stats.vfgprc);
1946 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
1947 adapter->stats.vfgptc);
1948 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
1949 adapter->stats.last_vfgorc,
1950 adapter->stats.vfgorc);
1951 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
1952 adapter->stats.last_vfgotc,
1953 adapter->stats.vfgotc);
1954 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
1955 adapter->stats.vfmprc);
92915f71
GR
1956}
1957
1958/**
1959 * ixgbevf_watchdog - Timer Call-back
1960 * @data: pointer to adapter cast into an unsigned long
1961 **/
1962static void ixgbevf_watchdog(unsigned long data)
1963{
1964 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
1965 struct ixgbe_hw *hw = &adapter->hw;
5f3600eb 1966 u32 eics = 0;
92915f71
GR
1967 int i;
1968
1969 /*
1970 * Do the watchdog outside of interrupt context due to the lovely
1971 * delays that some of the newer hardware requires
1972 */
1973
1974 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
1975 goto watchdog_short_circuit;
1976
1977 /* get one bit for every active tx/rx interrupt vector */
1978 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
1979 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
6b43c446 1980 if (qv->rx.ring || qv->tx.ring)
5f3600eb 1981 eics |= 1 << i;
92915f71
GR
1982 }
1983
5f3600eb 1984 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
92915f71
GR
1985
1986watchdog_short_circuit:
1987 schedule_work(&adapter->watchdog_task);
1988}
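/* Illustrative example: with two queue vectors that each carry at least
 * one ring, the loop above builds eics = (1 << 0) | (1 << 1) = 0x3;
 * writing that to VTEICS asks the hardware to fire those vectors, so their
 * clean-up paths run even if the device has stopped generating interrupts
 * on its own, which is how stalled rings get noticed between watchdog runs.
 */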
1989
1990/**
1991 * ixgbevf_tx_timeout - Respond to a Tx Hang
1992 * @netdev: network interface device structure
1993 **/
1994static void ixgbevf_tx_timeout(struct net_device *netdev)
1995{
1996 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1997
1998 /* Do the reset outside of interrupt context */
1999 schedule_work(&adapter->reset_task);
2000}
2001
2002static void ixgbevf_reset_task(struct work_struct *work)
2003{
2004 struct ixgbevf_adapter *adapter;
2005 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2006
2007 /* If we're already down or resetting, just bail */
2008 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2009 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2010 return;
2011
2012 adapter->tx_timeout_count++;
2013
2014 ixgbevf_reinit_locked(adapter);
2015}
2016
2017/**
2018 * ixgbevf_watchdog_task - worker thread to bring link up
2019 * @work: pointer to work_struct containing our data
2020 **/
2021static void ixgbevf_watchdog_task(struct work_struct *work)
2022{
2023 struct ixgbevf_adapter *adapter = container_of(work,
2024 struct ixgbevf_adapter,
2025 watchdog_task);
2026 struct net_device *netdev = adapter->netdev;
2027 struct ixgbe_hw *hw = &adapter->hw;
2028 u32 link_speed = adapter->link_speed;
2029 bool link_up = adapter->link_up;
2030
2031 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2032
2033 /*
2034 * Always check the link on the watchdog because we have
2035 * no LSC interrupt
2036 */
2037 if (hw->mac.ops.check_link) {
2038 if ((hw->mac.ops.check_link(hw, &link_speed,
2039 &link_up, false)) != 0) {
2040 adapter->link_up = link_up;
2041 adapter->link_speed = link_speed;
da6b3330
GR
2042 netif_carrier_off(netdev);
2043 netif_tx_stop_all_queues(netdev);
92915f71
GR
2044 schedule_work(&adapter->reset_task);
2045 goto pf_has_reset;
2046 }
2047 } else {
2048 /* always assume link is up if there is no
2049 * check_link function */
2050 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2051 link_up = true;
2052 }
2053 adapter->link_up = link_up;
2054 adapter->link_speed = link_speed;
2055
2056 if (link_up) {
2057 if (!netif_carrier_ok(netdev)) {
2058 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2059 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2060 10 : 1);
92915f71
GR
2061 netif_carrier_on(netdev);
2062 netif_tx_wake_all_queues(netdev);
92915f71
GR
2063 }
2064 } else {
2065 adapter->link_up = false;
2066 adapter->link_speed = 0;
2067 if (netif_carrier_ok(netdev)) {
2068 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2069 netif_carrier_off(netdev);
2070 netif_tx_stop_all_queues(netdev);
2071 }
2072 }
2073
92915f71
GR
2074 ixgbevf_update_stats(adapter);
2075
33bd9f60 2076pf_has_reset:
92915f71
GR
2077 /* Reset the timer */
2078 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2079 mod_timer(&adapter->watchdog_timer,
2080 round_jiffies(jiffies + (2 * HZ)));
2081
2082 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2083}
2084
2085/**
2086 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2087 * @adapter: board private structure
2088 * @tx_ring: Tx descriptor ring for a specific queue
2089 *
2090 * Free all transmit software resources
2091 **/
2092void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2093 struct ixgbevf_ring *tx_ring)
2094{
2095 struct pci_dev *pdev = adapter->pdev;
2096
92915f71
GR
2097 ixgbevf_clean_tx_ring(adapter, tx_ring);
2098
2099 vfree(tx_ring->tx_buffer_info);
2100 tx_ring->tx_buffer_info = NULL;
2101
2a1f8794
NN
2102 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2103 tx_ring->dma);
92915f71
GR
2104
2105 tx_ring->desc = NULL;
2106}
2107
2108/**
2109 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2110 * @adapter: board private structure
2111 *
2112 * Free all transmit software resources
2113 **/
2114static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2115{
2116 int i;
2117
2118 for (i = 0; i < adapter->num_tx_queues; i++)
2119 if (adapter->tx_ring[i].desc)
2120 ixgbevf_free_tx_resources(adapter,
2121 &adapter->tx_ring[i]);
2122
2123}
2124
2125/**
2126 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2127 * @adapter: board private structure
2128 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2129 *
2130 * Return 0 on success, negative on failure
2131 **/
2132int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2133 struct ixgbevf_ring *tx_ring)
2134{
2135 struct pci_dev *pdev = adapter->pdev;
2136 int size;
2137
2138 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
89bf67f1 2139 tx_ring->tx_buffer_info = vzalloc(size);
92915f71
GR
2140 if (!tx_ring->tx_buffer_info)
2141 goto err;
92915f71
GR
2142
2143 /* round up to nearest 4K */
2144 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2145 tx_ring->size = ALIGN(tx_ring->size, 4096);
2146
2a1f8794
NN
2147 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2148 &tx_ring->dma, GFP_KERNEL);
92915f71
GR
2149 if (!tx_ring->desc)
2150 goto err;
2151
2152 tx_ring->next_to_use = 0;
2153 tx_ring->next_to_clean = 0;
92915f71
GR
2154 return 0;
2155
2156err:
2157 vfree(tx_ring->tx_buffer_info);
2158 tx_ring->tx_buffer_info = NULL;
2159 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2160 "descriptor ring\n");
2161 return -ENOMEM;
2162}
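/* Sizing example (illustrative): each union ixgbe_adv_tx_desc is 16 bytes,
 * so a ring of, say, 96 descriptors needs 1536 bytes, which ALIGN() rounds
 * up to one 4K page for the coherent DMA allocation. A default-sized ring
 * is typically a multiple of 256 descriptors and therefore already 4K
 * aligned, so the rounding mainly matters for counts chosen via ethtool -G.
 */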
2163
2164/**
2165 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2166 * @adapter: board private structure
2167 *
2168 * If this function returns with an error, then it's possible one or
2169 * more of the rings is populated (while the rest are not). It is the
2170 * caller's duty to clean up those orphaned rings.
2171 *
2172 * Return 0 on success, negative on failure
2173 **/
2174static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2175{
2176 int i, err = 0;
2177
2178 for (i = 0; i < adapter->num_tx_queues; i++) {
2179 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2180 if (!err)
2181 continue;
2182 hw_dbg(&adapter->hw,
2183 "Allocation for Tx Queue %u failed\n", i);
2184 break;
2185 }
2186
2187 return err;
2188}
2189
2190/**
2191 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2192 * @adapter: board private structure
2193 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2194 *
2195 * Returns 0 on success, negative on failure
2196 **/
2197int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2198 struct ixgbevf_ring *rx_ring)
2199{
2200 struct pci_dev *pdev = adapter->pdev;
2201 int size;
2202
2203 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
89bf67f1 2204 rx_ring->rx_buffer_info = vzalloc(size);
e404decb 2205 if (!rx_ring->rx_buffer_info)
92915f71 2206 goto alloc_failed;
92915f71
GR
2207
2208 /* Round up to nearest 4K */
2209 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2210 rx_ring->size = ALIGN(rx_ring->size, 4096);
2211
2a1f8794
NN
2212 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2213 &rx_ring->dma, GFP_KERNEL);
92915f71
GR
2214
2215 if (!rx_ring->desc) {
2216 hw_dbg(&adapter->hw,
2217 "Unable to allocate memory for "
2218 "the receive descriptor ring\n");
2219 vfree(rx_ring->rx_buffer_info);
2220 rx_ring->rx_buffer_info = NULL;
2221 goto alloc_failed;
2222 }
2223
2224 rx_ring->next_to_clean = 0;
2225 rx_ring->next_to_use = 0;
2226
2227 return 0;
2228alloc_failed:
2229 return -ENOMEM;
2230}
2231
2232/**
2233 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2234 * @adapter: board private structure
2235 *
2236 * If this function returns with an error, then it's possible one or
2237 * more of the rings is populated (while the rest are not). It is the
2238 * caller's duty to clean up those orphaned rings.
2239 *
2240 * Return 0 on success, negative on failure
2241 **/
2242static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2243{
2244 int i, err = 0;
2245
2246 for (i = 0; i < adapter->num_rx_queues; i++) {
2247 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2248 if (!err)
2249 continue;
2250 hw_dbg(&adapter->hw,
2251 "Allocation for Rx Queue %u failed\n", i);
2252 break;
2253 }
2254 return err;
2255}
2256
2257/**
2258 * ixgbevf_free_rx_resources - Free Rx Resources
2259 * @adapter: board private structure
2260 * @rx_ring: ring to clean the resources from
2261 *
2262 * Free all receive software resources
2263 **/
2264void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2265 struct ixgbevf_ring *rx_ring)
2266{
2267 struct pci_dev *pdev = adapter->pdev;
2268
2269 ixgbevf_clean_rx_ring(adapter, rx_ring);
2270
2271 vfree(rx_ring->rx_buffer_info);
2272 rx_ring->rx_buffer_info = NULL;
2273
2a1f8794
NN
2274 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2275 rx_ring->dma);
92915f71
GR
2276
2277 rx_ring->desc = NULL;
2278}
2279
2280/**
2281 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2282 * @adapter: board private structure
2283 *
2284 * Free all receive software resources
2285 **/
2286static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2287{
2288 int i;
2289
2290 for (i = 0; i < adapter->num_rx_queues; i++)
2291 if (adapter->rx_ring[i].desc)
2292 ixgbevf_free_rx_resources(adapter,
2293 &adapter->rx_ring[i]);
2294}
2295
2296/**
2297 * ixgbevf_open - Called when a network interface is made active
2298 * @netdev: network interface device structure
2299 *
2300 * Returns 0 on success, negative value on failure
2301 *
2302 * The open entry point is called when a network interface is made
2303 * active by the system (IFF_UP). At this point all resources needed
2304 * for transmit and receive operations are allocated, the interrupt
2305 * handler is registered with the OS, the watchdog timer is started,
2306 * and the stack is notified that the interface is ready.
2307 **/
2308static int ixgbevf_open(struct net_device *netdev)
2309{
2310 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2311 struct ixgbe_hw *hw = &adapter->hw;
2312 int err;
2313
2314 /* disallow open during test */
2315 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2316 return -EBUSY;
2317
2318 if (hw->adapter_stopped) {
2319 ixgbevf_reset(adapter);
2320 /* if adapter is still stopped then PF isn't up and
2321 * the vf can't start. */
2322 if (hw->adapter_stopped) {
2323 err = IXGBE_ERR_MBX;
dbd9636e
JK
2324 pr_err("Unable to start - perhaps the PF Driver isn't "
2325 "up yet\n");
92915f71
GR
2326 goto err_setup_reset;
2327 }
2328 }
2329
2330 /* allocate transmit descriptors */
2331 err = ixgbevf_setup_all_tx_resources(adapter);
2332 if (err)
2333 goto err_setup_tx;
2334
2335 /* allocate receive descriptors */
2336 err = ixgbevf_setup_all_rx_resources(adapter);
2337 if (err)
2338 goto err_setup_rx;
2339
2340 ixgbevf_configure(adapter);
2341
2342 /*
2343 * Map the Tx/Rx rings to the vectors we were allotted.
2344 * if request_irq will be called in this function map_rings
2345 * must be called *before* up_complete
2346 */
2347 ixgbevf_map_rings_to_vectors(adapter);
2348
795180d8 2349 ixgbevf_up_complete(adapter);
92915f71
GR
2350
2351 /* clear any pending interrupts, may auto mask */
2352 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2353 err = ixgbevf_request_irq(adapter);
2354 if (err)
2355 goto err_req_irq;
2356
5f3600eb 2357 ixgbevf_irq_enable(adapter);
92915f71
GR
2358
2359 return 0;
2360
2361err_req_irq:
2362 ixgbevf_down(adapter);
92915f71
GR
2363 ixgbevf_free_irq(adapter);
2364err_setup_rx:
2365 ixgbevf_free_all_rx_resources(adapter);
2366err_setup_tx:
2367 ixgbevf_free_all_tx_resources(adapter);
2368 ixgbevf_reset(adapter);
2369
2370err_setup_reset:
2371
2372 return err;
2373}
2374
2375/**
2376 * ixgbevf_close - Disables a network interface
2377 * @netdev: network interface device structure
2378 *
2379 * Returns 0, this is not allowed to fail
2380 *
2381 * The close entry point is called when an interface is de-activated
2382 * by the OS. The hardware is still under the driver's control, but
2383 * needs to be disabled. A global MAC reset is issued to stop the
2384 * hardware, and all transmit and receive resources are freed.
2385 **/
2386static int ixgbevf_close(struct net_device *netdev)
2387{
2388 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2389
2390 ixgbevf_down(adapter);
2391 ixgbevf_free_irq(adapter);
2392
2393 ixgbevf_free_all_tx_resources(adapter);
2394 ixgbevf_free_all_rx_resources(adapter);
2395
2396 return 0;
2397}
2398
2399static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
2400 struct ixgbevf_ring *tx_ring,
2401 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2402{
2403 struct ixgbe_adv_tx_context_desc *context_desc;
2404 unsigned int i;
2405 int err;
2406 struct ixgbevf_tx_buffer *tx_buffer_info;
2407 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2408 u32 mss_l4len_idx, l4len;
2409
2410 if (skb_is_gso(skb)) {
2411 if (skb_header_cloned(skb)) {
2412 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2413 if (err)
2414 return err;
2415 }
2416 l4len = tcp_hdrlen(skb);
2417 *hdr_len += l4len;
2418
2419 if (skb->protocol == htons(ETH_P_IP)) {
2420 struct iphdr *iph = ip_hdr(skb);
2421 iph->tot_len = 0;
2422 iph->check = 0;
2423 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2424 iph->daddr, 0,
2425 IPPROTO_TCP,
2426 0);
2427 adapter->hw_tso_ctxt++;
9010bc33 2428 } else if (skb_is_gso_v6(skb)) {
2429 ipv6_hdr(skb)->payload_len = 0;
2430 tcp_hdr(skb)->check =
2431 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2432 &ipv6_hdr(skb)->daddr,
2433 0, IPPROTO_TCP, 0);
2434 adapter->hw_tso6_ctxt++;
2435 }
2436
2437 i = tx_ring->next_to_use;
2438
2439 tx_buffer_info = &tx_ring->tx_buffer_info[i];
908421f6 2440 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
92915f71
GR
2441
2442 /* VLAN MACLEN IPLEN */
2443 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2444 vlan_macip_lens |=
2445 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2446 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2447 IXGBE_ADVTXD_MACLEN_SHIFT);
2448 *hdr_len += skb_network_offset(skb);
2449 vlan_macip_lens |=
2450 (skb_transport_header(skb) - skb_network_header(skb));
2451 *hdr_len +=
2452 (skb_transport_header(skb) - skb_network_header(skb));
2453 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2454 context_desc->seqnum_seed = 0;
2455
2456 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2457 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2458 IXGBE_ADVTXD_DTYP_CTXT);
2459
2460 if (skb->protocol == htons(ETH_P_IP))
2461 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2462 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2463 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2464
2465 /* MSS L4LEN IDX */
2466 mss_l4len_idx =
2467 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2468 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2469 /* use index 1 for TSO */
2470 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2471 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2472
2473 tx_buffer_info->time_stamp = jiffies;
2474 tx_buffer_info->next_to_watch = i;
2475
2476 i++;
2477 if (i == tx_ring->count)
2478 i = 0;
2479 tx_ring->next_to_use = i;
2480
2481 return true;
2482 }
2483
2484 return false;
2485}
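/* Packing example (illustrative; assumes the usual ixgbe shift values of
 * 16, 8 and 4 for IXGBE_ADVTXD_MSS_SHIFT, IXGBE_ADVTXD_L4LEN_SHIFT and
 * IXGBE_ADVTXD_IDX_SHIFT): for a TSO frame with gso_size 1448 and a
 * 20-byte TCP header, the context descriptor's MSS/L4LEN/IDX field becomes
 *
 *	(1448 << 16) | (20 << 8) | (1 << 4)
 *
 * i.e. the MSS in the top half, the L4 header length in bits 8-15, and
 * context index 1 reserved for TSO so it does not collide with the
 * checksum context at index 0 set up in ixgbevf_tx_csum().
 */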
2486
2487static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
2488 struct ixgbevf_ring *tx_ring,
2489 struct sk_buff *skb, u32 tx_flags)
2490{
2491 struct ixgbe_adv_tx_context_desc *context_desc;
2492 unsigned int i;
2493 struct ixgbevf_tx_buffer *tx_buffer_info;
2494 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2495
2496 if (skb->ip_summed == CHECKSUM_PARTIAL ||
2497 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2498 i = tx_ring->next_to_use;
2499 tx_buffer_info = &tx_ring->tx_buffer_info[i];
908421f6 2500 context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
92915f71
GR
2501
2502 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2503 vlan_macip_lens |= (tx_flags &
2504 IXGBE_TX_FLAGS_VLAN_MASK);
2505 vlan_macip_lens |= (skb_network_offset(skb) <<
2506 IXGBE_ADVTXD_MACLEN_SHIFT);
2507 if (skb->ip_summed == CHECKSUM_PARTIAL)
2508 vlan_macip_lens |= (skb_transport_header(skb) -
2509 skb_network_header(skb));
2510
2511 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2512 context_desc->seqnum_seed = 0;
2513
2514 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2515 IXGBE_ADVTXD_DTYP_CTXT);
2516
2517 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2518 switch (skb->protocol) {
2519 case __constant_htons(ETH_P_IP):
2520 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2521 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2522 type_tucmd_mlhl |=
2523 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2524 break;
2525 case __constant_htons(ETH_P_IPV6):
2526 /* XXX what about other V6 headers?? */
2527 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2528 type_tucmd_mlhl |=
2529 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2530 break;
2531 default:
2532 if (unlikely(net_ratelimit())) {
2533 pr_warn("partial checksum but "
2534 "proto=%x!\n", skb->protocol);
92915f71
GR
2535 }
2536 break;
2537 }
2538 }
2539
2540 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2541 /* use index zero for tx checksum offload */
2542 context_desc->mss_l4len_idx = 0;
2543
2544 tx_buffer_info->time_stamp = jiffies;
2545 tx_buffer_info->next_to_watch = i;
2546
2547 adapter->hw_csum_tx_good++;
2548 i++;
2549 if (i == tx_ring->count)
2550 i = 0;
2551 tx_ring->next_to_use = i;
2552
2553 return true;
2554 }
2555
2556 return false;
2557}
2558
2559static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2560 struct ixgbevf_ring *tx_ring,
2561 struct sk_buff *skb, u32 tx_flags,
2562 unsigned int first)
2563{
2564 struct pci_dev *pdev = adapter->pdev;
2565 struct ixgbevf_tx_buffer *tx_buffer_info;
2566 unsigned int len;
2567 unsigned int total = skb->len;
2540ddb5
KV
2568 unsigned int offset = 0, size;
2569 int count = 0;
92915f71
GR
2570 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2571 unsigned int f;
65deeed7 2572 int i;
92915f71
GR
2573
2574 i = tx_ring->next_to_use;
2575
2576 len = min(skb_headlen(skb), total);
2577 while (len) {
2578 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2579 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2580
2581 tx_buffer_info->length = size;
2582 tx_buffer_info->mapped_as_page = false;
2a1f8794 2583 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
92915f71 2584 skb->data + offset,
2585 size, DMA_TO_DEVICE);
2586 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2587 goto dma_error;
2588 tx_buffer_info->time_stamp = jiffies;
2589 tx_buffer_info->next_to_watch = i;
2590
2591 len -= size;
2592 total -= size;
2593 offset += size;
2594 count++;
2595 i++;
2596 if (i == tx_ring->count)
2597 i = 0;
2598 }
2599
2600 for (f = 0; f < nr_frags; f++) {
9e903e08 2601 const struct skb_frag_struct *frag;
92915f71
GR
2602
2603 frag = &skb_shinfo(skb)->frags[f];
9e903e08 2604 len = min((unsigned int)skb_frag_size(frag), total);
877749bf 2605 offset = 0;
92915f71
GR
2606
2607 while (len) {
2608 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2609 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2610
2611 tx_buffer_info->length = size;
877749bf
IC
2612 tx_buffer_info->dma =
2613 skb_frag_dma_map(&adapter->pdev->dev, frag,
2614 offset, size, DMA_TO_DEVICE);
92915f71 2615 tx_buffer_info->mapped_as_page = true;
2a1f8794 2616 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2617 goto dma_error;
2618 tx_buffer_info->time_stamp = jiffies;
2619 tx_buffer_info->next_to_watch = i;
2620
2621 len -= size;
2622 total -= size;
2623 offset += size;
2624 count++;
2625 i++;
2626 if (i == tx_ring->count)
2627 i = 0;
2628 }
2629 if (total == 0)
2630 break;
2631 }
2632
2633 if (i == 0)
2634 i = tx_ring->count - 1;
2635 else
2636 i = i - 1;
2637 tx_ring->tx_buffer_info[i].skb = skb;
2638 tx_ring->tx_buffer_info[first].next_to_watch = i;
2639
2640 return count;
2641
2642dma_error:
2643 dev_err(&pdev->dev, "TX DMA map failed\n");
2644
2645 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2646 tx_buffer_info->dma = 0;
2647 tx_buffer_info->time_stamp = 0;
2648 tx_buffer_info->next_to_watch = 0;
2649 count--;
2650
2651 /* clear timestamp and dma mappings for remaining portion of packet */
2652 while (count >= 0) {
2653 count--;
2654 i--;
2655 if (i < 0)
2656 i += tx_ring->count;
2657 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2658 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2659 }
2660
2661 return count;
2662}
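/* Unwind example (illustrative): if the head and one fragment mapped
 * successfully (count == 2) and the next mapping fails, the dma_error path
 * first clears the entry that failed, then walks i backwards -- wrapping
 * through tx_ring->count when it goes below zero -- and calls
 * ixgbevf_unmap_and_free_tx_resource() on each of the two entries that did
 * map, so no DMA mappings are leaked for the aborted frame.
 */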
2663
2664static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
2665 struct ixgbevf_ring *tx_ring, int tx_flags,
2666 int count, u32 paylen, u8 hdr_len)
2667{
2668 union ixgbe_adv_tx_desc *tx_desc = NULL;
2669 struct ixgbevf_tx_buffer *tx_buffer_info;
2670 u32 olinfo_status = 0, cmd_type_len = 0;
2671 unsigned int i;
2672
2673 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2674
2675 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2676
2677 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
2678
2679 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2680 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
2681
2682 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
2683 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
2684
2685 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
2686 IXGBE_ADVTXD_POPTS_SHIFT;
2687
2688 /* use index 1 context for tso */
2689 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2690 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
2691 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
2692 IXGBE_ADVTXD_POPTS_SHIFT;
2693
2694 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
2695 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
2696 IXGBE_ADVTXD_POPTS_SHIFT;
2697
2698 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
2699
2700 i = tx_ring->next_to_use;
2701 while (count--) {
2702 tx_buffer_info = &tx_ring->tx_buffer_info[i];
908421f6 2703 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
92915f71
GR
2704 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
2705 tx_desc->read.cmd_type_len =
2706 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
2707 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2708 i++;
2709 if (i == tx_ring->count)
2710 i = 0;
2711 }
2712
2713 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2714
2715 /*
2716 * Force memory writes to complete before letting h/w
2717 * know there are new descriptors to fetch. (Only
2718 * applicable for weak-ordered memory model archs,
2719 * such as IA-64).
2720 */
2721 wmb();
2722
2723 tx_ring->next_to_use = i;
2724 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2725}
2726
2727static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
2728 struct ixgbevf_ring *tx_ring, int size)
2729{
2730 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2731
2732 netif_stop_subqueue(netdev, tx_ring->queue_index);
2733 /* Herbert's original patch had:
2734 * smp_mb__after_netif_stop_queue();
2735 * but since that doesn't exist yet, just open code it. */
2736 smp_mb();
2737
2738 /* We need to check again in a case another CPU has just
2739 * made room available. */
2740 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
2741 return -EBUSY;
2742
2743 /* A reprieve! - use start_queue because it doesn't call schedule */
2744 netif_start_subqueue(netdev, tx_ring->queue_index);
2745 ++adapter->restart_queue;
2746 return 0;
2747}
2748
2749static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
2750 struct ixgbevf_ring *tx_ring, int size)
2751{
2752 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
2753 return 0;
2754 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
2755}
2756
2757static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2758{
2759 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2760 struct ixgbevf_ring *tx_ring;
2761 unsigned int first;
2762 unsigned int tx_flags = 0;
2763 u8 hdr_len = 0;
2764 int r_idx = 0, tso;
3595990a
AD
2765 u16 count = TXD_USE_COUNT(skb_headlen(skb));
2766#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2767 unsigned short f;
2768#endif
92915f71
GR
2769
2770 tx_ring = &adapter->tx_ring[r_idx];
2771
3595990a
AD
2772 /*
2773 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
2774 * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
2775 * + 2 desc gap to keep tail from touching head,
2776 * + 1 desc for context descriptor,
2777 * otherwise try next time
2778 */
2779#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
2780 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
2781 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
2782#else
2783 count += skb_shinfo(skb)->nr_frags;
2784#endif
2785 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count + 3)) {
2786 adapter->tx_busy++;
2787 return NETDEV_TX_BUSY;
2788 }
2789
eab6d18d 2790 if (vlan_tx_tag_present(skb)) {
2791 tx_flags |= vlan_tx_tag_get(skb);
2792 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
2793 tx_flags |= IXGBE_TX_FLAGS_VLAN;
2794 }
2795
92915f71
GR
2796 first = tx_ring->next_to_use;
2797
2798 if (skb->protocol == htons(ETH_P_IP))
2799 tx_flags |= IXGBE_TX_FLAGS_IPV4;
2800 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
2801 if (tso < 0) {
2802 dev_kfree_skb_any(skb);
2803 return NETDEV_TX_OK;
2804 }
2805
2806 if (tso)
2807 tx_flags |= IXGBE_TX_FLAGS_TSO;
2808 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2809 (skb->ip_summed == CHECKSUM_PARTIAL))
2810 tx_flags |= IXGBE_TX_FLAGS_CSUM;
2811
2812 ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
2813 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
2814 skb->len, hdr_len);
2815
92915f71
GR
2816 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
2817
2818 return NETDEV_TX_OK;
2819}
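/* Budget example (illustrative; assumes IXGBE_MAX_DATA_PER_TXD is at least
 * PAGE_SIZE so each page fragment needs a single descriptor): a frame with
 * a 200-byte linear head and two page fragments gives count = 1 + 2 = 3,
 * and ixgbevf_maybe_stop_tx(netdev, tx_ring, 3 + 3) insists on six free
 * descriptors -- the data descriptors plus one context descriptor plus the
 * two-descriptor gap -- before the frame is mapped and queued.
 */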
2820
92915f71
GR
2821/**
2822 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
2823 * @netdev: network interface device structure
2824 * @p: pointer to an address structure
2825 *
2826 * Returns 0 on success, negative on failure
2827 **/
2828static int ixgbevf_set_mac(struct net_device *netdev, void *p)
2829{
2830 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2831 struct ixgbe_hw *hw = &adapter->hw;
2832 struct sockaddr *addr = p;
2833
2834 if (!is_valid_ether_addr(addr->sa_data))
2835 return -EADDRNOTAVAIL;
2836
2837 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2838 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2839
2840 if (hw->mac.ops.set_rar)
2841 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
2842
2843 return 0;
2844}
2845
2846/**
2847 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
2848 * @netdev: network interface device structure
2849 * @new_mtu: new value for maximum frame size
2850 *
2851 * Returns 0 on success, negative on failure
2852 **/
2853static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
2854{
2855 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
69bfbec4 2856 struct ixgbe_hw *hw = &adapter->hw;
92915f71 2857 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
69bfbec4
GR
2858 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
2859 u32 msg[2];
2860
2861 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
2862 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
92915f71
GR
2863
2864 /* MTU < 68 is an error and causes problems on some kernels */
69bfbec4 2865 if ((new_mtu < 68) || (max_frame > max_possible_frame))
2866 return -EINVAL;
2867
2868 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
2869 netdev->mtu, new_mtu);
2870 /* must set new MTU before calling down or up */
2871 netdev->mtu = new_mtu;
2872
795180d8
GR
2873 if (!netif_running(netdev)) {
2874 msg[0] = IXGBE_VF_SET_LPE;
2875 msg[1] = max_frame;
2876 hw->mbx.ops.write_posted(hw, msg, 2);
2877 }
69bfbec4 2878
92915f71
GR
2879 if (netif_running(netdev))
2880 ixgbevf_reinit_locked(adapter);
2881
2882 return 0;
2883}
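/* MTU example (illustrative): for new_mtu = 1500 the wire frame is
 * 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes, which fits under
 * MAXIMUM_ETHERNET_VLAN_SIZE and is accepted on an 82599 VF; a jumbo MTU
 * such as 9000 only passes the check when the MAC is an X540 VF, where
 * max_possible_frame is raised to IXGBE_MAX_JUMBO_FRAME_SIZE.
 */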
2884
2885static void ixgbevf_shutdown(struct pci_dev *pdev)
2886{
2887 struct net_device *netdev = pci_get_drvdata(pdev);
2888 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2889
2890 netif_device_detach(netdev);
2891
2892 if (netif_running(netdev)) {
2893 ixgbevf_down(adapter);
2894 ixgbevf_free_irq(adapter);
2895 ixgbevf_free_all_tx_resources(adapter);
2896 ixgbevf_free_all_rx_resources(adapter);
2897 }
2898
92915f71 2899 pci_save_state(pdev);
92915f71
GR
2900
2901 pci_disable_device(pdev);
2902}
2903
4197aa7b
ED
2904static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
2905 struct rtnl_link_stats64 *stats)
2906{
2907 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2908 unsigned int start;
2909 u64 bytes, packets;
2910 const struct ixgbevf_ring *ring;
2911 int i;
2912
2913 ixgbevf_update_stats(adapter);
2914
2915 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
2916
2917 for (i = 0; i < adapter->num_rx_queues; i++) {
2918 ring = &adapter->rx_ring[i];
2919 do {
2920 start = u64_stats_fetch_begin_bh(&ring->syncp);
2921 bytes = ring->total_bytes;
2922 packets = ring->total_packets;
2923 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
2924 stats->rx_bytes += bytes;
2925 stats->rx_packets += packets;
2926 }
2927
2928 for (i = 0; i < adapter->num_tx_queues; i++) {
2929 ring = &adapter->tx_ring[i];
2930 do {
2931 start = u64_stats_fetch_begin_bh(&ring->syncp);
2932 bytes = ring->total_bytes;
2933 packets = ring->total_packets;
2934 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
2935 stats->tx_bytes += bytes;
2936 stats->tx_packets += packets;
2937 }
2938
2939 return stats;
2940}
2941
92915f71 2942static const struct net_device_ops ixgbe_netdev_ops = {
2943 .ndo_open = ixgbevf_open,
2944 .ndo_stop = ixgbevf_close,
2945 .ndo_start_xmit = ixgbevf_xmit_frame,
2946 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
4197aa7b 2947 .ndo_get_stats64 = ixgbevf_get_stats,
92915f71 2948 .ndo_validate_addr = eth_validate_addr,
2949 .ndo_set_mac_address = ixgbevf_set_mac,
2950 .ndo_change_mtu = ixgbevf_change_mtu,
2951 .ndo_tx_timeout = ixgbevf_tx_timeout,
2952 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
2953 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
92915f71 2954};
92915f71
GR
2955
2956static void ixgbevf_assign_netdev_ops(struct net_device *dev)
2957{
92915f71 2958 dev->netdev_ops = &ixgbe_netdev_ops;
92915f71
GR
2959 ixgbevf_set_ethtool_ops(dev);
2960 dev->watchdog_timeo = 5 * HZ;
2961}
2962
2963/**
2964 * ixgbevf_probe - Device Initialization Routine
2965 * @pdev: PCI device information struct
2966 * @ent: entry in ixgbevf_pci_tbl
2967 *
2968 * Returns 0 on success, negative on failure
2969 *
2970 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
2971 * The OS initialization, configuring of the adapter private structure,
2972 * and a hardware reset occur.
2973 **/
2974static int __devinit ixgbevf_probe(struct pci_dev *pdev,
2975 const struct pci_device_id *ent)
2976{
2977 struct net_device *netdev;
2978 struct ixgbevf_adapter *adapter = NULL;
2979 struct ixgbe_hw *hw = NULL;
2980 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
2981 static int cards_found;
2982 int err, pci_using_dac;
2983
2984 err = pci_enable_device(pdev);
2985 if (err)
2986 return err;
2987
2a1f8794
NN
2988 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
2989 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
2990 pci_using_dac = 1;
2991 } else {
2a1f8794 2992 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
92915f71 2993 if (err) {
2994 err = dma_set_coherent_mask(&pdev->dev,
2995 DMA_BIT_MASK(32));
92915f71
GR
2996 if (err) {
2997 dev_err(&pdev->dev, "No usable DMA "
2998 "configuration, aborting\n");
2999 goto err_dma;
3000 }
3001 }
3002 pci_using_dac = 0;
3003 }
3004
3005 err = pci_request_regions(pdev, ixgbevf_driver_name);
3006 if (err) {
3007 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3008 goto err_pci_reg;
3009 }
3010
3011 pci_set_master(pdev);
3012
92915f71
GR
3013 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3014 MAX_TX_QUEUES);
92915f71
GR
3015 if (!netdev) {
3016 err = -ENOMEM;
3017 goto err_alloc_etherdev;
3018 }
3019
3020 SET_NETDEV_DEV(netdev, &pdev->dev);
3021
3022 pci_set_drvdata(pdev, netdev);
3023 adapter = netdev_priv(netdev);
3024
3025 adapter->netdev = netdev;
3026 adapter->pdev = pdev;
3027 hw = &adapter->hw;
3028 hw->back = adapter;
b3f4d599 3029 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
92915f71
GR
3030
3031 /*
3032 * call save state here in standalone driver because it relies on
3033 * adapter struct to exist, and needs to call netdev_priv
3034 */
3035 pci_save_state(pdev);
3036
3037 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3038 pci_resource_len(pdev, 0));
3039 if (!hw->hw_addr) {
3040 err = -EIO;
3041 goto err_ioremap;
3042 }
3043
3044 ixgbevf_assign_netdev_ops(netdev);
3045
3046 adapter->bd_number = cards_found;
3047
3048 /* Setup hw api */
3049 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3050 hw->mac.type = ii->mac;
3051
3052 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
f416dfc0 3053 sizeof(struct ixgbe_mbx_operations));
92915f71 3054
92915f71
GR
3055 /* setup the private structure */
3056 err = ixgbevf_sw_init(adapter);
3057 if (err)
3058 goto err_sw_init;
3059
3060 /* The HW MAC address was set and/or determined in sw_init */
3061 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3062
3063 if (!is_valid_ether_addr(netdev->dev_addr)) {
3064 pr_err("invalid MAC address\n");
3065 err = -EIO;
3066 goto err_sw_init;
3067 }
92915f71 3068
471a76de 3069 netdev->hw_features = NETIF_F_SG |
92915f71 3070 NETIF_F_IP_CSUM |
3071 NETIF_F_IPV6_CSUM |
3072 NETIF_F_TSO |
3073 NETIF_F_TSO6 |
3074 NETIF_F_RXCSUM;
3075
3076 netdev->features = netdev->hw_features |
3077 NETIF_F_HW_VLAN_TX |
3078 NETIF_F_HW_VLAN_RX |
3079 NETIF_F_HW_VLAN_FILTER;
3080
92915f71
GR
3081 netdev->vlan_features |= NETIF_F_TSO;
3082 netdev->vlan_features |= NETIF_F_TSO6;
3083 netdev->vlan_features |= NETIF_F_IP_CSUM;
3bfacf96 3084 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
92915f71
GR
3085 netdev->vlan_features |= NETIF_F_SG;
3086
3087 if (pci_using_dac)
3088 netdev->features |= NETIF_F_HIGHDMA;
3089
01789349
JP
3090 netdev->priv_flags |= IFF_UNICAST_FLT;
3091
92915f71 3092 init_timer(&adapter->watchdog_timer);
c061b18d 3093 adapter->watchdog_timer.function = ixgbevf_watchdog;
92915f71
GR
3094 adapter->watchdog_timer.data = (unsigned long)adapter;
3095
3096 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3097 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3098
3099 err = ixgbevf_init_interrupt_scheme(adapter);
3100 if (err)
3101 goto err_sw_init;
3102
3103 /* pick up the PCI bus settings for reporting later */
3104 if (hw->mac.ops.get_bus_info)
3105 hw->mac.ops.get_bus_info(hw);
3106
92915f71
GR
3107 strcpy(netdev->name, "eth%d");
3108
3109 err = register_netdev(netdev);
3110 if (err)
3111 goto err_register;
3112
5d426ad1
GR
3113 netif_carrier_off(netdev);
3114
33bd9f60
GR
3115 ixgbevf_init_last_counter_stats(adapter);
3116
92915f71 3117 /* print the MAC address */
f794e7ef 3118 hw_dbg(hw, "%pM\n", netdev->dev_addr);
92915f71
GR
3119
3120 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3121
92915f71
GR
3122 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3123 cards_found++;
3124 return 0;
3125
3126err_register:
3127err_sw_init:
3128 ixgbevf_reset_interrupt_capability(adapter);
3129 iounmap(hw->hw_addr);
3130err_ioremap:
3131 free_netdev(netdev);
3132err_alloc_etherdev:
3133 pci_release_regions(pdev);
3134err_pci_reg:
3135err_dma:
3136 pci_disable_device(pdev);
3137 return err;
3138}
3139
3140/**
3141 * ixgbevf_remove - Device Removal Routine
3142 * @pdev: PCI device information struct
3143 *
3144 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3145 * that it should release a PCI device. This could be caused by a
3146 * Hot-Plug event, or because the driver is going to be removed from
3147 * memory.
3148 **/
3149static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3150{
3151 struct net_device *netdev = pci_get_drvdata(pdev);
3152 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3153
3154 set_bit(__IXGBEVF_DOWN, &adapter->state);
3155
3156 del_timer_sync(&adapter->watchdog_timer);
3157
23f333a2 3158 cancel_work_sync(&adapter->reset_task);
92915f71
GR
3159 cancel_work_sync(&adapter->watchdog_task);
3160
fd13a9ab 3161 if (netdev->reg_state == NETREG_REGISTERED)
92915f71 3162 unregister_netdev(netdev);
92915f71
GR
3163
3164 ixgbevf_reset_interrupt_capability(adapter);
3165
3166 iounmap(adapter->hw.hw_addr);
3167 pci_release_regions(pdev);
3168
3169 hw_dbg(&adapter->hw, "Remove complete\n");
3170
3171 kfree(adapter->tx_ring);
3172 kfree(adapter->rx_ring);
3173
3174 free_netdev(netdev);
3175
3176 pci_disable_device(pdev);
3177}
3178
3179static struct pci_driver ixgbevf_driver = {
3180 .name = ixgbevf_driver_name,
3181 .id_table = ixgbevf_pci_tbl,
3182 .probe = ixgbevf_probe,
3183 .remove = __devexit_p(ixgbevf_remove),
3184 .shutdown = ixgbevf_shutdown,
3185};
3186
3187/**
65d676c8 3188 * ixgbevf_init_module - Driver Registration Routine
92915f71 3189 *
65d676c8 3190 * ixgbevf_init_module is the first routine called when the driver is
3191 * loaded. All it does is register with the PCI subsystem.
3192 **/
3193static int __init ixgbevf_init_module(void)
3194{
3195 int ret;
dbd9636e
JK
3196 pr_info("%s - version %s\n", ixgbevf_driver_string,
3197 ixgbevf_driver_version);
92915f71 3198
dbd9636e 3199 pr_info("%s\n", ixgbevf_copyright);
92915f71
GR
3200
3201 ret = pci_register_driver(&ixgbevf_driver);
3202 return ret;
3203}
3204
3205module_init(ixgbevf_init_module);
3206
3207/**
65d676c8 3208 * ixgbevf_exit_module - Driver Exit Cleanup Routine
92915f71 3209 *
65d676c8 3210 * ixgbevf_exit_module is called just before the driver is removed
3211 * from memory.
3212 **/
3213static void __exit ixgbevf_exit_module(void)
3214{
3215 pci_unregister_driver(&ixgbevf_driver);
3216}
3217
3218#ifdef DEBUG
3219/**
65d676c8 3220 * ixgbevf_get_hw_dev_name - return device name string
3221 * used by hardware layer to print debugging information
3222 **/
3223char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
3224{
3225 struct ixgbevf_adapter *adapter = hw->back;
3226 return adapter->netdev->name;
3227}
3228
3229#endif
3230module_exit(ixgbevf_exit_module);
3231
3232/* ixgbevf_main.c */