/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/******************************************************************************
 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "ixgbevf.h"
const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
	"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";

#define DRV_VERSION "2.6.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
	"Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
	[board_82599_vf] = &ixgbevf_82599_vf_info,
	[board_X540_vf]  = &ixgbevf_X540_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbevf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
	 board_82599_vf},
	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
	 board_X540_vf},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
					   struct ixgbevf_ring *rx_ring,
					   u32 val)
{
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
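/*
 * Editor's note: writing VFRDT (the Rx Descriptor Tail) above hands every
 * descriptor up to index 'val' back to hardware; the wmb() guarantees the
 * descriptor contents themselves are globally visible before the tail write
 * makes them fetchable by the device.
 */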
/**
 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
			     u8 queue, u8 msix_vector)
{
	u32 ivar, index;
	struct ixgbe_hw *hw = &adapter->hw;

	if (direction == -1) {
		/* other causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
		ivar &= ~0xFF;
		ivar |= msix_vector;
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
	}
}
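/*
 * Editor's note -- a worked example of the index math above, assuming the
 * usual 82599 IVAR layout (four 8-bit entries per 32-bit register, two
 * queues per register): for queue 5, Tx (direction = 1),
 * index = 16 * (5 & 1) + 8 * 1 = 24, so the vector number lands in bits
 * 31:24 of VTIVAR(5 >> 1) = VTIVAR(2), with IXGBE_IVAR_ALLOC_VAL marking
 * the entry valid.
 */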
static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
					       struct ixgbevf_tx_buffer
					       *tx_buffer_info)
{
	if (tx_buffer_info->dma) {
		if (tx_buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev,
				       tx_buffer_info->dma,
				       tx_buffer_info->length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev,
					 tx_buffer_info->dma,
					 tx_buffer_info->length,
					 DMA_TO_DEVICE);
		tx_buffer_info->dma = 0;
	}
	if (tx_buffer_info->skb) {
		dev_kfree_skb_any(tx_buffer_info->skb);
		tx_buffer_info->skb = NULL;
	}
	tx_buffer_info->time_stamp = 0;
	/* tx_buffer_info must be completely set up in the transmit path */
}
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
			 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1)      /* for context */
#else
#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
#endif
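/*
 * Editor's note on the worst-case math above: one descriptor carries at
 * most IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, so TXD_USE_COUNT()
 * is simply a round-up division; e.g. TXD_USE_COUNT(20000) =
 * (20000 >> 14) + 1 = 2 descriptors.  DESC_NEEDED then budgets for the
 * worst-case skb: the linear data, every possible page fragment, and one
 * extra slot for a context descriptor.
 */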
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
				 struct ixgbevf_ring *tx_ring)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int i, eop, count = 0;
	unsigned int total_bytes = 0, total_packets = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->tx_buffer_info[i].next_to_watch;
	eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

	while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
	       (count < tx_ring->work_limit)) {
		bool cleaned = false;
		rmb(); /* read buffer_info after eop_desc */
		/* eop could change between read and DD-check */
		if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
			goto cont_loop;
		for ( ; !cleaned; count++) {
			struct sk_buff *skb;
			tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			cleaned = (i == eop);
			skb = tx_buffer_info->skb;

			if (cleaned && skb) {
				unsigned int segs, bytecount;

				/* gso_segs is currently only valid for tcp */
				segs = skb_shinfo(skb)->gso_segs ?: 1;
				/* multiply data chunks by size of headers */
				bytecount = ((segs - 1) * skb_headlen(skb)) +
					    skb->len;
				total_packets += segs;
				total_bytes += bytecount;
			}

			ixgbevf_unmap_and_free_tx_resource(adapter,
							   tx_buffer_info);

			tx_desc->wb.status = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

cont_loop:
		eop = tx_ring->tx_buffer_info[i].next_to_watch;
		eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
#ifdef HAVE_TX_MQ
		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_subqueue(netdev, tx_ring->queue_index);
			++adapter->restart_queue;
		}
#else
		if (netif_queue_stopped(netdev) &&
		    !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
#endif
	}

	/* re-arm the interrupt */
	if ((count >= tx_ring->work_limit) &&
	    (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
		IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
	}

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->total_bytes += total_bytes;
	tx_ring->total_packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);

	return count < tx_ring->work_limit;
}
/**
 * ixgbevf_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
				struct sk_buff *skb, u8 status,
				struct ixgbevf_ring *ring,
				union ixgbe_adv_rx_desc *rx_desc)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	bool is_vlan = (status & IXGBE_RXD_STAT_VP);
	u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

	if (is_vlan && test_bit(tag, adapter->active_vlans))
		__vlan_hwaccel_put_tag(skb, tag);

	if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
		napi_gro_receive(&q_vector->napi, skb);
	else
		netif_rx(skb);
}
/**
 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
				       u32 status_err, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Rx csum disabled */
	if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
		return;

	/* if IP and error */
	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
		adapter->hw_csum_rx_error++;
		return;
	}

	if (!(status_err & IXGBE_RXD_STAT_L4CS))
		return;

	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
		adapter->hw_csum_rx_error++;
		return;
	}

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_rx_good++;
}
/**
 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
				     struct ixgbevf_ring *rx_ring,
				     int cleaned_count)
{
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbevf_rx_buffer *bi;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

	i = rx_ring->next_to_use;
	bi = &rx_ring->rx_buffer_info[i];

	while (cleaned_count--) {
		rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

		if (!bi->page_dma &&
		    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
			if (!bi->page) {
				bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
				if (!bi->page) {
					adapter->alloc_rx_page_failed++;
					goto no_buffers;
				}
				bi->page_offset = 0;
			} else {
				/* use a half page if we're re-using */
				bi->page_offset ^= (PAGE_SIZE / 2);
			}

			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
						    bi->page_offset,
						    (PAGE_SIZE / 2),
						    DMA_FROM_DEVICE);
		}

		skb = bi->skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       bufsz);
			if (!skb) {
				adapter->alloc_rx_buff_failed++;
				goto no_buffers;
			}

			/*
			 * Make buffer alignment 2 beyond a 16 byte boundary
			 * this will result in a 16 byte aligned IP header after
			 * the 14 byte MAC header is removed
			 */
			skb_reserve(skb, NET_IP_ALIGN);

			bi->skb = skb;
		}
		if (!bi->dma) {
			bi->dma = dma_map_single(&pdev->dev, skb->data,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);
		}
		/* Refresh the desc even if buffer_addrs didn't change because
		 * each write-back erases this info. */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
		} else {
			rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		bi = &rx_ring->rx_buffer_info[i];
	}

no_buffers:
	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;
		if (i-- == 0)
			i = (rx_ring->count - 1);

		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
	}
}
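/*
 * Editor's note on the XOR trick above: in packet-split mode each allocated
 * page is used one half at a time; flipping page_offset by PAGE_SIZE/2 on
 * reuse hands the other half to hardware while the stack may still hold a
 * reference to the first half (the page_count() test in the clean path
 * decides whether the page can actually be recycled).
 */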
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
					     u64 qmask)
{
	u32 mask;
	struct ixgbe_hw *hw = &adapter->hw;

	mask = (qmask & 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
}
static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
				 struct ixgbevf_ring *rx_ring,
				 int *work_done, int work_to_do)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct pci_dev *pdev = adapter->pdev;
	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
	struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
	struct sk_buff *skb;
	unsigned int i;
	u32 len, staterr;
	u16 hdr_info;
	bool cleaned = false;
	int cleaned_count = 0;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	rx_buffer_info = &rx_ring->rx_buffer_info[i];

	while (staterr & IXGBE_RXD_STAT_DD) {
		u32 upper_len = 0;
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;

		rmb(); /* read descriptor and rx_buffer_info after status DD */
		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
			hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
			if (hdr_info & IXGBE_RXDADV_SPH)
				adapter->rx_hdr_split++;
			if (len > IXGBEVF_RX_HDR_SIZE)
				len = IXGBEVF_RX_HDR_SIZE;
			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
		} else {
			len = le16_to_cpu(rx_desc->wb.upper.length);
		}
		cleaned = true;
		skb = rx_buffer_info->skb;
		prefetch(skb->data - NET_IP_ALIGN);
		rx_buffer_info->skb = NULL;

		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
			skb_put(skb, len);
		}

		if (upper_len) {
			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
			rx_buffer_info->page_dma = 0;
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buffer_info->page,
					   rx_buffer_info->page_offset,
					   upper_len);

			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
			    (page_count(rx_buffer_info->page) != 1))
				rx_buffer_info->page = NULL;
			else
				get_page(rx_buffer_info->page);

			skb->len += upper_len;
			skb->data_len += upper_len;
			skb->truesize += upper_len;
		}

		i++;
		if (i == rx_ring->count)
			i = 0;

		next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
		prefetch(next_rxd);
		cleaned_count++;

		next_buffer = &rx_ring->rx_buffer_info[i];

		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
			if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
				rx_buffer_info->skb = next_buffer->skb;
				rx_buffer_info->dma = next_buffer->dma;
				next_buffer->skb = skb;
				next_buffer->dma = 0;
			} else {
				skb->next = next_buffer->skb;
				skb->next->prev = skb;
			}
			adapter->non_eop_descs++;
			goto next_desc;
		}

		/* ERR_MASK will only have valid bits if EOP set */
		if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		ixgbevf_rx_checksum(adapter, staterr, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/*
		 * Work around issue of some types of VM to VM loop back
		 * packets not getting split correctly
		 */
		if (staterr & IXGBE_RXD_STAT_LB) {
			u32 header_fixup_len = skb_headlen(skb);
			if (header_fixup_len < 14)
				skb_push(skb, header_fixup_len);
		}
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
		rx_desc->wb.upper.status_error = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
			ixgbevf_alloc_rx_buffers(adapter, rx_ring,
						 cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		rx_buffer_info = &rx_ring->rx_buffer_info[i];

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}

	rx_ring->next_to_clean = i;
	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

	if (cleaned_count)
		ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->total_packets += total_rx_packets;
	rx_ring->total_bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);

	return cleaned;
}
/**
 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector.
 **/
static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0;
	long r_idx;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
	}

	return work_done;
}
/**
 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
{
	struct ixgbevf_q_vector *q_vector =
		container_of(napi, struct ixgbevf_q_vector, napi);
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *rx_ring = NULL;
	int work_done = 0, i;
	long r_idx;
	u64 enable_mask = 0;

	/* attempt to distribute budget to each queue fairly, but don't allow
	 * the budget to go below 1 because we'll exit polling */
	budget /= (q_vector->rxr_count ?: 1);
	budget = max(budget, 1);
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
		enable_mask |= rx_ring->v_idx;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

#ifndef HAVE_NETDEV_NAPI_LIST
	if (!netif_running(adapter->netdev))
		work_done = 0;

#endif
	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);

	/* If all Rx work done, exit the polling mode */
	if (work_done < budget) {
		napi_complete(napi);
		if (adapter->itr_setting & 1)
			ixgbevf_set_itr_msix(q_vector);
		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
			ixgbevf_irq_enable_queues(adapter, enable_mask);
	}

	return work_done;
}
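/*
 * Editor's note on the budget split above: with rxr_count = 3 and a NAPI
 * budget of 64, each ring is polled with 64 / 3 = 21 packets of budget;
 * the max(budget, 1) guard keeps a vector with many rings from rounding
 * down to a zero budget, which would exit polling immediately.
 */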
/**
 * ixgbevf_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_q_vector *q_vector;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j, q_vectors, v_idx, r_idx;
	u32 mask;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		/* XXX for_each_set_bit(...) */
		r_idx = find_first_bit(q_vector->rxr_idx,
				       adapter->num_rx_queues);

		for (i = 0; i < q_vector->rxr_count; i++) {
			j = adapter->rx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 0, j, v_idx);
			r_idx = find_next_bit(q_vector->rxr_idx,
					      adapter->num_rx_queues,
					      r_idx + 1);
		}
		r_idx = find_first_bit(q_vector->txr_idx,
				       adapter->num_tx_queues);

		for (i = 0; i < q_vector->txr_count; i++) {
			j = adapter->tx_ring[r_idx].reg_idx;
			ixgbevf_set_ivar(adapter, 1, j, v_idx);
			r_idx = find_next_bit(q_vector->txr_idx,
					      adapter->num_tx_queues,
					      r_idx + 1);
		}

		/* if this is a tx only vector halve the interrupt rate */
		if (q_vector->txr_count && !q_vector->rxr_count)
			q_vector->eitr = (adapter->eitr_param >> 1);
		else if (q_vector->rxr_count)
			/* rx or rx/tx vector */
			q_vector->eitr = adapter->eitr_param;

		ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
	}

	ixgbevf_set_ivar(adapter, -1, 1, v_idx);

	/* set up to autoclear timer, and the vectors */
	mask = IXGBE_EIMS_ENABLE_MASK;
	mask &= ~IXGBE_EIMS_OTHER;
	IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
			     u32 eitr, u8 itr_setting,
			     int packets, int bytes)
{
	unsigned int retval = itr_setting;
	u32 timepassed_us;
	u64 bytes_perint;

	if (packets == 0)
		goto update_itr_done;

	/* simple throttlerate management
	 *    0-20MB/s lowest (100000 ints/s)
	 *   20-100MB/s low   (20000 ints/s)
	 *  100-1249MB/s bulk (8000 ints/s)
	 */
	/* what was last interrupt timeslice? */
	timepassed_us = 1000000/eitr;
	bytes_perint = bytes / timepassed_us; /* bytes/usec */

	switch (itr_setting) {
	case lowest_latency:
		if (bytes_perint > adapter->eitr_low)
			retval = low_latency;
		break;
	case low_latency:
		if (bytes_perint > adapter->eitr_high)
			retval = bulk_latency;
		else if (bytes_perint <= adapter->eitr_low)
			retval = lowest_latency;
		break;
	case bulk_latency:
		if (bytes_perint <= adapter->eitr_high)
			retval = low_latency;
		break;
	}

update_itr_done:
	return retval;
}
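/*
 * Editor's note -- a worked pass through the heuristic above: at
 * eitr = 20000 ints/s the last timeslice was 1000000/20000 = 50 us, so
 * 5000 bytes seen in that interval gives bytes_perint = 100 bytes/us
 * (roughly 100 MB/s), which per the table above is bulk territory and
 * moves the vector from low_latency toward bulk_latency (8000 ints/s).
 * The exact crossover points live in adapter->eitr_low/eitr_high.
 */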
/**
 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
 * @adapter: pointer to adapter struct
 * @v_idx: vector index into q_vector array
 * @itr_reg: new value to be written in *register* format, not ints/s
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update VTEITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 **/
static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
			       u32 itr_reg)
{
	struct ixgbe_hw *hw = &adapter->hw;

	itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);

	/*
	 * set the WDIS bit to not clear the timer bits and cause an
	 * immediate assertion of the interrupt
	 */
	itr_reg |= IXGBE_EITR_CNT_WDIS;

	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
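/*
 * Editor's note (an assumption about the helper, not defined in this file):
 * EITR_INTS_PER_SEC_TO_REG(), from ixgbevf.h in this era of the driver,
 * converts an ints/sec rate into the hardware interval field, which counts
 * in 256 ns units -- reg = 1000000000 / (rate * 256) -- so 8000 ints/s
 * becomes an interval value of ~488.
 */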
static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
{
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	u32 new_itr;
	u8 current_itr, ret_itr;
	int i, r_idx, v_idx = q_vector->v_idx;
	struct ixgbevf_ring *rx_ring, *tx_ring;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->tx_itr,
					     tx_ring->total_packets,
					     tx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
				    q_vector->tx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
					     q_vector->rx_itr,
					     rx_ring->total_packets,
					     rx_ring->total_bytes);
		/* if the result for this queue would decrease interrupt
		 * rate for this vector then use that result */
		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
				    q_vector->rx_itr - 1 : ret_itr);
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 100000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
	default:
		new_itr = 8000;
		break;
	}

	if (new_itr != q_vector->eitr) {
		u32 itr_reg;

		/* save the algorithm value here, not the smoothed one */
		q_vector->eitr = new_itr;
		/* do an exponential smoothing */
		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
		itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
		ixgbevf_write_eitr(adapter, v_idx, itr_reg);
	}
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 eicr;
	u32 msg;
	bool got_ack = false;

	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);

	if (!hw->mbx.ops.check_for_ack(hw))
		got_ack = true;

	if (!hw->mbx.ops.check_for_msg(hw)) {
		hw->mbx.ops.read(hw, &msg, 1);

		if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 1));

		if (msg & IXGBE_VT_MSGTYPE_NACK)
			pr_warn("Last Request of type %2.2x to PF Nacked\n",
				msg & 0xFF);
		/*
		 * Restore the PFSTS bit in case someone is polling for a
		 * return message from the PF
		 */
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
	}

	/*
	 * checking for the ack clears the PFACK bit.  Place
	 * it back in the v2p_mailbox cache so that anyone
	 * polling for an ack will not miss it
	 */
	if (got_ack)
		hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;

	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbevf_ring *tx_ring;
	int i, r_idx;

	if (!q_vector->txr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
	for (i = 0; i < q_vector->txr_count; i++) {
		tx_ring = &(adapter->tx_ring[r_idx]);
		tx_ring->total_bytes = 0;
		tx_ring->total_packets = 0;
		ixgbevf_clean_tx_irq(adapter, tx_ring);
		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
				      r_idx + 1);
	}

	if (adapter->itr_setting & 1)
		ixgbevf_set_itr_msix(q_vector);

	return IRQ_HANDLED;
}
/**
 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
{
	struct ixgbevf_q_vector *q_vector = data;
	struct ixgbevf_adapter *adapter = q_vector->adapter;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbevf_ring *rx_ring;
	int r_idx;
	int i;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	for (i = 0; i < q_vector->rxr_count; i++) {
		rx_ring = &(adapter->rx_ring[r_idx]);
		rx_ring->total_bytes = 0;
		rx_ring->total_packets = 0;
		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
				      r_idx + 1);
	}

	if (!q_vector->rxr_count)
		return IRQ_HANDLED;

	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
	rx_ring = &(adapter->rx_ring[r_idx]);
	/* disable interrupts on this vector only */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
{
	ixgbevf_msix_clean_rx(irq, data);
	ixgbevf_msix_clean_tx(irq, data);

	return IRQ_HANDLED;
}
static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
				     int r_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(r_idx, q_vector->rxr_idx);
	q_vector->rxr_count++;
	a->rx_ring[r_idx].v_idx = 1 << v_idx;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
				     int t_idx)
{
	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];

	set_bit(t_idx, q_vector->txr_idx);
	q_vector->txr_count++;
	a->tx_ring[t_idx].v_idx = 1 << v_idx;
}
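/*
 * Editor's note: the ring->v_idx set above is a one-hot mask (1 << vector
 * index), not an index -- elsewhere in this file it is written directly
 * into VTEIMS/VTEIMC/VTEICS-style cause registers to enable, disable, or
 * trigger exactly that vector.
 */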
/**
 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	/*
	 * The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/*
	 * If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 */
	/* Re-adjusting *qpv takes care of the remainder. */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	return err;
}
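/*
 * Editor's note on the grouping loops above: DIV_ROUND_UP(remaining,
 * q_vectors - i) spreads the queues as evenly as possible.  E.g. 5 Rx
 * queues over 2 vectors: vector 0 takes DIV_ROUND_UP(5, 2) = 3, leaving 2,
 * and vector 1 takes DIV_ROUND_UP(2, 1) = 2.
 */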
/**
 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
 * interrupts from the kernel.
 **/
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irqreturn_t (*handler)(int, void *);
	int i, vector, q_vectors, err;
	int ri = 0, ti = 0;

	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
					  ? &ixgbevf_msix_clean_many : \
			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
			  NULL)
	for (vector = 0; vector < q_vectors; vector++) {
		handler = SET_HANDLER(adapter->q_vector[vector]);

		if (handler == &ixgbevf_msix_clean_rx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "rx", ri++);
		} else if (handler == &ixgbevf_msix_clean_tx) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "tx", ti++);
		} else if (handler == &ixgbevf_msix_clean_many) {
			sprintf(adapter->name[vector], "%s-%s-%d",
				netdev->name, "TxRx", vector);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(adapter->msix_entries[vector].vector,
				  handler, 0, adapter->name[vector],
				  adapter->q_vector[vector]);
		if (err) {
			hw_dbg(&adapter->hw,
			       "request_irq failed for MSIX interrupt "
			       "Error: %d\n", err);
			goto free_queue_irqs;
		}
	}

	sprintf(adapter->name[vector], "%s:mbx", netdev->name);
	err = request_irq(adapter->msix_entries[vector].vector,
			  &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
	if (err) {
		hw_dbg(&adapter->hw,
		       "request_irq for msix_mbx failed: %d\n", err);
		goto free_queue_irqs;
	}

	return 0;

free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[--vector].vector,
			 &(adapter->q_vector[i]));
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
	return err;
}
static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
{
	int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < q_vectors; i++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
		q_vector->rxr_count = 0;
		q_vector->txr_count = 0;
		q_vector->eitr = adapter->eitr_param;
	}
}
/**
 * ixgbevf_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
	int err = 0;

	err = ixgbevf_request_msix_irqs(adapter);

	if (err)
		hw_dbg(&adapter->hw,
		       "request_irq failed, Error %d\n", err);

	return err;
}
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, q_vectors;

	q_vectors = adapter->num_msix_vectors;

	i = q_vectors - 1;

	free_irq(adapter->msix_entries[i].vector, netdev);
	i--;

	for (; i >= 0; i--) {
		free_irq(adapter->msix_entries[i].vector,
			 adapter->q_vector[i]);
	}

	ixgbevf_reset_q_vectors(adapter);
}
/**
 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
	int i;
	struct ixgbe_hw *hw = &adapter->hw;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);

	IXGBE_WRITE_FLUSH(hw);

	for (i = 0; i < adapter->num_msix_vectors; i++)
		synchronize_irq(adapter->msix_entries[i].vector);
}
/**
 * ixgbevf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
				      bool queues, bool flush)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mask;
	u64 qmask;

	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
	qmask = ~0;

	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);

	if (queues)
		ixgbevf_irq_enable_queues(adapter, qmask);

	if (flush)
		IXGBE_WRITE_FLUSH(hw);
}
/**
 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
{
	u64 tdba;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 i, j, tdlen, txctrl;

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		tdba = ring->dma;
		tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
				(tdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
		adapter->tx_ring[i].head = IXGBE_VFTDH(j);
		adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
		/* Disable Tx Head Writeback RO bit, since this hoses
		 * bookkeeping if things aren't delivered in order.
		 */
		txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
	}
}
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT	2

static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
{
	struct ixgbevf_ring *rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 srrctl;

	rx_ring = &adapter->rx_ring[index];

	srrctl = IXGBE_SRRCTL_DROP_EN;

	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		u16 bufsz = IXGBEVF_RXBUFFER_2048;
		/* grow the amount we can receive on large page machines */
		if (bufsz < (PAGE_SIZE / 2))
			bufsz = (PAGE_SIZE / 2);
		/* cap the bufsz at our largest descriptor size */
		bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);

		srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
		srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
			   IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
			   IXGBE_SRRCTL_BSIZEHDR_MASK);
	} else {
		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

		if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
			srrctl |= IXGBEVF_RXBUFFER_2048 >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
		else
			srrctl |= rx_ring->rx_buf_len >>
				IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
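/*
 * Editor's note: the shifts above pack byte counts into SRRCTL's size
 * fields.  In this hardware family SRRCTL.BSIZEPKT is expressed in 1 KB
 * units (IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10 bits), so a 2048-byte buffer is
 * written as 2; the header-buffer size field has its own shift and mask.
 */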
/**
 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
	u64 rdba;
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	int i, j;
	u32 rdlen;
	int rx_buf_len;

	/* Decide whether to use packet split mode or not */
	if (netdev->mtu > ETH_DATA_LEN) {
		if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	} else {
		if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
			adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
		else
			adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
	}

	/* Set the RX buffer length according to the mode */
	if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
		/* PSRTYPE must be initialized in 82599 */
		u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
			      IXGBE_PSRTYPE_UDPHDR |
			      IXGBE_PSRTYPE_IPV4HDR |
			      IXGBE_PSRTYPE_IPV6HDR |
			      IXGBE_PSRTYPE_L2HDR;
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
		rx_buf_len = IXGBEVF_RX_HDR_SIZE;
	} else {
		IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
		if (netdev->mtu <= ETH_DATA_LEN)
			rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
		else
			rx_buf_len = ALIGN(max_frame, 1024);
	}

	rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rdba = adapter->rx_ring[i].dma;
		j = adapter->rx_ring[i].reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
				(rdba & DMA_BIT_MASK(32)));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
		IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
		adapter->rx_ring[i].head = IXGBE_VFRDH(j);
		adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
		adapter->rx_ring[i].rx_buf_len = rx_buf_len;

		ixgbevf_configure_srrctl(adapter, j);
	}
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* add VID to filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, true);
	set_bit(vid, adapter->active_vlans);

	return 0;
}
static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (hw->mac.ops.set_vfta)
		hw->mac.ops.set_vfta(hw, vid, 0, false);
	clear_bit(vid, adapter->active_vlans);

	return 0;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
}
static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int count = 0;

	if ((netdev_uc_count(netdev)) > 10) {
		pr_err("Too many unicast filters - No Space\n");
		return -ENOSPC;
	}

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;
		netdev_for_each_uc_addr(ha, netdev) {
			hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
			udelay(200);
		}
	} else {
		/*
		 * If the list is empty then send message to PF driver to
		 * clear all macvlans on this VF.
		 */
		hw->mac.ops.set_uc_addr(hw, 0, NULL);
	}

	return count;
}
/**
 * ixgbevf_set_rx_mode - Multicast set
 * @netdev: network interface device structure
 *
 * The set_rx_method entry point is called whenever the multicast address
 * list or the network interface flags are updated.  This routine is
 * responsible for configuring the hardware for proper multicast mode.
 **/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	/* reprogram multicast list */
	if (hw->mac.ops.update_mc_addr_list)
		hw->mac.ops.update_mc_addr_list(hw, netdev);

	ixgbevf_write_uc_addr_list(netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi = &q_vector->napi;
		if (q_vector->rxr_count > 1)
			napi->poll = &ixgbevf_clean_rxonly_many;

		napi_enable(napi);
	}
}
static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
{
	int q_idx;
	struct ixgbevf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = adapter->q_vector[q_idx];
		if (!q_vector->rxr_count)
			continue;
		napi_disable(&q_vector->napi);
	}
}
static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	ixgbevf_set_rx_mode(netdev);

	ixgbevf_restore_vlan(adapter);

	ixgbevf_configure_tx(adapter);
	ixgbevf_configure_rx(adapter);
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct ixgbevf_ring *ring = &adapter->rx_ring[i];
		ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
		ring->next_to_use = ring->count - 1;
		writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
	}
}
#define IXGBE_MAX_RX_DESC_POLL 10
static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
						int rxr)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int j = adapter->rx_ring[rxr].reg_idx;
	int k;

	for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
		if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
			break;
		else
			msleep(1);
	}
	if (k >= IXGBE_MAX_RX_DESC_POLL) {
		hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
		       "not set within the polling period\n", rxr);
	}

	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
				(adapter->rx_ring[rxr].count - 1));
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
{
	/* Only save pre-reset stats if there are some */
	if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
		adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
			adapter->stats.base_vfgprc;
		adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
			adapter->stats.base_vfgptc;
		adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
			adapter->stats.base_vfgorc;
		adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
			adapter->stats.base_vfgotc;
		adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
			adapter->stats.base_vfmprc;
	}
}
static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
	adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
	adapter->stats.last_vfgorc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
	adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
	adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
	adapter->stats.last_vfgotc |=
		(((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
	adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);

	adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
	adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
	adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
	adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
	adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	int i, j = 0;
	int num_rx_rings = adapter->num_rx_queues;
	u32 txdctl, rxdctl;
	u32 msg[2];

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		/* enable WTHRESH=8 descriptors, to encourage burst writeback */
		txdctl |= (8 << 16);
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		txdctl |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
	}

	for (i = 0; i < num_rx_rings; i++) {
		j = adapter->rx_ring[i].reg_idx;
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
		rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
		if (hw->mac.type == ixgbe_mac_X540_vf) {
			rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
			rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
				   IXGBE_RXDCTL_RLPML_EN);
		}
		IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
		ixgbevf_rx_desc_queue_enable(adapter, i);
	}

	ixgbevf_configure_msix(adapter);

	if (hw->mac.ops.set_rar) {
		if (is_valid_ether_addr(hw->mac.addr))
			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
		else
			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
	}

	msg[0] = IXGBE_VF_SET_LPE;
	msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	hw->mbx.ops.write_posted(hw, msg, 2);

	clear_bit(__IXGBEVF_DOWN, &adapter->state);
	ixgbevf_napi_enable_all(adapter);

	/* enable transmits */
	netif_tx_start_all_queues(netdev);

	ixgbevf_save_reset_stats(adapter);
	ixgbevf_init_last_counter_stats(adapter);

	/* bring the link up in the watchdog, this could race with our first
	 * link up interrupt but shouldn't be a problem */
	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
	adapter->link_check_timeout = jiffies;
	mod_timer(&adapter->watchdog_timer, jiffies);
}
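/*
 * Editor's note: the (8 << 16) in ixgbevf_up_complete() sets TXDCTL.WTHRESH
 * (the write-back threshold field, which begins at bit 16 in this register
 * layout) to 8, so completed Tx descriptors are written back in batches
 * rather than one at a time.
 */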
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	ixgbevf_configure(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	ixgbevf_irq_enable(adapter, true, true);
}
/**
 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	if (!rx_ring->rx_buffer_info)
		return;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ixgbevf_rx_buffer *rx_buffer_info;

		rx_buffer_info = &rx_ring->rx_buffer_info[i];
		if (rx_buffer_info->dma) {
			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
					 rx_ring->rx_buf_len,
					 DMA_FROM_DEVICE);
			rx_buffer_info->dma = 0;
		}
		if (rx_buffer_info->skb) {
			struct sk_buff *skb = rx_buffer_info->skb;
			rx_buffer_info->skb = NULL;
			do {
				struct sk_buff *this = skb;
				skb = skb->prev;
				dev_kfree_skb(this);
			} while (skb);
		}
		if (!rx_buffer_info->page)
			continue;
		dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
			       PAGE_SIZE / 2, DMA_FROM_DEVICE);
		rx_buffer_info->page_dma = 0;
		put_page(rx_buffer_info->page);
		rx_buffer_info->page = NULL;
		rx_buffer_info->page_offset = 0;
	}

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	if (rx_ring->head)
		writel(0, adapter->hw.hw_addr + rx_ring->head);
	if (rx_ring->tail)
		writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
 * ixgbevf_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
				  struct ixgbevf_ring *tx_ring)
{
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned long size;
	unsigned int i;

	if (!tx_ring->tx_buffer_info)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer_info, 0, size);

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (tx_ring->head)
		writel(0, adapter->hw.hw_addr + tx_ring->head);
	if (tx_ring->tail)
		writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/**
 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
void ixgbevf_down(struct ixgbevf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 txdctl;
	int i, j;

	/* signal that we are down to the interrupt handler */
	set_bit(__IXGBEVF_DOWN, &adapter->state);
	/* disable receives */

	netif_tx_disable(netdev);

	msleep(10);

	netif_tx_stop_all_queues(netdev);

	ixgbevf_irq_disable(adapter);

	ixgbevf_napi_disable_all(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	/* can't call flush scheduled work here because it can deadlock
	 * if linkwatch_event tries to acquire the rtnl_lock which we are
	 * holding */
	while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
		msleep(1);

	/* disable transmits in the hardware now that interrupts are off */
	for (i = 0; i < adapter->num_tx_queues; i++) {
		j = adapter->tx_ring[i].reg_idx;
		txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
		IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
				(txdctl & ~IXGBE_TXDCTL_ENABLE));
	}

	netif_carrier_off(netdev);

	if (!pci_channel_offline(adapter->pdev))
		ixgbevf_reset(adapter);

	ixgbevf_clean_all_tx_rings(adapter);
	ixgbevf_clean_all_rx_rings(adapter);
}
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	WARN_ON(in_interrupt());

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * Check if PF is up before re-init.  If not then skip until
	 * later when the PF is up and ready to service requests from
	 * the VF via mailbox.  If the VF is up and running then the
	 * watchdog task will continue to schedule reset tasks until
	 * the PF is up and running.
	 */
	if (!hw->mac.ops.reset_hw(hw)) {
		ixgbevf_down(adapter);
		ixgbevf_up(adapter);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
}
void ixgbevf_reset(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;

	if (hw->mac.ops.reset_hw(hw))
		hw_dbg(hw, "PF still resetting\n");
	else
		hw->mac.ops.init_hw(hw);

	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
		memcpy(netdev->perm_addr, adapter->hw.mac.addr,
		       netdev->addr_len);
	}
}
static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
					 int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 * 3) Other (Link Status Change, etc.)
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	while (vectors >= vector_threshold) {
		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
				      vectors);
		if (!err) /* Success in acquiring all requested vectors. */
			break;
		else if (err < 0)
			vectors = 0; /* Nasty failure, quit now */
		else /* err == number of vectors we should try again with */
			vectors = err;
	}

	if (vectors < vector_threshold) {
		/* Can't allocate enough MSI-X interrupts?  Oh well.
		 * This just means we'll go with either a single MSI
		 * vector or fall back to legacy interrupts.
		 */
		hw_dbg(&adapter->hw,
		       "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else {
		/*
		 * Adjust for only the vectors we'll use, which is minimum
		 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
		 * vectors we were allocated.
		 */
		adapter->num_msix_vectors = vectors;
	}
}
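/*
 * Editor's note: the retry loop above leans on the old pci_enable_msix()
 * contract -- a positive return value is the number of vectors that could
 * have been allocated -- so each iteration retries with exactly that count
 * until it either succeeds or drops below the three-vector floor.
 */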
/**
 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the "most" number of features turned on at once,
 * and ending with the smallest set of features.  This way large combinations
 * can be allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 **/
static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;
}
/**
 * ixgbevf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		goto err_tx_ring_allocation;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!adapter->rx_ring)
		goto err_rx_ring_allocation;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		adapter->tx_ring[i].count = adapter->tx_ring_count;
		adapter->tx_ring[i].queue_index = i;
		adapter->tx_ring[i].reg_idx = i;
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		adapter->rx_ring[i].count = adapter->rx_ring_count;
		adapter->rx_ring[i].queue_index = i;
		adapter->rx_ring[i].reg_idx = i;
	}

	return 0;

err_rx_ring_allocation:
	kfree(adapter->tx_ring);
err_tx_ring_allocation:
	return -ENOMEM;
}
/**
 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
2014 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter
*adapter
)
2017 int vector
, v_budget
;
2020 * It's easy to be greedy for MSI-X vectors, but it really
2021 * doesn't do us much good if we have a lot more vectors
2022 * than CPU's. So let's be conservative and only ask for
2023 * (roughly) twice the number of vectors as there are CPU's.
2025 v_budget
= min(adapter
->num_rx_queues
+ adapter
->num_tx_queues
,
2026 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS
;
2028 /* A failure in MSI-X entry allocation isn't fatal, but it does
2029 * mean we disable MSI-X capabilities of the adapter. */
2030 adapter
->msix_entries
= kcalloc(v_budget
,
2031 sizeof(struct msix_entry
), GFP_KERNEL
);
2032 if (!adapter
->msix_entries
) {
2037 for (vector
= 0; vector
< v_budget
; vector
++)
2038 adapter
->msix_entries
[vector
].entry
= vector
;
2040 ixgbevf_acquire_msix_vectors(adapter
, v_budget
);
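/*
 * Worked example for the v_budget calculation above: with the single
 * Tx and single Rx queue set up by ixgbevf_set_num_queues(), the sum
 * num_rx_queues + num_tx_queues is 2, which on any multi-core host is
 * below num_online_cpus() * 2, so v_budget is simply 2 + NON_Q_VECTORS
 * (the non-queue vector(s) covering the mailbox/other interrupt).
 */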
/**
 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	struct ixgbevf_q_vector *q_vector;
	int napi_vectors;
	int (*poll)(struct napi_struct *, int);

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;
	poll = &ixgbevf_clean_rxonly;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
		if (!q_vector)
			goto err_out;
		q_vector->adapter = adapter;
		q_vector->v_idx = q_idx;
		q_vector->eitr = adapter->eitr_param;
		if (q_idx < napi_vectors)
			netif_napi_add(adapter->netdev, &q_vector->napi,
				       (*poll), 64);
		adapter->q_vector[q_idx] = q_vector;
	}

	return 0;

err_out:
	while (q_idx) {
		q_idx--;
		q_vector = adapter->q_vector[q_idx];
		netif_napi_del(&q_vector->napi);
		kfree(q_vector);
		adapter->q_vector[q_idx] = NULL;
	}
	return -ENOMEM;
}
/**
 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	napi_vectors = adapter->num_rx_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];

		adapter->q_vector[q_idx] = NULL;
		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
		kfree(q_vector);
	}
}
/**
 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
{
	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbevf_set_num_queues(adapter);

	err = ixgbevf_set_interrupt_capability(adapter);
	if (err) {
		hw_dbg(&adapter->hw,
		       "Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = ixgbevf_alloc_q_vectors(adapter);
	if (err) {
		hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
		       "vectors\n");
		goto err_alloc_q_vectors;
	}

	err = ixgbevf_alloc_queues(adapter);
	if (err) {
		pr_err("Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
	       "Tx Queue count = %u\n",
	       (adapter->num_rx_queues > 1) ? "Enabled" :
	       "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	return 0;
err_alloc_queues:
	ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
	ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}
/**
 * ixgbevf_sw_init - Initialize general software structures
 * (struct ixgbevf_adapter)
 * @adapter: board private structure to initialize
 *
 * ixgbevf_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	/* PCI config space info */

	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	hw->mbx.ops.init_params(hw);
	hw->mac.max_tx_queues = MAX_TX_QUEUES;
	hw->mac.max_rx_queues = MAX_RX_QUEUES;
	err = hw->mac.ops.reset_hw(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "PF still in reset state, assigning new address\n");
		eth_hw_addr_random(adapter->netdev);
		memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
		       adapter->netdev->addr_len);
	} else {
		err = hw->mac.ops.init_hw(hw);
		if (err) {
			pr_err("init_shared_code failed: %d\n", err);
			goto out;
		}
		memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
		       adapter->netdev->addr_len);
	}

	/* Enable dynamic interrupt throttling rates */
	adapter->eitr_param = 20000;
	adapter->itr_setting = 1;

	/* set defaults for eitr in MegaBytes */
	adapter->eitr_low = 10;
	adapter->eitr_high = 20;

	/* set default ring sizes */
	adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
	adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;

	/* enable rx csum by default */
	adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;

	set_bit(__IXGBEVF_DOWN, &adapter->state);

out:
	return err;
}
#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter)	\
	{							\
		u32 current_counter = IXGBE_READ_REG(hw, reg);	\
		if (current_counter < last_counter)		\
			counter += 0x100000000LL;		\
		last_counter = current_counter;			\
		counter &= 0xFFFFFFFF00000000LL;		\
		counter |= current_counter;			\
	}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
	{								 \
		u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb);	 \
		u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb);	 \
		u64 current_counter = (current_counter_msb << 32) |	 \
			current_counter_lsb;				 \
		if (current_counter < last_counter)			 \
			counter += 0x1000000000LL;			 \
		last_counter = current_counter;				 \
		counter &= 0xFFFFFFF000000000LL;			 \
		counter |= current_counter;				 \
	}
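/*
 * The two macros above accumulate free-running hardware counters that
 * the VF cannot clear.  Wrap-around is detected by comparing against
 * the previous snapshot: if the register now reads lower than before,
 * one full period (2^32 for the 32-bit counters, 2^36 for the 36-bit
 * octet counters) is added to the running 64-bit total before the low
 * bits are replaced with the new reading.  For example, going from a
 * last_counter of 0xFFFFFFF0 to a reading of 0x00000010 adds 2^32 and
 * yields a net increase of 0x20 in the accumulated counter.
 */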
/**
 * ixgbevf_update_stats - Update the board statistics counters.
 * @adapter: board private structure
 **/
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;

	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
				adapter->stats.vfgprc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
				adapter->stats.vfgptc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
				adapter->stats.last_vfgorc,
				adapter->stats.vfgorc);
	UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
				adapter->stats.last_vfgotc,
				adapter->stats.vfgotc);
	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
				adapter->stats.vfmprc);
}
/**
 * ixgbevf_watchdog - Timer Call-back
 * @data: pointer to adapter cast into an unsigned long
 **/
static void ixgbevf_watchdog(unsigned long data)
{
	struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
	struct ixgbe_hw *hw = &adapter->hw;
	u64 eics = 0;
	int i;

	/*
	 * Do the watchdog outside of interrupt context due to the lovely
	 * delays that some of the newer hardware requires
	 */

	if (test_bit(__IXGBEVF_DOWN, &adapter->state))
		goto watchdog_short_circuit;

	/* get one bit for every active tx/rx interrupt vector */
	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
		if (qv->rxr_count || qv->txr_count)
			eics |= (1 << i);
	}

	IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);

watchdog_short_circuit:
	schedule_work(&adapter->watchdog_task);
}
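/*
 * Writing the assembled bit mask to VTEICS sets the corresponding
 * interrupt cause bits, so each active queue vector fires and its
 * handler runs as if the hardware had raised the interrupt itself.
 * This is how the watchdog nudges queues that may have work pending
 * but have not generated an interrupt of their own.
 */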
/**
 * ixgbevf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void ixgbevf_tx_timeout(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	schedule_work(&adapter->reset_task);
}
static void ixgbevf_reset_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter;
	adapter = container_of(work, struct ixgbevf_adapter, reset_task);

	/* If we're already down or resetting, just bail */
	if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
	    test_bit(__IXGBEVF_RESETTING, &adapter->state))
		return;

	adapter->tx_timeout_count++;

	ixgbevf_reinit_locked(adapter);
}
/**
 * ixgbevf_watchdog_task - worker thread to bring link up
 * @work: pointer to work_struct containing our data
 **/
static void ixgbevf_watchdog_task(struct work_struct *work)
{
	struct ixgbevf_adapter *adapter = container_of(work,
						       struct ixgbevf_adapter,
						       watchdog_task);
	struct net_device *netdev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = adapter->link_speed;
	bool link_up = adapter->link_up;

	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;

	/*
	 * Always check the link on the watchdog because we have
	 * no LSC interrupt
	 */
	if (hw->mac.ops.check_link) {
		if ((hw->mac.ops.check_link(hw, &link_speed,
					    &link_up, false)) != 0) {
			adapter->link_up = link_up;
			adapter->link_speed = link_speed;
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
			schedule_work(&adapter->reset_task);
			goto pf_has_reset;
		}
	} else {
		/* always assume link is up, if no check link
		 * function */
		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
		link_up = true;
	}
	adapter->link_up = link_up;
	adapter->link_speed = link_speed;

	if (link_up) {
		if (!netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			       10 : 1);
			netif_carrier_on(netdev);
			netif_tx_wake_all_queues(netdev);
		}
	} else {
		adapter->link_up = false;
		adapter->link_speed = 0;
		if (netif_carrier_ok(netdev)) {
			hw_dbg(&adapter->hw, "NIC Link is Down\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}
	}

	ixgbevf_update_stats(adapter);

pf_has_reset:
	/* Reset the timer */
	if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer,
			  round_jiffies(jiffies + (2 * HZ)));

	adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
}
/**
 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i].desc)
			ixgbevf_free_tx_resources(adapter,
						  &adapter->tx_ring[i]);
}
/**
 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->work_limit = tx_ring->count;
	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
	       "descriptor ring\n");
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto alloc_failed;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc) {
		hw_dbg(&adapter->hw,
		       "Unable to allocate memory for "
		       "the receive descriptor ring\n");
		vfree(rx_ring->rx_buffer_info);
		rx_ring->rx_buffer_info = NULL;
		goto alloc_failed;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;
alloc_failed:
	return -ENOMEM;
}
/**
 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (!err)
			continue;
		hw_dbg(&adapter->hw,
		       "Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * ixgbevf_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
			       struct ixgbevf_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	ixgbevf_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}
/**
 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i].desc)
			ixgbevf_free_rx_resources(adapter,
						  &adapter->rx_ring[i]);
}
/**
 * ixgbevf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int ixgbevf_open(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__IXGBEVF_TESTING, &adapter->state))
		return -EBUSY;

	if (hw->adapter_stopped) {
		ixgbevf_reset(adapter);
		/* if adapter is still stopped then PF isn't up and
		 * the vf can't start. */
		if (hw->adapter_stopped) {
			err = IXGBE_ERR_MBX;
			pr_err("Unable to start - perhaps the PF Driver isn't "
			       "up yet\n");
			goto err_setup_reset;
		}
	}

	/* allocate transmit descriptors */
	err = ixgbevf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = ixgbevf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	ixgbevf_configure(adapter);

	/*
	 * Map the Tx/Rx rings to the vectors we were allotted.
	 * if request_irq will be called in this function map_rings
	 * must be called *before* up_complete
	 */
	ixgbevf_map_rings_to_vectors(adapter);

	ixgbevf_up_complete(adapter);

	/* clear any pending interrupts, may auto mask */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);
	err = ixgbevf_request_irq(adapter);
	if (err)
		goto err_req_irq;

	ixgbevf_irq_enable(adapter, true, true);

	return 0;

err_req_irq:
	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);
err_setup_rx:
	ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_reset(adapter);

err_setup_reset:

	return err;
}
/**
 * ixgbevf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int ixgbevf_close(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ixgbevf_down(adapter);
	ixgbevf_free_irq(adapter);

	ixgbevf_free_all_tx_resources(adapter);
	ixgbevf_free_all_rx_resources(adapter);

	return 0;
}
static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
		       struct ixgbevf_ring *tx_ring,
		       struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	int err;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl;
	u32 mss_l4len_idx, l4len;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}
		l4len = tcp_hdrlen(skb);
		*hdr_len += l4len;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			adapter->hw_tso_ctxt++;
		} else if (skb_is_gso_v6(skb)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					     &ipv6_hdr(skb)->daddr,
					     0, IPPROTO_TCP, 0);
			adapter->hw_tso6_ctxt++;
		}

		i = tx_ring->next_to_use;

		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		/* VLAN MACLEN IPLEN */
		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |=
				(tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= ((skb_network_offset(skb)) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		*hdr_len += skb_network_offset(skb);
		vlan_macip_lens |=
			(skb_transport_header(skb) - skb_network_header(skb));
		*hdr_len +=
			(skb_transport_header(skb) - skb_network_header(skb));
		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
				   IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->protocol == htons(ETH_P_IP))
			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);

		/* MSS L4LEN IDX */
		mss_l4len_idx =
			(skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
		mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
		/* use index 1 for TSO */
		mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
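/*
 * Note on the "use index 1 for TSO" above: the advanced context
 * descriptor's IDX field selects one of two on-chip context slots per
 * queue.  This driver keeps TSO state in slot 1 and plain checksum
 * offload state in slot 0 (see ixgbevf_tx_csum() below), so the two
 * offload types do not keep evicting each other's context.
 */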
static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
			    struct ixgbevf_ring *tx_ring,
			    struct sk_buff *skb, u32 tx_flags)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	unsigned int i;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
		i = tx_ring->next_to_use;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);

		if (tx_flags & IXGBE_TX_FLAGS_VLAN)
			vlan_macip_lens |= (tx_flags &
					    IXGBE_TX_FLAGS_VLAN_MASK);
		vlan_macip_lens |= (skb_network_offset(skb) <<
				    IXGBE_ADVTXD_MACLEN_SHIFT);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			vlan_macip_lens |= (skb_transport_header(skb) -
					    skb_network_header(skb));

		context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
		context_desc->seqnum_seed = 0;

		type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
				    IXGBE_ADVTXD_DTYP_CTXT);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			switch (skb->protocol) {
			case __constant_htons(ETH_P_IP):
				type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					type_tucmd_mlhl |=
					    IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			case __constant_htons(ETH_P_IPV6):
				/* XXX what about other V6 headers?? */
				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					type_tucmd_mlhl |=
						IXGBE_ADVTXD_TUCMD_L4T_TCP;
				break;
			default:
				if (unlikely(net_ratelimit())) {
					pr_warn("partial checksum but "
						"proto=%x!\n", skb->protocol);
				}
				break;
			}
		}

		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
		/* use index zero for tx checksum offload */
		context_desc->mss_l4len_idx = 0;

		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		adapter->hw_csum_tx_good++;
		i++;
		if (i == tx_ring->count)
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}

	return false;
}
static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
			  struct ixgbevf_ring *tx_ring,
			  struct sk_buff *skb, u32 tx_flags,
			  unsigned int first)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	unsigned int len;
	unsigned int total = skb->len;
	unsigned int offset = 0, size;
	int count = 0;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int f;
	int i;

	i = tx_ring->next_to_use;

	len = min(skb_headlen(skb), total);
	while (len) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

		tx_buffer_info->length = size;
		tx_buffer_info->mapped_as_page = false;
		tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
						     skb->data + offset,
						     size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
			goto dma_error;
		tx_buffer_info->time_stamp = jiffies;
		tx_buffer_info->next_to_watch = i;

		len -= size;
		total -= size;
		offset += size;
		count++;
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = min((unsigned int)skb_frag_size(frag), total);
		offset = 0;

		while (len) {
			tx_buffer_info = &tx_ring->tx_buffer_info[i];
			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);

			tx_buffer_info->length = size;
			tx_buffer_info->dma =
				skb_frag_dma_map(&adapter->pdev->dev, frag,
						 offset, size, DMA_TO_DEVICE);
			tx_buffer_info->mapped_as_page = true;
			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
				goto dma_error;
			tx_buffer_info->time_stamp = jiffies;
			tx_buffer_info->next_to_watch = i;

			len -= size;
			total -= size;
			offset += size;
			count++;
			i++;
			if (i == tx_ring->count)
				i = 0;
		}
		if (total == 0)
			break;
	}

	if (i == 0)
		i = tx_ring->count - 1;
	else
		i = i - 1;
	tx_ring->tx_buffer_info[i].skb = skb;
	tx_ring->tx_buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");

	/* clear timestamp and dma mappings for failed tx_buffer_info map */
	tx_buffer_info->dma = 0;
	tx_buffer_info->time_stamp = 0;
	tx_buffer_info->next_to_watch = 0;
	count--;

	/* clear timestamp and dma mappings for remaining portion of packet */
	while (count >= 0) {
		count--;
		i--;
		if (i < 0)
			i += tx_ring->count;
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
	}

	return count;
}
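/*
 * The dma_error unwind above walks the ring backwards from the buffer
 * that failed, unmapping everything this call had mapped; the
 * "i += tx_ring->count" in the loop handles stepping backwards across
 * the ring's wrap point, since ring indices are modular.
 */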
static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
			     struct ixgbevf_ring *tx_ring, int tx_flags,
			     int count, u32 paylen, u8 hdr_len)
{
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbevf_tx_buffer *tx_buffer_info;
	u32 olinfo_status = 0, cmd_type_len = 0;
	unsigned int i;

	u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;

	cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;

	cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;

	if (tx_flags & IXGBE_TX_FLAGS_VLAN)
		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;

	if (tx_flags & IXGBE_TX_FLAGS_TSO) {
		cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;

		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

		/* use index 1 context for tso */
		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
			olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
				IXGBE_ADVTXD_POPTS_SHIFT;

	} else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
		olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
			IXGBE_ADVTXD_POPTS_SHIFT;

	olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);

	i = tx_ring->next_to_use;
	while (count--) {
		tx_buffer_info = &tx_ring->tx_buffer_info[i];
		tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
		tx_desc->read.cmd_type_len =
			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
		i++;
		if (i == tx_ring->count)
			i = 0;
	}

	tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);

	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
				   struct ixgbevf_ring *tx_ring, int size)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_stop_subqueue(netdev, tx_ring->queue_index);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it. */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available. */
	if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(netdev, tx_ring->queue_index);
	++adapter->restart_queue;
	return 0;
}

static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
				 struct ixgbevf_ring *tx_ring, int size)
{
	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
}
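/*
 * The stop/recheck dance in __ixgbevf_maybe_stop_tx() closes a race
 * with the Tx clean-up path: after stopping the subqueue, the smp_mb()
 * orders the stop flag against the re-read of the free-descriptor
 * count, so if another CPU freed descriptors in that window the queue
 * is restarted immediately instead of stalling until the next clean.
 */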
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring;
	unsigned int first;
	unsigned int tx_flags = 0;
	u8 hdr_len = 0;
	int r_idx = 0, tso;
	int count = 0;

	unsigned int f;

	tx_ring = &adapter->tx_ring[r_idx];

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= vlan_tx_tag_get(skb);
		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
		tx_flags |= IXGBE_TX_FLAGS_VLAN;
	}

	/* four things can cause us to need a context descriptor */
	if (skb_is_gso(skb) ||
	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
		count++;

	count += TXD_USE_COUNT(skb_headlen(skb));
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));

	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
		adapter->tx_busy++;
		return NETDEV_TX_BUSY;
	}

	first = tx_ring->next_to_use;

	if (skb->protocol == htons(ETH_P_IP))
		tx_flags |= IXGBE_TX_FLAGS_IPV4;
	tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (tso)
		tx_flags |= IXGBE_TX_FLAGS_TSO;
	else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
		 (skb->ip_summed == CHECKSUM_PARTIAL))
		tx_flags |= IXGBE_TX_FLAGS_CSUM;

	ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
			 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
			 skb->len, hdr_len);

	ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

	return NETDEV_TX_OK;
}
/**
 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_set_mac(struct net_device *netdev, void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	if (hw->mac.ops.set_rar)
		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);

	return 0;
}
/**
 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
	u32 msg[2];

	if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
		max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > max_possible_frame))
		return -EINVAL;

	hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
	       netdev->mtu, new_mtu);
	/* must set new MTU before calling down or up */
	netdev->mtu = new_mtu;

	if (!netif_running(netdev)) {
		msg[0] = IXGBE_VF_SET_LPE;
		msg[1] = max_frame;
		hw->mbx.ops.write_posted(hw, msg, 2);
	}

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
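/*
 * Frame-size arithmetic used above: max_frame adds the 14-byte
 * Ethernet header (ETH_HLEN) and 4-byte FCS (ETH_FCS_LEN) to the MTU,
 * so the default MTU of 1500 gives a 1518-byte frame, which fits under
 * the standard MAXIMUM_ETHERNET_VLAN_SIZE limit; only the X540 VF is
 * allowed jumbo frames up to IXGBE_MAX_JUMBO_FRAME_SIZE.
 */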
static void ixgbevf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		ixgbevf_down(adapter);
		ixgbevf_free_irq(adapter);
		ixgbevf_free_all_tx_resources(adapter);
		ixgbevf_free_all_rx_resources(adapter);
	}

	pci_save_state(pdev);

	pci_disable_device(pdev);
}
static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
						   struct rtnl_link_stats64 *stats)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	unsigned int start;
	u64 bytes, packets;
	const struct ixgbevf_ring *ring;
	int i;

	ixgbevf_update_stats(adapter);

	stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		ring = &adapter->rx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->rx_bytes += bytes;
		stats->rx_packets += packets;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = &adapter->tx_ring[i];
		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			bytes = ring->total_bytes;
			packets = ring->total_packets;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		stats->tx_bytes += bytes;
		stats->tx_packets += packets;
	}

	return stats;
}
static int ixgbevf_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (features & NETIF_F_RXCSUM)
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;

	return 0;
}
static const struct net_device_ops ixgbe_netdev_ops = {
	.ndo_open		= ixgbevf_open,
	.ndo_stop		= ixgbevf_close,
	.ndo_start_xmit		= ixgbevf_xmit_frame,
	.ndo_set_rx_mode	= ixgbevf_set_rx_mode,
	.ndo_get_stats64	= ixgbevf_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ixgbevf_set_mac,
	.ndo_change_mtu		= ixgbevf_change_mtu,
	.ndo_tx_timeout		= ixgbevf_tx_timeout,
	.ndo_vlan_rx_add_vid	= ixgbevf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ixgbevf_vlan_rx_kill_vid,
	.ndo_set_features	= ixgbevf_set_features,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
{
	dev->netdev_ops = &ixgbe_netdev_ops;
	ixgbevf_set_ethtool_ops(dev);
	dev->watchdog_timeo = 5 * HZ;
}
/**
 * ixgbevf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbevf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbevf_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct ixgbevf_adapter *adapter = NULL;
	struct ixgbe_hw *hw = NULL;
	const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
	static int cards_found;
	int err, pci_using_dac;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
		pci_using_dac = 0;
	}

	err = pci_request_regions(pdev, ixgbevf_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_set_master(pdev);

#ifdef HAVE_TX_MQ
	netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
				   MAX_TX_QUEUES);
#else
	netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
#endif
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/*
	 * call save state here in standalone driver because it relies on
	 * adapter struct to exist, and needs to call netdev_priv
	 */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}

	ixgbevf_assign_netdev_ops(netdev);

	adapter->bd_number = cards_found;

	/* Setup hw api */
	memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
	hw->mac.type = ii->mac;

	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
	       sizeof(struct ixgbe_mbx_operations));

	adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
	adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
	adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;

	/* setup the private structure */
	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* The HW MAC address was set and/or determined in sw_init */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;
	}

	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_RXCSUM;

	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = ixgbevf_watchdog;
	adapter->watchdog_timer.data = (unsigned long)adapter;

	INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
	INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);

	err = ixgbevf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;

	/* pick up the PCI bus settings for reporting later */
	if (hw->mac.ops.get_bus_info)
		hw->mac.ops.get_bus_info(hw);

	strcpy(netdev->name, "eth%d");

	err = register_netdev(netdev);
	if (err)
		goto err_register;

	adapter->netdev_registered = true;

	netif_carrier_off(netdev);

	ixgbevf_init_last_counter_stats(adapter);

	/* print the MAC address */
	hw_dbg(hw, "%pM\n", netdev->dev_addr);

	hw_dbg(hw, "MAC: %d\n", hw->mac.type);

	hw_dbg(hw, "LRO is disabled\n");

	hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
	cards_found++;
	return 0;

err_register:
err_sw_init:
	ixgbevf_reset_interrupt_capability(adapter);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * ixgbevf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbevf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbevf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	set_bit(__IXGBEVF_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	ixgbevf_reset_interrupt_capability(adapter);

	iounmap(adapter->hw.hw_addr);
	pci_release_regions(pdev);

	hw_dbg(&adapter->hw, "Remove complete\n");

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
static struct pci_driver ixgbevf_driver = {
	.name     = ixgbevf_driver_name,
	.id_table = ixgbevf_pci_tbl,
	.probe    = ixgbevf_probe,
	.remove   = __devexit_p(ixgbevf_remove),
	.shutdown = ixgbevf_shutdown,
};
/**
 * ixgbevf_init_module - Driver Registration Routine
 *
 * ixgbevf_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init ixgbevf_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", ixgbevf_driver_string,
		ixgbevf_driver_version);

	pr_info("%s\n", ixgbevf_copyright);

	ret = pci_register_driver(&ixgbevf_driver);
	return ret;
}

module_init(ixgbevf_init_module);
/**
 * ixgbevf_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbevf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbevf_exit_module(void)
{
	pci_unregister_driver(&ixgbevf_driver);
}
/**
 * ixgbevf_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{
	struct ixgbevf_adapter *adapter = hw->back;
	return adapter->netdev->name;
}

module_exit(ixgbevf_exit_module);
/* ixgbevf_main.c */