/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/prefetch.h>
#include <net/busy_poll.h>

#include "i40evf.h"
#include "i40e_trace.h"
#include "i40e_prototype.h"
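/* Helper used throughout the Tx path: pack the command, offset, buffer size
 * and L2 tag fields into a data descriptor's cmd_type_offset_bsz quad word
 * using the QW1 shifts defined for data descriptors.
 */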
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
                                u32 td_tag)
{
        return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
                           ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
                           ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
                           ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
                           ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}

#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/**
 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
 * @ring:      the ring that owns the buffer
 * @tx_buffer: the buffer to free
 **/
static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                                            struct i40e_tx_buffer *tx_buffer)
{
        if (tx_buffer->skb) {
                if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
                        kfree(tx_buffer->raw_buf);
                else
                        dev_kfree_skb_any(tx_buffer->skb);
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
                                         dma_unmap_len(tx_buffer, len),
                                         DMA_TO_DEVICE);
        } else if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
        }

        tx_buffer->next_to_watch = NULL;
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
        /* tx_buffer must be completely set up in the transmit path */
}
/**
 * i40evf_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 **/
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
{
        unsigned long bi_size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_bi)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++)
                i40e_unmap_and_free_tx_resource(tx_ring, &tx_ring->tx_bi[i]);

        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        memset(tx_ring->tx_bi, 0, bi_size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        if (!tx_ring->netdev)
                return;

        /* cleanup Tx queue statistics */
        netdev_tx_reset_queue(txring_txq(tx_ring));
}
/**
 * i40evf_free_tx_resources - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
{
        i40evf_clean_tx_ring(tx_ring);
        kfree(tx_ring->tx_bi);
        tx_ring->tx_bi = NULL;

        if (tx_ring->desc) {
                dma_free_coherent(tx_ring->dev, tx_ring->size,
                                  tx_ring->desc, tx_ring->dma);
                tx_ring->desc = NULL;
        }
}
/**
 * i40evf_get_tx_pending - how many Tx descriptors not processed
 * @ring: the ring of descriptors
 * @in_sw: is tx_pending being checked in SW or HW
 *
 * Since there is no access to the ring head register
 * in XL710, we need to use our local copies
 **/
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
        u32 head, tail;

        head = ring->next_to_clean;
        tail = readl(ring->tail);

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);

        return 0;
}
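/* Example of the wrap-around case above: on a 512-descriptor ring with
 * head (next_to_clean) = 500 and tail = 10, there are
 * 10 + 512 - 500 = 22 descriptors still pending.
 */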
/**
 * i40e_clean_tx_irq - Reclaim resources after transmit completes
 * @vsi: the VSI we care about
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
                              struct i40e_ring *tx_ring, int napi_budget)
{
        u16 i = tx_ring->next_to_clean;
        struct i40e_tx_buffer *tx_buf;
        struct i40e_tx_desc *tx_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = vsi->work_limit;

        tx_buf = &tx_ring->tx_bi[i];
        tx_desc = I40E_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                smp_rmb();

                i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
                /* if the descriptor isn't done, no work yet to do */
                if (!(eop_desc->cmd_type_offset_bsz &
                      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buf->bytecount;
                total_packets += tx_buf->gso_segs;

                /* free the skb */
                napi_consume_skb(tx_buf->skb, napi_budget);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buf, dma),
                                 dma_unmap_len(tx_buf, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                tx_buf->skb = NULL;
                dma_unmap_len_set(tx_buf, len, 0);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        i40e_trace(clean_tx_irq_unmap,
                                   tx_ring, tx_desc, tx_buf);

                        tx_buf++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buf = tx_ring->tx_bi;
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buf, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buf, dma),
                                               dma_unmap_len(tx_buf, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buf, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buf++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buf = tx_ring->tx_bi;
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                }

                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        i += tx_ring->count;
        tx_ring->next_to_clean = i;
        u64_stats_update_begin(&tx_ring->syncp);
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;

        if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
                /* check to see if there are < 4 descriptors
                 * waiting to be written back, then kick the hardware to force
                 * them to be written back in case we stay in NAPI.
                 * In this mode on X722 we do not enable Interrupt.
                 */
                unsigned int j = i40evf_get_tx_pending(tx_ring, false);

                if (budget &&
                    ((j / WB_STRIDE) == 0) && (j > 0) &&
                    !test_bit(__I40E_VSI_DOWN, vsi->state) &&
                    (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
                        tx_ring->arm_wb = true;
        }

        /* notify netdev of completed buffers */
        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
        if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
                     (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !test_bit(__I40E_VSI_DOWN, vsi->state)) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);
                        ++tx_ring->tx_stats.restart_queue;
                }
        }

        return !!budget;
}
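/* Note on the WB_ON_ITR block in i40e_clean_tx_irq(): (j / WB_STRIDE) == 0
 * simply means fewer than WB_STRIDE descriptors are still awaiting
 * write-back, so the ring is nearly idle and it is worth asking the
 * hardware to flush them instead of waiting out the ITR interval.
 */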
/**
 * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to enable writeback
 **/
static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
                                  struct i40e_q_vector *q_vector)
{
        u16 flags = q_vector->tx.ring[0].flags;
        u32 val;

        if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
                return;

        if (q_vector->arm_wb_state)
                return;

        val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
              I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */

        wr32(&vsi->back->hw,
             I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
                                  vsi->base_vector - 1), val);
        q_vector->arm_wb_state = true;
}
/**
 * i40evf_force_wb - Issue SW Interrupt so HW does a wb
 * @vsi: the VSI we care about
 * @q_vector: the vector on which to force writeback
 **/
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
{
        u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
                  I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
                  I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
                  I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
                  /* allow 00 to be written to the index */;

        wr32(&vsi->back->hw,
             I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
             val);
}
/**
 * i40e_set_new_dynamic_itr - Find new ITR level
 * @rc: structure containing ring performance data
 *
 * Returns true if ITR changed, false if not
 *
 * Stores a new ITR value based on packets and byte counts during
 * the last interrupt.  The advantage of per interrupt computation
 * is faster updates and more accurate ITR for the current traffic
 * pattern.  Constants in this function were computed based on
 * theoretical maximum wire speed and thresholds were set based on
 * testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
{
        enum i40e_latency_range new_latency_range = rc->latency_range;
        u32 new_itr = rc->itr;
        int bytes_per_int;
        unsigned int usecs, estimated_usecs;

        if (rc->total_packets == 0 || !rc->itr)
                return false;

        usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
        bytes_per_int = rc->total_bytes / usecs;

        /* The calculations in this algorithm depend on interrupts actually
         * firing at the ITR rate. This may not happen if the packet rate is
         * really low, or if we've been napi polling. Check to make sure
         * that's not the case before we continue.
         */
        estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
        if (estimated_usecs > usecs) {
                new_latency_range = I40E_LOW_LATENCY;
                goto reset_latency;
        }

        /* simple throttlerate management
         *   0-10MB/s   lowest (50000 ints/s)
         *  10-20MB/s   low    (20000 ints/s)
         *  20-1249MB/s bulk   (18000 ints/s)
         *
         * The math works out because the divisor is in 10^(-6) which
         * turns the bytes/us input value into MB/s values, but
         * make sure to use usecs, as the register values written
         * are in 2 usec increments in the ITR registers, and make sure
         * to use the smoothed values that the countdown timer gives us.
         */
        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                if (bytes_per_int > 10)
                        new_latency_range = I40E_LOW_LATENCY;
                break;
        case I40E_LOW_LATENCY:
                if (bytes_per_int > 20)
                        new_latency_range = I40E_BULK_LATENCY;
                else if (bytes_per_int <= 10)
                        new_latency_range = I40E_LOWEST_LATENCY;
                break;
        case I40E_BULK_LATENCY:
        default:
                if (bytes_per_int <= 20)
                        new_latency_range = I40E_LOW_LATENCY;
                break;
        }

reset_latency:
        rc->latency_range = new_latency_range;

        switch (new_latency_range) {
        case I40E_LOWEST_LATENCY:
                new_itr = I40E_ITR_50K;
                break;
        case I40E_LOW_LATENCY:
                new_itr = I40E_ITR_20K;
                break;
        case I40E_BULK_LATENCY:
                new_itr = I40E_ITR_18K;
                break;
        default:
                break;
        }

        rc->total_bytes = 0;
        rc->total_packets = 0;
        rc->last_itr_update = jiffies;

        if (new_itr != rc->itr) {
                rc->itr = new_itr;
                return true;
        }

        return false;
}
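/* Rough example of the throttle-rate math above: rc->itr is kept in the
 * register's 2 usec units, so usecs = (itr << 1) * ITR_COUNTDOWN_START
 * approximates the time covered by the sample window; total_bytes / usecs
 * is bytes per microsecond, which reads directly as MB/s against the
 * 10/20 thresholds in the first switch statement.
 */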
/**
 * i40evf_setup_tx_descriptors - Allocate the Tx descriptors
 * @tx_ring: the tx ring to set up
 *
 * Return 0 on success, negative on error
 **/
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
{
        struct device *dev = tx_ring->dev;
        int bi_size;

        if (!dev)
                return -ENOMEM;

        /* warn if we are about to overwrite the pointer */
        WARN_ON(tx_ring->tx_bi);
        bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count;
        tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!tx_ring->tx_bi)
                goto err;

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc) {
                dev_info(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
                         tx_ring->size);
                goto err;
        }

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        return 0;

err:
        kfree(tx_ring->tx_bi);
        tx_ring->tx_bi = NULL;
        return -ENOMEM;
}
/**
 * i40evf_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 **/
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
        unsigned long bi_size;
        u16 i;

        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_bi)
                return;

        if (rx_ring->skb) {
                dev_kfree_skb(rx_ring->skb);
                rx_ring->skb = NULL;
        }

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

                if (!rx_bi->page)
                        continue;

                /* Invalidate cache lines that may have been written to by
                 * device so that we avoid corrupting memory.
                 */
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              rx_bi->dma,
                                              rx_bi->page_offset,
                                              rx_ring->rx_buf_len,
                                              DMA_FROM_DEVICE);

                /* free resources associated with mapping */
                dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
                                     i40e_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE,
                                     I40E_RX_DMA_ATTR);

                __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

                rx_bi->page = NULL;
                rx_bi->page_offset = 0;
        }

        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_bi, 0, bi_size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
}
/**
 * i40evf_free_rx_resources - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
{
        i40evf_clean_rx_ring(rx_ring);
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;

        if (rx_ring->desc) {
                dma_free_coherent(rx_ring->dev, rx_ring->size,
                                  rx_ring->desc, rx_ring->dma);
                rx_ring->desc = NULL;
        }
}
/**
 * i40evf_setup_rx_descriptors - Allocate Rx descriptors
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
{
        struct device *dev = rx_ring->dev;
        int bi_size;

        /* warn if we are about to overwrite the pointer */
        WARN_ON(rx_ring->rx_bi);
        bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
        rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
        if (!rx_ring->rx_bi)
                goto err;

        u64_stats_init(&rx_ring->syncp);

        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);

        if (!rx_ring->desc) {
                dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
                         rx_ring->size);
                goto err;
        }

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        return 0;

err:
        kfree(rx_ring->rx_bi);
        rx_ring->rx_bi = NULL;
        return -ENOMEM;
}
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new head index
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
        rx_ring->next_to_use = val;

        /* update next to alloc since we have filled the ring */
        rx_ring->next_to_alloc = val;

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        writel(val, rx_ring->tail);
}
/**
 * i40e_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
{
        return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
}
/**
 * i40e_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buffer struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 **/
static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
                                   struct i40e_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page)) {
                rx_ring->rx_stats.page_reuse_count++;
                return true;
        }

        /* alloc new page for storage */
        page = dev_alloc_pages(i40e_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_page_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                                 i40e_rx_pg_size(rx_ring),
                                 DMA_FROM_DEVICE,
                                 I40E_RX_DMA_ATTR);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_pages(page, i40e_rx_pg_order(rx_ring));
                rx_ring->rx_stats.alloc_page_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = i40e_rx_offset(rx_ring);

        /* initialize pagecnt_bias to 1 representing we fully own page */
        bi->pagecnt_bias = 1;

        return true;
}
/**
 * i40e_receive_skb - Send a completed packet up the stack
 * @rx_ring: rx ring in play
 * @skb: packet to send up
 * @vlan_tag: vlan tag for packet
 **/
static void i40e_receive_skb(struct i40e_ring *rx_ring,
                             struct sk_buff *skb, u16 vlan_tag)
{
        struct i40e_q_vector *q_vector = rx_ring->q_vector;

        if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
            (vlan_tag & VLAN_VID_MASK))
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

        napi_gro_receive(&q_vector->napi, skb);
}
/**
 * i40evf_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail
 **/
bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
        u16 ntu = rx_ring->next_to_use;
        union i40e_rx_desc *rx_desc;
        struct i40e_rx_buffer *bi;

        /* do nothing if no valid netdev defined */
        if (!rx_ring->netdev || !cleaned_count)
                return false;

        rx_desc = I40E_RX_DESC(rx_ring, ntu);
        bi = &rx_ring->rx_bi[ntu];

        do {
                if (!i40e_alloc_mapped_page(rx_ring, bi))
                        goto no_buffers;

                /* sync the buffer for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                                 bi->page_offset,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);

                /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
                ntu++;
                if (unlikely(ntu == rx_ring->count)) {
                        rx_desc = I40E_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_bi;
                        ntu = 0;
                }

                /* clear the status bits for the next_to_use descriptor */
                rx_desc->wb.qword1.status_error_len = 0;

                cleaned_count--;
        } while (cleaned_count);

        if (rx_ring->next_to_use != ntu)
                i40e_release_rx_desc(rx_ring, ntu);

        return false;

no_buffers:
        if (rx_ring->next_to_use != ntu)
                i40e_release_rx_desc(rx_ring, ntu);

        /* make sure to come back via polling to try again after
         * allocation failure
         */
        return true;
}
/**
 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
 * @vsi: the VSI we care about
 * @skb: skb currently being received and modified
 * @rx_desc: the receive descriptor
 **/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    struct sk_buff *skb,
                                    union i40e_rx_desc *rx_desc)
{
        struct i40e_rx_ptype_decoded decoded;
        u32 rx_error, rx_status;
        bool ipv4, ipv6;
        u8 ptype;
        u64 qword;

        qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
        ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
        rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
                   I40E_RXD_QW1_ERROR_SHIFT;
        rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
                    I40E_RXD_QW1_STATUS_SHIFT;
        decoded = decode_rx_desc_ptype(ptype);

        skb->ip_summed = CHECKSUM_NONE;

        skb_checksum_none_assert(skb);

        /* Rx csum enabled and ip headers found? */
        if (!(vsi->netdev->features & NETIF_F_RXCSUM))
                return;

        /* did the hardware decode the packet and checksum? */
        if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
                return;

        /* both known and outer_ip must be set for the below code to work */
        if (!(decoded.known && decoded.outer_ip))
                return;

        ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
               (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
        ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
               (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);

        if (ipv4 &&
            (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
                         BIT(I40E_RX_DESC_ERROR_EIPE_SHIFT))))
                goto checksum_fail;

        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
            rx_status & BIT(I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;

        /* there was some L4 error, count error and punt packet to the stack */
        if (rx_error & BIT(I40E_RX_DESC_ERROR_L4E_SHIFT))
                goto checksum_fail;

        /* handle packets that were not able to be checksummed due
         * to arrival speed, in this case the stack can compute
         * the csum.
         */
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;

        /* Only report checksum unnecessary for TCP, UDP, or SCTP */
        switch (decoded.inner_prot) {
        case I40E_RX_PTYPE_INNER_PROT_TCP:
        case I40E_RX_PTYPE_INNER_PROT_UDP:
        case I40E_RX_PTYPE_INNER_PROT_SCTP:
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                /* fall through */
        default:
                break;
        }

        return;

checksum_fail:
        vsi->back->hw_csum_rx_error++;
}
/**
 * i40e_ptype_to_htype - get a hash type
 * @ptype: the ptype value from the descriptor
 *
 * Returns a hash type to be used by skb_set_hash
 **/
static inline int i40e_ptype_to_htype(u8 ptype)
{
        struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);

        if (!decoded.known)
                return PKT_HASH_TYPE_NONE;

        if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
            decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
                return PKT_HASH_TYPE_L4;
        else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
                 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
                return PKT_HASH_TYPE_L3;
        else
                return PKT_HASH_TYPE_L2;
}
/**
 * i40e_rx_hash - set the hash value in the skb
 * @ring: descriptor ring
 * @rx_desc: specific descriptor
 * @skb: skb currently being received and modified
 * @rx_ptype: Rx packet type
 **/
static inline void i40e_rx_hash(struct i40e_ring *ring,
                                union i40e_rx_desc *rx_desc,
                                struct sk_buff *skb,
                                u8 rx_ptype)
{
        u32 hash;
        const __le64 rss_mask =
                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);

        if (!(ring->netdev->features & NETIF_F_RXHASH))
                return;

        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
                hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
                skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
        }
}
/**
 * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 * @rx_ptype: the packet type decoded by hardware
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, protocol, and
 * other fields within the skb.
 **/
static inline
void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
                               union i40e_rx_desc *rx_desc, struct sk_buff *skb,
                               u8 rx_ptype)
{
        i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);

        i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);

        skb_record_rx_queue(skb, rx_ring->queue_index);

        /* modifies the skb - consumes the enet header */
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
/**
 * i40e_cleanup_headers - Correct empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @skb: pointer to current skb being fixed
 *
 * Also address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
{
        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;

        return false;
}
/**
 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
                               struct i40e_rx_buffer *old_buff)
{
        struct i40e_rx_buffer *new_buff;
        u16 nta = rx_ring->next_to_alloc;

        new_buff = &rx_ring->rx_bi[nta];

        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* transfer page from old buffer to new buffer */
        new_buff->dma = old_buff->dma;
        new_buff->page = old_buff->page;
        new_buff->page_offset = old_buff->page_offset;
        new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
/**
 * i40e_page_is_reusable - check if any reuse is possible
 * @page: page struct to check
 *
 * A page is not reusable if it was allocated under low memory
 * conditions, or it's not in the same NUMA node as this CPU.
 */
static inline bool i40e_page_is_reusable(struct page *page)
{
        return (page_to_nid(page) == numa_mem_id()) &&
                !page_is_pfmemalloc(page);
}
/**
 * i40e_can_reuse_rx_page - Determine if this page can be reused by
 * the adapter for another receive
 *
 * @rx_buffer: buffer containing the page
 *
 * If page is reusable, rx_buffer->page_offset is adjusted to point to
 * an unused region in the page.
 *
 * For small pages, @truesize will be a constant value, half the size
 * of the memory at page.  We'll attempt to alternate between high and
 * low halves of the page, with one half ready for use by the hardware
 * and the other half being consumed by the stack.  We use the page
 * ref count to determine whether the stack has finished consuming the
 * portion of this page that was passed up with a previous packet.  If
 * the page ref count is >1, we'll assume the "other" half page is
 * still busy, and this page cannot be reused.
 *
 * For larger pages, @truesize will be the actual space used by the
 * received packet (adjusted upward to an even multiple of the cache
 * line size).  This will advance through the page by the amount
 * actually consumed by the received packets while there is still
 * space for a buffer.  Each region of larger pages will be used at
 * most once, after which the page will not be reused.
 *
 * In either case, if the page is reusable its refcount is increased.
 **/
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
{
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;

        /* Is any reuse possible? */
        if (unlikely(!i40e_page_is_reusable(page)))
                return false;

#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely((page_count(page) - pagecnt_bias) > 1))
                return false;
#else
#define I40E_LAST_OFFSET \
        (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048)
        if (rx_buffer->page_offset > I40E_LAST_OFFSET)
                return false;
#endif

        /* If we have drained the page fragment pool we need to update
         * the pagecnt_bias and page count so that we fully restock the
         * number of references the driver holds.
         */
        if (unlikely(!pagecnt_bias)) {
                page_ref_add(page, USHRT_MAX);
                rx_buffer->pagecnt_bias = USHRT_MAX;
        }

        return true;
}
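/* pagecnt_bias above tracks how many references the driver still "owns"
 * on the page, so page_count(page) - pagecnt_bias is the number of
 * references currently held by the stack.  Restocking the bias with
 * USHRT_MAX in one go avoids touching the atomic page refcount on every
 * buffer recycle.
 */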
/**
 * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 * It will just attach the page as a frag to the skb.
 *
 * The function will then update the page offset.
 **/
static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
                             struct i40e_rx_buffer *rx_buffer,
                             struct sk_buff *skb,
                             unsigned int size)
{
#if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
        unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
#endif

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                        rx_buffer->page_offset, size, truesize);

        /* page is being used so we must update the page offset */
#if (PAGE_SIZE < 8192)
        rx_buffer->page_offset ^= truesize;
#else
        rx_buffer->page_offset += truesize;
#endif
}
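/* With 4K pages and 2K Rx buffers the XOR above simply flips page_offset
 * between the two halves of the page (e.g. 0 <-> 2048): the half just
 * handed to the stack rests while the other half goes back to hardware.
 */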
/**
 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
 * @rx_ring: rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
                                                 const unsigned int size)
{
        struct i40e_rx_buffer *rx_buffer;

        rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
        prefetchw(rx_buffer->page);

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      size,
                                      DMA_FROM_DEVICE);

        /* We have pulled a buffer for use, so decrement pagecnt_bias */
        rx_buffer->pagecnt_bias--;

        return rx_buffer;
}
/**
 * i40e_construct_skb - Allocate skb and populate it
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function allocates an skb.  It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
                                          struct i40e_rx_buffer *rx_buffer,
                                          unsigned int size)
{
        void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
        unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
        unsigned int headlen;
        struct sk_buff *skb;

        /* prefetch first cache line of first page */
        prefetch(va);
#if L1_CACHE_BYTES < 128
        prefetch(va + L1_CACHE_BYTES);
#endif

        /* allocate a skb to store the frags */
        skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
                               I40E_RX_HDR_SIZE,
                               GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > I40E_RX_HDR_SIZE)
                headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);

        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

        /* update all of the pointers */
        size -= headlen;
        if (size) {
                skb_add_rx_frag(skb, 0, rx_buffer->page,
                                rx_buffer->page_offset + headlen,
                                size, truesize);

                /* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
                rx_buffer->page_offset ^= truesize;
#else
                rx_buffer->page_offset += truesize;
#endif
        } else {
                /* buffer is unused, reset bias back to rx_buffer */
                rx_buffer->pagecnt_bias++;
        }

        return skb;
}
/**
 * i40e_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buffer: Rx buffer to pull data from
 * @size: size of buffer to add to skb
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                                      struct i40e_rx_buffer *rx_buffer,
                                      unsigned int size)
{
        void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                                SKB_DATA_ALIGN(I40E_SKB_PAD + size);
#endif
        struct sk_buff *skb;

        /* prefetch first cache line of first page */
        prefetch(va);
#if L1_CACHE_BYTES < 128
        prefetch(va + L1_CACHE_BYTES);
#endif
        /* build an skb around the page buffer */
        skb = build_skb(va - I40E_SKB_PAD, truesize);
        if (unlikely(!skb))
                return NULL;

        /* update pointers within the skb to store the data */
        skb_reserve(skb, I40E_SKB_PAD);
        __skb_put(skb, size);

        /* buffer is used by skb, update page_offset */
#if (PAGE_SIZE < 8192)
        rx_buffer->page_offset ^= truesize;
#else
        rx_buffer->page_offset += truesize;
#endif

        return skb;
}
/**
 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: rx buffer to pull data from
 *
 * This function will clean up the contents of the rx_buffer.  It will
 * either recycle the buffer or unmap it and free the associated resources.
 **/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
                               struct i40e_rx_buffer *rx_buffer)
{
        if (i40e_can_reuse_rx_page(rx_buffer)) {
                /* hand second half of page back to the ring */
                i40e_reuse_rx_page(rx_ring, rx_buffer);
                rx_ring->rx_stats.page_reuse_count++;
        } else {
                /* we are not reusing the buffer so unmap it */
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                     i40e_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE, I40E_RX_DMA_ATTR);
                __page_frag_cache_drain(rx_buffer->page,
                                        rx_buffer->pagecnt_bias);
        }

        /* clear contents of buffer_info */
        rx_buffer->page = NULL;
}
/**
 * i40e_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 * @skb: Current socket buffer containing buffer in progress
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
                            union i40e_rx_desc *rx_desc,
                            struct sk_buff *skb)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next to clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        prefetch(I40E_RX_DESC(rx_ring, ntc));

        /* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
        if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
                return false;

        rx_ring->rx_stats.non_eop_descs++;

        return true;
}
/**
 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing.  The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 **/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
        bool failure = false;

        while (likely(total_rx_packets < (unsigned int)budget)) {
                struct i40e_rx_buffer *rx_buffer;
                union i40e_rx_desc *rx_desc;
                unsigned int size;
                u16 vlan_tag;
                u8 rx_ptype;
                u64 qword;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
                        failure = failure ||
                                  i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);

                /* status_error_len will always be zero for unused descriptors
                 * because it's cleared in cleanup, and overlaps with hdr_addr
                 * which is always zero because packet split isn't used, if the
                 * hardware wrote DD then the length will be non-zero
                 */
                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we have
                 * verified the descriptor has been written back.
                 */
                dma_rmb();

                size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
                       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
                if (!size)
                        break;

                i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
                rx_buffer = i40e_get_rx_buffer(rx_ring, size);

                /* retrieve a buffer from the ring */
                if (skb)
                        i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
                else if (ring_uses_build_skb(rx_ring))
                        skb = i40e_build_skb(rx_ring, rx_buffer, size);
                else
                        skb = i40e_construct_skb(rx_ring, rx_buffer, size);

                /* exit if we failed to retrieve a buffer */
                if (!skb) {
                        rx_ring->rx_stats.alloc_buff_failed++;
                        rx_buffer->pagecnt_bias++;
                        break;
                }

                i40e_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;

                if (i40e_is_non_eop(rx_ring, rx_desc, skb))
                        continue;

                /* ERR_MASK will only have valid bits if EOP set, and
                 * what we are doing here is actually checking
                 * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
                 * the error field
                 */
                if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
                        dev_kfree_skb_any(skb);
                        skb = NULL;
                        continue;
                }

                if (i40e_cleanup_headers(rx_ring, skb)) {
                        skb = NULL;
                        continue;
                }

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;

                qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
                rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                           I40E_RXD_QW1_PTYPE_SHIFT;

                /* populate checksum, VLAN, and protocol */
                i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

                vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
                           le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;

                i40e_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb);
                i40e_receive_skb(rx_ring, skb, vlan_tag);
                skb = NULL;

                /* update budget accounting */
                total_rx_packets++;
        }

        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->syncp);
        rx_ring->stats.packets += total_rx_packets;
        rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        rx_ring->q_vector->rx.total_packets += total_rx_packets;
        rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

        /* guarantee a trip back through this routine if there was a failure */
        return failure ? budget : (int)total_rx_packets;
}
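/* Returning the full budget when a buffer allocation failed (see above)
 * tells NAPI that work remains, which guarantees another pass through
 * this routine so the ring can be refilled.
 */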
static u32 i40e_buildreg_itr(const int type, const u16 itr)
{
        u32 val;

        val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
              /* Don't clear PBA because that can cause lost interrupts that
               * came in while we were cleaning/polling
               */
              (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
              (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);

        return val;
}
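/* For illustration: i40e_buildreg_itr(I40E_RX_ITR, 40) sets INTENA, puts
 * the Rx ITR index in the ITR_INDX field and an interval of 40 (in the
 * register's 2 usec units) in the INTERVAL field of VFINT_DYN_CTLN1.
 */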
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
{
        struct i40evf_adapter *adapter = vsi->back;

        return adapter->rx_rings[idx].rx_itr_setting;
}

static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
{
        struct i40evf_adapter *adapter = vsi->back;

        return adapter->tx_rings[idx].tx_itr_setting;
}
/**
 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
 * @vsi: the VSI we care about
 * @q_vector: q_vector for which itr is being updated and interrupt enabled
 **/
static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
                                          struct i40e_q_vector *q_vector)
{
        struct i40e_hw *hw = &vsi->back->hw;
        bool rx = false, tx = false;
        u32 rxval, txval;
        int vector;
        int idx = q_vector->v_idx;
        int rx_itr_setting, tx_itr_setting;

        vector = (q_vector->v_idx + vsi->base_vector);

        /* avoid dynamic calculation if in countdown mode OR if
         * all dynamic is disabled
         */
        rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);

        rx_itr_setting = get_rx_itr(vsi, idx);
        tx_itr_setting = get_tx_itr(vsi, idx);

        if (q_vector->itr_countdown > 0 ||
            (!ITR_IS_DYNAMIC(rx_itr_setting) &&
             !ITR_IS_DYNAMIC(tx_itr_setting)))
                goto enable_int;

        if (ITR_IS_DYNAMIC(rx_itr_setting)) {
                rx = i40e_set_new_dynamic_itr(&q_vector->rx);
                rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
        }

        if (ITR_IS_DYNAMIC(tx_itr_setting)) {
                tx = i40e_set_new_dynamic_itr(&q_vector->tx);
                txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
        }

        if (rx || tx) {
                /* get the higher of the two ITR adjustments and
                 * use the same value for both ITR registers
                 * when in adaptive mode (Rx and/or Tx)
                 */
                u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);

                q_vector->tx.itr = q_vector->rx.itr = itr;
                txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
                tx = true;
                rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
                rx = true;
        }

        /* only need to enable the interrupt once, but need
         * to possibly update both ITR values
         */
        if (rx) {
                /* set the INTENA_MSK_MASK so that this first write
                 * won't actually enable the interrupt, instead just
                 * updating the ITR (it's bit 31 PF and VF)
                 */
                rxval |= BIT(31);
                /* don't check _DOWN because interrupt isn't being enabled */
                wr32(hw, INTREG(vector - 1), rxval);
        }

enable_int:
        if (!test_bit(__I40E_VSI_DOWN, vsi->state))
                wr32(hw, INTREG(vector - 1), txval);

        if (q_vector->itr_countdown)
                q_vector->itr_countdown--;
        else
                q_vector->itr_countdown = ITR_COUNTDOWN_START;
}
/**
 * i40evf_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 **/
int i40evf_napi_poll(struct napi_struct *napi, int budget)
{
        struct i40e_q_vector *q_vector =
                               container_of(napi, struct i40e_q_vector, napi);
        struct i40e_vsi *vsi = q_vector->vsi;
        struct i40e_ring *ring;
        bool clean_complete = true;
        bool arm_wb = false;
        int budget_per_ring;
        int work_done = 0;

        if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
                napi_complete(napi);
                return 0;
        }

        /* Since the actual Tx work is minimal, we can give the Tx a larger
         * budget and be more aggressive about cleaning up the Tx descriptors.
         */
        i40e_for_each_ring(ring, q_vector->tx) {
                if (!i40e_clean_tx_irq(vsi, ring, budget)) {
                        clean_complete = false;
                        continue;
                }
                arm_wb |= ring->arm_wb;
                ring->arm_wb = false;
        }

        /* Handle case where we are called by netpoll with a budget of 0 */
        if (budget <= 0)
                goto tx_only;

        /* We attempt to distribute budget to each Rx queue fairly, but don't
         * allow the budget to go below 1 because that would exit polling early.
         */
        budget_per_ring = max(budget/q_vector->num_ringpairs, 1);

        i40e_for_each_ring(ring, q_vector->rx) {
                int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);

                work_done += cleaned;
                /* if we clean as many as budgeted, we must not be done */
                if (cleaned >= budget_per_ring)
                        clean_complete = false;
        }

        /* If work not completed, return budget and polling will return */
        if (!clean_complete) {
                int cpu_id = smp_processor_id();

                /* It is possible that the interrupt affinity has changed but,
                 * if the cpu is pegged at 100%, polling will never exit while
                 * traffic continues and the interrupt will be stuck on this
                 * cpu.  We check to make sure affinity is correct before we
                 * continue to poll, otherwise we must stop polling so the
                 * interrupt can move to the correct cpu.
                 */
                if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) {
                        /* Tell napi that we are done polling */
                        napi_complete_done(napi, work_done);

                        /* Force an interrupt */
                        i40evf_force_wb(vsi, q_vector);

                        /* Return budget-1 so that polling stops */
                        return budget - 1;
                }
tx_only:
                if (arm_wb) {
                        q_vector->tx.ring[0].tx_stats.tx_force_wb++;
                        i40e_enable_wb_on_itr(vsi, q_vector);
                }
                return budget;
        }

        if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
                q_vector->arm_wb_state = false;

        /* Work is done so exit the polling mode and re-enable the interrupt */
        napi_complete_done(napi, work_done);

        i40e_update_enable_itr(vsi, q_vector);

        return min(work_done, budget - 1);
}
/**
 * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 * @flags:   the tx flags to be set
 *
 * Checks the skb and sets up the transmit flags related to VLAN tagging
 * for the HW, such as VLAN, DCB, etc.
 *
 * Returns an error code to indicate the frame should be dropped upon error,
 * and otherwise returns 0 to indicate the flags have been set properly.
 **/
static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
                                               struct i40e_ring *tx_ring,
                                               u32 *flags)
{
        __be16 protocol = skb->protocol;
        u32 tx_flags = 0;

        if (protocol == htons(ETH_P_8021Q) &&
            !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
                /* When HW VLAN acceleration is turned off by the user the
                 * stack sets the protocol to 8021q so that the driver
                 * can take any steps required to support the SW only
                 * VLAN handling.  In our case the driver doesn't need
                 * to take any further steps so just set the protocol
                 * to the encapsulated ethertype.
                 */
                skb->protocol = vlan_get_protocol(skb);
                goto out;
        }

        /* if we have a HW VLAN tag being added, default to the HW one */
        if (skb_vlan_tag_present(skb)) {
                tx_flags |= skb_vlan_tag_get(skb) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_HW_VLAN;
        /* else if it is a SW VLAN, check the next protocol and store the tag */
        } else if (protocol == htons(ETH_P_8021Q)) {
                struct vlan_hdr *vhdr, _vhdr;

                vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
                if (!vhdr)
                        return -EINVAL;

                protocol = vhdr->h_vlan_encapsulated_proto;
                tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= I40E_TX_FLAGS_SW_VLAN;
        }

out:
        *flags = tx_flags;
        return 0;
}
/**
 * i40e_tso - set up the tso context descriptor
 * @first:    pointer to first Tx buffer for xmit
 * @hdr_len:  ptr to the size of the packet header
 * @cd_type_cmd_tso_mss: Quad Word 1
 *
 * Returns 0 if no TSO can happen, 1 if tso is going, or error
 **/
static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
                    u64 *cd_type_cmd_tso_mss)
{
        struct sk_buff *skb = first->skb;
        u64 cd_cmd, cd_tso_len, cd_mss;
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } ip;
        union {
                struct tcphdr *tcp;
                struct udphdr *udp;
                unsigned char *hdr;
        } l4;
        u32 paylen, l4_offset;
        u16 gso_segs, gso_size;
        int err;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        err = skb_cow_head(skb, 0);
        if (err < 0)
                return err;

        ip.hdr = skb_network_header(skb);
        l4.hdr = skb_transport_header(skb);

        /* initialize outer IP header fields */
        if (ip.v4->version == 4) {
                ip.v4->tot_len = 0;
                ip.v4->check = 0;
        } else {
                ip.v6->payload_len = 0;
        }

        if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
                                         SKB_GSO_GRE_CSUM |
                                         SKB_GSO_IPXIP4 |
                                         SKB_GSO_IPXIP6 |
                                         SKB_GSO_UDP_TUNNEL |
                                         SKB_GSO_UDP_TUNNEL_CSUM)) {
                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
                        l4.udp->len = 0;

                        /* determine offset of outer transport header */
                        l4_offset = l4.hdr - skb->data;

                        /* remove payload length from outer checksum */
                        paylen = skb->len - l4_offset;
                        csum_replace_by_diff(&l4.udp->check,
                                             (__force __wsum)htonl(paylen));
                }

                /* reset pointers to inner headers */
                ip.hdr = skb_inner_network_header(skb);
                l4.hdr = skb_inner_transport_header(skb);

                /* initialize inner IP header fields */
                if (ip.v4->version == 4) {
                        ip.v4->tot_len = 0;
                        ip.v4->check = 0;
                } else {
                        ip.v6->payload_len = 0;
                }
        }

        /* determine offset of inner transport header */
        l4_offset = l4.hdr - skb->data;

        /* remove payload length from inner checksum */
        paylen = skb->len - l4_offset;
        csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));

        /* compute length of segmentation header */
        *hdr_len = (l4.tcp->doff * 4) + l4_offset;

        /* pull values out of skb_shinfo */
        gso_size = skb_shinfo(skb)->gso_size;
        gso_segs = skb_shinfo(skb)->gso_segs;

        /* update GSO size and bytecount with header size */
        first->gso_segs = gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;

        /* find the field values */
        cd_cmd = I40E_TX_CTX_DESC_TSO;
        cd_tso_len = skb->len - *hdr_len;
        cd_mss = gso_size;
        *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
                                (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
                                (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT);
        return 1;
}
/**
 * i40e_tx_enable_csum - Enable Tx checksum offloads
 * @skb: send buffer
 * @tx_flags: pointer to Tx flags currently set
 * @td_cmd: Tx descriptor command bits to set
 * @td_offset: Tx descriptor header offsets to set
 * @tx_ring: Tx descriptor ring
 * @cd_tunneling: ptr to context desc bits
 **/
static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
                               u32 *td_cmd, u32 *td_offset,
                               struct i40e_ring *tx_ring,
                               u32 *cd_tunneling)
{
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } ip;
        union {
                struct tcphdr *tcp;
                struct udphdr *udp;
                unsigned char *hdr;
        } l4;
        unsigned char *exthdr;
        u32 offset, cmd = 0;
        __be16 frag_off;
        u8 l4_proto = 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        ip.hdr = skb_network_header(skb);
        l4.hdr = skb_transport_header(skb);

        /* compute outer L2 header size */
        offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;

        if (skb->encapsulation) {
                u32 tunnel = 0;

                /* define outer network header type */
                if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                        tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
                                  I40E_TX_CTX_EXT_IP_IPV4 :
                                  I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;

                        l4_proto = ip.v4->protocol;
                } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                        tunnel |= I40E_TX_CTX_EXT_IP_IPV6;

                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
                        if (l4.hdr != exthdr)
                                ipv6_skip_exthdr(skb, exthdr - skb->data,
                                                 &l4_proto, &frag_off);
                }

                /* define outer transport */
                switch (l4_proto) {
                case IPPROTO_UDP:
                        tunnel |= I40E_TXD_CTX_UDP_TUNNELING;
                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                case IPPROTO_GRE:
                        tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        break;
                case IPPROTO_IPIP:
                case IPPROTO_IPV6:
                        *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
                        l4.hdr = skb_inner_network_header(skb);
                        break;
                default:
                        if (*tx_flags & I40E_TX_FLAGS_TSO)
                                return -1;

                        skb_checksum_help(skb);
                        return 0;
                }

                /* compute outer L3 header size */
                tunnel |= ((l4.hdr - ip.hdr) / 4) <<
                          I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;

                /* switch IP header pointer from outer to inner header */
                ip.hdr = skb_inner_network_header(skb);

                /* compute tunnel header size */
                tunnel |= ((ip.hdr - l4.hdr) / 2) <<
                          I40E_TXD_CTX_QW0_NATLEN_SHIFT;

                /* indicate if we need to offload outer UDP header */
                if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
                    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
                    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
                        tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;

                /* record tunnel offload values */
                *cd_tunneling |= tunnel;

                /* switch L4 header pointer from outer to inner */
                l4.hdr = skb_inner_transport_header(skb);

                /* reset type as we transition from outer to inner headers */
                *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6);
                if (ip.v4->version == 4)
                        *tx_flags |= I40E_TX_FLAGS_IPV4;
                if (ip.v6->version == 6)
                        *tx_flags |= I40E_TX_FLAGS_IPV6;
        }

        /* Enable IP checksum offloads */
        if (*tx_flags & I40E_TX_FLAGS_IPV4) {
                l4_proto = ip.v4->protocol;
                /* the stack computes the IP header already, the only time we
                 * need the hardware to recompute it is in the case of TSO.
                 */
                cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
                       I40E_TX_DESC_CMD_IIPT_IPV4_CSUM :
                       I40E_TX_DESC_CMD_IIPT_IPV4;
        } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
                cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;

                exthdr = ip.hdr + sizeof(*ip.v6);
                l4_proto = ip.v6->nexthdr;
                if (l4.hdr != exthdr)
                        ipv6_skip_exthdr(skb, exthdr - skb->data,
                                         &l4_proto, &frag_off);
        }

        /* compute inner L3 header size */
        offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;

        /* Enable L4 checksum offloads */
        switch (l4_proto) {
        case IPPROTO_TCP:
                /* enable checksum offloads */
                cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
                offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case IPPROTO_SCTP:
                /* enable SCTP checksum offload */
                cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
                offset |= (sizeof(struct sctphdr) >> 2) <<
                          I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        case IPPROTO_UDP:
                /* enable UDP checksum offload */
                cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP;
                offset |= (sizeof(struct udphdr) >> 2) <<
                          I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
                break;
        default:
                if (*tx_flags & I40E_TX_FLAGS_TSO)
                        return -1;

                skb_checksum_help(skb);
                return 0;
        }

        *td_cmd |= cmd;
        *td_offset |= offset;

        return 1;
}
/**
 * i40e_create_tx_ctx - Build the Tx context descriptor
 * @tx_ring:  ring to create the descriptor on
 * @cd_type_cmd_tso_mss: Quad Word 1
 * @cd_tunneling: Quad Word 0 - bits 0-31
 * @cd_l2tag2: Quad Word 0 - bits 32-63
 **/
static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
                               const u64 cd_type_cmd_tso_mss,
                               const u32 cd_tunneling, const u32 cd_l2tag2)
{
        struct i40e_tx_context_desc *context_desc;
        int i = tx_ring->next_to_use;

        if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
            !cd_tunneling && !cd_l2tag2)
                return;

        /* grab the next descriptor */
        context_desc = I40E_TX_CTXTDESC(tx_ring, i);

        i++;
        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
        context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
        context_desc->rsvd = cpu_to_le16(0);
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
}
/**
 * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 **/
bool __i40evf_chk_linearize(struct sk_buff *skb)
{
        const struct skb_frag_struct *frag, *stale;
        int nr_frags, sum;

        /* no need to check if number of frags is less than 7 */
        nr_frags = skb_shinfo(skb)->nr_frags;
        if (nr_frags < (I40E_MAX_BUFFER_TXD - 1))
                return false;

        /* We need to walk through the list and validate that each group
         * of 6 fragments totals at least gso_size.
         */
        nr_frags -= I40E_MAX_BUFFER_TXD - 2;
        frag = &skb_shinfo(skb)->frags[0];

        /* Initialize size to the negative value of gso_size minus 1.  We
         * use this as the worst case scenario in which the frag ahead
         * of us only provides one byte which is why we are limited to 6
         * descriptors for a single transmit as the header and previous
         * fragment are already consuming 2 descriptors.
         */
        sum = 1 - skb_shinfo(skb)->gso_size;

        /* Add size of frags 0 through 4 to create our initial sum */
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);
        sum += skb_frag_size(frag++);

        /* Walk through fragments adding latest fragment, testing it, and
         * then removing stale fragments from the sum.
         */
        for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
                int stale_size = skb_frag_size(stale);

                sum += skb_frag_size(frag++);

                /* The stale fragment may present us with a smaller
                 * descriptor than the actual fragment size. To account
                 * for that we need to remove all the data on the front and
                 * figure out what the remainder would be in the last
                 * descriptor associated with the fragment.
                 */
                if (stale_size > I40E_MAX_DATA_PER_TXD) {
                        int align_pad = -(stale->page_offset) &
                                        (I40E_MAX_READ_REQ_SIZE - 1);

                        sum -= align_pad;
                        stale_size -= align_pad;

                        do {
                                sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
                                stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
                        } while (stale_size > I40E_MAX_DATA_PER_TXD);
                }

                /* if sum is negative we failed to make sufficient progress */
                if (sum < 0)
                        return true;

                if (!nr_frags--)
                        break;

                sum -= stale_size;
        }

        return false;
}
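/* Worked example for __i40evf_chk_linearize(), for illustration only: with
 * gso_size = 16000 and eight 1800-byte fragments, the first six fragments
 * cover only 10800 bytes of a segment, so the running sum goes negative,
 * the function returns true and the skb is linearized before mapping.
 */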
/**
 * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 **/
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Memory barrier before checking head and tail */
        smp_mb();

        /* Check again in a case another CPU has just made room available. */
        if (likely(I40E_DESC_UNUSED(tx_ring) < size))
                return -EBUSY;

        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
        ++tx_ring->tx_stats.restart_queue;
        return 0;
}
/**
 * i40evf_tx_map - Build the Tx descriptor
 * @tx_ring:  ring to send buffer on
 * @skb:      send buffer
 * @first:    first buffer info buffer to use
 * @tx_flags: collected send information
 * @hdr_len:  size of the packet header
 * @td_cmd:   the command field in the descriptor
 * @td_offset: offset for checksum or crc
 **/
static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                                 struct i40e_tx_buffer *first, u32 tx_flags,
                                 const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
        struct skb_frag_struct *frag;
        struct i40e_tx_buffer *tx_bi;
        struct i40e_tx_desc *tx_desc;
        u16 i = tx_ring->next_to_use;
        u32 td_tag = 0;
        dma_addr_t dma;

        if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
                td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
                td_tag = (tx_flags & I40E_TX_FLAGS_VLAN_MASK) >>
                         I40E_TX_FLAGS_VLAN_SHIFT;
        }

        first->tx_flags = tx_flags;

        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

        tx_desc = I40E_TX_DESC(tx_ring, i);
        tx_bi = first;

        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
                unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;

                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;

                /* record length, and DMA address */
                dma_unmap_len_set(tx_bi, len, size);
                dma_unmap_addr_set(tx_bi, dma, dma);

                /* align size to end of page */
                max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
                tx_desc->buffer_addr = cpu_to_le64(dma);

                while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
                        tx_desc->cmd_type_offset_bsz =
                                build_ctob(td_cmd, td_offset,
                                           max_data, td_tag);

                        tx_desc++;
                        i++;

                        if (i == tx_ring->count) {
                                tx_desc = I40E_TX_DESC(tx_ring, 0);
                                i = 0;
                        }

                        dma += max_data;
                        size -= max_data;

                        max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
                        tx_desc->buffer_addr = cpu_to_le64(dma);
                }

                if (likely(!data_len))
                        break;

                tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
                                                          size, td_tag);

                tx_desc++;
                i++;

                if (i == tx_ring->count) {
                        tx_desc = I40E_TX_DESC(tx_ring, 0);
                        i = 0;
                }

                size = skb_frag_size(frag);
                data_len -= size;

                dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
                                       DMA_TO_DEVICE);

                tx_bi = &tx_ring->tx_bi[i];
        }

        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

        i++;
        if (i == tx_ring->count)
                i = 0;

        tx_ring->next_to_use = i;

        i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);

        /* write last descriptor with RS and EOP bits */
        td_cmd |= I40E_TXD_CMD;
        tx_desc->cmd_type_offset_bsz =
                        build_ctob(td_cmd, td_offset, size, td_tag);

        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch.
         *
         * We also use this memory barrier to make certain all of the
         * status bits have been updated before next_to_watch is written.
         */
        wmb();

        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;

        /* notify HW of packet */
        if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
                writel(i, tx_ring->tail);

                /* we need this if more than one processor can write to our tail
                 * at a time, it synchronizes IO on IA64/Altix systems
                 */
                mmiowb();
        }

        return;

dma_error:
        dev_info(tx_ring->dev, "TX DMA map failed\n");

        /* clear dma mappings for failed tx_bi map */
        for (;;) {
                tx_bi = &tx_ring->tx_bi[i];
                i40e_unmap_and_free_tx_resource(tx_ring, tx_bi);
                if (tx_bi == first)
                        break;
                if (i == 0)
                        i = tx_ring->count;
                i--;
        }

        tx_ring->next_to_use = i;
}
/**
 * i40e_xmit_frame_ring - Sends buffer on Tx ring
 * @skb:     send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
                                        struct i40e_ring *tx_ring)
{
        u64 cd_type_cmd_tso_mss = I40E_TX_DESC_DTYPE_CONTEXT;
        u32 cd_tunneling = 0, cd_l2tag2 = 0;
        struct i40e_tx_buffer *first;
        u32 td_offset = 0;
        u32 tx_flags = 0;
        __be16 protocol;
        u32 td_cmd = 0;
        u8 hdr_len = 0;
        int tso, count;

        /* prefetch the data, we'll need it later */
        prefetch(skb->data);

        i40e_trace(xmit_frame_ring, skb, tx_ring);

        count = i40e_xmit_descriptor_count(skb);
        if (i40e_chk_linearize(skb, count)) {
                if (__skb_linearize(skb)) {
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
                count = i40e_txd_use_count(skb->len);
                tx_ring->tx_stats.tx_linearize++;
        }

        /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
         *       + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
         *       + 4 desc gap to avoid the cache line where head is,
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
        if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }

        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_bi[tx_ring->next_to_use];
        first->skb = skb;
        first->bytecount = skb->len;
        first->gso_segs = 1;

        /* prepare the xmit flags */
        if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
                goto out_drop;

        /* obtain protocol of skb */
        protocol = vlan_get_protocol(skb);

        /* setup IPv4/IPv6 offloads */
        if (protocol == htons(ETH_P_IP))
                tx_flags |= I40E_TX_FLAGS_IPV4;
        else if (protocol == htons(ETH_P_IPV6))
                tx_flags |= I40E_TX_FLAGS_IPV6;

        tso = i40e_tso(first, &hdr_len, &cd_type_cmd_tso_mss);

        if (tso < 0)
                goto out_drop;
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;

        /* Always offload the checksum, since it's in the data descriptor */
        tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
                                  tx_ring, &cd_tunneling);
        if (tso < 0)
                goto out_drop;

        skb_tx_timestamp(skb);

        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;

        i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
                           cd_tunneling, cd_l2tag2);

        i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
                      td_cmd, td_offset);

        return NETDEV_TX_OK;

out_drop:
        i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;
        return NETDEV_TX_OK;
}
/**
 * i40evf_xmit_frame - Selects the correct VSI and Tx queue to send buffer
 * @skb:    send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 **/
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct i40evf_adapter *adapter = netdev_priv(netdev);
        struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

        /* hardware can't handle really short frames, hardware padding works
         * beyond this point
         */
        if (unlikely(skb->len < I40E_MIN_TX_LEN)) {
                if (skb_pad(skb, I40E_MIN_TX_LEN - skb->len))
                        return NETDEV_TX_OK;
                skb->len = I40E_MIN_TX_LEN;
                skb_set_tail_pointer(skb, I40E_MIN_TX_LEN);
        }

        return i40e_xmit_frame_ring(skb, tx_ring);
}