/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
{
	return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}

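/* CQE compression: the device may deliver a "title" CQE followed by arrays
 * of mini CQEs carrying only byte counts and checksums. The helpers below
 * expand each mini CQE back into a full CQE by patching the cached title
 * (byte count, checksum, owner bit, wqe counter) before it is handed to the
 * regular completion handler.
 */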
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
				       void *data)
{
	u32 ci = cqcc & cq->wq.sz_m1;

	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
}

static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
					 struct mlx5e_cq *cq, u32 cqcc)
{
	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
	cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
	rq->stats.cqe_compress_blks++;
}

static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
{
	mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
	cq->mini_arr_idx = 0;
}

static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
{
	u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
	u32 wq_sz = 1 << cq->wq.log_sz;
	u32 ci = cqcc & cq->wq.sz_m1;
	u32 ci_top = min_t(u32, wq_sz, ci + n);

	for (; ci < ci_top; ci++, n--) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);

		cqe->op_own = op_own;
	}

	if (unlikely(ci == wq_sz)) {
		op_own = !op_own;
		for (ci = 0; ci < n; ci++) {
			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);

			cqe->op_own = op_own;
		}
	}
}

static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
					struct mlx5e_cq *cq, u32 cqcc)
{
	u16 wqe_cnt_step;

	cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
	cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
	cq->title.op_own      &= 0xf0;
	cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
	cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);

	wqe_cnt_step =
		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
	cq->decmprs_wqe_counter =
		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
}

static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
						struct mlx5e_cq *cq, u32 cqcc)
{
	mlx5e_decompress_cqe(rq, cq, cqcc);
	cq->title.rss_hash_type   = 0;
	cq->title.rss_hash_result = 0;
}

static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
					     struct mlx5e_cq *cq,
					     int update_owner_only,
					     int budget_rem)
{
	u32 cqcc = cq->wq.cc + update_owner_only;
	u32 cqe_count;
	u32 i;

	cqe_count = min_t(u32, cq->decmprs_left, budget_rem);

	for (i = update_owner_only; i < cqe_count;
	     i++, cq->mini_arr_idx++, cqcc++) {
		if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
			mlx5e_read_mini_arr_slot(cq, cqcc);

		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
		rq->handle_rx_cqe(rq, &cq->title);
	}
	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
	cq->wq.cc = cqcc;
	cq->decmprs_left -= cqe_count;
	rq->stats.cqe_compress_pkts += cqe_count;

	return cqe_count;
}

static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
					      struct mlx5e_cq *cq,
					      int budget_rem)
{
	mlx5e_read_title_slot(rq, cq, cq->wq.cc);
	mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
	mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
	rq->handle_rx_cqe(rq, &cq->title);
	cq->mini_arr_idx++;

	return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}

void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
{
	bool was_opened;

	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
		return;

	mutex_lock(&priv->state_lock);

	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
		goto unlock;

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	if (was_opened)
		mlx5e_close_locked(priv->netdev);

	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);

	if (was_opened)
		mlx5e_open_locked(priv->netdev);

unlock:
	mutex_unlock(&priv->state_lock);
}

#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)

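/* RX page cache: a small ring of recently released pages (MLX5E_CACHE_SIZE
 * slots, a power of two) indexed by head/tail. Pages whose refcount has
 * dropped back to one can be reused without going through the page
 * allocator or remapping for DMA.
 */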
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
				      struct mlx5e_dma_info *dma_info)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;
	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);

	if (tail_next == cache->head) {
		rq->stats.cache_full++;
		return false;
	}

	cache->page_cache[cache->tail] = *dma_info;
	cache->tail = tail_next;
	return true;
}

static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
				      struct mlx5e_dma_info *dma_info)
{
	struct mlx5e_page_cache *cache = &rq->page_cache;

	if (unlikely(cache->head == cache->tail)) {
		rq->stats.cache_empty++;
		return false;
	}

	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
		rq->stats.cache_busy++;
		return false;
	}

	*dma_info = cache->page_cache[cache->head];
	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
	rq->stats.cache_reuse++;

	dma_sync_single_for_device(rq->pdev, dma_info->addr,
				   RQ_PAGE_SIZE(rq),
				   DMA_FROM_DEVICE);
	return true;
}

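/* Buffer allocation tries the page cache first and falls back to
 * dev_alloc_pages() + dma_map_page(). mlx5e_page_release() is the inverse:
 * recycle into the cache when possible, otherwise unmap and drop the page
 * reference.
 */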
static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
					  struct mlx5e_dma_info *dma_info)
{
	struct page *page;

	if (mlx5e_rx_cache_get(rq, dma_info))
		return 0;

	page = dev_alloc_pages(rq->buff.page_order);
	if (unlikely(!page))
		return -ENOMEM;

	dma_info->page = page;
	dma_info->addr = dma_map_page(rq->pdev, page, 0,
				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
		put_page(page);
		return -ENOMEM;
	}

	return 0;
}

void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
			bool recycle)
{
	if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
		return;

	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
		       rq->buff.map_dir);
	put_page(dma_info->page);
}

int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
	struct mlx5e_dma_info *di = &rq->dma_info[ix];

	if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
		return -ENOMEM;

	wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
	return 0;
}

void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_dma_info *di = &rq->dma_info[ix];

	mlx5e_page_release(rq, di, true);
}

static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
	return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
}

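/* Striding RQ (MPWQE) SKB construction: the packet headers are copied into
 * the skb linear part, while the remaining payload is attached as page
 * fragments that point into the UMR-mapped pages of the multi-packet WQE.
 */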
static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
					    struct sk_buff *skb,
					    struct mlx5e_mpw_info *wi,
					    u32 page_idx, u32 frag_offset,
					    u32 len)
{
	unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);

	dma_sync_single_for_cpu(rq->pdev,
				wi->umr.dma_info[page_idx].addr + frag_offset,
				len, DMA_FROM_DEVICE);
	wi->skbs_frags[page_idx]++;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			wi->umr.dma_info[page_idx].page, frag_offset,
			len, truesize);
}

static inline void
mlx5e_copy_skb_header_mpwqe(struct device *pdev,
			    struct sk_buff *skb,
			    struct mlx5e_mpw_info *wi,
			    u32 page_idx, u32 offset,
			    u32 headlen)
{
	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
	unsigned int len;

	/* Aligning len to sizeof(long) optimizes memcpy performance */
	len = ALIGN(headlen_pg, sizeof(long));
	dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
				DMA_FROM_DEVICE);
	skb_copy_to_linear_data_offset(skb, 0,
				       page_address(dma_info->page) + offset,
				       len);
	if (unlikely(offset + headlen > PAGE_SIZE)) {
		dma_info++;
		headlen_pg = len;
		len = ALIGN(headlen - headlen_pg, sizeof(long));
		dma_sync_single_for_cpu(pdev, dma_info->addr, len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data_offset(skb, headlen_pg,
					       page_address(dma_info->page),
					       len);
	}
}

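/* The UMR WQE that maps the MPWQE pages is posted on the channel's internal
 * control (ICO) SQ. NOPs pad the queue up to the SQ edge so the UMR WQE
 * never wraps in the middle of a WQE.
 */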
static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	struct mlx5e_sq *sq = &rq->channel->icosq;
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_umr_wqe *wqe;
	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
	u16 pi;

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
		sq->db.ico_wqe[pi].num_wqebbs = 1;
		mlx5e_send_nop(sq, false);
	}

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
	wqe->ctrl.opmod_idx_opcode =
		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
			    MLX5_OPCODE_UMR);

	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
	sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
	sq->pc += num_wqebbs;
	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}

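/* Allocate and DMA-map all pages of one multi-packet WQE and fill the MTT
 * array consumed by the UMR; on failure, release whatever pages were mapped
 * so far.
 */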
static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
				    struct mlx5e_rx_wqe *wqe,
				    u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
	int err;
	int i;

	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];

		err = mlx5e_page_alloc_mapped(rq, dma_info);
		if (unlikely(err))
			goto err_unmap;
		wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
		page_ref_add(dma_info->page, pg_strides);
		wi->skbs_frags[i] = 0;
	}

	wi->consumed_strides = 0;
	wqe->data.addr = cpu_to_be64(dma_offset);

	return 0;

err_unmap:
	while (--i >= 0) {
		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];

		page_ref_sub(dma_info->page, pg_strides);
		mlx5e_page_release(rq, dma_info, true);
	}

	return err;
}

void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
{
	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
	int i;

	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];

		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
		mlx5e_page_release(rq, dma_info, true);
	}
}

void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
		return;
	}

	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);
}

int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
	int err;

	err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
	if (unlikely(err))
		return err;
	set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
	mlx5e_post_umr_wqe(rq, ix);

	/* the WQE is posted only when the UMR completes */
	return -EBUSY;
}

void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];

	mlx5e_free_rx_mpwqe(rq, wi);
}

#define RQ_CANNOT_POST(rq) \
	(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))

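/* Refill the RQ: keep allocating and posting receive WQEs until the work
 * queue is full or an allocation fails, then ring the doorbell once.
 */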
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;

	if (unlikely(RQ_CANNOT_POST(rq)))
		return false;

	while (!mlx5_wq_ll_is_full(wq)) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
		int err;

		err = rq->alloc_wqe(rq, wqe, wq->head);
		if (err == -EBUSY)
			return true;
		if (unlikely(err)) {
			rq->stats.buff_alloc_err++;
			break;
		}

		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
	}

	/* ensure wqes are visible to device before updating doorbell record */
	dma_wmb();

	mlx5_wq_ll_update_db_record(wq);

	return !mlx5_wq_ll_is_full(wq);
}

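/* For LRO-aggregated completions the hardware leaves the headers of the
 * first segment in place; mlx5e_lro_update_hdr() rewrites the TCP/IP headers
 * (lengths, TTL/hop limit, ACK/window, IPv4 checksum) so the merged packet
 * looks consistent to the stack.
 */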
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
				 u32 cqe_bcnt)
{
	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
	struct iphdr	*ipv4;
	struct ipv6hdr	*ipv6;
	struct tcphdr	*tcp;
	int network_depth = 0;
	__be16 proto;
	u16 tot_len;

	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

	skb->mac_len = ETH_HLEN;
	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

	ipv4 = (struct iphdr *)(skb->data + network_depth);
	ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
	tot_len = cqe_bcnt - network_depth;

	if (proto == htons(ETH_P_IP)) {
		tcp = (struct tcphdr *)(skb->data + network_depth +
					sizeof(struct iphdr));
		ipv6 = NULL;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
	} else {
		tcp = (struct tcphdr *)(skb->data + network_depth +
					sizeof(struct ipv6hdr));
		ipv4 = NULL;
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	}

	if (get_cqe_lro_tcppsh(cqe))
		tcp->psh = 1;

	if (tcp_ack) {
		tcp->ack     = 1;
		tcp->ack_seq = cqe->lro_ack_seq_num;
		tcp->window  = cqe->lro_tcp_win;
	}

	if (ipv4) {
		ipv4->ttl     = cqe->lro_min_ttl;
		ipv4->tot_len = cpu_to_be16(tot_len);
		ipv4->check   = 0;
		ipv4->check   = ip_fast_csum((unsigned char *)ipv4,
					     ipv4->ihl);
	} else {
		ipv6->hop_limit   = cqe->lro_min_ttl;
		ipv6->payload_len = cpu_to_be16(tot_len -
						sizeof(struct ipv6hdr));
	}
}

static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
				      struct sk_buff *skb)
{
	u8 cht = cqe->rss_hash_type;
	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
					    PKT_HASH_TYPE_NONE;
	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

static inline bool is_first_ethertype_ip(struct sk_buff *skb)
{
	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

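/* Checksum policy: for plain IPv4/IPv6 frames report CHECKSUM_COMPLETE using
 * the checksum carried in the CQE; otherwise fall back to
 * CHECKSUM_UNNECESSARY when the device validated L3/L4, and CHECKSUM_NONE
 * when it did not.
 */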
static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
				     struct sk_buff *skb,
				     bool lro)
{
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		goto csum_none;

	if (lro) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	if (is_first_ethertype_ip(skb)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
		rq->stats.csum_complete++;
		return;
	}

	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
		   (cqe->hds_ip_ext & CQE_L4_OK))) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		if (cqe_is_tunneled(cqe)) {
			skb->csum_level = 1;
			skb->encapsulation = 1;
			rq->stats.csum_unnecessary_inner++;
		}
		return;
	}
csum_none:
	skb->ip_summed = CHECKSUM_NONE;
	rq->stats.csum_none++;
}

static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
				      u32 cqe_bcnt,
				      struct mlx5e_rq *rq,
				      struct sk_buff *skb)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5e_tstamp *tstamp = rq->tstamp;
	int lro_num_seg;

	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
	if (lro_num_seg > 1) {
		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
		rq->stats.lro_packets++;
		rq->stats.lro_bytes += cqe_bcnt;
	}

	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
		mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));

	skb_record_rx_queue(skb, rq->ix);

	if (likely(netdev->features & NETIF_F_RXHASH))
		mlx5e_skb_set_hash(cqe, skb);

	if (cqe_has_vlan(cqe))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       be16_to_cpu(cqe->vlan_info));

	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;

	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
	skb->protocol = eth_type_trans(skb, netdev);
}

static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
					 struct mlx5_cqe64 *cqe,
					 u32 cqe_bcnt,
					 struct sk_buff *skb)
{
	rq->stats.packets++;
	rq->stats.bytes += cqe_bcnt;
	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
}

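/* XDP_TX path: frames are bounced back out on the channel's dedicated XDP
 * SQ. Doorbells are batched: mlx5e_xmit_xdp_frame() only marks the SQ as
 * pending and the doorbell is rung once per NAPI poll (or earlier when the
 * SQ fills up).
 */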
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *wqe;
	u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */

	wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}

static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
					struct mlx5e_dma_info *di,
					unsigned int data_offset,
					int len)
{
	struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
	struct mlx5_wq_cyc       *wq   = &sq->wq;
	u16                       pi   = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_sq_wqe_info *wi   = &sq->db.xdp.wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
	unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
	void *data           = page_address(di->page) + data_offset;

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
		if (sq->db.xdp.doorbell) {
			/* SQ is full, ring doorbell */
			mlx5e_xmit_xdp_doorbell(sq);
			sq->db.xdp.doorbell = false;
		}
		rq->stats.xdp_tx_full++;
		mlx5e_page_release(rq, di, true);
		return;
	}

	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
				   PCI_DMA_TODEVICE);

	memset(wqe, 0, sizeof(*wqe));

	/* copy the inline part */
	memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
	eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);

	dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);

	/* write the dma part */
	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->byte_count = cpu_to_be32(dma_len);
	dseg->lkey       = sq->mkey_be;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);

	sq->db.xdp.di[pi] = *di;
	wi->opcode     = MLX5_OPCODE_SEND;
	wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
	sq->pc += MLX5E_XDP_TX_WQEBBS;

	sq->db.xdp.doorbell = true;
	rq->stats.xdp_tx++;
}

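/* Run the attached XDP program on the raw page data and apply its verdict:
 * XDP_PASS continues to SKB creation, XDP_TX recycles the page into the XDP
 * SQ, anything else drops the packet and releases the page.
 */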
/* returns true if packet was consumed by xdp */
static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
				    const struct bpf_prog *prog,
				    struct mlx5e_dma_info *di,
				    void *data, u16 len)
{
	struct xdp_buff xdp;
	u32 act;

	if (!prog)
		return false;

	xdp.data = data;
	xdp.data_end = xdp.data + len;
	act = bpf_prog_run_xdp(prog, &xdp);
	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
		return true;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:
	case XDP_DROP:
		rq->stats.xdp_drop++;
		mlx5e_page_release(rq, di, true);
		return true;
	}
}

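/* Build an skb directly on top of the receive page with build_skb(). The
 * extra page_ref_inc()/mlx5e_page_release() pair below queues the page for
 * recycling through the page cache while the skb still holds its own
 * reference.
 */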
static inline
struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
			     u16 wqe_counter, u32 cqe_bcnt)
{
	struct mlx5e_dma_info *di;
	struct sk_buff *skb;
	void *va, *data;
	bool consumed;

	di   = &rq->dma_info[wqe_counter];
	va   = page_address(di->page);
	data = va + MLX5_RX_HEADROOM;

	dma_sync_single_range_for_cpu(rq->pdev,
				      di->addr,
				      MLX5_RX_HEADROOM,
				      rq->buff.wqe_sz,
				      DMA_FROM_DEVICE);
	prefetch(data);

	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
		rq->stats.wqe_err++;
		mlx5e_page_release(rq, di, true);
		return NULL;
	}

	rcu_read_lock();
	consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
				    cqe_bcnt);
	rcu_read_unlock();
	if (consumed)
		return NULL; /* page/packet was consumed by XDP */

	skb = build_skb(va, RQ_PAGE_SIZE(rq));
	if (unlikely(!skb)) {
		rq->stats.buff_alloc_err++;
		mlx5e_page_release(rq, di, true);
		return NULL;
	}

	/* queue up for recycling ..*/
	page_ref_inc(di->page);
	mlx5e_page_release(rq, di, true);

	skb_reserve(skb, MLX5_RX_HEADROOM);
	skb_put(skb, cqe_bcnt);

	return skb;
}

void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_counter_be;
	struct sk_buff *skb;
	u16 wqe_counter;
	u32 cqe_bcnt;

	wqe_counter_be = cqe->wqe_counter;
	wqe_counter    = be16_to_cpu(wqe_counter_be);
	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);

	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
	if (!skb)
		goto wq_ll_pop;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	napi_gro_receive(rq->cq.napi, skb);

wq_ll_pop:
	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		       &wqe->next.next_wqe_index);
}

void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	struct net_device *netdev = rq->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	struct mlx5e_rx_wqe *wqe;
	struct sk_buff *skb;
	__be16 wqe_counter_be;
	u16 wqe_counter;
	u32 cqe_bcnt;

	wqe_counter_be = cqe->wqe_counter;
	wqe_counter    = be16_to_cpu(wqe_counter_be);
	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);

	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
	if (!skb)
		goto wq_ll_pop;

	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);

	if (rep->vlan && skb_vlan_tag_present(skb))
		skb_vlan_pop(skb);

	napi_gro_receive(rq->cq.napi, skb);

wq_ll_pop:
	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
		       &wqe->next.next_wqe_index);
}

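/* Completion handling for the striding RQ: each CQE consumes a number of
 * strides within a multi-packet WQE; the WQE is only freed and popped once
 * all of its strides have been consumed.
 */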
static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
					   struct mlx5_cqe64 *cqe,
					   struct mlx5e_mpw_info *wi,
					   u32 cqe_bcnt,
					   struct sk_buff *skb)
{
	u16 stride_ix     = mpwrq_get_cqe_stride_index(cqe);
	u32 wqe_offset    = stride_ix * rq->mpwqe_stride_sz;
	u32 head_offset   = wqe_offset & (PAGE_SIZE - 1);
	u32 page_idx      = wqe_offset >> PAGE_SHIFT;
	u32 head_page_idx = page_idx;
	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
	u32 frag_offset   = head_offset + headlen;
	u16 byte_cnt      = cqe_bcnt - headlen;

	if (unlikely(frag_offset >= PAGE_SIZE)) {
		page_idx++;
		frag_offset -= PAGE_SIZE;
	}

	while (byte_cnt) {
		u32 pg_consumed_bytes =
			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);

		mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
					 pg_consumed_bytes);
		byte_cnt -= pg_consumed_bytes;
		frag_offset = 0;
		page_idx++;
	}
	/* copy header */
	mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
				    head_offset, headlen);
	/* skb linear part was allocated with headlen and aligned to long */
	skb->tail += headlen;
	skb->len  += headlen;
}

void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
	u16 wqe_id   = be16_to_cpu(cqe->wqe_id);
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
	struct mlx5e_rx_wqe *wqe  = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
	struct sk_buff *skb;
	u16 cqe_bcnt;

	wi->consumed_strides += cstrides;

	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
		rq->stats.wqe_err++;
		goto mpwrq_cqe_out;
	}

	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
		rq->stats.mpwqe_filler++;
		goto mpwrq_cqe_out;
	}

	skb = napi_alloc_skb(rq->cq.napi,
			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
				   sizeof(long)));
	if (unlikely(!skb)) {
		rq->stats.buff_alloc_err++;
		goto mpwrq_cqe_out;
	}

	prefetch(skb->data);
	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);

	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
	napi_gro_receive(rq->cq.napi, skb);

mpwrq_cqe_out:
	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
		return;

	mlx5e_free_rx_mpwqe(rq, wi);
	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}

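/* NAPI poll for the RX CQ: drain up to 'budget' CQEs, expanding compressed
 * CQE blocks on the fly, then flush any pending XDP TX doorbell and update
 * the CQ doorbell record.
 */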
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
	struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
	int work_done = 0;

	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
		return 0;

	if (cq->decmprs_left)
		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);

	for (; work_done < budget; work_done++) {
		struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);

		if (!cqe)
			break;

		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
			work_done +=
				mlx5e_decompress_cqes_start(rq, cq,
							    budget - work_done);
			continue;
		}

		mlx5_cqwq_pop(&cq->wq);

		rq->handle_rx_cqe(rq, cqe);
	}

	if (xdp_sq->db.xdp.doorbell) {
		mlx5e_xmit_xdp_doorbell(xdp_sq);
		xdp_sq->db.xdp.doorbell = false;
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	return work_done;
}