#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+
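+/* Striding RQ ("Multi-Packet WQE") geometry: each RX WQE describes one
+ * large buffer made of MLX5_MPWRQ_NUM_STRIDES strides of
+ * MLX5_MPWRQ_STRIDE_SIZE bytes, so a single WQE can receive many packets.
+ */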
+#define MLX5_MPWRQ_LOG_NUM_STRIDES 11 /* >= 9, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_NUM_STRIDES BIT(MLX5_MPWRQ_LOG_NUM_STRIDES)
+#define MLX5_MPWRQ_STRIDE_SIZE BIT(MLX5_MPWRQ_LOG_STRIDE_SIZE)
+#define MLX5_MPWRQ_LOG_WQE_SZ (MLX5_MPWRQ_LOG_NUM_STRIDES +\
+ MLX5_MPWRQ_LOG_STRIDE_SIZE)
+#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
+ MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) /* bytes copied into the skb linear part */
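+/* Example with 4KB pages (PAGE_SHIFT == 12, an assumption): a WQE buffer is
+ * 2^11 strides * 2^6 bytes = 128KB, i.e. an order-5 allocation split into
+ * 32 pages of 64 strides each.
+ */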
+
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_NUM_MAIN_GROUPS 9
#define MLX5E_NET_IP_ALIGN 2
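+/* The RQ size limits and the min_rx_wqes default depend on the WQ type: a
+ * striding RQ carries many packets per WQE and therefore needs far fewer
+ * WQEs than a plain linked-list RQ.
+ */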
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+ wq_size / 2);
+ default:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+ wq_size / 2);
+ }
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ }
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ }
+}
+
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
"tx_queue_wake",
"tx_queue_dropped",
"rx_wqe_err",
+ "rx_mpwqe_filler",
};
struct mlx5e_vport_stats {
u64 tx_queue_wake;
u64 tx_queue_dropped;
u64 rx_wqe_err;
+ u64 rx_mpwqe_filler;
-#define NUM_VPORT_COUNTERS 35
+#define NUM_VPORT_COUNTERS 36
};
static const char pport_strings[][ETH_GSTRING_LEN] = {
"csum_sw",
"lro_packets",
"lro_bytes",
- "wqe_err"
+ "wqe_err",
+ "mpwqe_filler",
};
struct mlx5e_rq_stats {
u64 lro_packets;
u64 lro_bytes;
u64 wqe_err;
-#define NUM_RQ_STATS 7
+ u64 mpwqe_filler;
+#define NUM_RQ_STATS 8
};
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
struct mlx5e_params {
u8 log_sq_size;
+ u8 rq_wq_type;
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
u16 ix);
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
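+/* Per-WQE bookkeeping for striding RQ: the DMA mapping of the WQE buffer,
+ * the number of strides the HW has consumed so far, and how many fragments
+ * of each page were handed to skbs, so that the remaining page references
+ * can be dropped once the whole WQE is consumed.
+ */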
+struct mlx5e_mpw_info {
+ struct mlx5e_dma_info dma_info;
+ u16 consumed_strides;
+ u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
u32 wqe_sz;
struct sk_buff **skb;
+ struct mlx5e_mpw_info *wqe_info;
struct device *pdev;
struct net_device *netdev;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
+ u8 wq_type;
u32 rqn;
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->params.log_rq_size;
param->tx_pending = 1 << priv->params.log_sq_size;
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
+ int rq_wq_type = priv->params.rq_wq_type;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
return -EINVAL;
}
- if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
log_rq_size = order_base_2(param->rx_pending);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = min_t(u16, param->rx_pending - 1,
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
s->rx_csum_none = 0;
s->rx_csum_sw = 0;
s->rx_wqe_err = 0;
+ s->rx_mpwqe_filler = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_sw += rq_stats->csum_sw;
s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 byte_count;
int wq_sz;
int err;
int i;
rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
- err = -ENOMEM;
- goto err_rq_wq_destroy;
- }
- rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
- MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe_info) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+ rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+
+ rq->wqe_sz = MLX5_MPWRQ_NUM_STRIDES * MLX5_MPWRQ_STRIDE_SIZE;
+ byte_count = rq->wqe_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+ rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+
+ rq->wqe_sz = (priv->params.lro_en) ?
+ priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+ byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
+ byte_count |= MLX5_HW_START_PADDING;
+ }
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
- u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
wqe->data.lkey = c->mkey_be;
- wqe->data.byte_count =
- cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ wqe->data.byte_count = cpu_to_be32(byte_count);
}
- rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
- rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->wq_type = priv->params.rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = &priv->tstamp;
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
- kfree(rq->skb);
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ kfree(rq->wqe_info);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ kfree(rq->skb);
+ }
+
mlx5_wq_destroy(&rq->wq_ctrl);
}
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
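+ /* Stride count and stride size are programmed as log offsets from the
+ * HW minimums (2^9 strides of 2^6 bytes).
+ */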
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ MLX5_MPWRQ_LOG_NUM_STRIDES - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ MLX5_MPWRQ_LOG_STRIDE_SIZE - 6);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ }
+
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
+ u8 log_cq_size;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
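+ /* A striding RQ can complete up to one packet per stride, so size the
+ * CQ for one CQE per stride rather than one per WQE.
+ */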
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ log_cq_size = priv->params.log_rq_size +
+ MLX5_MPWRQ_LOG_NUM_STRIDES;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ log_cq_size = priv->params.log_rq_size;
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
mlx5e_build_common_cq_param(priv, param);
}
if (changes & NETIF_F_LRO) {
bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
+ if (was_opened && (priv->params.rq_wq_type ==
+ MLX5_WQ_TYPE_LINKED_LIST))
mlx5e_close_locked(priv->netdev);
priv->params.lro_en = !!(features & NETIF_F_LRO);
mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
err);
- if (was_opened)
+ if (was_opened && (priv->params.rq_wq_type ==
+ MLX5_WQ_TYPE_LINKED_LIST))
err = mlx5e_open_locked(priv->netdev);
}
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.log_rq_size =
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
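+ /* Use a striding RQ when the device reports the striding_rq capability;
+ * LRO is enabled by default in that mode.
+ */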
+ priv->params.rq_wq_type = MLX5_CAP_GEN(mdev, striding_rq) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.lro_en = true;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
priv->params.rx_cq_moderation_usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
priv->params.rx_cq_moderation_pkts =
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- priv->params.min_rx_wqes =
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
return -ENOMEM;
}
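+/* Post the buffer for one striding-RQ WQE: allocate a high-order page, map
+ * it for DMA in one go, split it into order-0 pages and pre-charge each
+ * page's refcount with the number of strides it backs.
+ */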
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ gfp_t gfp_mask;
+ int i;
+
+ gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+ wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+ MLX5_MPWRQ_WQE_PAGE_ORDER);
+ if (unlikely(!wi->dma_info.page))
+ return -ENOMEM;
+
+ wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+ rq->wqe_sz, PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+ put_page(wi->dma_info.page);
+ return -ENOMEM;
+ }
+
+ /* We split the high-order page into order-0 ones and manage their
+ * reference counters to minimize the memory held by small skb fragments
+ */
+ split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ atomic_add(MLX5_MPWRQ_STRIDES_PER_PAGE,
+ &wi->dma_info.page[i]._count);
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+ return 0;
+}
+
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
return !mlx5_wq_ll_is_full(wq);
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
- u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+ u16 tot_len = cqe_bcnt - ETH_HLEN;
if (eth->h_proto == htons(ETH_P_IP)) {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
}
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
- skb_put(skb, cqe_bcnt);
-
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe);
+ mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
}
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ rq->stats.packets++;
+ rq->stats.bytes += cqe_bcnt;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+}
+
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5e_rx_wqe *wqe;
struct sk_buff *skb;
__be16 wqe_counter_be;
u16 wqe_counter;
+ u32 cqe_bcnt;
wqe_counter_be = cqe->wqe_counter;
wqe_counter = be16_to_cpu(wqe_counter_be);
goto wq_ll_pop;
}
- mlx5e_build_rx_skb(cqe, rq, skb);
- rq->stats.packets++;
- rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
- napi_gro_receive(rq->cq.napi, skb);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ skb_put(skb, cqe_bcnt);
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
wq_ll_pop:
mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
&wqe->next.next_wqe_index);
}
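+/* Striding-RQ completion: copy up to MLX5_MPWRQ_SMALL_PACKET_THRESHOLD
+ * bytes of headers into the skb linear part, attach the remainder as page
+ * fragments, and free the WQE resources once the HW has consumed all of
+ * its strides.
+ */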
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ struct sk_buff *skb;
+ u32 consumed_bytes;
+ u32 head_offset;
+ u32 frag_offset;
+ u32 wqe_offset;
+ u32 page_idx;
+ u16 byte_cnt;
+ u16 cqe_bcnt;
+ u16 headlen;
+ int i;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ rq->stats.mpwqe_filler++;
+ goto mpwrq_cqe_out;
+ }
+
+ skb = netdev_alloc_skb(rq->netdev,
+ ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+ sizeof(long)));
+ if (unlikely(!skb))
+ goto mpwrq_cqe_out;
+
+ prefetch(skb->data);
+ wqe_offset = stride_ix * MLX5_MPWRQ_STRIDE_SIZE;
+ consumed_bytes = cstrides * MLX5_MPWRQ_STRIDE_SIZE;
+ dma_sync_single_for_cpu(rq->pdev, wi->dma_info.addr + wqe_offset,
+ consumed_bytes, DMA_FROM_DEVICE);
+
+ head_offset = wqe_offset & (PAGE_SIZE - 1);
+ page_idx = wqe_offset >> PAGE_SHIFT;
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+ headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ frag_offset = head_offset + headlen;
+
+ byte_cnt = cqe_bcnt - headlen;
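+ /* attach everything beyond the copied headers as page fragments;
+ * truesize is rounded up to the strides actually occupied
+ */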
+ while (byte_cnt) {
+ u32 pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+ unsigned int truesize =
+ ALIGN(pg_consumed_bytes, MLX5_MPWRQ_STRIDE_SIZE);
+
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ &wi->dma_info.page[page_idx], frag_offset,
+ pg_consumed_bytes, truesize);
+ byte_cnt -= pg_consumed_bytes;
+ frag_offset = 0;
+ page_idx++;
+ }
+
+ skb_copy_to_linear_data(skb,
+ page_address(wi->dma_info.page) + wqe_offset,
+ ALIGN(headlen, sizeof(long)));
+ /* the linear part has room for headlen; the copy above is long-aligned */
+ skb->tail += headlen;
+ skb->len += headlen;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < MLX5_MPWRQ_NUM_STRIDES))
+ return;
+
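+ /* all strides are consumed: unmap the WQE buffer and drop the
+ * pre-charged page references that were not taken over by skb frags
+ */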
+ dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+ PCI_DMA_FROMDEVICE);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ atomic_sub(MLX5_MPWRQ_STRIDES_PER_PAGE - wi->skbs_frags[i],
+ &wi->dma_info.page[i]._count);
+ put_page(&wi->dma_info.page[i]);
+ }
+ mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
+
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
};
struct mlx5_cqe64 {
- u8 rsvd0[4];
+ u8 rsvd0[2];
+ __be16 wqe_id;
u8 lro_tcppsh_abort_dupack;
u8 lro_min_ttl;
__be16 lro_tcp_win;
return (u64)lo | ((u64)hi << 32);
}
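+/* For striding RQ (multi-packet WQE) completions, the CQE byte_cnt word is
+ * reinterpreted: bit 15 of the first half flags a filler CQE, bits 0-14
+ * carry the number of consumed strides, and the second half carries the
+ * packet byte count. The stride index is reported in wqe_counter.
+ */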
+struct mpwrq_cqe_bc {
+ __be16 filler_consumed_strides;
+ __be16 byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+ return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+ struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+ return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->wqe_counter);
+}
+
enum {
CQE_L4_HDR_TYPE_NONE = 0x0,
CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,