        struct {
            struct mlx5e_wqe_frag_info *frag_info;
            u32 frag_sz; /* max possible skb frag_sz */
-           bool page_reuse;
-           bool xdp_xmit;
+           union {
+               bool page_reuse;
+               bool xdp_xmit;
+           };
        } wqe;
        struct {
            struct mlx5e_mpw_info *info;
            void *mtt_no_align;
+           u16 stride_sz;
+           u16 num_strides;
        } mpwqe;
    };
    struct {
-       u8 page_order;
        u32 wqe_sz; /* wqe data buffer size */
+       u16 headroom;
+       u8 page_order;
        u8 map_dir; /* dma map direction */
    } buff;
-   __be32 mkey_be;
    struct device *pdev;
    struct net_device *netdev;
    unsigned long state;
    int ix;
-   u16 rx_headroom;
    struct mlx5e_rx_am am; /* Adaptive Moderation */

    /* control */
    struct mlx5_wq_ctrl wq_ctrl;
+   __be32 mkey_be;
    u8 wq_type;
-   u32 mpwqe_stride_sz;
-   u32 mpwqe_num_strides;
    u32 rqn;
    struct mlx5e_channel *channel;
    struct mlx5_core_dev *mdev;
};
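The regrouping is easiest to read from the accessor side: per-RQ-type state now sits under the wqe/mpwqe members of the anonymous union, the buffer-sizing fields (including the former rx_headroom) move under buff, and page_reuse/xdp_xmit share storage through a small anonymous union. The following standalone sketch mirrors that layout with simplified stand-in types; demo_rq is illustrative only, not the driver's mlx5e_rq, and all numeric values are made up for the example.

/* Illustrative sketch only: a stripped-down stand-in for mlx5e_rq,
 * not the driver's definition. Field names mirror the patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_rq {
    union {
        struct {
            uint32_t frag_sz;
            union {             /* the two flags share storage, as in the patch */
                bool page_reuse;
                bool xdp_xmit;
            };
        } wqe;
        struct {
            uint16_t stride_sz;
            uint16_t num_strides;
        } mpwqe;
    };
    struct {
        uint32_t wqe_sz;
        uint16_t headroom;      /* was rq->rx_headroom before the patch */
        uint8_t  page_order;
    } buff;
};

int main(void)
{
    struct demo_rq rq = { 0 };

    /* new field paths, e.g. rq->mpwqe.stride_sz instead of rq->mpwqe_stride_sz */
    rq.mpwqe.stride_sz   = 1 << 6;      /* example: 64B strides */
    rq.mpwqe.num_strides = 1 << 11;     /* example: 2048 strides */
    rq.buff.wqe_sz   = (uint32_t)rq.mpwqe.stride_sz * rq.mpwqe.num_strides;
    rq.buff.headroom = 256;             /* example headroom value */

    printf("wqe_sz=%u headroom=%u\n", rq.buff.wqe_sz, (unsigned)rq.buff.headroom);
    return 0;
}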
rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
- rq->rx_headroom = params->rq_headroom;
+ rq->buff.headroom = params->rq_headroom;
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
goto err_rq_wq_destroy;
}
- rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
- rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
+ rq->mpwqe.stride_sz = BIT(params->mpwqe_log_stride_sz);
+ rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
- rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ rq->buff.wqe_sz = rq->mpwqe.stride_sz * rq->mpwqe.num_strides;
byte_count = rq->buff.wqe_sz;
err = mlx5e_create_rq_umr_mkey(mdev, rq);
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
- rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
+ rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
rq->buff.page_order = order_base_2(npages);
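For a feel of the buffer sizing above, here is a minimal userspace sketch of the same arithmetic. It only illustrates the shape of the calculation: the 64-byte alignment, the 320-byte skb_shared_info stand-in, and the headroom/byte_count values are assumptions picked for the example, and align_up/div_round_up/order_base_2 are local reimplementations rather than the kernel helpers.

/* Standalone sketch of the page-order math with example numbers. */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE 4096u
#define EX_ALIGN     64u    /* stand-in for SMP_CACHE_BYTES */
#define EX_SHINFO    320u   /* stand-in for sizeof(struct skb_shared_info) */

static uint32_t align_up(uint32_t x, uint32_t a)     { return (x + a - 1) & ~(a - 1); }
static uint32_t div_round_up(uint32_t n, uint32_t d) { return (n + d - 1) / d; }
static uint32_t order_base_2(uint32_t n)             /* smallest o with (1 << o) >= n */
{
    uint32_t o = 0;
    while ((1u << o) < n)
        o++;
    return o;
}

int main(void)
{
    uint32_t headroom   = 256;   /* example rq->buff.headroom */
    uint32_t byte_count = 1536;  /* example wqe data buffer size */

    /* rough equivalent of MLX5_SKB_FRAG_SZ(headroom + byte_count) */
    uint32_t frag_sz = align_up(headroom + byte_count, EX_ALIGN)
                     + align_up(EX_SHINFO, EX_ALIGN);
    uint32_t npages  = div_round_up(frag_sz, EX_PAGE_SIZE);
    uint32_t order   = order_base_2(npages);

    printf("frag_sz=%u npages=%u page_order=%u\n", frag_sz, npages, order);
    return 0;
}

With these example values everything fits in a single page, so the sketch prints page_order=0.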
wi->offset = 0;
}
- wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset +
- rq->rx_headroom);
+ wqe->data.addr = cpu_to_be64(wi->di.addr + wi->offset + rq->buff.headroom);
return 0;
}
static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
{
- return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+ return rq->mpwqe.num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
}
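Since both the stride count and the per-WQE page count are powers of two, the division above reduces to a shift, with MLX5_MPWRQ_WQE_PAGE_ORDER being the log2 of the pages a multi-packet WQE spans. A tiny sketch with made-up but representative power-of-two numbers (a WQE spanning 2^6 pages carrying 4096 strides; these are not claimed to be the driver defaults):

/* Illustrative sketch, not driver code. */
#include <stdio.h>

#define EX_WQE_PAGE_ORDER 6   /* stand-in for MLX5_MPWRQ_WQE_PAGE_ORDER */

static unsigned int strides_per_page(unsigned int num_strides)
{
    return num_strides >> EX_WQE_PAGE_ORDER;  /* num_strides / pages_per_wqe */
}

int main(void)
{
    printf("%u\n", strides_per_page(4096));   /* prints 64 */
    return 0;
}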
static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
u32 page_idx, u32 frag_offset,
u32 len)
{
- unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+ unsigned int truesize = ALIGN(len, rq->mpwqe.stride_sz);
dma_sync_single_for_cpu(rq->pdev,
wi->umr.dma_info[page_idx].addr + frag_offset,
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt)
{
struct mlx5e_dma_info *di = &wi->di;
+ u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb;
void *va, *data;
- u16 rx_headroom = rq->rx_headroom;
bool consumed;
u32 frag_size;
struct sk_buff *skb)
{
u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
- u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
+ u32 wqe_offset = stride_ix * rq->mpwqe.stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
u32 head_page_idx = page_idx;
napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
- if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
mlx5e_free_rx_mpwqe(rq, wi);
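As a reference for the wqe_offset/head_offset/page_idx lines in mlx5e_handle_rx_cqe_mpwrq above: because stride size and page size are both powers of two, the stride index reported in the CQE decomposes into a page index and an in-page offset with one multiply, one mask and one shift. Below is a standalone sketch with illustrative numbers (64-byte strides, 4 KB pages, stride index 100; none of these come from a real CQE).

/* Illustrative sketch of the stride-index decomposition, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1u << EX_PAGE_SHIFT)

int main(void)
{
    uint16_t stride_sz = 64;   /* example rq->mpwqe.stride_sz */
    uint16_t stride_ix = 100;  /* example stride index from the CQE */

    uint32_t wqe_offset  = (uint32_t)stride_ix * stride_sz;   /* 6400 */
    uint32_t head_offset = wqe_offset & (EX_PAGE_SIZE - 1);   /* 2304: offset inside the page */
    uint32_t page_idx    = wqe_offset >> EX_PAGE_SHIFT;       /* 1: second page of the WQE */

    printf("wqe_offset=%u head_offset=%u page_idx=%u\n",
           wqe_offset, head_offset, page_idx);
    return 0;
}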