"rx_wqe_err",
"rx_mpwqe_filler",
"rx_mpwqe_frag",
+ "rx_buff_alloc_err",
};
struct mlx5e_vport_stats {
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_mpwqe_frag;
+ u64 rx_buff_alloc_err;
-#define NUM_VPORT_COUNTERS 37
+#define NUM_VPORT_COUNTERS 38
};
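
The string table and the struct must grow in lockstep, since the driver sizes its ethtool string set from NUM_VPORT_COUNTERS. A minimal sketch of how an ethtool .get_strings callback typically consumes such a table (illustrative only; example_get_strings is a hypothetical name, not the driver's function):

static void example_get_strings(struct mlx5e_priv *priv, u8 *data)
{
	int i;

	/* Copy one fixed-width name per counter; ETH_GSTRING_LEN-sized
	 * slots are the standard ethtool string layout.
	 */
	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
		strcpy(data + i * ETH_GSTRING_LEN, vport_strings[i]);
}
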
static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
"wqe_err",
"mpwqe_filler",
"mpwqe_frag",
+ "buff_alloc_err",
};
struct mlx5e_rq_stats {
u64 wqe_err;
u64 mpwqe_filler;
u64 mpwqe_frag;
-#define NUM_RQ_STATS 9
+ u64 buff_alloc_err;
+#define NUM_RQ_STATS 10
};
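
Field order inside mlx5e_rq_stats matters as much as the count: the stats structs are commonly read back as flat u64 arrays, so each new field has to sit at the same index as its name in the string table. A condensed sketch of that read-out pattern (assumed shape, not verbatim driver code):

/* Per-ring dump in .get_ethtool_stats: walk the struct as u64[] so that
 * slot j lines up with rq_stats_strings[j].
 */
for (i = 0; i < priv->params.num_channels; i++)
	for (j = 0; j < NUM_RQ_STATS; j++)
		data[idx++] = ((u64 *)&priv->channel[i]->rq.stats)[j];
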
static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
s->rx_wqe_err = 0;
s->rx_mpwqe_filler = 0;
s->rx_mpwqe_frag = 0;
+ s->rx_buff_alloc_err = 0;
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->rx_wqe_err += rq_stats->wqe_err;
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
while (!mlx5_wq_ll_is_full(wq)) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+ int err;
- if (unlikely(rq->alloc_wqe(rq, wqe, wq->head)))
+ err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (unlikely(err)) {
+ if (err != -EBUSY)
+ rq->stats.buff_alloc_err++;
break;
+ }
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
}
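
The err != -EBUSY filter is deliberate: in the fragmented MPWQE path, alloc_wqe can return -EBUSY to pause posting while a UMR WQE completes, which is flow control rather than a buffer allocation failure. A simplified sketch of that dispatch under these assumptions (example_* names are hypothetical):

static int example_alloc_rx_mpwqe(struct mlx5e_rq *rq,
				  struct mlx5e_rx_wqe *wqe, u16 ix)
{
	/* UMR still in flight: stop posting for now, do not count it. */
	if (example_umr_in_progress(rq))
		return -EBUSY;

	/* Real failures (e.g. -ENOMEM) propagate and hit buff_alloc_err. */
	return example_map_and_post(rq, wqe, ix);
}
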
skb = napi_alloc_skb(rq->cq.napi,
ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
sizeof(long)));
- if (unlikely(!skb))
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
goto mpwrq_cqe_out;
+ }
prefetch(skb->data);
cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
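
With both failure points instrumented, the new counter surfaces through the regular ethtool statistics, e.g. (interface name and output shape illustrative):

$ ethtool -S eth2 | grep buff_alloc_err
     rx_buff_alloc_err: 0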