u16 sw_rx_cons;
u16 sw_rx_prod;
- u16 num_rx_buffers; /* Slowpath */
+ u16 filled_buffers;
u8 data_direction;
u8 rxq_id;
struct qed_chain rx_bd_ring;
struct qed_chain rx_comp_ring ____cacheline_aligned;
+ /* Used once per NAPI run */
+ u16 num_rx_buffers;
+
/* GRO */
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
struct net_device *dev,
netdev_features_t features);
void qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp);
-int qede_alloc_rx_buffer(struct qede_rx_queue *rxq);
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
* Content also used by slowpath *
*********************************/
-int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
struct sw_rx_data *sw_rx_data;
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
struct page *data;
+ /* In case lazy-allocation is allowed, postpone allocation until the
+ * end of the NAPI run. We'd still need to make sure the Rx ring has
+ * sufficient buffers to guarantee an additional Rx interrupt.
+ */
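+ /* Consuming a buffer lazily only drops the fill level; the eager
+ * refill loop at the end of the NAPI poll makes up the difference.
+ */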
+ if (allow_lazy && likely(rxq->filled_buffers > 12)) {
+ rxq->filled_buffers--;
+ return 0;
+ }
+
data = alloc_pages(GFP_ATOMIC, 0);
if (unlikely(!data))
return -ENOMEM;
rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
rxq->sw_rx_prod++;
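+ /* Keep the fill level in step with the newly posted buffer */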
+ rxq->filled_buffers++;
return 0;
}
curr_cons->page_offset += rxq->rx_buf_seg_size;
if (curr_cons->page_offset == PAGE_SIZE) {
- if (unlikely(qede_alloc_rx_buffer(rxq))) {
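+ /* The page is exhausted; post a replacement. Lazy mode is safe
+ * here since this runs per-packet inside the NAPI poll.
+ */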
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
/* Since we failed to allocate a new buffer, the
 * current buffer can be used again.
 */
switch (act) {
case XDP_TX:
/* We need the replacement buffer before transmit. */
- if (qede_alloc_rx_buffer(rxq)) {
+ if (qede_alloc_rx_buffer(rxq, true)) {
qede_recycle_rx_bd_ring(rxq, 1);
return false;
}
}
/* We need a replacement buffer for each BD */
- if (unlikely(qede_alloc_rx_buffer(rxq)))
+ if (unlikely(qede_alloc_rx_buffer(rxq, true)))
goto out;
/* Now that we've allocated the replacement buffer,
work_done++;
}
+ /* Allocate replacement buffers */
+ while (rxq->filled_buffers < rxq->num_rx_buffers)
+ if (qede_alloc_rx_buffer(rxq, false))
+ break;
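+
+ /* A failed allocation here is benign: the refill loop runs again
+ * on the next NAPI poll.
+ */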
+
/* Update producers */
qede_update_rx_prod(edev, rxq);
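
Taken together, the hunks above form one deferred-refill pattern: per-packet
callers pass allow_lazy=true and usually pay only a counter decrement, while
the end of the NAPI poll tops the ring back up eagerly with allow_lazy=false.
Below is a minimal, self-contained sketch of that pattern; the types and names
(rx_ring, post_new_buffer, refill_ring, REFILL_WATERMARK) are simplified
stand-ins for illustration, not the qede driver's actual API.

#include <stdbool.h>

/* Keep at least this many posted buffers so the NIC can still receive
 * (and raise one more interrupt) before the refill runs. 12 mirrors
 * the hard-coded watermark in the patch above.
 */
#define REFILL_WATERMARK 12

struct rx_ring {
	unsigned int num_buffers;	/* ring capacity */
	unsigned int filled_buffers;	/* buffers currently backed by pages */
};

/* Stand-in for the real page allocation + DMA mapping + BD posting. */
static bool post_new_buffer(struct rx_ring *ring)
{
	/* ...alloc_pages(), dma_map_page(), write the BD... */
	ring->filled_buffers++;
	return true;
}

static bool alloc_rx_buffer(struct rx_ring *ring, bool allow_lazy)
{
	/* Fast path: defer the allocation to the end of the poll, as
	 * long as enough filled buffers remain above the watermark.
	 */
	if (allow_lazy && ring->filled_buffers > REFILL_WATERMARK) {
		ring->filled_buffers--;
		return true;
	}
	return post_new_buffer(ring);
}

/* End of a NAPI poll: allocate eagerly until the ring is full again. */
static void refill_ring(struct rx_ring *ring)
{
	while (ring->filled_buffers < ring->num_buffers)
		if (!alloc_rx_buffer(ring, false))
			break;	/* out of memory: retry on the next poll */
}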