}
/* Recycle the pages that are used by buffers that have just been received. */
-static void efx_recycle_rx_buffers(struct efx_channel *channel,
-				    struct efx_rx_buffer *rx_buf,
-				    unsigned int n_frags)
+static void efx_recycle_rx_pages(struct efx_channel *channel,
+				 struct efx_rx_buffer *rx_buf,
+				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	} while (--n_frags);
}
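+/* Recycle the pages of a discarded packet, then free its RX buffers. */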
+static void efx_discard_rx_packet(struct efx_channel *channel,
+				  struct efx_rx_buffer *rx_buf,
+				  unsigned int n_frags)
+{
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+	efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
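+	/* Free each buffer, then step to the next fragment. */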
+	do {
+		efx_free_rx_buffer(rx_buf);
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+	} while (--n_frags);
+}
+
/**
 * efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue: RX descriptor queue
 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
-		put_page(rx_buf->page);
-		efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}
-	/* All fragments have been DMA-synced, so recycle buffers and pages. */
+	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
-	efx_recycle_rx_buffers(channel, rx_buf, n_frags);
+	efx_recycle_rx_pages(channel, rx_buf, n_frags);
	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.