* @dma_addr: DMA base address of the buffer
* @page: The associated page buffer.
* Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page
+ * @page_offset: If pending: offset in @page of DMA base address.
+ * If completed: offset in @page of Ethernet header.
* @len: If pending: length for DMA descriptor.
* If completed: received length, excluding hash prefix.
* @flags: Flags for buffer and packet state.
*/
#define EFX_RXD_HEAD_ROOM 2
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
- struct efx_rx_buffer *buf)
-{
- return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
return page_address(buf->page) + buf->page_offset;
}

if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
- skb_fill_page_desc(skb, 0, page,
- efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
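+ /* rx_buf->page_offset now points directly at the Ethernet header
+ * (the hash prefix is skipped at completion time), so it can be
+ * used as the fragment offset without a helper.
+ */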
+ skb_fill_page_desc(skb, 0, page, rx_buf->page_offset, rx_buf->len);
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
if (rx_buf->len > hdr_len) {
skb->data_len = skb->len - hdr_len;
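+ /* Attach the payload beyond the copied header as a page fragment;
+ * page_offset is relative to the Ethernet header here.
+ */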
skb_fill_page_desc(skb, 0, rx_buf->page,
- efx_rx_buf_offset(efx, rx_buf) + hdr_len,
+ rx_buf->page_offset + hdr_len,
skb->data_len);
} else {
__free_pages(rx_buf->page, efx->rx_buffer_order);
/* Prefetch nice and early so data will (hopefully) be in cache by
 * the time we look at it.
 */
prefetch(efx_rx_buf_va(rx_buf));
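+ /* Skip over the (possibly zero-length) hash prefix once, at
+ * completion time, so that page_offset and len describe the
+ * Ethernet frame itself from here on.
+ */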
+ rx_buf->page_offset += efx->type->rx_buffer_hash_size;
+ rx_buf->len = len - efx->type->rx_buffer_hash_size;
+
/* Pipeline receives so that we give time for packet headers to be
* prefetched into cache.
*/
- rx_buf->len = len - efx->type->rx_buffer_hash_size;
out:
efx_rx_flush_packet(channel);
channel->rx_pkt = rx_buf;
void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
{
struct efx_nic *efx = channel->efx;
- u8 *eh = efx_rx_buf_va(rx_buf) + efx->type->rx_buffer_hash_size;
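+ /* page_offset was already advanced past the hash prefix when the
+ * completion was handled, so the buffer VA is the Ethernet header.
+ */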
+ u8 *eh = efx_rx_buf_va(rx_buf);
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here