@@ ... @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 			   struct igb_ring *ring)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	union e1000_adv_rx_desc *rx_desc;
 	u64 rdba = ring->dma;
 	int reg_idx = ring->reg_idx;
 	u32 srrctl = 0, rxdctl = 0;
@@ ... @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
 	rxdctl |= IGB_RX_HTHRESH << 8;
 	rxdctl |= IGB_RX_WTHRESH << 16;
 
+	/* initialize Rx descriptor 0 */
+	rx_desc = IGB_RX_DESC(ring, 0);
+	rx_desc->wb.upper.length = 0;
+
 	/* enable receive descriptor fetching */
 	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
 	wr32(E1000_RXDCTL(reg_idx), rxdctl);
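
This configure-time store is what arms the new completion test: once the DD status bit is no longer consulted, a still-zero wb.upper.length is the only thing telling the cleanup loop that descriptor 0 has not been written back yet. A minimal user-space sketch of the idea, using a simplified stand-in for union e1000_adv_rx_desc (the real little-endian layout lives in e1000_82575.h):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* simplified stand-in for union e1000_adv_rx_desc (illustration only) */
union adv_rx_desc {
	struct {
		uint64_t pkt_addr;		/* filled in by the driver */
		uint64_t hdr_addr;		/* not filled by the refill path */
	} read;
	struct {
		struct {
			uint32_t lo_dword;
			uint32_t rss;
		} lower;
		struct {
			uint32_t status_error;	/* DD bit lives in here */
			uint16_t length;	/* 0 until hardware write-back */
			uint16_t vlan;
		} upper;
	} wb;
};

/* the point of the patch: a non-zero write-back length is enough to
 * know the descriptor is done, so status_error never has to be read
 */
static bool desc_done(const union adv_rx_desc *desc)
{
	return desc->wb.upper.length != 0;
}

int main(void)
{
	union adv_rx_desc desc = { .wb.upper.length = 0 };

	assert(!desc_done(&desc));	/* armed: not yet written back */
	desc.wb.upper.length = 60;	/* hardware completes a packet */
	assert(desc_done(&desc));
	return 0;
}

Going from zero to a non-zero length on write-back is the whole signal, which saves one read per polled descriptor.
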
@@ ... @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
 	memset(rx_ring->rx_buffer_info, 0, size);
 
-	/* Zero out the descriptor ring */
-	memset(rx_ring->desc, 0, rx_ring->size);
-
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
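
Dropping the full-ring memset leans on the other hunks: descriptor 0 is re-armed when the ring is configured, and the allocation path below keeps a zero-length sentinel at next_to_use from then on, so a ring reset no longer has to write the whole descriptor array. An illustrative statement of that invariant, reusing the simplified types from the first sketch (hypothetical helper, not driver code):

#include <assert.h>
#include <stdint.h>

/* the invariant that replaces the memset: the slot the cleanup loop
 * will inspect next must read as "not written back"; nothing requires
 * the remaining descriptors to be zeroed wholesale
 */
static void check_ring_armed(const union adv_rx_desc *ring,
			     uint16_t next_to_clean)
{
	assert(ring[next_to_clean].wb.upper.length == 0);
}
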
@@ ... @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
-		if (!rx_desc->wb.upper.status_error)
+		if (!rx_desc->wb.upper.length)
 			break;
 		/* This memory barrier is needed to keep us from reading
 		 * any other fields out of the rx_desc until we know the
 		 * descriptor has been written back
 		 */
 		dma_rmb();
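
On the cleanup side the swap is a single field, but the ordering rule is unchanged: no other descriptor field may be read until the length check has confirmed write-back, which is what dma_rmb() enforces in the driver. A consumer-side sketch under the same simplified types, with hypothetical names and a C11 acquire fence standing in for dma_rmb():

#include <stdatomic.h>
#include <stdint.h>

/* reuses union adv_rx_desc from the first sketch */
static unsigned int poll_ring(union adv_rx_desc *ring, uint16_t count,
			      uint16_t *next_to_clean, unsigned int budget)
{
	unsigned int cleaned = 0;

	while (cleaned < budget) {
		union adv_rx_desc *desc = &ring[*next_to_clean];

		/* a zero length means the hardware has not written this
		 * slot back yet; stop here
		 */
		if (!desc->wb.upper.length)
			break;

		/* stand-in for dma_rmb(): forbid reading any other
		 * descriptor field before the length check above
		 */
		atomic_thread_fence(memory_order_acquire);

		/* ... process desc->wb and hand the packet up ... */

		if (++(*next_to_clean) == count)
			*next_to_clean = 0;
		cleaned++;
	}
	return cleaned;
}
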
@@ ... @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
 
-		/* clear the status bits for the next_to_use descriptor */
-		rx_desc->wb.upper.status_error = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
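
The allocation path is what keeps the scheme sound. After a slot's read format is refreshed (only pkt_addr is rewritten, so the overlaid write-back bytes keep stale values), the length of the following slot is zeroed; next_to_use therefore always carries the zero-length sentinel the cleanup loop stops at. A producer-side sketch, same simplified types and hypothetical names:

#include <stdint.h>

static void refill_ring(union adv_rx_desc *ring, uint16_t count,
			uint16_t *next_to_use, uint16_t cleaned_count,
			uint64_t (*map_new_buffer)(void))
{
	uint16_t i = *next_to_use;

	if (!cleaned_count)	/* nothing to do */
		return;

	do {
		/* give the slot a fresh DMA-mapped buffer; the union
		 * overlays read and write-back formats, and only
		 * pkt_addr is rewritten here
		 */
		ring[i].read.pkt_addr = map_new_buffer();

		if (++i == count)
			i = 0;

		/* re-arm the sentinel: the new next_to_use slot must
		 * read as "not done" until hardware writes it back
		 */
		ring[i].wb.upper.length = 0;
	} while (--cleaned_count);

	*next_to_use = i;
}
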