@@ ... @@
 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
 		bool cleaned = false;
-		rmb();	/* read buffer_info after eop_desc */
+		dma_rmb();	/* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
@@ ... @@
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 		status = rx_desc->status;
@@ ... @@
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 		status = rx_desc->status;
 		length = le16_to_cpu(rx_desc->length);
@@ ... @@
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
@@ ... @@
 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
 		bool cleaned = false;
-		rmb();	/* read buffer_info after eop_desc */
+		dma_rmb();	/* read buffer_info after eop_desc */
 		for (; !cleaned; count++) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
@@ ... @@
 			break;
 		(*work_done)++;
 		skb = buffer_info->skb;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 		/* in the packet split case this is header only */
 		prefetch(skb->data - NET_IP_ALIGN);
@@ ... @@
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
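
The hunks above all make the same substitution, and they appear to cover both the e1000 and e1000e Tx/Rx cleanup paths. In every case the CPU first observes the DD ("descriptor done") bit that the NIC sets via DMA, and only then reads the rest of the descriptor and the matching buffer_info entry. Both sides of that ordering live in coherent DMA memory, so the lighter dma_rmb() barrier is sufficient; a full rmb() is only needed when the read being ordered against is MMIO. The sketch below is a minimal, hypothetical Rx-clean loop showing where the barrier sits; struct my_rx_desc, struct my_rx_buffer, and my_process_packet() are made-up names for illustration and are not part of either driver.

/*
 * Minimal sketch of the Rx descriptor read-ordering pattern -- not the
 * driver code.  All "my_*" names are hypothetical.
 */
#include <linux/types.h>
#include <asm/byteorder.h>	/* le16_to_cpu() */
#include <asm/barrier.h>	/* dma_rmb() */

#define MY_RXD_STAT_DD	0x01	/* "descriptor done", set by the NIC via DMA */

struct my_rx_desc {
	__le16 length;
	u8 status;
};

struct my_rx_buffer {
	void *data;		/* host-side bookkeeping for this descriptor */
};

void my_process_packet(void *data, u16 length);	/* hypothetical consumer */

static unsigned int my_clean_rx(struct my_rx_desc *ring,
				struct my_rx_buffer *bufs,
				unsigned int ring_size)
{
	unsigned int i;

	for (i = 0; i < ring_size; i++) {
		/* The NIC DMAs the packet data, then sets DD in the status. */
		if (!(ring[i].status & MY_RXD_STAT_DD))
			break;

		/*
		 * Keep the reads of length and bufs[i] below from being
		 * reordered before the status read above.  Both are plain
		 * coherent memory, so dma_rmb() provides the needed
		 * ordering; rmb() would also work, but it additionally
		 * orders MMIO reads and can be more expensive on some
		 * architectures.
		 */
		dma_rmb();

		my_process_packet(bufs[i].data, le16_to_cpu(ring[i].length));
		bufs[i].data = NULL;
	}

	return i;	/* number of descriptors cleaned */
}

The Tx hunks follow the same shape: the DD bit is checked on the end-of-packet descriptor, and dma_rmb() then orders the buffer_info reads used to unmap and free the completed transmit buffers.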