 		(u16)vlan_tag,
 		0 /* loopback */);
-	wmb();
-
 	vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
 }
 		(u64)dma_addr | VNIC_PADDR_TARGET,
 		type, (u16)len);
-	wmb();
-
 	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
 }
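
The two hunks above do not drop the barrier, they relocate it: as the hunks below show, wmb() moves into vnic_rq_post()/vnic_wq_post(), immediately before the iowrite32() that bumps posted_index. Ordering is then enforced at the single point where hardware is told about new descriptors, instead of in every caller. A minimal sketch of the pattern, with a made-up descriptor layout and helper name (the driver's real descriptors are built by the *_desc_enc() encoders):

#include <linux/io.h>

/* Hypothetical descriptor layout, for illustration only. */
struct hw_desc {
	__le64 addr;
	__le16 len;
};

static void post_one_desc(struct hw_desc *desc, dma_addr_t dma, u16 len,
			  u32 index, void __iomem *posted_index)
{
	/* Fill the descriptor in coherent DMA memory. */
	desc->addr = cpu_to_le64(dma);
	desc->len = cpu_to_le16(len);

	/* Order the descriptor stores before the doorbell; without
	 * this the device could fetch a half-written descriptor.
	 */
	wmb();

	/* MMIO doorbell: hardware may fetch the descriptor from here on. */
	iowrite32(index, posted_index);
}
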
 #define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
 #endif
-	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0)
+	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
+		/* Adding a write memory barrier prevents the compiler
+		 * and/or CPU from reordering, so the descriptor cannot
+		 * be posted before it is fully initialized. Otherwise,
+		 * hardware can read stale descriptor fields.
+		 */
+		wmb();
 		iowrite32(buf->index, &rq->ctrl->posted_index);
+	}
 }
 
 static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
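
One detail worth noting in the RQ hunk: because VNIC_RQ_RETURN_RATE is 0xf, the (buf->index & VNIC_RQ_RETURN_RATE) == 0 test holds only for every 16th buffer index, so both the MMIO doorbell and the new wmb() are amortized over a batch of descriptors. The "keep 2^n - 1" comment is what makes the & test a valid modulo; roughly (illustrative, not driver code):

/* A 2^n - 1 mask turns & into a cheap modulo, so the doorbell
 * fires at indices 0, 16, 32, ...
 */
static bool ring_doorbell_now(u32 index)
{
	/* Compile-time check that the mask really is 2^n - 1. */
	BUILD_BUG_ON((VNIC_RQ_RETURN_RATE & (VNIC_RQ_RETURN_RATE + 1)) != 0);
	return (index & VNIC_RQ_RETURN_RATE) == 0;	/* index % 16 == 0 */
}
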
 	buf->len = len;
 
 	buf = buf->next;
-	if (eop)
+	if (eop) {
+		/* Adding a write memory barrier prevents the compiler
+		 * and/or CPU from reordering, so the descriptor cannot
+		 * be posted before it is fully initialized. Otherwise,
+		 * hardware can read stale descriptor fields.
+		 */
+		wmb();
 		iowrite32(buf->index, &wq->ctrl->posted_index);
+	}
 	wq->to_use = buf;
 
 	wq->ring.desc_avail--;
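
For readability, here is the WQ post path with the patch applied, reconstructed from the hunks above (buffer-bookkeeping lines not shown in the diff are abbreviated):

static inline void vnic_wq_post(struct vnic_wq *wq,
	void *os_buf, dma_addr_t dma_addr,
	unsigned int len, int sop, int eop)
{
	struct vnic_wq_buf *buf = wq->to_use;

	/* ... record os_buf, dma_addr, sop in the sw buffer ring ... */
	buf->len = len;

	buf = buf->next;
	if (eop) {
		/* Barrier between the descriptor stores and the
		 * doorbell, as added by this patch.
		 */
		wmb();
		iowrite32(buf->index, &wq->ctrl->posted_index);
	}
	wq->to_use = buf;

	wq->ring.desc_avail--;
}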