Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
author Michael S. Tsirkin <mst@redhat.com>
Sun, 20 Dec 2015 11:52:10 +0000 (13:52 +0200)
committer Michael S. Tsirkin <mst@redhat.com>
Tue, 12 Jan 2016 18:47:00 +0000 (20:47 +0200)
This reverts commit 9e1a27ea42691429e31f158cce6fc61bc79bb2e9.

While that commit optimizes !CONFIG_SMP, it mixes
up DMA and SMP concepts, making the code harder
to understand.

A better way to optimize this is with the new __smp_XXX
barriers.

As a first step, go back to full rmb/wmb barriers
for !SMP.
We switch to __smp_XXX barriers in the next patch.
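
For context, a weak barrier here orders accesses against the
host (effectively another CPU running the hypervisor), not a
device doing incoherent DMA, which is why smp_*() rather than
dma_*() is the natural primitive. Below is a minimal sketch of
the publish pattern these barriers order; the structure and
names are simplified stand-ins for the real vring code, not
the actual implementation:

	#define RING_SIZE 256	/* assumed power-of-two ring size */

	struct avail_ring {
		unsigned short idx;		/* written by driver, read by host */
		unsigned short ring[RING_SIZE];	/* descriptor heads */
	};

	static void publish_buf(struct avail_ring *avail,
				bool weak_barriers,
				unsigned short head)
	{
		avail->ring[avail->idx % RING_SIZE] = head;
		/* The entry must be visible before the index that publishes it. */
		virtio_wmb(weak_barriers);
		avail->idx++;
	}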

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Alexander Duyck <alexander.duyck@gmail.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
include/linux/virtio_ring.h

index 8e50888a6d595af06221ee43dfc2d0e4aa91c18e..67e06fe18c03b6814e217f05134cdb68a5df13bc 100644
  * actually quite cheap.
  */
 
+#ifdef CONFIG_SMP
 static inline void virtio_mb(bool weak_barriers)
 {
-#ifdef CONFIG_SMP
        if (weak_barriers)
                smp_mb();
        else
-#endif
                mb();
 }
 
 static inline void virtio_rmb(bool weak_barriers)
 {
        if (weak_barriers)
-               dma_rmb();
+               smp_rmb();
        else
                rmb();
 }
@@ -42,10 +41,26 @@ static inline void virtio_rmb(bool weak_barriers)
 static inline void virtio_wmb(bool weak_barriers)
 {
        if (weak_barriers)
-               dma_wmb();
+               smp_wmb();
        else
                wmb();
 }
+#else
+static inline void virtio_mb(bool weak_barriers)
+{
+       mb();
+}
+
+static inline void virtio_rmb(bool weak_barriers)
+{
+       rmb();
+}
+
+static inline void virtio_wmb(bool weak_barriers)
+{
+       wmb();
+}
+#endif
 
 struct virtio_device;
 struct virtqueue;
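
The next patch can then drop the #ifdef entirely by using
barriers that are SMP-strength unconditionally. A rough sketch
of that shape, assuming __smp_XXX() variants that always emit
the SMP barrier even on !CONFIG_SMP builds:

	static inline void virtio_wmb(bool weak_barriers)
	{
		if (weak_barriers)
			__smp_wmb();	/* orders against the host even on !SMP */
		else
			wmb();
	}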