vmbus: refactor hv_signal_on_read
author Stephen Hemminger <stephen@networkplumber.org>
Sun, 25 Jun 2017 19:30:26 +0000 (12:30 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 17 Jul 2017 15:16:05 +0000 (17:16 +0200)
The function hv_signal_on_read was defined in hyperv.h and
only used in one place in ring_buffer code. Clearer to just
move it inline there.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/hv/ring_buffer.c
include/linux/hyperv.h

index f2998176465359aa17b5d011eba11cda7952e56d..a9021f13379f05ba8f48162711f0fcd2533610c7 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/uio.h>
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 
 #include "hyperv_vmbus.h"
 
@@ -357,7 +358,7 @@ struct vmpacket_descriptor *hv_pkt_iter_first(struct vmbus_channel *channel)
 {
        struct hv_ring_buffer_info *rbi = &channel->inbound;
 
-       /* set state for later hv_signal_on_read() */
+       /* set state for later hv_pkt_iter_close */
        rbi->cached_read_index = rbi->ring_buffer->read_index;
 
        if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
@@ -400,6 +401,8 @@ EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
 void hv_pkt_iter_close(struct vmbus_channel *channel)
 {
        struct hv_ring_buffer_info *rbi = &channel->inbound;
+       u32 cur_write_sz, cached_write_sz;
+       u32 pending_sz;
 
        /*
         * Make sure all reads are done before we update the read index since
@@ -409,6 +412,31 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
        virt_rmb();
        rbi->ring_buffer->read_index = rbi->priv_read_index;
 
-       hv_signal_on_read(channel);
+       /*
+        * Issue a full memory barrier before making the signaling decision.
+        * Here is the reason for having this barrier:
+        * If the reading of the pending_sz (in this function)
+        * were to be reordered and read before we commit the new read
+        * index (above, in this function) we could
+        * have a problem. If the host were to set the pending_sz after we
+        * have sampled pending_sz and go to sleep before we commit the
+        * read index, we could miss sending the interrupt. Issue a full
+        * memory barrier to address this.
+        */
+       virt_mb();
+
+       pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
+       /* If the other end is not blocked on write don't bother. */
+       if (pending_sz == 0)
+               return;
+
+       cur_write_sz = hv_get_bytes_to_write(rbi);
+
+       if (cur_write_sz < pending_sz)
+               return;
+
+       cached_write_sz = hv_get_cached_bytes_to_write(rbi);
+       if (cached_write_sz < pending_sz)
+               vmbus_setevent(channel);
 }
 EXPORT_SYMBOL_GPL(hv_pkt_iter_close);
index 5e5f966bf37f3102fb57e3ad8bf0fca0b01cb3d3..308e1f9706bbfd660403f6d52b8e76ab28713d6b 100644 (file)
@@ -1471,55 +1471,6 @@ hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
        return ring_info->ring_buffer->buffer;
 }
 
-/*
- * To optimize the flow management on the send-side,
- * when the sender is blocked because of lack of
- * sufficient space in the ring buffer, potential the
- * consumer of the ring buffer can signal the producer.
- * This is controlled by the following parameters:
- *
- * 1. pending_send_sz: This is the size in bytes that the
- *    producer is trying to send.
- * 2. The feature bit feat_pending_send_sz set to indicate if
- *    the consumer of the ring will signal when the ring
- *    state transitions from being full to a state where
- *    there is room for the producer to send the pending packet.
- */
-
-static inline  void hv_signal_on_read(struct vmbus_channel *channel)
-{
-       u32 cur_write_sz, cached_write_sz;
-       u32 pending_sz;
-       struct hv_ring_buffer_info *rbi = &channel->inbound;
-
-       /*
-        * Issue a full memory barrier before making the signaling decision.
-        * Here is the reason for having this barrier:
-        * If the reading of the pend_sz (in this function)
-        * were to be reordered and read before we commit the new read
-        * index (in the calling function)  we could
-        * have a problem. If the host were to set the pending_sz after we
-        * have sampled pending_sz and go to sleep before we commit the
-        * read index, we could miss sending the interrupt. Issue a full
-        * memory barrier to address this.
-        */
-       virt_mb();
-
-       pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
-       /* If the other end is not blocked on write don't bother. */
-       if (pending_sz == 0)
-               return;
-
-       cur_write_sz = hv_get_bytes_to_write(rbi);
-
-       if (cur_write_sz < pending_sz)
-               return;
-
-       cached_write_sz = hv_get_cached_bytes_to_write(rbi);
-       if (cached_write_sz < pending_sz)
-               vmbus_setevent(channel);
-}
-
 /*
  * Mask off host interrupt callback notifications
  */