From 4c5fef7789571672459e18170add13ca56f33246 Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Mon, 14 May 2018 15:32:07 -0700
Subject: [PATCH] hv_netvsc: empty current transmit aggregation if flow blocked

[ Commit cfd8afd986cdb59ea9adac873c5082498a1eb7c0 upstream. ]

If the transmit queue is known full, then don't keep aggregating
data. And the cp_partial flag which indicates that the current
aggregation buffer is full can be folded in to avoid more
conditionals.

Signed-off-by: Stephen Hemminger
Signed-off-by: David S. Miller
Signed-off-by: Greg Kroah-Hartman
---
 drivers/net/hyperv/hyperv_net.h   |  2 +-
 drivers/net/hyperv/netvsc.c       | 36 ++++++++++++++++++-------------
 drivers/net/hyperv/netvsc_drv.c   |  2 +-
 drivers/net/hyperv/rndis_filter.c |  3 +--
 4 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a3f628c3c9ed..fd51a329e36e 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -192,7 +192,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 					const struct netvsc_device_info *info);
 int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx);
 void netvsc_device_remove(struct hv_device *device);
-int netvsc_send(struct net_device_context *ndc,
+int netvsc_send(struct net_device *net,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *page_buffer,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4bc8a1d529d9..22da6399b37a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -700,13 +700,13 @@ static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
 	return NETVSC_INVALID_INDEX;
 }
 
-static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
-				   unsigned int section_index,
-				   u32 pend_size,
-				   struct hv_netvsc_packet *packet,
-				   struct rndis_message *rndis_msg,
-				   struct hv_page_buffer *pb,
-				   struct sk_buff *skb)
+static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
+				    unsigned int section_index,
+				    u32 pend_size,
+				    struct hv_netvsc_packet *packet,
+				    struct rndis_message *rndis_msg,
+				    struct hv_page_buffer *pb,
+				    bool xmit_more)
 {
 	char *start = net_device->send_buf;
 	char *dest = start + (section_index * net_device->send_section_size)
@@ -719,7 +719,8 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		packet->page_buf_cnt;
 
 	/* Add padding */
-	if (skb->xmit_more && remain && !packet->cp_partial) {
+	remain = packet->total_data_buflen & (net_device->pkt_align - 1);
+	if (xmit_more && remain) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
 		packet->total_data_buflen += padding;
@@ -739,8 +740,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		memset(dest, 0, padding);
 		msg_size += padding;
 	}
-
-	return msg_size;
 }
 
 static inline int netvsc_send_pkt(
@@ -828,12 +827,13 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
 }
 
 /* RCU already held by caller */
-int netvsc_send(struct net_device_context *ndev_ctx,
+int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
 		struct sk_buff *skb)
 {
+	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
 		= rcu_dereference_bh(ndev_ctx->nvdev);
 	struct hv_device *device = ndev_ctx->device_ctx;
@@ -844,8 +844,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 	struct multi_send_data *msdp;
 	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
 	struct sk_buff *msd_skb = NULL;
-	bool try_batch;
-	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
+	bool try_batch, xmit_more;
 
 	/* If device is rescinded, return error and packet will get dropped. */
 	if (unlikely(!net_device || net_device->destroy))
@@ -896,10 +895,17 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 		}
 	}
 
+	/* Keep aggregating only if stack says more data is coming
+	 * and not doing mixed modes send and not flow blocked
+	 */
+	xmit_more = skb->xmit_more &&
+		!packet->cp_partial &&
+		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
+
 	if (section_index != NETVSC_INVALID_INDEX) {
 		netvsc_copy_to_send_buf(net_device,
 					section_index, msd_len,
-					packet, rndis_msg, pb, skb);
+					packet, rndis_msg, pb, xmit_more);
 
 		packet->send_buf_index = section_index;
 
@@ -919,7 +925,7 @@ int netvsc_send(struct net_device_context *ndev_ctx,
 			if (msdp->skb)
 				dev_consume_skb_any(msdp->skb);
 
-			if (xmit_more && !packet->cp_partial) {
+			if (xmit_more) {
 				msdp->skb = skb;
 				msdp->pkt = packet;
 				msdp->count++;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 717899993933..cc5bf9544d57 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -614,7 +614,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
-	ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb);
+	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
 	if (likely(ret == 0))
 		return NETDEV_TX_OK;
 
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index be57639bee29..0c99d9926085 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -217,7 +217,6 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	struct hv_netvsc_packet *packet;
 	struct hv_page_buffer page_buf[2];
 	struct hv_page_buffer *pb = page_buf;
-	struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
 	int ret;
 
 	/* Setup the packet to send it */
@@ -245,7 +244,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
 	}
 
 	rcu_read_lock_bh();
-	ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL);
+	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL);
 	rcu_read_unlock_bh();
 
 	return ret;
-- 
2.20.1
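Note (illustration only, not part of the patch): the heart of this change is the new
xmit_more condition computed in netvsc_send(). The standalone C sketch below models that
decision so it can be compiled and poked at outside the kernel tree; struct tx_state and
its fields stack_has_more, buf_partial and queue_stopped are invented stand-ins for
skb->xmit_more, packet->cp_partial and netif_xmit_stopped(netdev_get_tx_queue(...)),
not the real kernel API.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for the state netvsc_send() consults in the patch */
	struct tx_state {
		bool stack_has_more;   /* models skb->xmit_more          */
		bool buf_partial;      /* models packet->cp_partial      */
		bool queue_stopped;    /* models netif_xmit_stopped(txq) */
	};

	/* Mirrors the patched condition: keep aggregating only if the stack
	 * says more data is coming, this is not a mixed-mode (partial copy)
	 * send, and the transmit queue is not flow blocked.
	 */
	static bool keep_aggregating(const struct tx_state *s)
	{
		return s->stack_has_more && !s->buf_partial && !s->queue_stopped;
	}

	int main(void)
	{
		struct tx_state blocked = { true, false, true  };
		struct tx_state open    = { true, false, false };

		printf("queue blocked -> aggregate: %d\n", keep_aggregating(&blocked)); /* 0 */
		printf("queue open    -> aggregate: %d\n", keep_aggregating(&open));    /* 1 */
		return 0;
	}

With the queue blocked (or the aggregation buffer already partially consumed), the packet
is sent immediately instead of being held for batching, which is exactly what the patch's
single xmit_more flag now expresses.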