        u32 msg_size = 0;
        u32 padding = 0;
        u32 remain = packet->total_data_buflen % net_device->pkt_align;
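+        /* A partial copy (cp_partial) moves only the RNDIS header pages
+         * into the send buffer; the data stays in the guest pages and is
+         * still sent via the VMBus page buffer, so only rmsg_pgcnt pages
+         * are copied below.
+         */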
+        u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt :
+                packet->page_buf_cnt;

        /* Add padding */
-        if (packet->is_data_pkt && packet->xmit_more && remain) {
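+        /* Skip padding for a partial copy: such a packet is transmitted
+         * immediately and nothing is batched behind it, so send-buffer
+         * alignment is not needed.
+         */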
+        if (packet->is_data_pkt && packet->xmit_more && remain &&
+            !packet->cp_partial) {
                padding = net_device->pkt_align - remain;
                packet->rndis_msg->msg_len += padding;
                packet->total_data_buflen += padding;
        }

-        for (i = 0; i < packet->page_buf_cnt; i++) {
+        for (i = 0; i < page_count; i++) {
                char *src = phys_to_virt(packet->page_buf[i].pfn << PAGE_SHIFT);
                u32 offset = packet->page_buf[i].offset;
                u32 len = packet->page_buf[i].len;
[...]

        struct net_device *ndev = net_device->ndev;
        u64 req_id;
        int ret;
+        struct hv_page_buffer *pgbuf;

        nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
        if (packet->is_data_pkt) {
[...]
                return -ENODEV;

        if (packet->page_buf_cnt) {
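+                /* For a partial copy the RNDIS header is already in the
+                 * send buffer, so skip its entries in the page buffer and
+                 * send only the data pages.
+                 */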
+                pgbuf = packet->cp_partial ? packet->page_buf +
+                        packet->rmsg_pgcnt : packet->page_buf;
                ret = vmbus_sendpacket_pagebuffer(out_channel,
-                                                 packet->page_buf,
+                                                 pgbuf,
                                                  packet->page_buf_cnt,
                                                  &nvmsg,
                                                  sizeof(struct nvsp_message),
[...]

        unsigned long flag;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+        bool try_batch;

        net_device = get_outbound_net_device(device);
        if (!net_device)
[...]
        }
        packet->channel = out_channel;
        packet->send_buf_index = NETVSC_INVALID_INDEX;
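+        /* Assume a full copy into the send buffer; the checks below may
+         * switch this to a partial (RNDIS header only) copy.
+         */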
+        packet->cp_partial = false;

        msdp = &net_device->msd[q_idx];
[...]
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;
-        if (packet->is_data_pkt && msd_len > 0 &&
-            msdp->count < net_device->max_pkt &&
-            msd_len + pktlen + net_device->pkt_align <
+        try_batch = packet->is_data_pkt && msd_len > 0 && msdp->count <
+                    net_device->max_pkt;
+
+        if (try_batch && msd_len + pktlen + net_device->pkt_align <
            net_device->send_section_size) {
                section_index = msdp->pkt->send_buf_index;

+        } else if (try_batch && msd_len + packet->rmsg_size <
+                   net_device->send_section_size) {
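+                /* The whole packet does not fit next to the pending data,
+                 * but its RNDIS header alone does: copy only the header
+                 * and send the data directly from the guest pages.
+                 */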
+                section_index = msdp->pkt->send_buf_index;
+                packet->cp_partial = true;
+
        } else if (packet->is_data_pkt && pktlen + net_device->pkt_align <
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
[...]
                netvsc_copy_to_send_buf(net_device,
                                        section_index, msd_len,
                                        packet);
-                if (!packet->part_of_skb) {
-                        skb = (struct sk_buff *)
-                                (unsigned long)
-                                        packet->send_completion_tid;
-
-                        packet->send_completion_tid = 0;
-                }
-                packet->page_buf_cnt = 0;
                packet->send_buf_index = section_index;
-                packet->total_data_buflen += msd_len;
+
+                if (packet->cp_partial) {
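+                        /* Only the RNDIS header was copied into the send
+                         * buffer: drop its page-buffer entries and keep
+                         * the ones describing the data pages.
+                         */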
+                        packet->page_buf_cnt -= packet->rmsg_pgcnt;
+                        packet->total_data_buflen = msd_len + packet->rmsg_size;
+                } else {
+                        packet->page_buf_cnt = 0;
+                        packet->total_data_buflen += msd_len;
+                        if (!packet->part_of_skb) {
+                                skb = (struct sk_buff *)(unsigned long)packet->
+                                                send_completion_tid;
+                                packet->send_completion_tid = 0;
+                        }
+                }
                if (msdp->pkt)
                        netvsc_xmit_completion(msdp->pkt);

-                if (packet->xmit_more) {
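+                /* A partially copied packet cannot be held back for
+                 * further batching; transmit it right away.
+                 */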
+                if (packet->xmit_more && !packet->cp_partial) {
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
[...]
        }
[...]

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
-                           struct hv_page_buffer *pb)
+                           struct hv_netvsc_packet *packet)
{
+        struct hv_page_buffer *pb = packet->page_buf;
        u32 slots_used = 0;
        char *data = skb->data;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;

        /* The packet is laid out thus:
-         * 1. hdr
+         * 1. hdr: RNDIS header and PPI
         * 2. skb linear data
         * 3. skb fragment data
         */
        slots_used += fill_pg_buf(virt_to_page(hdr),
                                offset_in_page(hdr),
                                len, &pb[slots_used]);

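+        /* Record how many bytes and page-buffer slots the RNDIS header
+         * uses, so netvsc_send() can copy just the header into the send
+         * buffer when doing a partial copy.
+         */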
+        packet->rmsg_size = len;
+        packet->rmsg_pgcnt = slots_used;
+
        slots_used += fill_pg_buf(virt_to_page(data),
                                offset_in_page(data),
                                skb_headlen(skb), &pb[slots_used]);
[...]

        rndis_msg->msg_len += rndis_msg_size;
        packet->total_data_buflen = rndis_msg->msg_len;
        packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
-                                               skb, &page_buf[0]);
+                                               skb, packet);

        ret = netvsc_send(net_device_ctx->device_ctx, packet);