Merge tag 'v3.10.105' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers/usb/gadget/u_ether.c
index 4b76124ce96b8dfaeab5ab0f0a89bf4f5abf15a6..589ea4edde19c2322db66ec6e90e0b3d661467c5 100644 (file)
  */
 
 /* #define VERBOSE_DEBUG */
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-
 #include "u_ether.h"
 
-
 /*
  * This component encapsulates the Ethernet link glue needed to provide
  * one (!) network link through the USB gadget stack, normally "usb0".
 
 #define UETH__VERSION  "29-May-2008"
 
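+/* Two dedicated work threads: uether_wq runs process_rx_w() to hand
+ * received frames to the network stack, and uether_wq1 runs
+ * process_rx_w1() to refill the USB rx queue, so slow frame delivery
+ * does not stall re-submission of rx requests.
+ */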
+static struct workqueue_struct *uether_wq;
+static struct workqueue_struct *uether_wq1;
+
 struct eth_dev {
        /* lock is held while accessing port_usb
         */
@@ -57,20 +63,29 @@ struct eth_dev {
        struct net_device       *net;
        struct usb_gadget       *gadget;
 
-       spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
+       spinlock_t              req_lock;       /* guard tx_reqs */
+       spinlock_t              reqrx_lock;     /* guard rx_reqs */
        struct list_head        tx_reqs, rx_reqs;
-       atomic_t                tx_qlen;
+       unsigned                tx_qlen;
+/* Minimum number of TX USB requests kept queued to the UDC */
+#define TX_REQ_THRESHOLD       5
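+       /* no_tx_req_used: tx requests currently queued to the UDC;
+        * tx_skb_hold_count: packets aggregated into the pending request;
+        * tx_req_bufsize: size of each request's aggregation buffer
+        */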
+       int                     no_tx_req_used;
+       int                     tx_skb_hold_count;
+       u32                     tx_req_bufsize;
 
        struct sk_buff_head     rx_frames;
 
        unsigned                header_len;
+       unsigned int            ul_max_pkts_per_xfer;
+       unsigned int            dl_max_pkts_per_xfer;
        struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
        int                     (*unwrap)(struct gether *,
                                                struct sk_buff *skb,
                                                struct sk_buff_head *list);
 
        struct work_struct      work;
-
+       struct work_struct      rx_work;
+       struct work_struct      rx_work1;
        unsigned long           todo;
 #define        WORK_RX_MEMORY          0
 
@@ -84,10 +99,14 @@ struct eth_dev {
 
 #define DEFAULT_QLEN   2       /* double buffering by default */
 
-static unsigned qmult = 5;
+static unsigned qmult = 10;
 module_param(qmult, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
 
+static unsigned tx_wakeup_threshold = 13;
+module_param(tx_wakeup_threshold, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(tx_wakeup_threshold,
+               "number of in-flight tx requests below which the tx queue is woken");
+
 /* for dual-speed hardware, use deeper queues at high/super speed */
 static inline int qlen(struct usb_gadget *gadget)
 {
@@ -98,6 +117,16 @@ static inline int qlen(struct usb_gadget *gadget)
                return DEFAULT_QLEN;
 }
 
+static inline int qlenrx(struct usb_gadget *gadget)
+{
+       if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+                                           gadget->speed == USB_SPEED_SUPER))
+               return qmult * DEFAULT_QLEN;
+       else
+               return DEFAULT_QLEN;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /* REVISIT there must be a better way than having two sets
@@ -135,6 +164,16 @@ static inline int qlen(struct usb_gadget *gadget)
 
 /*-------------------------------------------------------------------------*/
 
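+/* debug counters for the RNDIS rx/tx paths, reset in gether_disconnect() */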
+unsigned long rndis_test_rx_usb_in = 0;
+unsigned long rndis_test_rx_net_out = 0;
+unsigned long rndis_test_rx_nomem = 0;
+unsigned long rndis_test_rx_error = 0;
+
+unsigned long rndis_test_tx_net_in = 0;
+unsigned long rndis_test_tx_busy = 0;
+unsigned long rndis_test_tx_usb_out = 0;
+unsigned long rndis_test_tx_complete = 0;
+
 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
 
 static int ueth_change_mtu(struct net_device *net, int new_mtu)
@@ -153,6 +192,8 @@ static int ueth_change_mtu(struct net_device *net, int new_mtu)
                net->mtu = new_mtu;
        spin_unlock_irqrestore(&dev->lock, flags);
 
+       pr_debug("[XLOG_INFO][UTHER]ueth_change_mtu to %d, status is %d\n",
+                       new_mtu, status);
+
        return status;
 }
 
@@ -188,6 +229,7 @@ static void defer_kevent(struct eth_dev *dev, int flag)
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+static void tx_complete(struct usb_ep *ep, struct usb_request *req);
 
 static int
 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
@@ -223,15 +265,23 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
         */
        size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
        size += dev->port_usb->header_len;
+       /*
        size += out->maxpacket - 1;
        size -= size % out->maxpacket;
+       */
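+       /* scale the buffer so a single USB transfer can carry an aggregate
+        * of up to ul_max_pkts_per_xfer packets
+        */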
+       if (dev->ul_max_pkts_per_xfer)
+               size *= dev->ul_max_pkts_per_xfer;
 
        if (dev->port_usb->is_fixed)
                size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
+       pr_debug("%s: size: %d, mtu: %d, header_len: %d, maxpacket: %d, ul_max_pkts_per_xfer: %d\n",
+                       __func__, (int)size, dev->net->mtu,
+                       dev->port_usb->header_len, out->maxpacket,
+                       dev->ul_max_pkts_per_xfer);
        skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
        if (skb == NULL) {
+               pr_debug("[XLOG_INFO][UTHER]rx_submit: no rx skb\n");
                DBG(dev, "no rx skb\n");
+               rndis_test_rx_nomem++;
                goto enomem;
        }
 
@@ -243,7 +293,6 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
 
        req->buf = skb->data;
        req->length = size;
-       req->complete = rx_complete;
        req->context = skb;
 
        retval = usb_ep_queue(out, req, gfp_flags);
@@ -254,23 +303,22 @@ enomem:
                DBG(dev, "rx submit --> %d\n", retval);
                if (skb)
                        dev_kfree_skb_any(skb);
-               spin_lock_irqsave(&dev->req_lock, flags);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock_irqrestore(&dev->req_lock, flags);
        }
        return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context, *skb2;
+       struct sk_buff  *skb = req->context;
        struct eth_dev  *dev = ep->driver_data;
        int             status = req->status;
+       bool            queue = false;
 
        switch (status) {
 
        /* normal completion */
        case 0:
+               pr_debug("%s: transferred size: %d\n", __func__, req->actual);
                skb_put(skb, req->actual);
 
                if (dev->unwrap) {
@@ -281,6 +329,10 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                                status = dev->unwrap(dev->port_usb,
                                                        skb,
                                                        &dev->rx_frames);
+                               if (status == -EINVAL)
+                                       dev->net->stats.rx_errors++;
+                               else if (status == -EOVERFLOW)
+                                       dev->net->stats.rx_over_errors++;
                        } else {
                                dev_kfree_skb_any(skb);
                                status = -ENOTCONN;
@@ -289,30 +341,11 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
                } else {
                        skb_queue_tail(&dev->rx_frames, skb);
                }
-               skb = NULL;
-
-               skb2 = skb_dequeue(&dev->rx_frames);
-               while (skb2) {
-                       if (status < 0
-                                       || ETH_HLEN > skb2->len
-                                       || skb2->len > VLAN_ETH_FRAME_LEN) {
-                               dev->net->stats.rx_errors++;
-                               dev->net->stats.rx_length_errors++;
-                               DBG(dev, "rx length %d\n", skb2->len);
-                               dev_kfree_skb_any(skb2);
-                               goto next_frame;
-                       }
-                       skb2->protocol = eth_type_trans(skb2, dev->net);
-                       dev->net->stats.rx_packets++;
-                       dev->net->stats.rx_bytes += skb2->len;
 
-                       /* no buffer copies needed, unless hardware can't
-                        * use skb buffers.
-                        */
-                       status = netif_rx(skb2);
-next_frame:
-                       skb2 = skb_dequeue(&dev->rx_frames);
-               }
+               if (!status)
+                       queue = true;
+
+               rndis_test_rx_usb_in++;
                break;
 
        /* software-driven interface shutdown */
@@ -335,28 +368,30 @@ quiesce:
                /* FALLTHROUGH */
 
        default:
+               queue = true;
+               dev_kfree_skb_any(skb);
                dev->net->stats.rx_errors++;
                DBG(dev, "rx status %d\n", status);
                break;
        }
 
-       if (skb)
-               dev_kfree_skb_any(skb);
-       if (!netif_running(dev->net)) {
 clean:
-               spin_lock(&dev->req_lock);
-               list_add(&req->list, &dev->rx_reqs);
-               spin_unlock(&dev->req_lock);
-               req = NULL;
+       spin_lock(&dev->reqrx_lock);
+       list_add(&req->list, &dev->rx_reqs);
+       spin_unlock(&dev->reqrx_lock);
+
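+       /* this completion runs in interrupt context: hand the frames and
+        * the rx queue refill off to the uether work threads
+        */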
+       if (queue) {
+               queue_work(uether_wq, &dev->rx_work);
+               queue_work(uether_wq1, &dev->rx_work1);
        }
-       if (req)
-               rx_submit(dev, req, GFP_ATOMIC);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
 {
        unsigned                i;
        struct usb_request      *req;
+       bool                    usb_in;
 
        if (!n)
                return -ENOMEM;
@@ -367,10 +402,22 @@ static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
                if (i-- == 0)
                        goto extra;
        }
+
+       if (ep->desc->bEndpointAddress & USB_DIR_IN)
+               usb_in = true;
+       else
+               usb_in = false;
+
        while (i--) {
                req = usb_ep_alloc_request(ep, GFP_ATOMIC);
                if (!req)
                        return list_empty(list) ? -ENOMEM : 0;
+               /* update completion handler */
+               if (usb_in)
+                       req->complete = tx_complete;
+               else
+                       req->complete = rx_complete;
+
                list_add(&req->list, list);
        }
        return 0;
@@ -392,7 +439,7 @@ extra:
        return 0;
 }
 
-static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
+static int alloc_tx_requests(struct eth_dev *dev, struct gether *link, unsigned n)
 {
        int     status;
 
@@ -400,14 +447,30 @@ static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
        status = prealloc(&dev->tx_reqs, link->in_ep, n);
        if (status < 0)
                goto fail;
+
+       goto done;
+fail:
+       DBG(dev, "can't alloc tx requests\n");
+       pr_debug("[XLOG_INFO][UTHER]alloc_tx_requests: can't alloc tx requests\n");
+done:
+       spin_unlock(&dev->req_lock);
+       return status;
+}
+
+static int alloc_rx_requests(struct eth_dev *dev, struct gether *link,
+                               unsigned n)
+{
+       int     status;
+
+       spin_lock(&dev->reqrx_lock);
+
        status = prealloc(&dev->rx_reqs, link->out_ep, n);
        if (status < 0)
                goto fail;
        goto done;
 fail:
-       DBG(dev, "can't alloc requests\n");
+       DBG(dev, "can't alloc rx requests\n");
+       pr_debug("[XLOG_INFO][UTHER]alloc_rx_requests: can't alloc rx requests\n");
 done:
-       spin_unlock(&dev->req_lock);
+       spin_unlock(&dev->reqrx_lock);
        return status;
 }
 
@@ -415,23 +478,77 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
        unsigned long           flags;
+       int                     req_cnt = 0;
 
        /* fill unused rxq slots with some skb */
-       spin_lock_irqsave(&dev->req_lock, flags);
+       spin_lock_irqsave(&dev->reqrx_lock, flags);
        while (!list_empty(&dev->rx_reqs)) {
+               /* break the cycle of continuous completion and re-submission */
+               if (++req_cnt > qlenrx(dev->gadget))
+                       break;
+
                req = container_of(dev->rx_reqs.next,
                                struct usb_request, list);
                list_del_init(&req->list);
-               spin_unlock_irqrestore(&dev->req_lock, flags);
+               spin_unlock_irqrestore(&dev->reqrx_lock, flags);
 
                if (rx_submit(dev, req, gfp_flags) < 0) {
+                       spin_lock_irqsave(&dev->reqrx_lock, flags);
+                       list_add(&req->list, &dev->rx_reqs);
+                       spin_unlock_irqrestore(&dev->reqrx_lock, flags);
                        defer_kevent(dev, WORK_RX_MEMORY);
                        return;
                }
 
-               spin_lock_irqsave(&dev->req_lock, flags);
+               spin_lock_irqsave(&dev->reqrx_lock, flags);
        }
-       spin_unlock_irqrestore(&dev->req_lock, flags);
+       spin_unlock_irqrestore(&dev->reqrx_lock, flags);
+}
+
+static void process_rx_w(struct work_struct *work)
+{
+       struct eth_dev  *dev = container_of(work, struct eth_dev, rx_work);
+       struct sk_buff  *skb;
+       int             status = 0;
+
+       if (!dev->port_usb)
+               return;
+
+       while ((skb = skb_dequeue(&dev->rx_frames))) {
+               if (status < 0
+                               || ETH_HLEN > skb->len
+                               || skb->len > VLAN_ETH_FRAME_LEN) {
+                       dev->net->stats.rx_errors++;
+                       dev->net->stats.rx_length_errors++;
+                       rndis_test_rx_error++;
+                       DBG(dev, "rx length %d\n", skb->len);
+                       dev_kfree_skb_any(skb);
+                       continue;
+               }
+               skb->protocol = eth_type_trans(skb, dev->net);
+               dev->net->stats.rx_packets++;
+               dev->net->stats.rx_bytes += skb->len;
+
+               rndis_test_rx_net_out++;
+               status = netif_rx_ni(skb);
+       }
+
+/* rx queue refill was moved to a separate work item, process_rx_w1() */
+#if 0
+       if (netif_running(dev->net))
+               rx_fill(dev, GFP_KERNEL);
+#endif
+}
+
+static void process_rx_w1(struct work_struct *work)
+{
+       struct eth_dev  *dev = container_of(work, struct eth_dev, rx_work1);
+
+       if (!dev->port_usb)
+               return;
+
+       if (netif_running(dev->net))
+               rx_fill(dev, GFP_KERNEL);
 }
 
 static void eth_work(struct work_struct *work)
@@ -449,8 +566,26 @@ static void eth_work(struct work_struct *work)
 
 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-       struct sk_buff  *skb = req->context;
-       struct eth_dev  *dev = ep->driver_data;
+       struct sk_buff  *skb;
+       struct eth_dev  *dev;
+       struct net_device *net;
+       struct usb_request *new_req;
+       struct usb_ep *in;
+       int length;
+       int retval;
+
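+       /* the endpoint may already have been disabled by
+        * gether_disconnect(); if so, just free the request
+        */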
+       if (!ep->driver_data) {
+               usb_ep_free_request(ep, req);
+               return;
+       }
+
+       dev = ep->driver_data;
+       net = dev->net;
+
+       if (!dev->port_usb) {
+               usb_ep_free_request(ep, req);
+               return;
+       }
 
        switch (req->status) {
        default:
@@ -461,18 +596,103 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
        case -ESHUTDOWN:                /* disconnect etc */
                break;
        case 0:
-               dev->net->stats.tx_bytes += skb->len;
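+               /* a cleared zero flag normally means a pad byte was appended
+                * for the zlp workaround; do not count it as payload
+                */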
+               if (!req->zero)
+                       dev->net->stats.tx_bytes += req->length - 1;
+               else
+                       dev->net->stats.tx_bytes += req->length;
        }
        dev->net->stats.tx_packets++;
+       rndis_test_tx_complete++;
 
        spin_lock(&dev->req_lock);
-       list_add(&req->list, &dev->tx_reqs);
-       spin_unlock(&dev->req_lock);
-       dev_kfree_skb_any(skb);
+       list_add_tail(&req->list, &dev->tx_reqs);
+
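+       /* aggregated (multi-packet) requests carry no skb context; on
+        * completion, try to queue the next partially filled tx request
+        */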
+       if (dev->port_usb->multi_pkt_xfer && !req->context) {
+               dev->no_tx_req_used--;
+               req->length = 0;
+               in = dev->port_usb->in_ep;
+
+               if (!list_empty(&dev->tx_reqs)) {
+                       new_req = container_of(dev->tx_reqs.next,
+                                       struct usb_request, list);
+                       list_del(&new_req->list);
+                       spin_unlock(&dev->req_lock);
+                       if (new_req->length > 0) {
+                               length = new_req->length;
+
+                               /* NCM requires no zlp if transfer is
+                                * dwNtbInMaxSize */
+                               if (dev->port_usb->is_fixed &&
+                                       length == dev->port_usb->fixed_in_len &&
+                                       (length % in->maxpacket) == 0)
+                                       new_req->zero = 0;
+                               else
+                                       new_req->zero = 1;
+
+                               /* use zlp framing on tx for strict CDC-Ether
+                                * conformance, though any robust network rx
+                                * path ignores extra padding. and some hardware
+                                * doesn't like to write zlps.
+                                */
+                               if (new_req->zero && !dev->zlp &&
+                                               (length % in->maxpacket) == 0) {
+                                       new_req->zero = 0;
+                                       length++;
+                               }
+
+                               new_req->length = length;
+                               retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+                               switch (retval) {
+                               default:
+                                       DBG(dev, "tx queue err %d\n", retval);
+                                       new_req->length = 0;
+                                       spin_lock(&dev->req_lock);
+                                       list_add_tail(&new_req->list,
+                                                       &dev->tx_reqs);
+                                       spin_unlock(&dev->req_lock);
+                                       break;
+                               case 0:
+                                       spin_lock(&dev->req_lock);
+                                       dev->no_tx_req_used++;
+                                       spin_unlock(&dev->req_lock);
+                                       net->trans_start = jiffies;
+                               }
+                       } else {
+                               spin_lock(&dev->req_lock);
+                               /*
+                                * Put the idle request at the back of the
+                                * queue. The xmit function will put the
+                                * unfinished request at the beginning of the
+                                * queue.
+                                */
+                               list_add_tail(&new_req->list, &dev->tx_reqs);
+                               spin_unlock(&dev->req_lock);
+                       }
+               } else {
+                       spin_unlock(&dev->req_lock);
+               }
+       } else {
+               skb = req->context;
+               /* is aggregation already enabled and buffers allocated? */
+               if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
+                       req->buf = kzalloc(dev->tx_req_bufsize, GFP_ATOMIC);
+                       req->context = NULL;
+               } else {
+                       req->buf = NULL;
+               }
+
+               spin_unlock(&dev->req_lock);
+               dev_kfree_skb_any(skb);
+       }
 
-       atomic_dec(&dev->tx_qlen);
-       if (netif_carrier_ok(dev->net))
-               netif_wake_queue(dev->net);
+       if (netif_carrier_ok(dev->net)) {
+               spin_lock(&dev->req_lock);
+               if (dev->no_tx_req_used < tx_wakeup_threshold)
+                       netif_wake_queue(dev->net);
+               spin_unlock(&dev->req_lock);
+       }
 }
 
 static inline int is_promisc(u16 cdc_filter)
@@ -480,6 +700,42 @@ static inline int is_promisc(u16 cdc_filter)
        return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
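+/* Allocate one aggregation buffer per queued tx request, sized to hold
+ * dl_max_pkts_per_xfer packets plus their per-packet RNDIS headers.
+ */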
+static int alloc_tx_buffer(struct eth_dev *dev)
+{
+       struct list_head        *act;
+       struct usb_request      *req;
+
+       dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
+                               (dev->net->mtu
+                               + sizeof(struct ethhdr)
+                               /* size of rndis_packet_msg_type */
+                               + 44
+                               + 22));
+
+       list_for_each(act, &dev->tx_reqs) {
+               req = container_of(act, struct usb_request, list);
+               if (!req->buf) {
+                       req->buf = kzalloc(dev->tx_req_bufsize,
+                                               GFP_ATOMIC);
+                       if (!req->buf)
+                               goto free_buf;
+               }
+               /* req->context is not used for multi_pkt_xfers */
+               req->context = NULL;
+       }
+       return 0;
+
+free_buf:
+       /* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
+       dev->tx_req_bufsize = 0;
+       list_for_each(act, &dev->tx_reqs) {
+               req = container_of(act, struct usb_request, list);
+               kfree(req->buf);
+               req->buf = NULL;
+       }
+       return -ENOMEM;
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                        struct net_device *net)
 {
@@ -490,11 +746,18 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
        unsigned long           flags;
        struct usb_ep           *in;
        u16                     cdc_filter;
+       bool                    multi_pkt_xfer = false;
+
+       /* ALPS00542120 */
+       static unsigned int okCnt = 0, busyCnt = 0;
+       static int firstShot = 1, diffSec;
+       static struct timeval tv_last, tv_cur;
 
        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb) {
                in = dev->port_usb->in_ep;
                cdc_filter = dev->port_usb->cdc_filter;
+               multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
        } else {
                in = NULL;
                cdc_filter = 0;
@@ -506,6 +769,19 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_OK;
        }
 
+       /* Allocate memory for tx_reqs to support multi packet transfer */
+       spin_lock_irqsave(&dev->req_lock, flags);
+       if (multi_pkt_xfer && !dev->tx_req_bufsize) {
+               retval = alloc_tx_buffer(dev);
+               if (retval < 0) {
+                       spin_unlock_irqrestore(&dev->req_lock, flags);
+                       /* tx_req_bufsize stays 0, so the allocation will be
+                        * retried on the next xmit; let the stack requeue
+                        * the skb
+                        */
+                       return NETDEV_TX_BUSY;
+               }
+       }
+       spin_unlock_irqrestore(&dev->req_lock, flags);
+
+       rndis_test_tx_net_in++;
+
        /* apply outgoing CDC or RNDIS filters */
        if (!is_promisc(cdc_filter)) {
                u8              *dest = skb->data;
@@ -522,6 +798,7 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
                        if (!(cdc_filter & type)) {
                                dev_kfree_skb_any(skb);
+                               pr_warning("cdc_filter error: cdc_filter is 0x%x, type is 0x%x\n",
+                                               cdc_filter, type);
                                return NETDEV_TX_OK;
                        }
                }
@@ -535,9 +812,32 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
         * network stack decided to xmit but before we got the spinlock.
         */
        if (list_empty(&dev->tx_reqs)) {
+               busyCnt++;
+               do_gettimeofday(&tv_cur);
+
+               if (firstShot) {
+                       tv_last = tv_cur;
+                       firstShot = 0;
+                       printk(KERN_ERR "%s, NETDEV_TX_BUSY returned at firstShot, okCnt : %u, busyCnt : %u\n",
+                                       __func__, okCnt, busyCnt);
+               } else {
+                       diffSec = tv_cur.tv_sec - tv_last.tv_sec;
+                       if (diffSec >= 2) {
+                               tv_last = tv_cur;
+                               printk(KERN_ERR "%s, NETDEV_TX_BUSY returned, okCnt : %u, busyCnt : %u\n",
+                                               __func__, okCnt, busyCnt);
+                       }
+               }
+
                spin_unlock_irqrestore(&dev->req_lock, flags);
+
+               rndis_test_tx_busy++;
                return NETDEV_TX_BUSY;
        }
+       okCnt++;
 
        req = container_of(dev->tx_reqs.next, struct usb_request, list);
        list_del(&req->list);
@@ -551,21 +851,70 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
         * or the hardware can't use skb buffers.
         * or there's not enough space for extra headers we need
         */
+       spin_lock_irqsave(&dev->lock, flags);
        if (dev->wrap) {
-               unsigned long   flags;
-
-               spin_lock_irqsave(&dev->lock, flags);
                if (dev->port_usb)
                        skb = dev->wrap(dev->port_usb, skb);
-               spin_unlock_irqrestore(&dev->lock, flags);
-               if (!skb)
+               if (!skb) {
+                       spin_unlock_irqrestore(&dev->lock, flags);
                        goto drop;
+               }
+       }
+
+       if (multi_pkt_xfer) {
+               pr_debug("req->length:%d header_len:%u\n"
+                               "skb->len:%d skb->data_len:%d\n",
+                               req->length, dev->header_len,
+                               skb->len, skb->data_len);
 
+               if (dev->port_usb == NULL) {
+                       spin_unlock_irqrestore(&dev->lock, flags);
+                       dev_kfree_skb_any(skb);
+                       pr_debug("eth_start_xmit: port_usb became NULL\n");
+                       return NETDEV_TX_OK;
+               }
+               /* Add RNDIS Header */
+               memcpy(req->buf + req->length, dev->port_usb->header,
+                                               dev->header_len);
+               /* Increment req length by header size */
+               req->length += dev->header_len;
+               spin_unlock_irqrestore(&dev->lock, flags);
+               /* Copy received IP data from SKB */
+               memcpy(req->buf + req->length, skb->data, skb->len);
+               /* Increment req length by skb data length */
+               req->length += skb->len;
+               length = req->length;
+               dev_kfree_skb_any(skb);
+
+               spin_lock_irqsave(&dev->req_lock, flags);
+               dev->tx_skb_hold_count++;
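+               /* if this request still has room for another packet and
+                * enough requests are already queued to the UDC, hold it
+                * back so more packets can be aggregated
+                */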
+               if ((dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) &&
+                   (length < (dev->port_usb->dl_max_transfer_len -
+                               dev->net->mtu))) {
+                       if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+                               list_add(&req->list, &dev->tx_reqs);
+                               spin_unlock_irqrestore(&dev->req_lock, flags);
+                               goto success;
+                       }
+               }
+
+               dev->no_tx_req_used++;
+               dev->tx_skb_hold_count = 0;
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+       } else {
+               spin_unlock_irqrestore(&dev->lock, flags);
                length = skb->len;
+               req->buf = skb->data;
+               req->context = skb;
+       }
+
+       if (dev->port_usb == NULL)
+       {
+               if (!multi_pkt_xfer)
+                       dev_kfree_skb_any(skb);
+               pr_debug("eth_start_xmit, port_usb becomes NULL\n");
+               return NETDEV_TX_OK;
        }
-       req->buf = skb->data;
-       req->context = skb;
-       req->complete = tx_complete;
 
        /* NCM requires no zlp if transfer is dwNtbInMaxSize */
        if (dev->port_usb->is_fixed &&
@@ -579,38 +928,53 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
         * though any robust network rx path ignores extra padding.
         * and some hardware doesn't like to write zlps.
         */
-       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+               req->zero = 0;
                length++;
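+               /* the pad byte is subtracted from tx_bytes in tx_complete() */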
+       }
 
        req->length = length;
 
        /* throttle high/super speed IRQ rate back slightly */
-       if (gadget_is_dualspeed(dev->gadget))
-               req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
-                                    dev->gadget->speed == USB_SPEED_SUPER)
-                       ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
-                       : 0;
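+       /* complete with an interrupt only on every (qmult/2)-th request */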
+       if (gadget_is_dualspeed(dev->gadget) &&
+                        (dev->gadget->speed == USB_SPEED_HIGH ||
+                         dev->gadget->speed == USB_SPEED_SUPER)) {
+               dev->tx_qlen++;
+               if (dev->tx_qlen == (qmult/2)) {
+                       req->no_interrupt = 0;
+                       dev->tx_qlen = 0;
+               } else {
+                       req->no_interrupt = 1;
+               }
+       } else {
+               req->no_interrupt = 0;
+       }
+       rndis_test_tx_usb_out++;
 
        retval = usb_ep_queue(in, req, GFP_ATOMIC);
        switch (retval) {
        default:
                DBG(dev, "tx queue err %d\n", retval);
+               pr_debug("[XLOG_INFO][UTHER]eth_start_xmit: tx queue err %d\n", retval);
                break;
        case 0:
                net->trans_start = jiffies;
-               atomic_inc(&dev->tx_qlen);
        }
 
        if (retval) {
-               dev_kfree_skb_any(skb);
+               if (!multi_pkt_xfer)
+                       dev_kfree_skb_any(skb);
+               else
+                       req->length = 0;
 drop:
                dev->net->stats.tx_dropped++;
                spin_lock_irqsave(&dev->req_lock, flags);
                if (list_empty(&dev->tx_reqs))
                        netif_start_queue(net);
-               list_add(&req->list, &dev->tx_reqs);
+               list_add_tail(&req->list, &dev->tx_reqs);
                spin_unlock_irqrestore(&dev->req_lock, flags);
        }
+success:
        return NETDEV_TX_OK;
 }
 
@@ -619,12 +983,13 @@ drop:
 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
 {
        DBG(dev, "%s\n", __func__);
+       pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
 
        /* fill the rx queue */
        rx_fill(dev, gfp_flags);
 
        /* and open the tx floodgates */
-       atomic_set(&dev->tx_qlen, 0);
+       dev->tx_qlen = 0;
        netif_wake_queue(dev->net);
 }
 
@@ -634,6 +999,8 @@ static int eth_open(struct net_device *net)
        struct gether   *link;
 
        DBG(dev, "%s\n", __func__);
+       pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
+
        if (netif_carrier_ok(dev->net))
                eth_start(dev, GFP_KERNEL);
 
@@ -652,6 +1019,8 @@ static int eth_stop(struct net_device *net)
        unsigned long   flags;
 
        VDBG(dev, "%s\n", __func__);
+       pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
+
        netif_stop_queue(net);
 
        DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
@@ -769,7 +1138,10 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
        dev = netdev_priv(net);
        spin_lock_init(&dev->lock);
        spin_lock_init(&dev->req_lock);
+       spin_lock_init(&dev->reqrx_lock);
        INIT_WORK(&dev->work, eth_work);
+       INIT_WORK(&dev->rx_work, process_rx_w);
+       INIT_WORK(&dev->rx_work1, process_rx_w1);
        INIT_LIST_HEAD(&dev->tx_reqs);
        INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -856,6 +1228,16 @@ struct net_device *gether_connect(struct gether *link)
        if (!dev)
                return ERR_PTR(-EINVAL);
 
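+       /* preallocate the packet header that eth_start_xmit() copies in
+        * front of each packet when aggregation (multi_pkt_xfer) is used
+        */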
+       link->header = kzalloc(sizeof(struct rndis_packet_msg_type),
+                                                       GFP_ATOMIC);
+       if (!link->header) {
+               pr_err("RNDIS header memory allocation failed.\n");
+               result = -ENOMEM;
+               goto fail;
+       }
+
+       pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
+
        link->in_ep->driver_data = dev;
        result = usb_ep_enable(link->in_ep);
        if (result != 0) {
@@ -873,7 +1255,11 @@ struct net_device *gether_connect(struct gether *link)
        }
 
-       if (result == 0)
-               result = alloc_requests(dev, link, qlen(dev->gadget));
+       if (result == 0) {
+               result = alloc_tx_requests(dev, link, qlen(dev->gadget));
+               if (result == 0)
+                       result = alloc_rx_requests(dev, link,
+                                       qlenrx(dev->gadget));
+       }
 
        if (result == 0) {
                dev->zlp = link->is_zlp_ok;
@@ -882,8 +1268,13 @@ struct net_device *gether_connect(struct gether *link)
                dev->header_len = link->header_len;
                dev->unwrap = link->unwrap;
                dev->wrap = link->wrap;
+               dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+               dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
 
                spin_lock(&dev->lock);
+               dev->tx_skb_hold_count = 0;
+               dev->no_tx_req_used = 0;
+               dev->tx_req_bufsize = 0;
                dev->port_usb = link;
                if (netif_running(dev->net)) {
                        if (link->open)
@@ -904,10 +1295,15 @@ struct net_device *gether_connect(struct gether *link)
 fail1:
                (void) usb_ep_disable(link->in_ep);
        }
-fail0:
+
        /* caller is responsible for cleanup on error */
-       if (result < 0)
+       if (result < 0) {
+fail0:
+               kfree(link->header);
+fail:
                return ERR_PTR(result);
+       }
+
        return dev->net;
 }
 
@@ -927,12 +1323,24 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
+       struct sk_buff          *skb;
 
        WARN_ON(!dev);
        if (!dev)
                return;
 
        DBG(dev, "%s\n", __func__);
+       pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
+
+       rndis_test_rx_usb_in = 0;
+       rndis_test_rx_net_out = 0;
+       rndis_test_rx_nomem = 0;
+       rndis_test_rx_error = 0;
+
+       rndis_test_tx_net_in = 0;
+       rndis_test_tx_busy = 0;
+       rndis_test_tx_usb_out = 0;
+       rndis_test_tx_complete = 0;
 
        netif_stop_queue(dev->net);
        netif_carrier_off(dev->net);
@@ -949,25 +1357,38 @@ void gether_disconnect(struct gether *link)
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
+               if (link->multi_pkt_xfer) {
+                       kfree(req->buf);
+                       req->buf = NULL;
+               }
                usb_ep_free_request(link->in_ep, req);
                spin_lock(&dev->req_lock);
        }
+       /* Free rndis header buffer memory */
+       kfree(link->header);
+       link->header = NULL;
        spin_unlock(&dev->req_lock);
        link->in_ep->driver_data = NULL;
        link->in_ep->desc = NULL;
 
        usb_ep_disable(link->out_ep);
-       spin_lock(&dev->req_lock);
+       spin_lock(&dev->reqrx_lock);
        while (!list_empty(&dev->rx_reqs)) {
                req = container_of(dev->rx_reqs.next,
                                        struct usb_request, list);
                list_del(&req->list);
 
-               spin_unlock(&dev->req_lock);
+               spin_unlock(&dev->reqrx_lock);
                usb_ep_free_request(link->out_ep, req);
-               spin_lock(&dev->req_lock);
+               spin_lock(&dev->reqrx_lock);
        }
-       spin_unlock(&dev->req_lock);
+       spin_unlock(&dev->reqrx_lock);
+
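+       /* drop any received frames still queued for the rx work item */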
+       spin_lock(&dev->rx_frames.lock);
+       while ((skb = __skb_dequeue(&dev->rx_frames)))
+               dev_kfree_skb_any(skb);
+       spin_unlock(&dev->rx_frames.lock);
+
        link->out_ep->driver_data = NULL;
        link->out_ep->desc = NULL;
 
@@ -980,3 +1401,29 @@ void gether_disconnect(struct gether *link)
        dev->port_usb = NULL;
        spin_unlock(&dev->lock);
 }
+
+static int __init gether_init(void)
+{
+       uether_wq = create_singlethread_workqueue("uether");
+       if (!uether_wq) {
+               pr_err("%s: Unable to create workqueue: uether\n", __func__);
+               return -ENOMEM;
+       }
+       uether_wq1 = create_singlethread_workqueue("uether_rx1");
+       if (!uether_wq1) {
+               destroy_workqueue(uether_wq);
+               pr_err("%s: Unable to create workqueue: uether_rx1\n", __func__);
+               return -ENOMEM;
+       }
+       return 0;
+}
+module_init(gether_init);
+
+static void __exit gether_exit(void)
+{
+       destroy_workqueue(uether_wq);
+       destroy_workqueue(uether_wq1);
+}
+module_exit(gether_exit);
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");