2 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
4 * Copyright (C) 2003-2005,2008 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 * Copyright (C) 2008 Nokia Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 /* #define VERBOSE_DEBUG */
18 #define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/gfp.h>
23 #include <linux/device.h>
24 #include <linux/ctype.h>
25 #include <linux/etherdevice.h>
26 #include <linux/ethtool.h>
27 #include <linux/if_vlan.h>
31 * This component encapsulates the Ethernet link glue needed to provide
32 * one (!) network link through the USB gadget stack, normally "usb0".
34 * The control and data models are handled by the function driver which
35 * connects to this code; such as CDC Ethernet (ECM or EEM),
36 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
39 * Link level addressing is handled by this component using module
40 * parameters; if no such parameters are provided, random link level
41 * addresses are used. Each end of the link uses one address. The
42 * host end address is exported in various ways, and is often recorded
43 * in configuration databases.
45 * The driver which assembles each configuration using such a link is
46 * responsible for ensuring that each configuration includes at most one
47 * instance of this network link. (The network layer provides ways for
48 * this single "physical" link to be used by multiple virtual links.)
51 #define UETH__VERSION "29-May-2008"
53 static struct workqueue_struct
*uether_wq
;
54 static struct workqueue_struct
*uether_wq1
;
58 /* lock is held while accessing port_usb
61 struct gether
*port_usb
;
63 struct net_device
*net
;
64 struct usb_gadget
*gadget
;
66 spinlock_t req_lock
; /* guard {tx}_reqs */
67 spinlock_t reqrx_lock
; /* guard {rx}_reqs */
68 struct list_head tx_reqs
, rx_reqs
;
70 /* Minimum number of TX USB request queued to UDC */
71 #define TX_REQ_THRESHOLD 5
73 int tx_skb_hold_count
;
76 struct sk_buff_head rx_frames
;
79 unsigned int ul_max_pkts_per_xfer
;
80 unsigned int dl_max_pkts_per_xfer
;
81 struct sk_buff
*(*wrap
)(struct gether
*, struct sk_buff
*skb
);
82 int (*unwrap
)(struct gether
*,
84 struct sk_buff_head
*list
);
86 struct work_struct work
;
87 struct work_struct rx_work
;
88 struct work_struct rx_work1
;
90 #define WORK_RX_MEMORY 0
93 u8 host_mac
[ETH_ALEN
];
96 /*-------------------------------------------------------------------------*/
98 #define RX_EXTRA 20 /* bytes guarding against rx overflows */
100 #define DEFAULT_QLEN 2 /* double buffering by default */
/* Multiplier applied to the default queue depth when the link runs at
 * high/super speed (see qlen()/qlenrx()); writable at runtime. */
102 static unsigned qmult
= 10;
103 module_param(qmult
, uint
, S_IRUGO
|S_IWUSR
);
104 MODULE_PARM_DESC(qmult
, "queue length multiplier at high/super speed");
/* tx_complete() re-wakes the netdev TX queue only while the number of
 * in-flight TX requests (no_tx_req_used) is below this threshold. */
106 static unsigned tx_wakeup_threshold
= 13;
107 module_param(tx_wakeup_threshold
, uint
, S_IRUGO
|S_IWUSR
);
108 MODULE_PARM_DESC(tx_wakeup_threshold
, "tx wakeup threshold value");
110 /* for dual-speed hardware, use deeper queues at high/super speed */
111 static inline int qlen(struct usb_gadget
*gadget
)
113 if (gadget_is_dualspeed(gadget
) && (gadget
->speed
== USB_SPEED_HIGH
||
114 gadget
->speed
== USB_SPEED_SUPER
))
115 return qmult
* DEFAULT_QLEN
;
/*
 * qlenrx - RX request queue depth for the current connection speed.
 *
 * The policy is identical to qlen() (deeper queue at high/super speed
 * on dual-speed hardware), so delegate to it instead of duplicating
 * the speed check in two places.
 */
static inline int qlenrx(struct usb_gadget *gadget)
{
	return qlen(gadget);
}
130 /*-------------------------------------------------------------------------*/
132 /* REVISIT there must be a better way than having two sets
141 #define xprintk(d, level, fmt, args...) \
142 printk(level "%s: " fmt , (d)->net->name , ## args)
146 #define DBG(dev, fmt, args...) \
147 xprintk(dev , KERN_DEBUG , fmt , ## args)
149 #define DBG(dev, fmt, args...) \
156 #define VDBG(dev, fmt, args...) \
160 #define ERROR(dev, fmt, args...) \
161 xprintk(dev , KERN_ERR , fmt , ## args)
162 #define INFO(dev, fmt, args...) \
163 xprintk(dev , KERN_INFO , fmt , ## args)
165 /*-------------------------------------------------------------------------*/
/* Debug/statistics counters for the data path; reset in
 * gether_disconnect().  RX side: USB completions received, packets
 * handed to the network stack, skb allocation failures, length errors. */
167 unsigned long rndis_test_rx_usb_in
= 0 ;
168 unsigned long rndis_test_rx_net_out
= 0 ;
169 unsigned long rndis_test_rx_nomem
= 0 ;
170 unsigned long rndis_test_rx_error
= 0 ;
/* TX side: packets from the network stack, NETDEV_TX_BUSY returns,
 * requests queued to the UDC, and TX completion callbacks. */
172 unsigned long rndis_test_tx_net_in
= 0 ;
173 unsigned long rndis_test_tx_busy
= 0 ;
174 unsigned long rndis_test_tx_usb_out
= 0 ;
175 unsigned long rndis_test_tx_complete
= 0 ;
177 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
179 static int ueth_change_mtu(struct net_device
*net
, int new_mtu
)
181 struct eth_dev
*dev
= netdev_priv(net
);
185 /* don't change MTU on "live" link (peer won't know) */
186 spin_lock_irqsave(&dev
->lock
, flags
);
189 else if (new_mtu
<= ETH_HLEN
|| new_mtu
> ETH_FRAME_LEN
)
193 spin_unlock_irqrestore(&dev
->lock
, flags
);
195 pr_debug("[XLOG_INFO][UTHER]ueth_change_mtu to %d, status is %d\n", new_mtu
, status
);
200 static void eth_get_drvinfo(struct net_device
*net
, struct ethtool_drvinfo
*p
)
202 struct eth_dev
*dev
= netdev_priv(net
);
204 strlcpy(p
->driver
, "g_ether", sizeof(p
->driver
));
205 strlcpy(p
->version
, UETH__VERSION
, sizeof(p
->version
));
206 strlcpy(p
->fw_version
, dev
->gadget
->name
, sizeof(p
->fw_version
));
207 strlcpy(p
->bus_info
, dev_name(&dev
->gadget
->dev
), sizeof(p
->bus_info
));
210 /* REVISIT can also support:
211 * - WOL (by tracking suspends and issuing remote wakeup)
212 * - msglevel (implies updated messaging)
213 * - ... probably more ethtool ops
216 static const struct ethtool_ops ops
= {
217 .get_drvinfo
= eth_get_drvinfo
,
218 .get_link
= ethtool_op_get_link
,
221 static void defer_kevent(struct eth_dev
*dev
, int flag
)
223 if (test_and_set_bit(flag
, &dev
->todo
))
225 if (!schedule_work(&dev
->work
))
226 ERROR(dev
, "kevent %d may have been dropped\n", flag
);
228 DBG(dev
, "kevent %d scheduled\n", flag
);
231 static void rx_complete(struct usb_ep
*ep
, struct usb_request
*req
);
232 static void tx_complete(struct usb_ep
*ep
, struct usb_request
*req
);
235 rx_submit(struct eth_dev
*dev
, struct usb_request
*req
, gfp_t gfp_flags
)
238 int retval
= -ENOMEM
;
243 spin_lock_irqsave(&dev
->lock
, flags
);
245 out
= dev
->port_usb
->out_ep
;
248 spin_unlock_irqrestore(&dev
->lock
, flags
);
254 /* Padding up to RX_EXTRA handles minor disagreements with host.
255 * Normally we use the USB "terminate on short read" convention;
256 * so allow up to (N*maxpacket), since that memory is normally
257 * already allocated. Some hardware doesn't deal well with short
258 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
259 * byte off the end (to force hardware errors on overflow).
261 * RNDIS uses internal framing, and explicitly allows senders to
262 * pad to end-of-packet. That's potentially nice for speed, but
263 * means receivers can't recover lost synch on their own (because
264 * new packets don't only start after a short RX).
266 size
+= sizeof(struct ethhdr
) + dev
->net
->mtu
+ RX_EXTRA
;
267 size
+= dev
->port_usb
->header_len
;
269 size += out->maxpacket - 1;
270 size -= size % out->maxpacket;
272 if (dev
->ul_max_pkts_per_xfer
)
273 size
*= dev
->ul_max_pkts_per_xfer
;
275 if (dev
->port_usb
->is_fixed
)
276 size
= max_t(size_t, size
, dev
->port_usb
->fixed_out_len
);
278 pr_debug("%s: size: %d, mtu: %d, header_len: %d, maxpacket: %d, ul_max_pkts_per_xfer: %d",
279 __func__
, (int)size
, dev
->net
->mtu
, dev
->port_usb
->header_len
, out
->maxpacket
, dev
->ul_max_pkts_per_xfer
);
280 skb
= alloc_skb(size
+ NET_IP_ALIGN
, gfp_flags
);
282 pr_debug("[XLOG_INFO][UTHER]rx_submit : no rx skb\n");
283 DBG(dev
, "no rx skb\n");
284 rndis_test_rx_nomem
++ ;
288 /* Some platforms perform better when IP packets are aligned,
289 * but on at least one, checksumming fails otherwise. Note:
290 * RNDIS headers involve variable numbers of LE32 values.
292 skb_reserve(skb
, NET_IP_ALIGN
);
294 req
->buf
= skb
->data
;
298 retval
= usb_ep_queue(out
, req
, gfp_flags
);
299 if (retval
== -ENOMEM
)
301 defer_kevent(dev
, WORK_RX_MEMORY
);
303 DBG(dev
, "rx submit --> %d\n", retval
);
305 dev_kfree_skb_any(skb
);
310 static void rx_complete(struct usb_ep
*ep
, struct usb_request
*req
)
312 struct sk_buff
*skb
= req
->context
;
313 struct eth_dev
*dev
= ep
->driver_data
;
314 int status
= req
->status
;
319 /* normal completion */
321 pr_debug("%s: transferred size: %d", __func__
, req
->actual
);
322 skb_put(skb
, req
->actual
);
327 spin_lock_irqsave(&dev
->lock
, flags
);
329 status
= dev
->unwrap(dev
->port_usb
,
332 if (status
== -EINVAL
)
333 dev
->net
->stats
.rx_errors
++;
334 else if (status
== -EOVERFLOW
)
335 dev
->net
->stats
.rx_over_errors
++;
337 dev_kfree_skb_any(skb
);
340 spin_unlock_irqrestore(&dev
->lock
, flags
);
342 skb_queue_tail(&dev
->rx_frames
, skb
);
348 rndis_test_rx_usb_in
++ ;
351 /* software-driven interface shutdown */
352 case -ECONNRESET
: /* unlink */
353 case -ESHUTDOWN
: /* disconnect etc */
354 VDBG(dev
, "rx shutdown, code %d\n", status
);
357 /* for hardware automagic (such as pxa) */
358 case -ECONNABORTED
: /* endpoint reset */
359 DBG(dev
, "rx %s reset\n", ep
->name
);
360 defer_kevent(dev
, WORK_RX_MEMORY
);
362 dev_kfree_skb_any(skb
);
367 dev
->net
->stats
.rx_over_errors
++;
372 dev_kfree_skb_any(skb
);
373 dev
->net
->stats
.rx_errors
++;
374 DBG(dev
, "rx status %d\n", status
);
379 spin_lock(&dev
->reqrx_lock
);
380 list_add(&req
->list
, &dev
->rx_reqs
);
381 spin_unlock(&dev
->reqrx_lock
);
385 queue_work(uether_wq
, &dev
->rx_work
);
386 queue_work(uether_wq1
, &dev
->rx_work1
);
390 static int prealloc(struct list_head
*list
, struct usb_ep
*ep
, unsigned n
)
393 struct usb_request
*req
;
399 /* queue/recycle up to N requests */
401 list_for_each_entry(req
, list
, list
) {
406 if (ep
->desc
->bEndpointAddress
& USB_DIR_IN
)
412 req
= usb_ep_alloc_request(ep
, GFP_ATOMIC
);
414 return list_empty(list
) ? -ENOMEM
: 0;
415 /* update completion handler */
417 req
->complete
= tx_complete
;
419 req
->complete
= rx_complete
;
421 list_add(&req
->list
, list
);
428 struct list_head
*next
;
430 next
= req
->list
.next
;
431 list_del(&req
->list
);
432 usb_ep_free_request(ep
, req
);
437 req
= container_of(next
, struct usb_request
, list
);
442 static int alloc_tx_requests(struct eth_dev
*dev
, struct gether
*link
, unsigned n
)
446 spin_lock(&dev
->req_lock
);
447 status
= prealloc(&dev
->tx_reqs
, link
->in_ep
, n
);
453 DBG(dev
, "can't alloc tx requests\n");
454 pr_debug("[XLOG_INFO][UTHER]alloc_requests : can't alloc requests\n");
456 spin_unlock(&dev
->req_lock
);
459 static int alloc_rx_requests(struct eth_dev
*dev
, struct gether
*link
, unsigned n
)
463 spin_lock(&dev
->reqrx_lock
);
465 status
= prealloc(&dev
->rx_reqs
, link
->out_ep
, n
);
470 DBG(dev
, "can't alloc rx requests\n");
471 pr_debug("[XLOG_INFO][UTHER]alloc_requests : can't alloc rxrequests\n");
473 spin_unlock(&dev
->reqrx_lock
);
477 static void rx_fill(struct eth_dev
*dev
, gfp_t gfp_flags
)
479 struct usb_request
*req
;
483 /* fill unused rxq slots with some skb */
484 spin_lock_irqsave(&dev
->reqrx_lock
, flags
);
485 while (!list_empty(&dev
->rx_reqs
)) {
486 /* break the nexus of continuous completion and re-submission*/
487 if (++req_cnt
> qlenrx(dev
->gadget
))
490 req
= container_of(dev
->rx_reqs
.next
,
491 struct usb_request
, list
);
492 list_del_init(&req
->list
);
493 spin_unlock_irqrestore(&dev
->reqrx_lock
, flags
);
495 if (rx_submit(dev
, req
, gfp_flags
) < 0) {
496 spin_lock_irqsave(&dev
->reqrx_lock
, flags
);
497 list_add(&req
->list
, &dev
->rx_reqs
);
498 spin_unlock_irqrestore(&dev
->reqrx_lock
, flags
);
499 defer_kevent(dev
, WORK_RX_MEMORY
);
503 spin_lock_irqsave(&dev
->reqrx_lock
, flags
);
505 spin_unlock_irqrestore(&dev
->reqrx_lock
, flags
);
/*
 * process_rx_w - workqueue handler that drains dev->rx_frames.
 *
 * For each dequeued skb: frames shorter than an Ethernet header or
 * longer than a VLAN Ethernet frame are counted as errors and freed;
 * valid frames get their protocol set and are handed to the network
 * stack via netif_rx_ni().  Afterwards the RX queue is topped back up
 * with rx_fill() if the interface is still running.
 */
508 static void process_rx_w(struct work_struct
*work
)
510 struct eth_dev
*dev
= container_of(work
, struct eth_dev
, rx_work
);
517 while ((skb
= skb_dequeue(&dev
->rx_frames
))) {
/* length sanity check: drop runt and oversized frames */
519 || ETH_HLEN
> skb
->len
520 || skb
->len
> VLAN_ETH_FRAME_LEN
) {
521 dev
->net
->stats
.rx_errors
++;
522 dev
->net
->stats
.rx_length_errors
++;
523 rndis_test_rx_error
++ ;
524 DBG(dev
, "rx length %d\n", skb
->len
);
525 dev_kfree_skb_any(skb
);
/* valid frame: classify, account, and deliver to the stack */
528 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
529 dev
->net
->stats
.rx_packets
++;
530 dev
->net
->stats
.rx_bytes
+= skb
->len
;
532 rndis_test_rx_net_out
++ ;
533 status
= netif_rx_ni(skb
);
536 /* move to another workthread */
538 if (netif_running(dev
->net
))
539 rx_fill(dev
, GFP_KERNEL
);
543 static void process_rx_w1(struct work_struct
*work
)
545 struct eth_dev
*dev
= container_of(work
, struct eth_dev
, rx_work1
);
550 if (netif_running(dev
->net
))
551 rx_fill(dev
, GFP_KERNEL
);
554 static void eth_work(struct work_struct
*work
)
556 struct eth_dev
*dev
= container_of(work
, struct eth_dev
, work
);
558 if (test_and_clear_bit(WORK_RX_MEMORY
, &dev
->todo
)) {
559 if (netif_running(dev
->net
))
560 rx_fill(dev
, GFP_KERNEL
);
564 DBG(dev
, "work done, flags = 0x%lx\n", dev
->todo
);
567 static void tx_complete(struct usb_ep
*ep
, struct usb_request
*req
)
571 struct net_device
*net
;
572 struct usb_request
*new_req
;
577 if (!ep
->driver_data
) {
578 usb_ep_free_request(ep
, req
);
582 dev
= ep
->driver_data
;
585 if (!dev
->port_usb
) {
586 usb_ep_free_request(ep
, req
);
590 switch (req
->status
) {
592 dev
->net
->stats
.tx_errors
++;
593 VDBG(dev
, "tx err %d\n", req
->status
);
595 case -ECONNRESET
: /* unlink */
596 case -ESHUTDOWN
: /* disconnect etc */
600 dev
->net
->stats
.tx_bytes
+= req
->length
-1;
602 dev
->net
->stats
.tx_bytes
+= req
->length
;
604 dev
->net
->stats
.tx_packets
++;
605 rndis_test_tx_complete
++ ;
607 spin_lock(&dev
->req_lock
);
608 list_add_tail(&req
->list
, &dev
->tx_reqs
);
610 if (dev
->port_usb
->multi_pkt_xfer
&& !req
->context
) {
611 dev
->no_tx_req_used
--;
613 in
= dev
->port_usb
->in_ep
;
615 if (!list_empty(&dev
->tx_reqs
)) {
616 new_req
= container_of(dev
->tx_reqs
.next
,
617 struct usb_request
, list
);
618 list_del(&new_req
->list
);
619 spin_unlock(&dev
->req_lock
);
620 if (new_req
->length
> 0) {
621 length
= new_req
->length
;
623 /* NCM requires no zlp if transfer is
625 if (dev
->port_usb
->is_fixed
&&
626 length
== dev
->port_usb
->fixed_in_len
&&
627 (length
% in
->maxpacket
) == 0)
632 /* use zlp framing on tx for strict CDC-Ether
633 * conformance, though any robust network rx
634 * path ignores extra padding. and some hardware
635 * doesn't like to write zlps.
637 if (new_req
->zero
&& !dev
->zlp
&&
638 (length
% in
->maxpacket
) == 0) {
643 new_req
->length
= length
;
644 retval
= usb_ep_queue(in
, new_req
, GFP_ATOMIC
);
647 DBG(dev
, "tx queue err %d\n", retval
);
649 spin_lock(&dev
->req_lock
);
650 list_add_tail(&new_req
->list
,
652 spin_unlock(&dev
->req_lock
);
655 spin_lock(&dev
->req_lock
);
656 dev
->no_tx_req_used
++;
657 spin_unlock(&dev
->req_lock
);
658 net
->trans_start
= jiffies
;
661 spin_lock(&dev
->req_lock
);
663 * Put the idle request at the back of the
664 * queue. The xmit function will put the
665 * unfinished request at the beginning of the
668 list_add_tail(&new_req
->list
, &dev
->tx_reqs
);
669 spin_unlock(&dev
->req_lock
);
672 spin_unlock(&dev
->req_lock
);
676 /* Is aggregation already enabled and buffers allocated ? */
677 if (dev
->port_usb
->multi_pkt_xfer
&& dev
->tx_req_bufsize
) {
678 req
->buf
= kzalloc(dev
->tx_req_bufsize
, GFP_ATOMIC
);
684 spin_unlock(&dev
->req_lock
);
685 dev_kfree_skb_any(skb
);
688 if (netif_carrier_ok(dev
->net
))
690 spin_lock(&dev
->req_lock
);
691 if(dev
->no_tx_req_used
< tx_wakeup_threshold
)
692 netif_wake_queue(dev
->net
);
693 spin_unlock(&dev
->req_lock
);
698 static inline int is_promisc(u16 cdc_filter
)
700 return cdc_filter
& USB_CDC_PACKET_TYPE_PROMISCUOUS
;
703 static int alloc_tx_buffer(struct eth_dev
*dev
)
705 struct list_head
*act
;
706 struct usb_request
*req
;
708 dev
->tx_req_bufsize
= (dev
->dl_max_pkts_per_xfer
*
710 + sizeof(struct ethhdr
)
711 /* size of rndis_packet_msg_type */
715 list_for_each(act
, &dev
->tx_reqs
) {
716 req
= container_of(act
, struct usb_request
, list
);
718 req
->buf
= kzalloc(dev
->tx_req_bufsize
,
723 /* req->context is not used for multi_pkt_xfers */
729 /* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
730 dev
->tx_req_bufsize
= 0;
731 list_for_each(act
, &dev
->tx_reqs
) {
732 req
= container_of(act
, struct usb_request
, list
);
739 static netdev_tx_t
eth_start_xmit(struct sk_buff
*skb
,
740 struct net_device
*net
)
742 struct eth_dev
*dev
= netdev_priv(net
);
743 int length
= skb
->len
;
745 struct usb_request
*req
= NULL
;
749 bool multi_pkt_xfer
= false;
752 static unsigned int okCnt
= 0, busyCnt
= 0;
753 static int firstShot
= 1, diffSec
;
754 static struct timeval tv_last
, tv_cur
;
756 spin_lock_irqsave(&dev
->lock
, flags
);
758 in
= dev
->port_usb
->in_ep
;
759 cdc_filter
= dev
->port_usb
->cdc_filter
;
760 multi_pkt_xfer
= dev
->port_usb
->multi_pkt_xfer
;
765 spin_unlock_irqrestore(&dev
->lock
, flags
);
768 dev_kfree_skb_any(skb
);
772 /* Allocate memory for tx_reqs to support multi packet transfer */
773 spin_lock_irqsave(&dev
->req_lock
, flags
);
774 if (multi_pkt_xfer
&& !dev
->tx_req_bufsize
) {
775 retval
= alloc_tx_buffer(dev
);
777 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
781 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
783 rndis_test_tx_net_in
++ ;
785 /* apply outgoing CDC or RNDIS filters */
786 if (!is_promisc(cdc_filter
)) {
787 u8
*dest
= skb
->data
;
789 if (is_multicast_ether_addr(dest
)) {
792 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
793 * SET_ETHERNET_MULTICAST_FILTERS requests
795 if (is_broadcast_ether_addr(dest
))
796 type
= USB_CDC_PACKET_TYPE_BROADCAST
;
798 type
= USB_CDC_PACKET_TYPE_ALL_MULTICAST
;
799 if (!(cdc_filter
& type
)) {
800 dev_kfree_skb_any(skb
);
801 pr_warning("cdc_filter error, cdc_filter is 0x%x , type is 0x%x \n", cdc_filter
, type
);
805 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
808 spin_lock_irqsave(&dev
->req_lock
, flags
);
810 * this freelist can be empty if an interrupt triggered disconnect()
811 * and reconfigured the gadget (shutting down this queue) after the
812 * network stack decided to xmit but before we got the spinlock.
814 if (list_empty(&dev
->tx_reqs
)) {
817 do_gettimeofday(&tv_cur
);
823 printk(KERN_ERR
"%s, NETDEV_TX_BUSY returned at firstShot , okCnt : %u, busyCnt : %u\n", __func__
, okCnt
, busyCnt
);
827 diffSec
= tv_cur
.tv_sec
- tv_last
.tv_sec
;
831 printk(KERN_ERR
"%s, NETDEV_TX_BUSY returned, okCnt : %u, busyCnt : %u\n", __func__
, okCnt
, busyCnt
);
835 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
837 rndis_test_tx_busy
++ ;
838 return NETDEV_TX_BUSY
;
842 req
= container_of(dev
->tx_reqs
.next
, struct usb_request
, list
);
843 list_del(&req
->list
);
845 /* temporarily stop TX queue when the freelist empties */
846 if (list_empty(&dev
->tx_reqs
))
847 netif_stop_queue(net
);
848 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
850 /* no buffer copies needed, unless the network stack did it
851 * or the hardware can't use skb buffers.
852 * or there's not enough space for extra headers we need
854 spin_lock_irqsave(&dev
->lock
, flags
);
857 skb
= dev
->wrap(dev
->port_usb
, skb
);
859 spin_unlock_irqrestore(&dev
->lock
, flags
);
864 if (multi_pkt_xfer
) {
866 pr_debug("req->length:%d header_len:%u\n"
867 "skb->len:%d skb->data_len:%d\n",
868 req
->length
, dev
->header_len
,
869 skb
->len
, skb
->data_len
);
871 if (dev
->port_usb
== NULL
)
873 dev_kfree_skb_any(skb
);
874 pr_debug("eth_start_xmit, port_usb becomes NULL\n");
877 /* Add RNDIS Header */
878 memcpy(req
->buf
+ req
->length
, dev
->port_usb
->header
,
880 /* Increment req length by header size */
881 req
->length
+= dev
->header_len
;
882 spin_unlock_irqrestore(&dev
->lock
, flags
);
883 /* Copy received IP data from SKB */
884 memcpy(req
->buf
+ req
->length
, skb
->data
, skb
->len
);
885 /* Increment req length by skb data length */
886 req
->length
+= skb
->len
;
887 length
= req
->length
;
888 dev_kfree_skb_any(skb
);
890 spin_lock_irqsave(&dev
->req_lock
, flags
);
891 dev
->tx_skb_hold_count
++;
892 //if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
893 if ((dev
->tx_skb_hold_count
< dev
->dl_max_pkts_per_xfer
) && (length
< (dev
->port_usb
->dl_max_transfer_len
- dev
->net
->mtu
))) {
894 if (dev
->no_tx_req_used
> TX_REQ_THRESHOLD
) {
895 list_add(&req
->list
, &dev
->tx_reqs
);
896 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
901 dev
->no_tx_req_used
++;
902 dev
->tx_skb_hold_count
= 0;
903 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
905 spin_unlock_irqrestore(&dev
->lock
, flags
);
907 req
->buf
= skb
->data
;
911 if (dev
->port_usb
== NULL
)
914 dev_kfree_skb_any(skb
);
915 pr_debug("eth_start_xmit, port_usb becomes NULL\n");
919 /* NCM requires no zlp if transfer is dwNtbInMaxSize */
920 if (dev
->port_usb
->is_fixed
&&
921 length
== dev
->port_usb
->fixed_in_len
&&
922 (length
% in
->maxpacket
) == 0)
927 /* use zlp framing on tx for strict CDC-Ether conformance,
928 * though any robust network rx path ignores extra padding.
929 * and some hardware doesn't like to write zlps.
931 if (req
->zero
&& !dev
->zlp
&& (length
% in
->maxpacket
) == 0) {
936 req
->length
= length
;
938 /* throttle high/super speed IRQ rate back slightly */
939 if (gadget_is_dualspeed(dev
->gadget
) &&
940 (dev
->gadget
->speed
== USB_SPEED_HIGH
||
941 dev
->gadget
->speed
== USB_SPEED_SUPER
)) {
943 if (dev
->tx_qlen
== (qmult
/2)) {
944 req
->no_interrupt
= 0;
947 req
->no_interrupt
= 1;
950 req
->no_interrupt
= 0;
952 rndis_test_tx_usb_out
++ ;
954 retval
= usb_ep_queue(in
, req
, GFP_ATOMIC
);
957 DBG(dev
, "tx queue err %d\n", retval
);
958 pr_debug("[XLOG_INFO][UTHER]eth_start_xmit : tx queue err %d\n", retval
);
961 net
->trans_start
= jiffies
;
966 dev_kfree_skb_any(skb
);
970 dev
->net
->stats
.tx_dropped
++;
971 spin_lock_irqsave(&dev
->req_lock
, flags
);
972 if (list_empty(&dev
->tx_reqs
))
973 netif_start_queue(net
);
974 list_add_tail(&req
->list
, &dev
->tx_reqs
);
975 spin_unlock_irqrestore(&dev
->req_lock
, flags
);
981 /*-------------------------------------------------------------------------*/
/*
 * eth_start - open the data path once the link carrier is on: prime
 * the RX queue with requests and let the netdev TX queue run.
 * Called from eth_open() and gether_connect().
 */
983 static void eth_start(struct eth_dev
*dev
, gfp_t gfp_flags
)
985 DBG(dev
, "%s\n", __func__
);
986 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__
);
988 /* fill the rx queue */
989 rx_fill(dev
, gfp_flags
);
991 /* and open the tx floodgates */
993 netif_wake_queue(dev
->net
);
996 static int eth_open(struct net_device
*net
)
998 struct eth_dev
*dev
= netdev_priv(net
);
1001 DBG(dev
, "%s\n", __func__
);
1002 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__
);
1004 if (netif_carrier_ok(dev
->net
))
1005 eth_start(dev
, GFP_KERNEL
);
1007 spin_lock_irq(&dev
->lock
);
1008 link
= dev
->port_usb
;
1009 if (link
&& link
->open
)
1011 spin_unlock_irq(&dev
->lock
);
1016 static int eth_stop(struct net_device
*net
)
1018 struct eth_dev
*dev
= netdev_priv(net
);
1019 unsigned long flags
;
1021 VDBG(dev
, "%s\n", __func__
);
1022 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__
);
1024 netif_stop_queue(net
);
1026 DBG(dev
, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
1027 dev
->net
->stats
.rx_packets
, dev
->net
->stats
.tx_packets
,
1028 dev
->net
->stats
.rx_errors
, dev
->net
->stats
.tx_errors
1031 /* ensure there are no more active requests */
1032 spin_lock_irqsave(&dev
->lock
, flags
);
1033 if (dev
->port_usb
) {
1034 struct gether
*link
= dev
->port_usb
;
1035 const struct usb_endpoint_descriptor
*in
;
1036 const struct usb_endpoint_descriptor
*out
;
1041 /* NOTE: we have no abort-queue primitive we could use
1042 * to cancel all pending I/O. Instead, we disable then
1043 * reenable the endpoints ... this idiom may leave toggle
1044 * wrong, but that's a self-correcting error.
1046 * REVISIT: we *COULD* just let the transfers complete at
1047 * their own pace; the network stack can handle old packets.
1048 * For the moment we leave this here, since it works.
1050 in
= link
->in_ep
->desc
;
1051 out
= link
->out_ep
->desc
;
1052 usb_ep_disable(link
->in_ep
);
1053 usb_ep_disable(link
->out_ep
);
1054 if (netif_carrier_ok(net
)) {
1055 DBG(dev
, "host still using in/out endpoints\n");
1056 link
->in_ep
->desc
= in
;
1057 link
->out_ep
->desc
= out
;
1058 usb_ep_enable(link
->in_ep
);
1059 usb_ep_enable(link
->out_ep
);
1062 spin_unlock_irqrestore(&dev
->lock
, flags
);
1067 /*-------------------------------------------------------------------------*/
1069 /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
1070 static char *dev_addr
;
1071 module_param(dev_addr
, charp
, S_IRUGO
);
1072 MODULE_PARM_DESC(dev_addr
, "Device Ethernet Address");
1074 /* this address is invisible to ifconfig */
1075 static char *host_addr
;
1076 module_param(host_addr
, charp
, S_IRUGO
);
1077 MODULE_PARM_DESC(host_addr
, "Host Ethernet Address");
1079 static int get_ether_addr(const char *str
, u8
*dev_addr
)
1084 for (i
= 0; i
< 6; i
++) {
1087 if ((*str
== '.') || (*str
== ':'))
1089 num
= hex_to_bin(*str
++) << 4;
1090 num
|= hex_to_bin(*str
++);
1093 if (is_valid_ether_addr(dev_addr
))
1096 eth_random_addr(dev_addr
);
1100 static const struct net_device_ops eth_netdev_ops
= {
1101 .ndo_open
= eth_open
,
1102 .ndo_stop
= eth_stop
,
1103 .ndo_start_xmit
= eth_start_xmit
,
1104 .ndo_change_mtu
= ueth_change_mtu
,
1105 .ndo_set_mac_address
= eth_mac_addr
,
1106 .ndo_validate_addr
= eth_validate_addr
,
1109 static struct device_type gadget_type
= {
1114 * gether_setup_name - initialize one ethernet-over-usb link
1115 * @g: gadget to associated with these links
1116 * @ethaddr: NULL, or a buffer in which the ethernet address of the
1117 * host side of the link is recorded
1118 * @netname: name for network device (for example, "usb")
1119 * Context: may sleep
1121 * This sets up the single network link that may be exported by a
1122 * gadget driver using this framework. The link layer addresses are
1123 * set up using module parameters.
1125 * Returns negative errno, or zero on success
1127 struct eth_dev
*gether_setup_name(struct usb_gadget
*g
, u8 ethaddr
[ETH_ALEN
],
1128 const char *netname
)
1130 struct eth_dev
*dev
;
1131 struct net_device
*net
;
1134 net
= alloc_etherdev(sizeof *dev
);
1136 return ERR_PTR(-ENOMEM
);
1138 dev
= netdev_priv(net
);
1139 spin_lock_init(&dev
->lock
);
1140 spin_lock_init(&dev
->req_lock
);
1141 spin_lock_init(&dev
->reqrx_lock
);
1142 INIT_WORK(&dev
->work
, eth_work
);
1143 INIT_WORK(&dev
->rx_work
, process_rx_w
);
1144 INIT_WORK(&dev
->rx_work1
, process_rx_w1
);
1145 INIT_LIST_HEAD(&dev
->tx_reqs
);
1146 INIT_LIST_HEAD(&dev
->rx_reqs
);
1148 skb_queue_head_init(&dev
->rx_frames
);
1150 /* network device setup */
1152 snprintf(net
->name
, sizeof(net
->name
), "%s%%d", netname
);
1154 if (get_ether_addr(dev_addr
, net
->dev_addr
))
1156 "using random %s ethernet address\n", "self");
1157 if (get_ether_addr(host_addr
, dev
->host_mac
))
1159 "using random %s ethernet address\n", "host");
1162 memcpy(ethaddr
, dev
->host_mac
, ETH_ALEN
);
1164 net
->netdev_ops
= ð_netdev_ops
;
1166 SET_ETHTOOL_OPS(net
, &ops
);
1169 SET_NETDEV_DEV(net
, &g
->dev
);
1170 SET_NETDEV_DEVTYPE(net
, &gadget_type
);
1172 status
= register_netdev(net
);
1174 dev_dbg(&g
->dev
, "register_netdev failed, %d\n", status
);
1176 dev
= ERR_PTR(status
);
1178 INFO(dev
, "MAC %pM\n", net
->dev_addr
);
1179 INFO(dev
, "HOST MAC %pM\n", dev
->host_mac
);
1181 /* two kinds of host-initiated state changes:
1182 * - iff DATA transfer is active, carrier is "on"
1183 * - tx queueing enabled if open *and* carrier is "on"
1185 netif_carrier_off(net
);
1192 * gether_cleanup - remove Ethernet-over-USB device
1193 * Context: may sleep
1195 * This is called to free all resources allocated by @gether_setup().
1197 void gether_cleanup(struct eth_dev
*dev
)
1202 unregister_netdev(dev
->net
);
1203 flush_work(&dev
->work
);
1204 free_netdev(dev
->net
);
1208 * gether_connect - notify network layer that USB link is active
1209 * @link: the USB link, set up with endpoints, descriptors matching
1210 * current device speed, and any framing wrapper(s) set up.
1211 * Context: irqs blocked
1213 * This is called to activate endpoints and let the network layer know
1214 * the connection is active ("carrier detect"). It may cause the I/O
1215 * queues to open and start letting network packets flow, but will in
1216 * any case activate the endpoints so that they respond properly to the
1219 * Verify net_device pointer returned using IS_ERR(). If it doesn't
1220 * indicate some error code (negative errno), ep->driver_data values
1221 * have been overwritten.
1223 struct net_device
*gether_connect(struct gether
*link
)
1225 struct eth_dev
*dev
= link
->ioport
;
1229 return ERR_PTR(-EINVAL
);
1231 link
->header
= kzalloc(sizeof(struct rndis_packet_msg_type
),
1233 if (!link
->header
) {
1234 pr_err("RNDIS header memory allocation failed.\n");
1239 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__
);
1241 link
->in_ep
->driver_data
= dev
;
1242 result
= usb_ep_enable(link
->in_ep
);
1244 DBG(dev
, "enable %s --> %d\n",
1245 link
->in_ep
->name
, result
);
1249 link
->out_ep
->driver_data
= dev
;
1250 result
= usb_ep_enable(link
->out_ep
);
1252 DBG(dev
, "enable %s --> %d\n",
1253 link
->out_ep
->name
, result
);
1259 result
= alloc_tx_requests(dev
, link
, qlen(dev
->gadget
));
1261 result
= alloc_rx_requests(dev
, link
, qlenrx(dev
->gadget
));
1265 dev
->zlp
= link
->is_zlp_ok
;
1266 DBG(dev
, "qlen %d\n", qlen(dev
->gadget
));
1268 dev
->header_len
= link
->header_len
;
1269 dev
->unwrap
= link
->unwrap
;
1270 dev
->wrap
= link
->wrap
;
1271 dev
->ul_max_pkts_per_xfer
= link
->ul_max_pkts_per_xfer
;
1272 dev
->dl_max_pkts_per_xfer
= link
->dl_max_pkts_per_xfer
;
1274 spin_lock(&dev
->lock
);
1275 dev
->tx_skb_hold_count
= 0;
1276 dev
->no_tx_req_used
= 0;
1277 dev
->tx_req_bufsize
= 0;
1278 dev
->port_usb
= link
;
1279 if (netif_running(dev
->net
)) {
1286 spin_unlock(&dev
->lock
);
1288 netif_carrier_on(dev
->net
);
1289 if (netif_running(dev
->net
))
1290 eth_start(dev
, GFP_ATOMIC
);
1292 /* on error, disable any endpoints */
1294 (void) usb_ep_disable(link
->out_ep
);
1296 (void) usb_ep_disable(link
->in_ep
);
1299 /* caller is responsible for cleanup on error */
1302 kfree(link
->header
);
1304 return ERR_PTR(result
);
1311 * gether_disconnect - notify network layer that USB link is inactive
1312 * @link: the USB link, on which gether_connect() was called
1313 * Context: irqs blocked
1315 * This is called to deactivate endpoints and let the network layer know
1316 * the connection went inactive ("no carrier").
1318 * On return, the state is as if gether_connect() had never been called.
1319 * The endpoints are inactive, and accordingly without active USB I/O.
1320 * Pointers to endpoint descriptors and endpoint private data are nulled.
1322 void gether_disconnect(struct gether
*link
)
1324 struct eth_dev
*dev
= link
->ioport
;
1325 struct usb_request
*req
;
1326 struct sk_buff
*skb
;
1332 DBG(dev
, "%s\n", __func__
);
1333 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__
);
1335 rndis_test_rx_usb_in
= 0 ;
1336 rndis_test_rx_net_out
= 0 ;
1337 rndis_test_rx_nomem
= 0 ;
1338 rndis_test_rx_error
= 0 ;
1340 rndis_test_tx_net_in
= 0 ;
1341 rndis_test_tx_busy
= 0 ;
1342 rndis_test_tx_usb_out
= 0 ;
1343 rndis_test_tx_complete
= 0 ;
1345 netif_stop_queue(dev
->net
);
1346 netif_carrier_off(dev
->net
);
1348 /* disable endpoints, forcing (synchronous) completion
1349 * of all pending i/o. then free the request objects
1350 * and forget about the endpoints.
1352 usb_ep_disable(link
->in_ep
);
1353 spin_lock(&dev
->req_lock
);
1354 while (!list_empty(&dev
->tx_reqs
)) {
1355 req
= container_of(dev
->tx_reqs
.next
,
1356 struct usb_request
, list
);
1357 list_del(&req
->list
);
1359 spin_unlock(&dev
->req_lock
);
1360 if (link
->multi_pkt_xfer
) {
1364 usb_ep_free_request(link
->in_ep
, req
);
1365 spin_lock(&dev
->req_lock
);
1367 /* Free rndis header buffer memory */
1368 kfree(link
->header
);
1369 link
->header
= NULL
;
1370 spin_unlock(&dev
->req_lock
);
1371 link
->in_ep
->driver_data
= NULL
;
1372 link
->in_ep
->desc
= NULL
;
1374 usb_ep_disable(link
->out_ep
);
1375 spin_lock(&dev
->reqrx_lock
);
1376 while (!list_empty(&dev
->rx_reqs
)) {
1377 req
= container_of(dev
->rx_reqs
.next
,
1378 struct usb_request
, list
);
1379 list_del(&req
->list
);
1381 spin_unlock(&dev
->reqrx_lock
);
1382 usb_ep_free_request(link
->out_ep
, req
);
1383 spin_lock(&dev
->reqrx_lock
);
1385 spin_unlock(&dev
->reqrx_lock
);
1387 spin_lock(&dev
->rx_frames
.lock
);
1388 while ((skb
= __skb_dequeue(&dev
->rx_frames
)))
1389 dev_kfree_skb_any(skb
);
1390 spin_unlock(&dev
->rx_frames
.lock
);
1392 link
->out_ep
->driver_data
= NULL
;
1393 link
->out_ep
->desc
= NULL
;
1395 /* finish forgetting about this USB link episode */
1396 dev
->header_len
= 0;
1400 spin_lock(&dev
->lock
);
1401 dev
->port_usb
= NULL
;
1402 spin_unlock(&dev
->lock
);
1405 static int __init
gether_init(void)
1407 uether_wq
= create_singlethread_workqueue("uether");
1409 pr_err("%s: Unable to create workqueue: uether\n", __func__
);
1412 uether_wq1
= create_singlethread_workqueue("uether_rx1");
1414 destroy_workqueue(uether_wq
);
1415 pr_err("%s: Unable to create workqueue: uether\n", __func__
);
1420 module_init(gether_init
);
1422 static void __exit
gether_exit(void)
1424 destroy_workqueue(uether_wq
);
1425 destroy_workqueue(uether_wq1
);
1427 module_exit(gether_exit
);
1428 MODULE_DESCRIPTION("ethernet over USB driver");
1429 MODULE_LICENSE("GPL v2");