/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "u_ether.h"
/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet (ECM or EEM),
 * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
 * management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
#define UETH__VERSION	"29-May-2008"
struct eth_dev {
	/* lock is held while accessing port_usb
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	struct sk_buff_head	rx_frames;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
	int			(*unwrap)(struct gether *,
						struct sk_buff *skb,
						struct sk_buff_head *list);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};
/*-------------------------------------------------------------------------*/
#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
/* for dual-speed hardware, use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
					    gadget->speed == USB_SPEED_SUPER))
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
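
/* Example: with the default qmult of 5, a high- or super-speed link
 * keeps up to qmult * DEFAULT_QLEN == 5 * 2 == 10 requests in flight
 * per direction; full-speed links stay at DEFAULT_QLEN (2).
 */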
/*-------------------------------------------------------------------------*/
/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)
/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof(p->driver));
	strlcpy(p->version, UETH__VERSION, sizeof(p->version));
	strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
}
/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static const struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req);
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;
	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
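
	/* Worked example (assuming header_len == 0): a 1500 byte MTU with
	 * a 512 byte high-speed bulk maxpacket starts at 14 + 1500 + 20 ==
	 * 1534 bytes, and the round-up-to-maxpacket step above yields
	 * 1536, i.e. three full 512 byte packets.
	 */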
	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context, *skb2;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}
		skb = NULL;

		skb2 = skb_dequeue(&dev->rx_frames);
		while (skb2) {
			if (status < 0
					|| ETH_HLEN > skb2->len
					|| skb2->len > VLAN_ETH_FRAME_LEN) {
				dev->net->stats.rx_errors++;
				dev->net->stats.rx_length_errors++;
				DBG(dev, "rx length %d\n", skb2->len);
				dev_kfree_skb_any(skb2);
				goto next_frame;
			}
			skb2->protocol = eth_type_trans(skb2, dev->net);
			dev->net->stats.rx_packets++;
			dev->net->stats.rx_bytes += skb2->len;

			/* no buffer copies needed, unless hardware can't
			 * use skb buffers.
			 */
			status = netif_rx(skb2);
next_frame:
			skb2 = skb_dequeue(&dev->rx_frames);
		}
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}
static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}
static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}
static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}
static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}
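
	/* For reference: cdc_filter holds the bitmap from the host's
	 * SET_ETHERNET_PACKET_FILTER request, so a host that enabled only
	 * DIRECTED|BROADCAST sees non-broadcast multicast frames dropped
	 * right here.
	 */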
	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);
	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers,
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;
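
	/* Worked example of the zlp workaround above: a 1024 byte frame on
	 * a 512 byte bulk-in endpoint, with a controller that can't send
	 * zlps (dev->zlp == 0), goes out as 1025 bytes; the short final
	 * packet then marks end-of-transfer in place of a zlp.
	 */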
	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;
	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
/*-------------------------------------------------------------------------*/
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}
static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}
static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;
		const struct usb_endpoint_descriptor *in;
		const struct usb_endpoint_descriptor *out;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		in = link->in_ep->desc;
		out = link->out_ep->desc;
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			link->in_ep->desc = in;
			link->out_ep->desc = out;
			usb_ep_enable(link->in_ep);
			usb_ep_enable(link->out_ep);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/*-------------------------------------------------------------------------*/
/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
static int get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = hex_to_bin(*str++) << 4;
			num |= hex_to_bin(*str++);
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	eth_random_addr(dev_addr);
	return 1;
}
static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static struct device_type gadget_type = {
	.name	= "gadget",
};
/**
 * gether_setup_name - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * @netname: name for network device (for example, "usb")
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
 */
struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
		const char *netname)
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return ERR_PTR(-ENOMEM);

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	skb_queue_head_init(&dev->rx_frames);

	/* network device setup */
	dev->net = net;
	snprintf(net->name, sizeof(net->name), "%s%%d", netname);

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);
	SET_NETDEV_DEVTYPE(net, &gadget_type);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
		dev = ERR_PTR(status);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		/* two kinds of host-initiated state changes:
		 *  - iff DATA transfer is active, carrier is "on"
		 *  - tx queueing enabled if open *and* carrier is "on"
		 */
		netif_carrier_off(net);
	}

	return dev;
}
/**
 * gether_cleanup - remove Ethernet-over-USB device
 * @dev: device created by gether_setup_name()
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(struct eth_dev *dev)
{
	if (!dev)
		return;

	unregister_netdev(dev->net);
	flush_work(&dev->work);
	free_netdev(dev->net);
}
/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
fail1:
		(void) usb_ep_disable(link->out_ep);
fail0:
		(void) usb_ep_disable(link->in_ep);
	}

	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);

	return dev->net;
}
/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in_ep->desc = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out_ep->desc = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	spin_unlock(&dev->lock);
}