1 /*
2 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
3 *
4 * Copyright (C) 2003-2005,2008 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 * Copyright (C) 2008 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 /* #define VERBOSE_DEBUG */
15 #ifdef pr_fmt
16 #undef pr_fmt
17 #endif
18 #define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt
19
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/gfp.h>
23 #include <linux/device.h>
24 #include <linux/ctype.h>
25 #include <linux/etherdevice.h>
26 #include <linux/ethtool.h>
27 #include <linux/if_vlan.h>
28 #include "u_ether.h"
29
30 /*
31 * This component encapsulates the Ethernet link glue needed to provide
32 * one (!) network link through the USB gadget stack, normally "usb0".
33 *
34 * The control and data models are handled by the function driver which
35 * connects to this code; such as CDC Ethernet (ECM or EEM),
36 * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
37 * management.
38 *
39 * Link level addressing is handled by this component using module
40 * parameters; if no such parameters are provided, random link level
41 * addresses are used. Each end of the link uses one address. The
42 * host end address is exported in various ways, and is often recorded
43 * in configuration databases.
44 *
45 * The driver which assembles each configuration using such a link is
46 * responsible for ensuring that each configuration includes at most one
47 * instance of this network link. (The network layer provides ways for
48 * this single "physical" link to be used by multiple virtual links.)
49 */
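/*
 * Typical call flow from a function driver, shown as an illustrative
 * sketch only (the function-driver fields and callback names below are
 * hypothetical; only the gether_* calls are defined in this file):
 *
 *	dev = gether_setup_name(gadget, host_mac, "usb");
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	set_alt():  endpoints chosen, framing hooks filled in, then
 *	net = gether_connect(&func->port);	carrier on, I/O starts
 *	...
 *	disable():  host dropped the data interface, so
 *	gether_disconnect(&func->port);		carrier off, requests freed
 *	...
 *	gether_cleanup(dev);			on function unbind
 */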
50
51 #define UETH__VERSION "29-May-2008"
52
53 static struct workqueue_struct *uether_wq;
54 static struct workqueue_struct *uether_wq1;
55
56
57 struct eth_dev {
58 /* lock is held while accessing port_usb
59 */
60 spinlock_t lock;
61 struct gether *port_usb;
62
63 struct net_device *net;
64 struct usb_gadget *gadget;
65
66 spinlock_t req_lock; /* guard {tx}_reqs */
67 spinlock_t reqrx_lock; /* guard {rx}_reqs */
68 struct list_head tx_reqs, rx_reqs;
69 unsigned tx_qlen;
70 /* Minimum number of TX USB requests queued to the UDC */
71 #define TX_REQ_THRESHOLD 5
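/* no_tx_req_used counts TX requests currently queued to the UDC;
 * tx_skb_hold_count counts packets packed into the request being
 * filled; tx_req_bufsize is the per-request aggregation buffer size
 * (0 until alloc_tx_buffer() runs). Once more than TX_REQ_THRESHOLD
 * requests are already in flight, eth_start_xmit() parks the current
 * request so further packets can be packed into it.
 */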
72 int no_tx_req_used;
73 int tx_skb_hold_count;
74 u32 tx_req_bufsize;
75
76 struct sk_buff_head rx_frames;
77
78 unsigned header_len;
79 unsigned int ul_max_pkts_per_xfer;
80 unsigned int dl_max_pkts_per_xfer;
81 struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
82 int (*unwrap)(struct gether *,
83 struct sk_buff *skb,
84 struct sk_buff_head *list);
85
86 struct work_struct work;
87 struct work_struct rx_work;
88 struct work_struct rx_work1;
89 unsigned long todo;
90 #define WORK_RX_MEMORY 0
91
92 bool zlp;
93 u8 host_mac[ETH_ALEN];
94 };
95
96 /*-------------------------------------------------------------------------*/
97
98 #define RX_EXTRA 20 /* bytes guarding against rx overflows */
99
100 #define DEFAULT_QLEN 2 /* double buffering by default */
101
102 static unsigned qmult = 10;
103 module_param(qmult, uint, S_IRUGO|S_IWUSR);
104 MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
105
106 static unsigned tx_wakeup_threshold = 13;
107 module_param(tx_wakeup_threshold, uint, S_IRUGO|S_IWUSR);
108 MODULE_PARM_DESC(tx_wakeup_threshold, "tx wakeup threshold value");
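/* tx_complete() wakes the TX queue only while fewer than
 * tx_wakeup_threshold requests remain queued to the UDC.
 */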
109
110 /* for dual-speed hardware, use deeper queues at high/super speed */
111 static inline int qlen(struct usb_gadget *gadget)
112 {
113 if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
114 gadget->speed == USB_SPEED_SUPER))
115 return qmult * DEFAULT_QLEN;
116 else
117 return DEFAULT_QLEN;
118 }
119
120 static inline int qlenrx(struct usb_gadget *gadget)
121 {
122 if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
123 gadget->speed == USB_SPEED_SUPER))
124 return qmult * DEFAULT_QLEN;
125
126 else
127 return DEFAULT_QLEN;
128 }
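/* With the defaults above this gives qmult * DEFAULT_QLEN = 10 * 2 = 20
 * requests per direction at high/super speed, and 2 at full speed.
 */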
129
130 /*-------------------------------------------------------------------------*/
131
132 /* REVISIT there must be a better way than having two sets
133 * of debug calls ...
134 */
135
136 #undef DBG
137 #undef VDBG
138 #undef ERROR
139 #undef INFO
140
141 #define xprintk(d, level, fmt, args...) \
142 printk(level "%s: " fmt , (d)->net->name , ## args)
143
144 #ifdef DEBUG
145 #undef DEBUG
146 #define DBG(dev, fmt, args...) \
147 xprintk(dev , KERN_DEBUG , fmt , ## args)
148 #else
149 #define DBG(dev, fmt, args...) \
150 do { } while (0)
151 #endif /* DEBUG */
152
153 #ifdef VERBOSE_DEBUG
154 #define VDBG DBG
155 #else
156 #define VDBG(dev, fmt, args...) \
157 do { } while (0)
158 #endif /* DEBUG */
159
160 #define ERROR(dev, fmt, args...) \
161 xprintk(dev , KERN_ERR , fmt , ## args)
162 #define INFO(dev, fmt, args...) \
163 xprintk(dev , KERN_INFO , fmt , ## args)
164
165 /*-------------------------------------------------------------------------*/
166
167 unsigned long rndis_test_rx_usb_in = 0;
168 unsigned long rndis_test_rx_net_out = 0;
169 unsigned long rndis_test_rx_nomem = 0;
170 unsigned long rndis_test_rx_error = 0;
171
172 unsigned long rndis_test_tx_net_in = 0;
173 unsigned long rndis_test_tx_busy = 0;
174 unsigned long rndis_test_tx_usb_out = 0;
175 unsigned long rndis_test_tx_complete = 0;
176
177 /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
178
179 static int ueth_change_mtu(struct net_device *net, int new_mtu)
180 {
181 struct eth_dev *dev = netdev_priv(net);
182 unsigned long flags;
183 int status = 0;
184
185 /* don't change MTU on "live" link (peer won't know) */
186 spin_lock_irqsave(&dev->lock, flags);
187 if (dev->port_usb)
188 status = -EBUSY;
189 else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
190 status = -ERANGE;
191 else
192 net->mtu = new_mtu;
193 spin_unlock_irqrestore(&dev->lock, flags);
194
195 pr_debug("[XLOG_INFO][UTHER]ueth_change_mtu to %d, status is %d\n", new_mtu , status);
196
197 return status;
198 }
199
200 static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
201 {
202 struct eth_dev *dev = netdev_priv(net);
203
204 strlcpy(p->driver, "g_ether", sizeof(p->driver));
205 strlcpy(p->version, UETH__VERSION, sizeof(p->version));
206 strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
207 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
208 }
209
210 /* REVISIT can also support:
211 * - WOL (by tracking suspends and issuing remote wakeup)
212 * - msglevel (implies updated messaging)
213 * - ... probably more ethtool ops
214 */
215
216 static const struct ethtool_ops ops = {
217 .get_drvinfo = eth_get_drvinfo,
218 .get_link = ethtool_op_get_link,
219 };
220
221 static void defer_kevent(struct eth_dev *dev, int flag)
222 {
223 if (test_and_set_bit(flag, &dev->todo))
224 return;
225 if (!schedule_work(&dev->work))
226 ERROR(dev, "kevent %d may have been dropped\n", flag);
227 else
228 DBG(dev, "kevent %d scheduled\n", flag);
229 }
230
231 static void rx_complete(struct usb_ep *ep, struct usb_request *req);
232 static void tx_complete(struct usb_ep *ep, struct usb_request *req);
233
234 static int
235 rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
236 {
237 struct sk_buff *skb;
238 int retval = -ENOMEM;
239 size_t size = 0;
240 struct usb_ep *out;
241 unsigned long flags;
242
243 spin_lock_irqsave(&dev->lock, flags);
244 if (dev->port_usb)
245 out = dev->port_usb->out_ep;
246 else
247 out = NULL;
248 spin_unlock_irqrestore(&dev->lock, flags);
249
250 if (!out)
251 return -ENOTCONN;
252
253
254 /* Padding up to RX_EXTRA handles minor disagreements with host.
255 * Normally we use the USB "terminate on short read" convention;
256 * so allow up to (N*maxpacket), since that memory is normally
257 * already allocated. Some hardware doesn't deal well with short
258 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
259 * byte off the end (to force hardware errors on overflow).
260 *
261 * RNDIS uses internal framing, and explicitly allows senders to
262 * pad to end-of-packet. That's potentially nice for speed, but
263 * means receivers can't recover lost synch on their own (because
264 * new packets don't only start after a short RX).
265 */
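/* Example (values are illustrative): with mtu 1500, a 14 byte ethhdr,
 * RX_EXTRA 20 and an RNDIS header_len of 44, each packet slot needs
 * 1578 bytes; with ul_max_pkts_per_xfer = 3 the request buffer below
 * becomes 3 * 1578 = 4734 bytes.
 */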
266 size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
267 size += dev->port_usb->header_len;
268 /*
269 size += out->maxpacket - 1;
270 size -= size % out->maxpacket;
271 */
272 if (dev->ul_max_pkts_per_xfer)
273 size *= dev->ul_max_pkts_per_xfer;
274
275 if (dev->port_usb->is_fixed)
276 size = max_t(size_t, size, dev->port_usb->fixed_out_len);
277
278 pr_debug("%s: size: %d, mtu: %d, header_len: %d, maxpacket: %d, ul_max_pkts_per_xfer: %d",
279 __func__, (int)size, dev->net->mtu, dev->port_usb->header_len, out->maxpacket, dev->ul_max_pkts_per_xfer);
280 skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
281 if (skb == NULL) {
282 pr_debug("[XLOG_INFO][UTHER]rx_submit : no rx skb\n");
283 DBG(dev, "no rx skb\n");
284 rndis_test_rx_nomem ++ ;
285 goto enomem;
286 }
287
288 /* Some platforms perform better when IP packets are aligned,
289 * but on at least one, checksumming fails otherwise. Note:
290 * RNDIS headers involve variable numbers of LE32 values.
291 */
292 skb_reserve(skb, NET_IP_ALIGN);
293
294 req->buf = skb->data;
295 req->length = size;
296 req->context = skb;
297
298 retval = usb_ep_queue(out, req, gfp_flags);
299 if (retval == -ENOMEM)
300 enomem:
301 defer_kevent(dev, WORK_RX_MEMORY);
302 if (retval) {
303 DBG(dev, "rx submit --> %d\n", retval);
304 if (skb)
305 dev_kfree_skb_any(skb);
306 }
307 return retval;
308 }
309
310 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
311 {
312 struct sk_buff *skb = req->context;
313 struct eth_dev *dev = ep->driver_data;
314 int status = req->status;
315 bool queue = 0;
316
317 switch (status) {
318
319 /* normal completion */
320 case 0:
321 pr_debug("%s: transferred size: %d", __func__, req->actual);
322 skb_put(skb, req->actual);
323
324 if (dev->unwrap) {
325 unsigned long flags;
326
327 spin_lock_irqsave(&dev->lock, flags);
328 if (dev->port_usb) {
329 status = dev->unwrap(dev->port_usb,
330 skb,
331 &dev->rx_frames);
332 if (status == -EINVAL)
333 dev->net->stats.rx_errors++;
334 else if (status == -EOVERFLOW)
335 dev->net->stats.rx_over_errors++;
336 } else {
337 dev_kfree_skb_any(skb);
338 status = -ENOTCONN;
339 }
340 spin_unlock_irqrestore(&dev->lock, flags);
341 } else {
342 skb_queue_tail(&dev->rx_frames, skb);
343 }
344
345 if (!status)
346 queue = 1;
347
348 rndis_test_rx_usb_in ++ ;
349 break;
350
351 /* software-driven interface shutdown */
352 case -ECONNRESET: /* unlink */
353 case -ESHUTDOWN: /* disconnect etc */
354 VDBG(dev, "rx shutdown, code %d\n", status);
355 goto quiesce;
356
357 /* for hardware automagic (such as pxa) */
358 case -ECONNABORTED: /* endpoint reset */
359 DBG(dev, "rx %s reset\n", ep->name);
360 defer_kevent(dev, WORK_RX_MEMORY);
361 quiesce:
362 dev_kfree_skb_any(skb);
363 goto clean;
364
365 /* data overrun */
366 case -EOVERFLOW:
367 dev->net->stats.rx_over_errors++;
368 /* FALLTHROUGH */
369
370 default:
371 queue = 1;
372 dev_kfree_skb_any(skb);
373 dev->net->stats.rx_errors++;
374 DBG(dev, "rx status %d\n", status);
375 break;
376 }
377
378 clean:
379 spin_lock(&dev->reqrx_lock);
380 list_add(&req->list, &dev->rx_reqs);
381 spin_unlock(&dev->reqrx_lock);
382
383 if (queue)
384 {
385 queue_work(uether_wq, &dev->rx_work);
386 queue_work(uether_wq1, &dev->rx_work1);
387 }
388 }
389
390 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
391 {
392 unsigned i;
393 struct usb_request *req;
394 bool usb_in;
395
396 if (!n)
397 return -ENOMEM;
398
399 /* queue/recycle up to N requests */
400 i = n;
401 list_for_each_entry(req, list, list) {
402 if (i-- == 0)
403 goto extra;
404 }
405
406 if (ep->desc->bEndpointAddress & USB_DIR_IN)
407 usb_in = true;
408 else
409 usb_in = false;
410
411 while (i--) {
412 req = usb_ep_alloc_request(ep, GFP_ATOMIC);
413 if (!req)
414 return list_empty(list) ? -ENOMEM : 0;
415 /* update completion handler */
416 if (usb_in)
417 req->complete = tx_complete;
418 else
419 req->complete = rx_complete;
420
421 list_add(&req->list, list);
422 }
423 return 0;
424
425 extra:
426 /* free extras */
427 for (;;) {
428 struct list_head *next;
429
430 next = req->list.next;
431 list_del(&req->list);
432 usb_ep_free_request(ep, req);
433
434 if (next == list)
435 break;
436
437 req = container_of(next, struct usb_request, list);
438 }
439 return 0;
440 }
441
442 static int alloc_tx_requests(struct eth_dev *dev, struct gether *link, unsigned n)
443 {
444 int status;
445
446 spin_lock(&dev->req_lock);
447 status = prealloc(&dev->tx_reqs, link->in_ep, n);
448 if (status < 0)
449 goto fail;
450
451 goto done;
452 fail:
453 DBG(dev, "can't alloc tx requests\n");
454 pr_debug("[XLOG_INFO][UTHER]alloc_requests : can't alloc requests\n");
455 done:
456 spin_unlock(&dev->req_lock);
457 return status;
458 }
459 static int alloc_rx_requests(struct eth_dev *dev, struct gether *link, unsigned n)
460 {
461 int status;
462
463 spin_lock(&dev->reqrx_lock);
464
465 status = prealloc(&dev->rx_reqs, link->out_ep, n);
466 if (status < 0)
467 goto fail;
468 goto done;
469 fail:
470 DBG(dev, "can't alloc rx requests\n");
471 pr_debug("[XLOG_INFO][UTHER]alloc_requests : can't alloc rxrequests\n");
472 done:
473 spin_unlock(&dev->reqrx_lock);
474 return status;
475 }
476
477 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
478 {
479 struct usb_request *req;
480 unsigned long flags;
481 int req_cnt = 0;
482
483 /* fill unused rxq slots with some skb */
484 spin_lock_irqsave(&dev->reqrx_lock, flags);
485 while (!list_empty(&dev->rx_reqs)) {
486 /* break the cycle of continuous completion and re-submission */
487 if (++req_cnt > qlenrx(dev->gadget))
488 break;
489
490 req = container_of(dev->rx_reqs.next,
491 struct usb_request, list);
492 list_del_init(&req->list);
493 spin_unlock_irqrestore(&dev->reqrx_lock, flags);
494
495 if (rx_submit(dev, req, gfp_flags) < 0) {
496 spin_lock_irqsave(&dev->reqrx_lock, flags);
497 list_add(&req->list, &dev->rx_reqs);
498 spin_unlock_irqrestore(&dev->reqrx_lock, flags);
499 defer_kevent(dev, WORK_RX_MEMORY);
500 return;
501 }
502
503 spin_lock_irqsave(&dev->reqrx_lock, flags);
504 }
505 spin_unlock_irqrestore(&dev->reqrx_lock, flags);
506 }
507
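/* rx_complete() defers further work to two single-threaded workqueues:
 * process_rx_w() (uether_wq) dequeues frames from rx_frames and hands
 * them to the stack via netif_rx_ni(), while process_rx_w1()
 * (uether_wq1) refills the RX request queue through rx_fill().
 */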
508 static void process_rx_w(struct work_struct *work)
509 {
510 struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
511 struct sk_buff *skb;
512 int status = 0;
513
514 if (!dev->port_usb)
515 return;
516
517 while ((skb = skb_dequeue(&dev->rx_frames))) {
518 if (status < 0
519 || ETH_HLEN > skb->len
520 || skb->len > VLAN_ETH_FRAME_LEN) {
521 dev->net->stats.rx_errors++;
522 dev->net->stats.rx_length_errors++;
523 rndis_test_rx_error++ ;
524 DBG(dev, "rx length %d\n", skb->len);
525 dev_kfree_skb_any(skb);
526 continue;
527 }
528 skb->protocol = eth_type_trans(skb, dev->net);
529 dev->net->stats.rx_packets++;
530 dev->net->stats.rx_bytes += skb->len;
531
532 rndis_test_rx_net_out ++ ;
533 status = netif_rx_ni(skb);
534 }
535
536 /* rx_fill() was moved to a separate work item, process_rx_w1() */
537 #if 0
538 if (netif_running(dev->net))
539 rx_fill(dev, GFP_KERNEL);
540 #endif
541 }
542
543 static void process_rx_w1(struct work_struct *work)
544 {
545 struct eth_dev *dev = container_of(work, struct eth_dev, rx_work1);
546
547 if (!dev->port_usb)
548 return;
549
550 if (netif_running(dev->net))
551 rx_fill(dev, GFP_KERNEL);
552 }
553
554 static void eth_work(struct work_struct *work)
555 {
556 struct eth_dev *dev = container_of(work, struct eth_dev, work);
557
558 if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
559 if (netif_running(dev->net))
560 rx_fill(dev, GFP_KERNEL);
561 }
562
563 if (dev->todo)
564 DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
565 }
566
567 static void tx_complete(struct usb_ep *ep, struct usb_request *req)
568 {
569 struct sk_buff *skb;
570 struct eth_dev *dev;
571 struct net_device *net;
572 struct usb_request *new_req;
573 struct usb_ep *in;
574 int length;
575 int retval;
576
577 if (!ep->driver_data) {
578 usb_ep_free_request(ep, req);
579 return;
580 }
581
582 dev = ep->driver_data;
583 net = dev->net;
584
585 if (!dev->port_usb) {
586 usb_ep_free_request(ep, req);
587 return;
588 }
589
590 switch (req->status) {
591 default:
592 dev->net->stats.tx_errors++;
593 VDBG(dev, "tx err %d\n", req->status);
594 /* FALLTHROUGH */
595 case -ECONNRESET: /* unlink */
596 case -ESHUTDOWN: /* disconnect etc */
597 break;
598 case 0:
599 if (!req->zero)
600 dev->net->stats.tx_bytes += req->length-1;
601 else
602 dev->net->stats.tx_bytes += req->length;
603 }
604 dev->net->stats.tx_packets++;
605 rndis_test_tx_complete++ ;
606
607 spin_lock(&dev->req_lock);
608 list_add_tail(&req->list, &dev->tx_reqs);
609
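/* Aggregated (multi_pkt_xfer) requests carry no skb in req->context.
 * On completion, if the next request on tx_reqs already holds packed
 * data (length > 0) it is queued to the UDC right away; an empty one
 * is put back at the tail for eth_start_xmit() to fill.
 */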
610 if (dev->port_usb->multi_pkt_xfer && !req->context) {
611 dev->no_tx_req_used--;
612 req->length = 0;
613 in = dev->port_usb->in_ep;
614
615 if (!list_empty(&dev->tx_reqs)) {
616 new_req = container_of(dev->tx_reqs.next,
617 struct usb_request, list);
618 list_del(&new_req->list);
619 spin_unlock(&dev->req_lock);
620 if (new_req->length > 0) {
621 length = new_req->length;
622
623 /* NCM requires no zlp if transfer is
624 * dwNtbInMaxSize */
625 if (dev->port_usb->is_fixed &&
626 length == dev->port_usb->fixed_in_len &&
627 (length % in->maxpacket) == 0)
628 new_req->zero = 0;
629 else
630 new_req->zero = 1;
631
632 /* use zlp framing on tx for strict CDC-Ether
633 * conformance, though any robust network rx
634 * path ignores extra padding. and some hardware
635 * doesn't like to write zlps.
636 */
637 if (new_req->zero && !dev->zlp &&
638 (length % in->maxpacket) == 0) {
639 new_req->zero = 0;
640 length++;
641 }
642
643 new_req->length = length;
644 retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
645 switch (retval) {
646 default:
647 DBG(dev, "tx queue err %d\n", retval);
648 new_req->length = 0;
649 spin_lock(&dev->req_lock);
650 list_add_tail(&new_req->list,
651 &dev->tx_reqs);
652 spin_unlock(&dev->req_lock);
653 break;
654 case 0:
655 spin_lock(&dev->req_lock);
656 dev->no_tx_req_used++;
657 spin_unlock(&dev->req_lock);
658 net->trans_start = jiffies;
659 }
660 } else {
661 spin_lock(&dev->req_lock);
662 /*
663 * Put the idle request at the back of the
664 * queue. The xmit function will put the
665 * unfinished request at the beginning of the
666 * queue.
667 */
668 list_add_tail(&new_req->list, &dev->tx_reqs);
669 spin_unlock(&dev->req_lock);
670 }
671 } else {
672 spin_unlock(&dev->req_lock);
673 }
674 } else {
675 skb = req->context;
676 /* Is aggregation already enabled and buffers allocated ? */
677 if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
678 req->buf = kzalloc(dev->tx_req_bufsize, GFP_ATOMIC);
679 req->context = NULL;
680 } else {
681 req->buf = NULL;
682 }
683
684 spin_unlock(&dev->req_lock);
685 dev_kfree_skb_any(skb);
686 }
687
688 if (netif_carrier_ok(dev->net))
689 {
690 spin_lock(&dev->req_lock);
691 if (dev->no_tx_req_used < tx_wakeup_threshold)
692 netif_wake_queue(dev->net);
693 spin_unlock(&dev->req_lock);
694 }
695
696 }
697
698 static inline int is_promisc(u16 cdc_filter)
699 {
700 return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
701 }
702
703 static int alloc_tx_buffer(struct eth_dev *dev)
704 {
705 struct list_head *act;
706 struct usb_request *req;
707
708 dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
709 (dev->net->mtu
710 + sizeof(struct ethhdr)
711 /* size of rndis_packet_msg_type */
712 + 44
713 + 22));
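/* Example (illustrative values): with dl_max_pkts_per_xfer = 3 and an
 * mtu of 1500 this allocates 3 * (1500 + 14 + 44 + 22) = 4740 bytes
 * per TX request.
 */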
714
715 list_for_each(act, &dev->tx_reqs) {
716 req = container_of(act, struct usb_request, list);
717 if (!req->buf) {
718 req->buf = kzalloc(dev->tx_req_bufsize,
719 GFP_ATOMIC);
720 if (!req->buf)
721 goto free_buf;
722 }
723 /* req->context is not used for multi_pkt_xfers */
724 req->context = NULL;
725 }
726 return 0;
727
728 free_buf:
729 /* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
730 dev->tx_req_bufsize = 0;
731 list_for_each(act, &dev->tx_reqs) {
732 req = container_of(act, struct usb_request, list);
733 kfree(req->buf);
734 req->buf = NULL;
735 }
736 return -ENOMEM;
737 }
738
739 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
740 struct net_device *net)
741 {
742 struct eth_dev *dev = netdev_priv(net);
743 int length = skb->len;
744 int retval;
745 struct usb_request *req = NULL;
746 unsigned long flags;
747 struct usb_ep *in;
748 u16 cdc_filter;
749 bool multi_pkt_xfer = false;
750
751 //ALPS00542120
752 static unsigned int okCnt = 0, busyCnt = 0;
753 static int firstShot = 1, diffSec;
754 static struct timeval tv_last, tv_cur;
755
756 spin_lock_irqsave(&dev->lock, flags);
757 if (dev->port_usb) {
758 in = dev->port_usb->in_ep;
759 cdc_filter = dev->port_usb->cdc_filter;
760 multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
761 } else {
762 in = NULL;
763 cdc_filter = 0;
764 }
765 spin_unlock_irqrestore(&dev->lock, flags);
766
767 if (!in) {
768 dev_kfree_skb_any(skb);
769 return NETDEV_TX_OK;
770 }
771
772 /* Allocate memory for tx_reqs to support multi packet transfer */
773 spin_lock_irqsave(&dev->req_lock, flags);
774 if (multi_pkt_xfer && !dev->tx_req_bufsize) {
775 retval = alloc_tx_buffer(dev);
776 if (retval < 0) {
777 spin_unlock_irqrestore(&dev->req_lock, flags);
/* drop the packet rather than return an errno from ndo_start_xmit */
778 dev_kfree_skb_any(skb);
dev->net->stats.tx_dropped++;
return NETDEV_TX_OK;
779 }
780 }
781 spin_unlock_irqrestore(&dev->req_lock, flags);
782
783 rndis_test_tx_net_in ++ ;
784
785 /* apply outgoing CDC or RNDIS filters */
786 if (!is_promisc(cdc_filter)) {
787 u8 *dest = skb->data;
788
789 if (is_multicast_ether_addr(dest)) {
790 u16 type;
791
792 /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
793 * SET_ETHERNET_MULTICAST_FILTERS requests
794 */
795 if (is_broadcast_ether_addr(dest))
796 type = USB_CDC_PACKET_TYPE_BROADCAST;
797 else
798 type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
799 if (!(cdc_filter & type)) {
800 dev_kfree_skb_any(skb);
801 pr_warning("cdc_filter error: cdc_filter is 0x%x, type is 0x%x\n", cdc_filter, type);
802 return NETDEV_TX_OK;
803 }
804 }
805 /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
806 }
807
808 spin_lock_irqsave(&dev->req_lock, flags);
809 /*
810 * this freelist can be empty if an interrupt triggered disconnect()
811 * and reconfigured the gadget (shutting down this queue) after the
812 * network stack decided to xmit but before we got the spinlock.
813 */
814 if (list_empty(&dev->tx_reqs)) {
815
816 busyCnt++;
817 do_gettimeofday(&tv_cur);
818
819 if (firstShot)
820 {
821 tv_last = tv_cur;
822 firstShot = 0;
823 printk(KERN_ERR "%s, NETDEV_TX_BUSY returned at firstShot, okCnt: %u, busyCnt: %u\n", __func__, okCnt, busyCnt);
824 }
825 else
826 {
827 diffSec = tv_cur.tv_sec - tv_last.tv_sec;
828 if (diffSec >= 2)
829 {
830 tv_last = tv_cur;
831 printk(KERN_ERR "%s, NETDEV_TX_BUSY returned, okCnt: %u, busyCnt: %u\n", __func__, okCnt, busyCnt);
832 }
833 }
834
835 spin_unlock_irqrestore(&dev->req_lock, flags);
836
837 rndis_test_tx_busy ++ ;
838 return NETDEV_TX_BUSY;
839 }
840 okCnt++;
841
842 req = container_of(dev->tx_reqs.next, struct usb_request, list);
843 list_del(&req->list);
844
845 /* temporarily stop TX queue when the freelist empties */
846 if (list_empty(&dev->tx_reqs))
847 netif_stop_queue(net);
848 spin_unlock_irqrestore(&dev->req_lock, flags);
849
850 /* no buffer copies needed, unless the network stack did it
851 * or the hardware can't use skb buffers.
852 * or there's not enough space for extra headers we need
853 */
854 spin_lock_irqsave(&dev->lock, flags);
855 if (dev->wrap) {
856 if (dev->port_usb)
857 skb = dev->wrap(dev->port_usb, skb);
858 if (!skb) {
859 spin_unlock_irqrestore(&dev->lock, flags);
860 goto drop;
861 }
862 }
863
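/* Aggregation path: copy the RNDIS header and then the skb payload
 * into req->buf, free the skb, and either park the request back on
 * tx_reqs so more packets can be packed into it (while under the
 * dl_max_pkts_per_xfer and dl_max_transfer_len limits and more than
 * TX_REQ_THRESHOLD requests are already in flight), or queue it now.
 */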
864 if (multi_pkt_xfer) {
865
866 pr_debug("req->length:%d header_len:%u\n"
867 "skb->len:%d skb->data_len:%d\n",
868 req->length, dev->header_len,
869 skb->len, skb->data_len);
870
871 if (dev->port_usb == NULL)
872 {
873 dev_kfree_skb_any(skb);
874 pr_debug("eth_start_xmit, port_usb becomes NULL\n");
875 return NETDEV_TX_OK;
876 }
877 /* Add RNDIS Header */
878 memcpy(req->buf + req->length, dev->port_usb->header,
879 dev->header_len);
880 /* Increment req length by header size */
881 req->length += dev->header_len;
882 spin_unlock_irqrestore(&dev->lock, flags);
883 /* Copy received IP data from SKB */
884 memcpy(req->buf + req->length, skb->data, skb->len);
885 /* Increment req length by skb data length */
886 req->length += skb->len;
887 length = req->length;
888 dev_kfree_skb_any(skb);
889
890 spin_lock_irqsave(&dev->req_lock, flags);
891 dev->tx_skb_hold_count++;
892 //if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
893 if ((dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) && (length < (dev->port_usb->dl_max_transfer_len - dev->net->mtu))) {
894 if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
895 list_add(&req->list, &dev->tx_reqs);
896 spin_unlock_irqrestore(&dev->req_lock, flags);
897 goto success;
898 }
899 }
900
901 dev->no_tx_req_used++;
902 dev->tx_skb_hold_count = 0;
903 spin_unlock_irqrestore(&dev->req_lock, flags);
904 } else {
905 spin_unlock_irqrestore(&dev->lock, flags);
906 length = skb->len;
907 req->buf = skb->data;
908 req->context = skb;
909 }
910
911 if (dev->port_usb == NULL)
912 {
913 if (!multi_pkt_xfer)
914 dev_kfree_skb_any(skb);
915 pr_debug("eth_start_xmit, port_usb becomes NULL\n");
916 return NETDEV_TX_OK;
917 }
918
919 /* NCM requires no zlp if transfer is dwNtbInMaxSize */
920 if (dev->port_usb->is_fixed &&
921 length == dev->port_usb->fixed_in_len &&
922 (length % in->maxpacket) == 0)
923 req->zero = 0;
924 else
925 req->zero = 1;
926
927 /* use zlp framing on tx for strict CDC-Ether conformance,
928 * though any robust network rx path ignores extra padding.
929 * and some hardware doesn't like to write zlps.
930 */
931 if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
932 req->zero = 0;
933 length++;
934 }
935
936 req->length = length;
937
938 /* throttle high/super speed IRQ rate back slightly */
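/* e.g. with the default qmult of 10, only every (qmult/2)th = 5th
 * request asks for a completion interrupt.
 */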
939 if (gadget_is_dualspeed(dev->gadget) &&
940 (dev->gadget->speed == USB_SPEED_HIGH ||
941 dev->gadget->speed == USB_SPEED_SUPER)) {
942 dev->tx_qlen++;
943 if (dev->tx_qlen == (qmult/2)) {
944 req->no_interrupt = 0;
945 dev->tx_qlen = 0;
946 } else {
947 req->no_interrupt = 1;
948 }
949 } else {
950 req->no_interrupt = 0;
951 }
952 rndis_test_tx_usb_out ++ ;
953
954 retval = usb_ep_queue(in, req, GFP_ATOMIC);
955 switch (retval) {
956 default:
957 DBG(dev, "tx queue err %d\n", retval);
958 pr_debug("[XLOG_INFO][UTHER]eth_start_xmit : tx queue err %d\n", retval);
959 break;
960 case 0:
961 net->trans_start = jiffies;
962 }
963
964 if (retval) {
965 if (!multi_pkt_xfer)
966 dev_kfree_skb_any(skb);
967 else
968 req->length = 0;
969 drop:
970 dev->net->stats.tx_dropped++;
971 spin_lock_irqsave(&dev->req_lock, flags);
972 if (list_empty(&dev->tx_reqs))
973 netif_start_queue(net);
974 list_add_tail(&req->list, &dev->tx_reqs);
975 spin_unlock_irqrestore(&dev->req_lock, flags);
976 }
977 success:
978 return NETDEV_TX_OK;
979 }
980
981 /*-------------------------------------------------------------------------*/
982
983 static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
984 {
985 DBG(dev, "%s\n", __func__);
986 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
987
988 /* fill the rx queue */
989 rx_fill(dev, gfp_flags);
990
991 /* and open the tx floodgates */
992 dev->tx_qlen = 0;
993 netif_wake_queue(dev->net);
994 }
995
996 static int eth_open(struct net_device *net)
997 {
998 struct eth_dev *dev = netdev_priv(net);
999 struct gether *link;
1000
1001 DBG(dev, "%s\n", __func__);
1002 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
1003
1004 if (netif_carrier_ok(dev->net))
1005 eth_start(dev, GFP_KERNEL);
1006
1007 spin_lock_irq(&dev->lock);
1008 link = dev->port_usb;
1009 if (link && link->open)
1010 link->open(link);
1011 spin_unlock_irq(&dev->lock);
1012
1013 return 0;
1014 }
1015
1016 static int eth_stop(struct net_device *net)
1017 {
1018 struct eth_dev *dev = netdev_priv(net);
1019 unsigned long flags;
1020
1021 VDBG(dev, "%s\n", __func__);
1022 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
1023
1024 netif_stop_queue(net);
1025
1026 DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
1027 dev->net->stats.rx_packets, dev->net->stats.tx_packets,
1028 dev->net->stats.rx_errors, dev->net->stats.tx_errors
1029 );
1030
1031 /* ensure there are no more active requests */
1032 spin_lock_irqsave(&dev->lock, flags);
1033 if (dev->port_usb) {
1034 struct gether *link = dev->port_usb;
1035 const struct usb_endpoint_descriptor *in;
1036 const struct usb_endpoint_descriptor *out;
1037
1038 if (link->close)
1039 link->close(link);
1040
1041 /* NOTE: we have no abort-queue primitive we could use
1042 * to cancel all pending I/O. Instead, we disable then
1043 * reenable the endpoints ... this idiom may leave toggle
1044 * wrong, but that's a self-correcting error.
1045 *
1046 * REVISIT: we *COULD* just let the transfers complete at
1047 * their own pace; the network stack can handle old packets.
1048 * For the moment we leave this here, since it works.
1049 */
1050 in = link->in_ep->desc;
1051 out = link->out_ep->desc;
1052 usb_ep_disable(link->in_ep);
1053 usb_ep_disable(link->out_ep);
1054 if (netif_carrier_ok(net)) {
1055 DBG(dev, "host still using in/out endpoints\n");
1056 link->in_ep->desc = in;
1057 link->out_ep->desc = out;
1058 usb_ep_enable(link->in_ep);
1059 usb_ep_enable(link->out_ep);
1060 }
1061 }
1062 spin_unlock_irqrestore(&dev->lock, flags);
1063
1064 return 0;
1065 }
1066
1067 /*-------------------------------------------------------------------------*/
1068
1069 /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
1070 static char *dev_addr;
1071 module_param(dev_addr, charp, S_IRUGO);
1072 MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
1073
1074 /* this address is invisible to ifconfig */
1075 static char *host_addr;
1076 module_param(host_addr, charp, S_IRUGO);
1077 MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
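/* Both parameters take six hex bytes separated by ':' or '.', e.g.
 * "12:34:56:78:9a:bc"; a missing or invalid value falls back to a
 * random locally administered address (see get_ether_addr() below).
 */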
1078
1079 static int get_ether_addr(const char *str, u8 *dev_addr)
1080 {
1081 if (str) {
1082 unsigned i;
1083
1084 for (i = 0; i < 6; i++) {
1085 unsigned char num;
1086
1087 if ((*str == '.') || (*str == ':'))
1088 str++;
1089 num = hex_to_bin(*str++) << 4;
1090 num |= hex_to_bin(*str++);
1091 dev_addr [i] = num;
1092 }
1093 if (is_valid_ether_addr(dev_addr))
1094 return 0;
1095 }
1096 eth_random_addr(dev_addr);
1097 return 1;
1098 }
1099
1100 static const struct net_device_ops eth_netdev_ops = {
1101 .ndo_open = eth_open,
1102 .ndo_stop = eth_stop,
1103 .ndo_start_xmit = eth_start_xmit,
1104 .ndo_change_mtu = ueth_change_mtu,
1105 .ndo_set_mac_address = eth_mac_addr,
1106 .ndo_validate_addr = eth_validate_addr,
1107 };
1108
1109 static struct device_type gadget_type = {
1110 .name = "gadget",
1111 };
1112
1113 /**
1114 * gether_setup_name - initialize one ethernet-over-usb link
1115 * @g: gadget to associate with this link
1116 * @ethaddr: NULL, or a buffer in which the ethernet address of the
1117 * host side of the link is recorded
1118 * @netname: name for network device (for example, "usb")
1119 * Context: may sleep
1120 *
1121 * This sets up the single network link that may be exported by a
1122 * gadget driver using this framework. The link layer addresses are
1123 * set up using module parameters.
1124 *
1125 * Returns an eth_dev pointer on success, or an ERR_PTR(-errno) on failure
1126 */
1127 struct eth_dev *gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
1128 const char *netname)
1129 {
1130 struct eth_dev *dev;
1131 struct net_device *net;
1132 int status;
1133
1134 net = alloc_etherdev(sizeof *dev);
1135 if (!net)
1136 return ERR_PTR(-ENOMEM);
1137
1138 dev = netdev_priv(net);
1139 spin_lock_init(&dev->lock);
1140 spin_lock_init(&dev->req_lock);
1141 spin_lock_init(&dev->reqrx_lock);
1142 INIT_WORK(&dev->work, eth_work);
1143 INIT_WORK(&dev->rx_work, process_rx_w);
1144 INIT_WORK(&dev->rx_work1, process_rx_w1);
1145 INIT_LIST_HEAD(&dev->tx_reqs);
1146 INIT_LIST_HEAD(&dev->rx_reqs);
1147
1148 skb_queue_head_init(&dev->rx_frames);
1149
1150 /* network device setup */
1151 dev->net = net;
1152 snprintf(net->name, sizeof(net->name), "%s%%d", netname);
1153
1154 if (get_ether_addr(dev_addr, net->dev_addr))
1155 dev_warn(&g->dev,
1156 "using random %s ethernet address\n", "self");
1157 if (get_ether_addr(host_addr, dev->host_mac))
1158 dev_warn(&g->dev,
1159 "using random %s ethernet address\n", "host");
1160
1161 if (ethaddr)
1162 memcpy(ethaddr, dev->host_mac, ETH_ALEN);
1163
1164 net->netdev_ops = &eth_netdev_ops;
1165
1166 SET_ETHTOOL_OPS(net, &ops);
1167
1168 dev->gadget = g;
1169 SET_NETDEV_DEV(net, &g->dev);
1170 SET_NETDEV_DEVTYPE(net, &gadget_type);
1171
1172 status = register_netdev(net);
1173 if (status < 0) {
1174 dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
1175 free_netdev(net);
1176 dev = ERR_PTR(status);
1177 } else {
1178 INFO(dev, "MAC %pM\n", net->dev_addr);
1179 INFO(dev, "HOST MAC %pM\n", dev->host_mac);
1180
1181 /* two kinds of host-initiated state changes:
1182 * - iff DATA transfer is active, carrier is "on"
1183 * - tx queueing enabled if open *and* carrier is "on"
1184 */
1185 netif_carrier_off(net);
1186 }
1187
1188 return dev;
1189 }
1190
1191 /**
1192 * gether_cleanup - remove Ethernet-over-USB device
1193 * Context: may sleep
1194 *
1195 * This is called to free all resources allocated by @gether_setup().
1196 */
1197 void gether_cleanup(struct eth_dev *dev)
1198 {
1199 if (!dev)
1200 return;
1201
1202 unregister_netdev(dev->net);
1203 flush_work(&dev->work);
1204 free_netdev(dev->net);
1205 }
1206
1207 /**
1208 * gether_connect - notify network layer that USB link is active
1209 * @link: the USB link, set up with endpoints, descriptors matching
1210 * current device speed, and any framing wrapper(s) set up.
1211 * Context: irqs blocked
1212 *
1213 * This is called to activate endpoints and let the network layer know
1214 * the connection is active ("carrier detect"). It may cause the I/O
1215 * queues to open and start letting network packets flow, but will in
1216 * any case activate the endpoints so that they respond properly to the
1217 * USB host.
1218 *
1219 * Verify net_device pointer returned using IS_ERR(). If it doesn't
1220 * indicate some error code (negative errno), ep->driver_data values
1221 * have been overwritten.
1222 */
1223 struct net_device *gether_connect(struct gether *link)
1224 {
1225 struct eth_dev *dev = link->ioport;
1226 int result = 0;
1227
1228 if (!dev)
1229 return ERR_PTR(-EINVAL);
1230
1231 link->header = kzalloc(sizeof(struct rndis_packet_msg_type),
1232 GFP_ATOMIC);
1233 if (!link->header) {
1234 pr_err("RNDIS header memory allocation failed.\n");
1235 result = -ENOMEM;
1236 goto fail;
1237 }
1238
1239 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
1240
1241 link->in_ep->driver_data = dev;
1242 result = usb_ep_enable(link->in_ep);
1243 if (result != 0) {
1244 DBG(dev, "enable %s --> %d\n",
1245 link->in_ep->name, result);
1246 goto fail0;
1247 }
1248
1249 link->out_ep->driver_data = dev;
1250 result = usb_ep_enable(link->out_ep);
1251 if (result != 0) {
1252 DBG(dev, "enable %s --> %d\n",
1253 link->out_ep->name, result);
1254 goto fail1;
1255 }
1256
1257 if (result == 0)
1258 {
1259 result = alloc_tx_requests(dev, link, qlen(dev->gadget));
1260 if (result == 0)
1261 result = alloc_rx_requests(dev, link, qlenrx(dev->gadget));
1262 }
1263
1264 if (result == 0) {
1265 dev->zlp = link->is_zlp_ok;
1266 DBG(dev, "qlen %d\n", qlen(dev->gadget));
1267
1268 dev->header_len = link->header_len;
1269 dev->unwrap = link->unwrap;
1270 dev->wrap = link->wrap;
1271 dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
1272 dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
1273
1274 spin_lock(&dev->lock);
1275 dev->tx_skb_hold_count = 0;
1276 dev->no_tx_req_used = 0;
1277 dev->tx_req_bufsize = 0;
1278 dev->port_usb = link;
1279 if (netif_running(dev->net)) {
1280 if (link->open)
1281 link->open(link);
1282 } else {
1283 if (link->close)
1284 link->close(link);
1285 }
1286 spin_unlock(&dev->lock);
1287
1288 netif_carrier_on(dev->net);
1289 if (netif_running(dev->net))
1290 eth_start(dev, GFP_ATOMIC);
1291
1292 /* on error, disable any endpoints */
1293 } else {
1294 (void) usb_ep_disable(link->out_ep);
1295 fail1:
1296 (void) usb_ep_disable(link->in_ep);
1297 }
1298
1299 /* caller is responsible for cleanup on error */
1300 if (result < 0) {
1301 fail0:
1302 kfree(link->header);
1303 fail:
1304 return ERR_PTR(result);
1305 }
1306
1307 return dev->net;
1308 }
1309
1310 /**
1311 * gether_disconnect - notify network layer that USB link is inactive
1312 * @link: the USB link, on which gether_connect() was called
1313 * Context: irqs blocked
1314 *
1315 * This is called to deactivate endpoints and let the network layer know
1316 * the connection went inactive ("no carrier").
1317 *
1318 * On return, the state is as if gether_connect() had never been called.
1319 * The endpoints are inactive, and accordingly without active USB I/O.
1320 * Pointers to endpoint descriptors and endpoint private data are nulled.
1321 */
1322 void gether_disconnect(struct gether *link)
1323 {
1324 struct eth_dev *dev = link->ioport;
1325 struct usb_request *req;
1326 struct sk_buff *skb;
1327
1328 WARN_ON(!dev);
1329 if (!dev)
1330 return;
1331
1332 DBG(dev, "%s\n", __func__);
1333 pr_debug("[XLOG_INFO][UTHER]%s\n", __func__);
1334
1335 rndis_test_rx_usb_in = 0 ;
1336 rndis_test_rx_net_out = 0 ;
1337 rndis_test_rx_nomem = 0 ;
1338 rndis_test_rx_error = 0 ;
1339
1340 rndis_test_tx_net_in = 0 ;
1341 rndis_test_tx_busy = 0 ;
1342 rndis_test_tx_usb_out = 0 ;
1343 rndis_test_tx_complete = 0 ;
1344
1345 netif_stop_queue(dev->net);
1346 netif_carrier_off(dev->net);
1347
1348 /* disable endpoints, forcing (synchronous) completion
1349 * of all pending i/o. then free the request objects
1350 * and forget about the endpoints.
1351 */
1352 usb_ep_disable(link->in_ep);
1353 spin_lock(&dev->req_lock);
1354 while (!list_empty(&dev->tx_reqs)) {
1355 req = container_of(dev->tx_reqs.next,
1356 struct usb_request, list);
1357 list_del(&req->list);
1358
1359 spin_unlock(&dev->req_lock);
1360 if (link->multi_pkt_xfer) {
1361 kfree(req->buf);
1362 req->buf = NULL;
1363 }
1364 usb_ep_free_request(link->in_ep, req);
1365 spin_lock(&dev->req_lock);
1366 }
1367 /* Free rndis header buffer memory */
1368 kfree(link->header);
1369 link->header = NULL;
1370 spin_unlock(&dev->req_lock);
1371 link->in_ep->driver_data = NULL;
1372 link->in_ep->desc = NULL;
1373
1374 usb_ep_disable(link->out_ep);
1375 spin_lock(&dev->reqrx_lock);
1376 while (!list_empty(&dev->rx_reqs)) {
1377 req = container_of(dev->rx_reqs.next,
1378 struct usb_request, list);
1379 list_del(&req->list);
1380
1381 spin_unlock(&dev->reqrx_lock);
1382 usb_ep_free_request(link->out_ep, req);
1383 spin_lock(&dev->reqrx_lock);
1384 }
1385 spin_unlock(&dev->reqrx_lock);
1386
1387 spin_lock(&dev->rx_frames.lock);
1388 while ((skb = __skb_dequeue(&dev->rx_frames)))
1389 dev_kfree_skb_any(skb);
1390 spin_unlock(&dev->rx_frames.lock);
1391
1392 link->out_ep->driver_data = NULL;
1393 link->out_ep->desc = NULL;
1394
1395 /* finish forgetting about this USB link episode */
1396 dev->header_len = 0;
1397 dev->unwrap = NULL;
1398 dev->wrap = NULL;
1399
1400 spin_lock(&dev->lock);
1401 dev->port_usb = NULL;
1402 spin_unlock(&dev->lock);
1403 }
1404
1405 static int __init gether_init(void)
1406 {
1407 uether_wq = create_singlethread_workqueue("uether");
1408 if (!uether_wq) {
1409 pr_err("%s: Unable to create workqueue: uether\n", __func__);
1410 return -ENOMEM;
1411 }
1412 uether_wq1 = create_singlethread_workqueue("uether_rx1");
1413 if (!uether_wq1) {
1414 destroy_workqueue(uether_wq);
1415 pr_err("%s: Unable to create workqueue: uether_rx1\n", __func__);
1416 return -ENOMEM;
1417 }
1418 return 0;
1419 }
1420 module_init(gether_init);
1421
1422 static void __exit gether_exit(void)
1423 {
1424 destroy_workqueue(uether_wq);
1425 destroy_workqueue(uether_wq1);
1426 }
1427 module_exit(gether_exit);
1428 MODULE_DESCRIPTION("ethernet over USB driver");
1429 MODULE_LICENSE("GPL v2");