1 /*
2 * inode.c -- user mode filesystem api for usb gadget controllers
3 *
4 * Copyright (C) 2003-2004 David Brownell
5 * Copyright (C) 2003 Agilent Technologies
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13
14 /* #define VERBOSE_DEBUG */
15
16 #include <linux/init.h>
17 #include <linux/module.h>
18 #include <linux/fs.h>
19 #include <linux/pagemap.h>
20 #include <linux/uts.h>
21 #include <linux/wait.h>
22 #include <linux/compiler.h>
23 #include <linux/uaccess.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/poll.h>
27 #include <linux/mmu_context.h>
28 #include <linux/aio.h>
29 #include <linux/uio.h>
30 #include <linux/refcount.h>
31 #include <linux/delay.h>
32 #include <linux/device.h>
33 #include <linux/moduleparam.h>
34
35 #include <linux/usb/gadgetfs.h>
36 #include <linux/usb/gadget.h>
37
38
39 /*
40 * The gadgetfs API maps each endpoint to a file descriptor so that you
41 * can use standard synchronous read/write calls for I/O. There's some
42 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support. Example usermode
43 * drivers show how this works in practice. You can also use AIO to
44 * eliminate I/O gaps between requests, to help when streaming data.
45 *
46 * Key parts that must be USB-specific are protocols defining how the
47 * read/write operations relate to the hardware state machines. There
48 * are two types of files. One type is for the device, implementing ep0.
49 * The other type is for each IN or OUT endpoint. In both cases, the
50 * user mode driver must configure the hardware before using it.
51 *
52 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
53 * (by writing configuration and device descriptors). Afterwards it
54 * may serve as a source of device events, used to handle all control
55 * requests other than basic enumeration.
56 *
57 * - Then, after a SET_CONFIGURATION control request, ep_config() is
58 * called when each /dev/gadget/ep* file is configured (by writing
59 * endpoint descriptors). Afterwards these files are used to write()
60 * IN data or to read() OUT data. To halt the endpoint, a "wrong
61 * direction" request is issued (like reading an IN endpoint).
62 *
63 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
64 * not possible on all hardware. For example, precise fault handling with
65 * respect to data left in endpoint fifos after aborted operations; or
66 * selective clearing of endpoint halts, to implement SET_INTERFACE.
67 */
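/*
 * Purely illustrative user-mode sketch of the flow described above; it
 * is not part of this driver.  It assumes gadgetfs is mounted on
 * /dev/gadget, that the controller file was already configured by
 * writing descriptors (see dev_config() far below), and that "ep0_fd"
 * and "handle_setup()" are hypothetical names for the open
 * /dev/gadget/$CHIP descriptor and a request handler.
 *
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	struct usb_gadgetfs_event ev[5];
 *	ssize_t n;
 *
 *	// ep0 read()s return whole events, never partial ones
 *	while ((n = read(ep0_fd, ev, sizeof ev)) > 0) {
 *		unsigned i, count = n / sizeof ev[0];
 *
 *		for (i = 0; i < count; i++) {
 *			switch (ev[i].type) {
 *			case GADGETFS_CONNECT:		// ev[i].u.speed valid
 *			case GADGETFS_DISCONNECT:
 *			case GADGETFS_SUSPEND:
 *				break;
 *			case GADGETFS_SETUP:		// ev[i].u.setup valid
 *				handle_setup(ep0_fd, &ev[i].u.setup);
 *				break;
 *			default:
 *				break;
 *			}
 *		}
 *	}
 */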
68
69 #define DRIVER_DESC "USB Gadget filesystem"
70 #define DRIVER_VERSION "24 Aug 2004"
71
72 static const char driver_desc [] = DRIVER_DESC;
73 static const char shortname [] = "gadgetfs";
74
75 MODULE_DESCRIPTION (DRIVER_DESC);
76 MODULE_AUTHOR ("David Brownell");
77 MODULE_LICENSE ("GPL");
78
79 static int ep_open(struct inode *, struct file *);
80
81
82 /*----------------------------------------------------------------------*/
83
84 #define GADGETFS_MAGIC 0xaee71ee7
85
86 /* /dev/gadget/$CHIP represents ep0 and the whole device */
87 enum ep0_state {
88 /* DISABLED is the initial state. */
89 STATE_DEV_DISABLED = 0,
90
91 /* Only one open() of /dev/gadget/$CHIP; only one file tracks
92 * ep0/device i/o modes and binding to the controller. Driver
93 * must always write descriptors to initialize the device, then
94 * the device becomes UNCONNECTED until enumeration.
95 */
96 STATE_DEV_OPENED,
97
98 /* From then on, ep0 fd is in either of two basic modes:
99 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
100 * - SETUP: read/write will transfer control data and succeed;
101 * or if "wrong direction", performs protocol stall
102 */
103 STATE_DEV_UNCONNECTED,
104 STATE_DEV_CONNECTED,
105 STATE_DEV_SETUP,
106
107 /* UNBOUND means the driver closed ep0, so the device won't be
108 * accessible again (DEV_DISABLED) until all fds are closed.
109 */
110 STATE_DEV_UNBOUND,
111 };
112
113 /* enough for the whole queue: most events invalidate others */
114 #define N_EVENT 5
115
116 #define RBUF_SIZE 256
117
118 struct dev_data {
119 spinlock_t lock;
120 refcount_t count;
121 int udc_usage;
122 enum ep0_state state; /* P: lock */
123 struct usb_gadgetfs_event event [N_EVENT];
124 unsigned ev_next;
125 struct fasync_struct *fasync;
126 u8 current_config;
127
128 /* drivers reading ep0 MUST handle control requests (SETUP)
129 * reported that way; else the host will time out.
130 */
131 unsigned usermode_setup : 1,
132 setup_in : 1,
133 setup_can_stall : 1,
134 setup_out_ready : 1,
135 setup_out_error : 1,
136 setup_abort : 1,
137 gadget_registered : 1;
138 unsigned setup_wLength;
139
140 /* the rest is basically write-once */
141 struct usb_config_descriptor *config, *hs_config;
142 struct usb_device_descriptor *dev;
143 struct usb_request *req;
144 struct usb_gadget *gadget;
145 struct list_head epfiles;
146 void *buf;
147 wait_queue_head_t wait;
148 struct super_block *sb;
149 struct dentry *dentry;
150
151 /* except this scratch i/o buffer for ep0 */
152 u8 rbuf[RBUF_SIZE];
153 };
154
155 static inline void get_dev (struct dev_data *data)
156 {
157 refcount_inc (&data->count);
158 }
159
160 static void put_dev (struct dev_data *data)
161 {
162 if (likely (!refcount_dec_and_test (&data->count)))
163 return;
164 /* needs no more cleanup */
165 BUG_ON (waitqueue_active (&data->wait));
166 kfree (data);
167 }
168
169 static struct dev_data *dev_new (void)
170 {
171 struct dev_data *dev;
172
173 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
174 if (!dev)
175 return NULL;
176 dev->state = STATE_DEV_DISABLED;
177 refcount_set (&dev->count, 1);
178 spin_lock_init (&dev->lock);
179 INIT_LIST_HEAD (&dev->epfiles);
180 init_waitqueue_head (&dev->wait);
181 return dev;
182 }
183
184 /*----------------------------------------------------------------------*/
185
186 /* other /dev/gadget/$ENDPOINT files represent endpoints */
187 enum ep_state {
188 STATE_EP_DISABLED = 0,
189 STATE_EP_READY,
190 STATE_EP_ENABLED,
191 STATE_EP_UNBOUND,
192 };
193
194 struct ep_data {
195 struct mutex lock;
196 enum ep_state state;
197 refcount_t count;
198 struct dev_data *dev;
199 /* must hold dev->lock before accessing ep or req */
200 struct usb_ep *ep;
201 struct usb_request *req;
202 ssize_t status;
203 char name [16];
204 struct usb_endpoint_descriptor desc, hs_desc;
205 struct list_head epfiles;
206 wait_queue_head_t wait;
207 struct dentry *dentry;
208 };
209
210 static inline void get_ep (struct ep_data *data)
211 {
212 refcount_inc (&data->count);
213 }
214
215 static void put_ep (struct ep_data *data)
216 {
217 if (likely (!refcount_dec_and_test (&data->count)))
218 return;
219 put_dev (data->dev);
220 /* needs no more cleanup */
221 BUG_ON (!list_empty (&data->epfiles));
222 BUG_ON (waitqueue_active (&data->wait));
223 kfree (data);
224 }
225
226 /*----------------------------------------------------------------------*/
227
228 /* most "how to use the hardware" policy choices are in userspace:
229 * mapping endpoint roles (which the driver needs) to the capabilities
230 * which the usb controller has. most of those capabilities are exposed
231 * implicitly, starting with the driver name and then endpoint names.
232 */
233
234 static const char *CHIP;
235
236 /*----------------------------------------------------------------------*/
237
238 /* NOTE: don't use dev_printk calls before binding to the gadget
239 * at the end of ep0 configuration, or after unbind.
240 */
241
242 /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
243 #define xprintk(d,level,fmt,args...) \
244 printk(level "%s: " fmt , shortname , ## args)
245
246 #ifdef DEBUG
247 #define DBG(dev,fmt,args...) \
248 xprintk(dev , KERN_DEBUG , fmt , ## args)
249 #else
250 #define DBG(dev,fmt,args...) \
251 do { } while (0)
252 #endif /* DEBUG */
253
254 #ifdef VERBOSE_DEBUG
255 #define VDEBUG DBG
256 #else
257 #define VDEBUG(dev,fmt,args...) \
258 do { } while (0)
259 #endif /* DEBUG */
260
261 #define ERROR(dev,fmt,args...) \
262 xprintk(dev , KERN_ERR , fmt , ## args)
263 #define INFO(dev,fmt,args...) \
264 xprintk(dev , KERN_INFO , fmt , ## args)
265
266
267 /*----------------------------------------------------------------------*/
268
269 /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
270 *
271 * After opening, configure non-control endpoints. Then use normal
272 * stream read() and write() requests; and maybe ioctl() to get more
273 * precise FIFO status when recovering from cancellation.
274 */
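/*
 * Illustrative user-mode sketch (not part of this driver): synchronous
 * I/O on an already-configured endpoint file, plus the FIFO status
 * ioctl mentioned above.  "in_fd" is assumed to be an open, configured
 * /dev/gadget/ep* file for a bulk IN endpoint.
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	char buf[512];
 *	ssize_t n;
 *	int pending;
 *
 *	// fill buf[] ... then block until the host collects the data
 *	n = write(in_fd, buf, sizeof buf);
 *
 *	// bytes still sitting in the hardware FIFO (-1/errno if unsupported)
 *	pending = ioctl(in_fd, GADGETFS_FIFO_STATUS);
 */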
275
276 static void epio_complete (struct usb_ep *ep, struct usb_request *req)
277 {
278 struct ep_data *epdata = ep->driver_data;
279
280 if (!req->context)
281 return;
282 if (req->status)
283 epdata->status = req->status;
284 else
285 epdata->status = req->actual;
286 complete ((struct completion *)req->context);
287 }
288
289 /* lock the endpoint, returning once it is ready for i/o.
290 * still need dev->lock to use epdata->ep.
291 */
292 static int
293 get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
294 {
295 int val;
296
297 if (f_flags & O_NONBLOCK) {
298 if (!mutex_trylock(&epdata->lock))
299 goto nonblock;
300 if (epdata->state != STATE_EP_ENABLED &&
301 (!is_write || epdata->state != STATE_EP_READY)) {
302 mutex_unlock(&epdata->lock);
303 nonblock:
304 val = -EAGAIN;
305 } else
306 val = 0;
307 return val;
308 }
309
310 val = mutex_lock_interruptible(&epdata->lock);
311 if (val < 0)
312 return val;
313
314 switch (epdata->state) {
315 case STATE_EP_ENABLED:
316 return 0;
317 case STATE_EP_READY: /* not configured yet */
318 if (is_write)
319 return 0;
320 // FALLTHRU
321 case STATE_EP_UNBOUND: /* clean disconnect */
322 break;
323 // case STATE_EP_DISABLED: /* "can't happen" */
324 default: /* error! */
325 pr_debug ("%s: ep %p not available, state %d\n",
326 shortname, epdata, epdata->state);
327 }
328 mutex_unlock(&epdata->lock);
329 return -ENODEV;
330 }
331
332 static ssize_t
333 ep_io (struct ep_data *epdata, void *buf, unsigned len)
334 {
335 DECLARE_COMPLETION_ONSTACK (done);
336 int value;
337
338 spin_lock_irq (&epdata->dev->lock);
339 if (likely (epdata->ep != NULL)) {
340 struct usb_request *req = epdata->req;
341
342 req->context = &done;
343 req->complete = epio_complete;
344 req->buf = buf;
345 req->length = len;
346 value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
347 } else
348 value = -ENODEV;
349 spin_unlock_irq (&epdata->dev->lock);
350
351 if (likely (value == 0)) {
352 value = wait_event_interruptible (done.wait, done.done);
353 if (value != 0) {
354 spin_lock_irq (&epdata->dev->lock);
355 if (likely (epdata->ep != NULL)) {
356 DBG (epdata->dev, "%s i/o interrupted\n",
357 epdata->name);
358 usb_ep_dequeue (epdata->ep, epdata->req);
359 spin_unlock_irq (&epdata->dev->lock);
360
361 wait_event (done.wait, done.done);
362 if (epdata->status == -ECONNRESET)
363 epdata->status = -EINTR;
364 } else {
365 spin_unlock_irq (&epdata->dev->lock);
366
367 DBG (epdata->dev, "endpoint gone\n");
368 epdata->status = -ENODEV;
369 }
370 }
371 return epdata->status;
372 }
373 return value;
374 }
375
376 static int
377 ep_release (struct inode *inode, struct file *fd)
378 {
379 struct ep_data *data = fd->private_data;
380 int value;
381
382 value = mutex_lock_interruptible(&data->lock);
383 if (value < 0)
384 return value;
385
386 /* clean up if this can be reopened */
387 if (data->state != STATE_EP_UNBOUND) {
388 data->state = STATE_EP_DISABLED;
389 data->desc.bDescriptorType = 0;
390 data->hs_desc.bDescriptorType = 0;
391 usb_ep_disable(data->ep);
392 }
393 mutex_unlock(&data->lock);
394 put_ep (data);
395 return 0;
396 }
397
398 static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
399 {
400 struct ep_data *data = fd->private_data;
401 int status;
402
403 if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
404 return status;
405
406 spin_lock_irq (&data->dev->lock);
407 if (likely (data->ep != NULL)) {
408 switch (code) {
409 case GADGETFS_FIFO_STATUS:
410 status = usb_ep_fifo_status (data->ep);
411 break;
412 case GADGETFS_FIFO_FLUSH:
413 usb_ep_fifo_flush (data->ep);
414 break;
415 case GADGETFS_CLEAR_HALT:
416 status = usb_ep_clear_halt (data->ep);
417 break;
418 default:
419 status = -ENOTTY;
420 }
421 } else
422 status = -ENODEV;
423 spin_unlock_irq (&data->dev->lock);
424 mutex_unlock(&data->lock);
425 return status;
426 }
427
428 /*----------------------------------------------------------------------*/
429
430 /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
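/*
 * The code below is driven from user space through the kernel's native
 * AIO interface (io_submit() and friends on an endpoint file).  A
 * minimal sketch follows; libaio is assumed purely for brevity (this
 * driver does not depend on it), and "out_fd" is an open, configured
 * OUT endpoint file.
 *
 *	#include <libaio.h>
 *
 *	io_context_t ctx = 0;
 *	struct iocb cb, *cbs[1] = { &cb };
 *	struct io_event done;
 *	char buf[512];
 *
 *	io_queue_init(8, &ctx);
 *	io_prep_pread(&cb, out_fd, buf, sizeof buf, 0);	// offset is ignored
 *	io_submit(ctx, 1, cbs);			// queues a usb_request
 *	io_getevents(ctx, 1, 1, &done, NULL);	// done.res: bytes or -errno
 *	io_destroy(ctx);
 */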
431
432 struct kiocb_priv {
433 struct usb_request *req;
434 struct ep_data *epdata;
435 struct kiocb *iocb;
436 struct mm_struct *mm;
437 struct work_struct work;
438 void *buf;
439 struct iov_iter to;
440 const void *to_free;
441 unsigned actual;
442 };
443
444 static int ep_aio_cancel(struct kiocb *iocb)
445 {
446 struct kiocb_priv *priv = iocb->private;
447 struct ep_data *epdata;
448 int value;
449
450 local_irq_disable();
451 epdata = priv->epdata;
452 // spin_lock(&epdata->dev->lock);
453 if (likely(epdata && epdata->ep && priv->req))
454 value = usb_ep_dequeue (epdata->ep, priv->req);
455 else
456 value = -EINVAL;
457 // spin_unlock(&epdata->dev->lock);
458 local_irq_enable();
459
460 return value;
461 }
462
463 static void ep_user_copy_worker(struct work_struct *work)
464 {
465 struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
466 struct mm_struct *mm = priv->mm;
467 struct kiocb *iocb = priv->iocb;
468 size_t ret;
469
470 use_mm(mm);
471 ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
472 unuse_mm(mm);
473 if (!ret)
474 ret = -EFAULT;
475
476 /* completing the iocb can drop the ctx and mm, don't touch mm after */
477 iocb->ki_complete(iocb, ret, ret);
478
479 kfree(priv->buf);
480 kfree(priv->to_free);
481 kfree(priv);
482 }
483
484 static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
485 {
486 struct kiocb *iocb = req->context;
487 struct kiocb_priv *priv = iocb->private;
488 struct ep_data *epdata = priv->epdata;
489
490 /* lock against disconnect (and ideally, cancel) */
491 spin_lock(&epdata->dev->lock);
492 priv->req = NULL;
493 priv->epdata = NULL;
494
495 /* if this was a write or a read returning no data then we
496 * don't need to copy anything to userspace, so we can
497 * complete the aio request immediately.
498 */
499 if (priv->to_free == NULL || unlikely(req->actual == 0)) {
500 kfree(req->buf);
501 kfree(priv->to_free);
502 kfree(priv);
503 iocb->private = NULL;
504 /* aio_complete() reports bytes-transferred _and_ faults */
505
506 iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
507 req->status);
508 } else {
509 /* ep_copy_to_user() won't report both; we hide some faults */
510 if (unlikely(0 != req->status))
511 DBG(epdata->dev, "%s fault %d len %d\n",
512 ep->name, req->status, req->actual);
513
514 priv->buf = req->buf;
515 priv->actual = req->actual;
516 INIT_WORK(&priv->work, ep_user_copy_worker);
517 schedule_work(&priv->work);
518 }
519
520 usb_ep_free_request(ep, req);
521 spin_unlock(&epdata->dev->lock);
522 put_ep(epdata);
523 }
524
525 static ssize_t ep_aio(struct kiocb *iocb,
526 struct kiocb_priv *priv,
527 struct ep_data *epdata,
528 char *buf,
529 size_t len)
530 {
531 struct usb_request *req;
532 ssize_t value;
533
534 iocb->private = priv;
535 priv->iocb = iocb;
536
537 kiocb_set_cancel_fn(iocb, ep_aio_cancel);
538 get_ep(epdata);
539 priv->epdata = epdata;
540 priv->actual = 0;
541 priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
542
543 /* each kiocb is coupled to one usb_request, but we can't
544 * allocate or submit those if the host disconnected.
545 */
546 spin_lock_irq(&epdata->dev->lock);
547 value = -ENODEV;
548 if (unlikely(epdata->ep == NULL))
549 goto fail;
550
551 req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
552 value = -ENOMEM;
553 if (unlikely(!req))
554 goto fail;
555
556 priv->req = req;
557 req->buf = buf;
558 req->length = len;
559 req->complete = ep_aio_complete;
560 req->context = iocb;
561 value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
562 if (unlikely(0 != value)) {
563 usb_ep_free_request(epdata->ep, req);
564 goto fail;
565 }
566 spin_unlock_irq(&epdata->dev->lock);
567 return -EIOCBQUEUED;
568
569 fail:
570 spin_unlock_irq(&epdata->dev->lock);
571 kfree(priv->to_free);
572 kfree(priv);
573 put_ep(epdata);
574 return value;
575 }
576
577 static ssize_t
578 ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
579 {
580 struct file *file = iocb->ki_filp;
581 struct ep_data *epdata = file->private_data;
582 size_t len = iov_iter_count(to);
583 ssize_t value;
584 char *buf;
585
586 if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
587 return value;
588
589 /* halt any endpoint by doing a "wrong direction" i/o call */
590 if (usb_endpoint_dir_in(&epdata->desc)) {
591 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
592 !is_sync_kiocb(iocb)) {
593 mutex_unlock(&epdata->lock);
594 return -EINVAL;
595 }
596 DBG (epdata->dev, "%s halt\n", epdata->name);
597 spin_lock_irq(&epdata->dev->lock);
598 if (likely(epdata->ep != NULL))
599 usb_ep_set_halt(epdata->ep);
600 spin_unlock_irq(&epdata->dev->lock);
601 mutex_unlock(&epdata->lock);
602 return -EBADMSG;
603 }
604
605 buf = kmalloc(len, GFP_KERNEL);
606 if (unlikely(!buf)) {
607 mutex_unlock(&epdata->lock);
608 return -ENOMEM;
609 }
610 if (is_sync_kiocb(iocb)) {
611 value = ep_io(epdata, buf, len);
612 if (value >= 0 && (copy_to_iter(buf, value, to) != value))
613 value = -EFAULT;
614 } else {
615 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
616 value = -ENOMEM;
617 if (!priv)
618 goto fail;
619 priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
620 if (!priv->to_free) {
621 kfree(priv);
622 goto fail;
623 }
624 value = ep_aio(iocb, priv, epdata, buf, len);
625 if (value == -EIOCBQUEUED)
626 buf = NULL;
627 }
628 fail:
629 kfree(buf);
630 mutex_unlock(&epdata->lock);
631 return value;
632 }
633
634 static ssize_t ep_config(struct ep_data *, const char *, size_t);
635
636 static ssize_t
637 ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
638 {
639 struct file *file = iocb->ki_filp;
640 struct ep_data *epdata = file->private_data;
641 size_t len = iov_iter_count(from);
642 bool configured;
643 ssize_t value;
644 char *buf;
645
646 if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
647 return value;
648
649 configured = epdata->state == STATE_EP_ENABLED;
650
651 /* halt any endpoint by doing a "wrong direction" i/o call */
652 if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
653 if (usb_endpoint_xfer_isoc(&epdata->desc) ||
654 !is_sync_kiocb(iocb)) {
655 mutex_unlock(&epdata->lock);
656 return -EINVAL;
657 }
658 DBG (epdata->dev, "%s halt\n", epdata->name);
659 spin_lock_irq(&epdata->dev->lock);
660 if (likely(epdata->ep != NULL))
661 usb_ep_set_halt(epdata->ep);
662 spin_unlock_irq(&epdata->dev->lock);
663 mutex_unlock(&epdata->lock);
664 return -EBADMSG;
665 }
666
667 buf = kmalloc(len, GFP_KERNEL);
668 if (unlikely(!buf)) {
669 mutex_unlock(&epdata->lock);
670 return -ENOMEM;
671 }
672
673 if (unlikely(!copy_from_iter_full(buf, len, from))) {
674 value = -EFAULT;
675 goto out;
676 }
677
678 if (unlikely(!configured)) {
679 value = ep_config(epdata, buf, len);
680 } else if (is_sync_kiocb(iocb)) {
681 value = ep_io(epdata, buf, len);
682 } else {
683 struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
684 value = -ENOMEM;
685 if (priv) {
686 value = ep_aio(iocb, priv, epdata, buf, len);
687 if (value == -EIOCBQUEUED)
688 buf = NULL;
689 }
690 }
691 out:
692 kfree(buf);
693 mutex_unlock(&epdata->lock);
694 return value;
695 }
696
697 /*----------------------------------------------------------------------*/
698
699 /* used after endpoint configuration */
700 static const struct file_operations ep_io_operations = {
701 .owner = THIS_MODULE,
702
703 .open = ep_open,
704 .release = ep_release,
705 .llseek = no_llseek,
706 .unlocked_ioctl = ep_ioctl,
707 .read_iter = ep_read_iter,
708 .write_iter = ep_write_iter,
709 };
710
711 /* ENDPOINT INITIALIZATION
712 *
713 * fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
714 * status = write (fd, descriptors, sizeof descriptors)
715 *
716 * That write establishes the endpoint configuration, configuring
717 * the controller to process bulk, interrupt, or isochronous transfers
718 * at the right maxpacket size, and so on.
719 *
720 * The descriptors are message type 1, identified by a host order u32
721 * at the beginning of what's written. Descriptor order is: full/low
722 * speed descriptor, then optional high speed descriptor.
723 */
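/*
 * Illustrative user-mode sketch (not part of this driver) of the
 * message ep_config() parses below: a host-order u32 tag of 1, the
 * full/low speed endpoint descriptor, then (optionally) the high speed
 * one.  The descriptor values and "ep_fd" are made up for the example.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <asm/byteorder.h>
 *	#include <linux/usb/ch9.h>
 *
 *	struct usb_endpoint_descriptor fs_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= __cpu_to_le16(64),
 *	};
 *	__u32 tag = 1;
 *	char msg[4 + USB_DT_ENDPOINT_SIZE];
 *
 *	memcpy(msg, &tag, 4);
 *	memcpy(msg + 4, &fs_desc, USB_DT_ENDPOINT_SIZE);
 *	if (write(ep_fd, msg, sizeof msg) < 0)
 *		;	// descriptor rejected, endpoint stays unconfigured
 */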
724 static ssize_t
725 ep_config (struct ep_data *data, const char *buf, size_t len)
726 {
727 struct usb_ep *ep;
728 u32 tag;
729 int value, length = len;
730
731 if (data->state != STATE_EP_READY) {
732 value = -EL2HLT;
733 goto fail;
734 }
735
736 value = len;
737 if (len < USB_DT_ENDPOINT_SIZE + 4)
738 goto fail0;
739
740 /* we might need to change message format someday */
741 memcpy(&tag, buf, 4);
742 if (tag != 1) {
743 DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
744 goto fail0;
745 }
746 buf += 4;
747 len -= 4;
748
749 /* NOTE: audio endpoint extensions not accepted here;
750 * just don't include the extra bytes.
751 */
752
753 /* full/low speed descriptor, then high speed */
754 memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
755 if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
756 || data->desc.bDescriptorType != USB_DT_ENDPOINT)
757 goto fail0;
758 if (len != USB_DT_ENDPOINT_SIZE) {
759 if (len != 2 * USB_DT_ENDPOINT_SIZE)
760 goto fail0;
761 memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
762 USB_DT_ENDPOINT_SIZE);
763 if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
764 || data->hs_desc.bDescriptorType
765 != USB_DT_ENDPOINT) {
766 DBG(data->dev, "config %s, bad hs length or type\n",
767 data->name);
768 goto fail0;
769 }
770 }
771
772 spin_lock_irq (&data->dev->lock);
773 if (data->dev->state == STATE_DEV_UNBOUND) {
774 value = -ENOENT;
775 goto gone;
776 } else {
777 ep = data->ep;
778 if (ep == NULL) {
779 value = -ENODEV;
780 goto gone;
781 }
782 }
783 switch (data->dev->gadget->speed) {
784 case USB_SPEED_LOW:
785 case USB_SPEED_FULL:
786 ep->desc = &data->desc;
787 break;
788 case USB_SPEED_HIGH:
789 /* fails if caller didn't provide that descriptor... */
790 ep->desc = &data->hs_desc;
791 break;
792 default:
793 DBG(data->dev, "unconnected, %s init abandoned\n",
794 data->name);
795 value = -EINVAL;
796 goto gone;
797 }
798 value = usb_ep_enable(ep);
799 if (value == 0) {
800 data->state = STATE_EP_ENABLED;
801 value = length;
802 }
803 gone:
804 spin_unlock_irq (&data->dev->lock);
805 if (value < 0) {
806 fail:
807 data->desc.bDescriptorType = 0;
808 data->hs_desc.bDescriptorType = 0;
809 }
810 return value;
811 fail0:
812 value = -EINVAL;
813 goto fail;
814 }
815
816 static int
817 ep_open (struct inode *inode, struct file *fd)
818 {
819 struct ep_data *data = inode->i_private;
820 int value = -EBUSY;
821
822 if (mutex_lock_interruptible(&data->lock) != 0)
823 return -EINTR;
824 spin_lock_irq (&data->dev->lock);
825 if (data->dev->state == STATE_DEV_UNBOUND)
826 value = -ENOENT;
827 else if (data->state == STATE_EP_DISABLED) {
828 value = 0;
829 data->state = STATE_EP_READY;
830 get_ep (data);
831 fd->private_data = data;
832 VDEBUG (data->dev, "%s ready\n", data->name);
833 } else
834 DBG (data->dev, "%s state %d\n",
835 data->name, data->state);
836 spin_unlock_irq (&data->dev->lock);
837 mutex_unlock(&data->lock);
838 return value;
839 }
840
841 /*----------------------------------------------------------------------*/
842
843 /* EP0 IMPLEMENTATION can be partly in userspace.
844 *
845 * Drivers that use this facility receive various events, including
846 * control requests the kernel doesn't handle. Drivers that don't
847 * use this facility may be too simple-minded for real applications.
848 */
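/*
 * Illustrative user-mode sketch (not part of this driver): answering a
 * delegated SETUP event on the ep0 fd, matching ep0_read()/ep0_write()
 * below.  handle_setup() is the hypothetical helper named in the
 * event-loop sketch near the top of this file; an I/O call in the
 * "wrong direction" would request a protocol stall instead.
 *
 *	#include <unistd.h>
 *	#include <asm/byteorder.h>
 *	#include <linux/usb/ch9.h>
 *
 *	static void handle_setup(int ep0_fd, struct usb_ctrlrequest *setup)
 *	{
 *		__u16 len = __le16_to_cpu(setup->wLength);
 *		char buf[256];
 *
 *		if (setup->bRequestType & USB_DIR_IN) {
 *			// fill buf[] with at most len bytes of response ...
 *			write(ep0_fd, buf, len);
 *		} else if (len) {
 *			read(ep0_fd, buf, len);		// collect OUT data
 *		} else {
 *			read(ep0_fd, buf, 0);		// ack the status stage
 *		}
 *	}
 */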
849
850 static inline void ep0_readable (struct dev_data *dev)
851 {
852 wake_up (&dev->wait);
853 kill_fasync (&dev->fasync, SIGIO, POLL_IN);
854 }
855
856 static void clean_req (struct usb_ep *ep, struct usb_request *req)
857 {
858 struct dev_data *dev = ep->driver_data;
859
860 if (req->buf != dev->rbuf) {
861 kfree(req->buf);
862 req->buf = dev->rbuf;
863 }
864 req->complete = epio_complete;
865 dev->setup_out_ready = 0;
866 }
867
868 static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
869 {
870 struct dev_data *dev = ep->driver_data;
871 unsigned long flags;
872 int free = 1;
873
874 /* for control OUT, data must still get to userspace */
875 spin_lock_irqsave(&dev->lock, flags);
876 if (!dev->setup_in) {
877 dev->setup_out_error = (req->status != 0);
878 if (!dev->setup_out_error)
879 free = 0;
880 dev->setup_out_ready = 1;
881 ep0_readable (dev);
882 }
883
884 /* clean up as appropriate */
885 if (free && req->buf != &dev->rbuf)
886 clean_req (ep, req);
887 req->complete = epio_complete;
888 spin_unlock_irqrestore(&dev->lock, flags);
889 }
890
891 static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
892 {
893 struct dev_data *dev = ep->driver_data;
894
895 if (dev->setup_out_ready) {
896 DBG (dev, "ep0 request busy!\n");
897 return -EBUSY;
898 }
899 if (len > sizeof (dev->rbuf))
900 req->buf = kmalloc(len, GFP_ATOMIC);
901 if (req->buf == NULL) {
902 req->buf = dev->rbuf;
903 return -ENOMEM;
904 }
905 req->complete = ep0_complete;
906 req->length = len;
907 req->zero = 0;
908 return 0;
909 }
910
911 static ssize_t
912 ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
913 {
914 struct dev_data *dev = fd->private_data;
915 ssize_t retval;
916 enum ep0_state state;
917
918 spin_lock_irq (&dev->lock);
919 if (dev->state <= STATE_DEV_OPENED) {
920 retval = -EINVAL;
921 goto done;
922 }
923
924 /* report fd mode change before acting on it */
925 if (dev->setup_abort) {
926 dev->setup_abort = 0;
927 retval = -EIDRM;
928 goto done;
929 }
930
931 /* control DATA stage */
932 if ((state = dev->state) == STATE_DEV_SETUP) {
933
934 if (dev->setup_in) { /* stall IN */
935 VDEBUG(dev, "ep0in stall\n");
936 (void) usb_ep_set_halt (dev->gadget->ep0);
937 retval = -EL2HLT;
938 dev->state = STATE_DEV_CONNECTED;
939
940 } else if (len == 0) { /* ack SET_CONFIGURATION etc */
941 struct usb_ep *ep = dev->gadget->ep0;
942 struct usb_request *req = dev->req;
943
944 if ((retval = setup_req (ep, req, 0)) == 0) {
945 ++dev->udc_usage;
946 spin_unlock_irq (&dev->lock);
947 retval = usb_ep_queue (ep, req, GFP_KERNEL);
948 spin_lock_irq (&dev->lock);
949 --dev->udc_usage;
950 }
951 dev->state = STATE_DEV_CONNECTED;
952
953 /* assume that was SET_CONFIGURATION */
954 if (dev->current_config) {
955 unsigned power;
956
957 if (gadget_is_dualspeed(dev->gadget)
958 && (dev->gadget->speed
959 == USB_SPEED_HIGH))
960 power = dev->hs_config->bMaxPower;
961 else
962 power = dev->config->bMaxPower;
963 usb_gadget_vbus_draw(dev->gadget, 2 * power);
964 }
965
966 } else { /* collect OUT data */
967 if ((fd->f_flags & O_NONBLOCK) != 0
968 && !dev->setup_out_ready) {
969 retval = -EAGAIN;
970 goto done;
971 }
972 spin_unlock_irq (&dev->lock);
973 retval = wait_event_interruptible (dev->wait,
974 dev->setup_out_ready != 0);
975
976 /* FIXME state could change from under us */
977 spin_lock_irq (&dev->lock);
978 if (retval)
979 goto done;
980
981 if (dev->state != STATE_DEV_SETUP) {
982 retval = -ECANCELED;
983 goto done;
984 }
985 dev->state = STATE_DEV_CONNECTED;
986
987 if (dev->setup_out_error)
988 retval = -EIO;
989 else {
990 len = min (len, (size_t)dev->req->actual);
991 ++dev->udc_usage;
992 spin_unlock_irq(&dev->lock);
993 if (copy_to_user (buf, dev->req->buf, len))
994 retval = -EFAULT;
995 else
996 retval = len;
997 spin_lock_irq(&dev->lock);
998 --dev->udc_usage;
999 clean_req (dev->gadget->ep0, dev->req);
1000 /* NOTE userspace can't yet choose to stall */
1001 }
1002 }
1003 goto done;
1004 }
1005
1006 /* else normal: return event data */
1007 if (len < sizeof dev->event [0]) {
1008 retval = -EINVAL;
1009 goto done;
1010 }
1011 len -= len % sizeof (struct usb_gadgetfs_event);
1012 dev->usermode_setup = 1;
1013
1014 scan:
1015 /* return queued events right away */
1016 if (dev->ev_next != 0) {
1017 unsigned i, n;
1018
1019 n = len / sizeof (struct usb_gadgetfs_event);
1020 if (dev->ev_next < n)
1021 n = dev->ev_next;
1022
1023 /* ep0 i/o has special semantics during STATE_DEV_SETUP */
1024 for (i = 0; i < n; i++) {
1025 if (dev->event [i].type == GADGETFS_SETUP) {
1026 dev->state = STATE_DEV_SETUP;
1027 n = i + 1;
1028 break;
1029 }
1030 }
1031 spin_unlock_irq (&dev->lock);
1032 len = n * sizeof (struct usb_gadgetfs_event);
1033 if (copy_to_user (buf, &dev->event, len))
1034 retval = -EFAULT;
1035 else
1036 retval = len;
1037 if (len > 0) {
1038 /* NOTE this doesn't guard against broken drivers;
1039 * concurrent ep0 readers may lose events.
1040 */
1041 spin_lock_irq (&dev->lock);
1042 if (dev->ev_next > n) {
1043 memmove(&dev->event[0], &dev->event[n],
1044 sizeof (struct usb_gadgetfs_event)
1045 * (dev->ev_next - n));
1046 }
1047 dev->ev_next -= n;
1048 spin_unlock_irq (&dev->lock);
1049 }
1050 return retval;
1051 }
1052 if (fd->f_flags & O_NONBLOCK) {
1053 retval = -EAGAIN;
1054 goto done;
1055 }
1056
1057 switch (state) {
1058 default:
1059 DBG (dev, "fail %s, state %d\n", __func__, state);
1060 retval = -ESRCH;
1061 break;
1062 case STATE_DEV_UNCONNECTED:
1063 case STATE_DEV_CONNECTED:
1064 spin_unlock_irq (&dev->lock);
1065 DBG (dev, "%s wait\n", __func__);
1066
1067 /* wait for events */
1068 retval = wait_event_interruptible (dev->wait,
1069 dev->ev_next != 0);
1070 if (retval < 0)
1071 return retval;
1072 spin_lock_irq (&dev->lock);
1073 goto scan;
1074 }
1075
1076 done:
1077 spin_unlock_irq (&dev->lock);
1078 return retval;
1079 }
1080
1081 static struct usb_gadgetfs_event *
1082 next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
1083 {
1084 struct usb_gadgetfs_event *event;
1085 unsigned i;
1086
1087 switch (type) {
1088 /* these events purge the queue */
1089 case GADGETFS_DISCONNECT:
1090 if (dev->state == STATE_DEV_SETUP)
1091 dev->setup_abort = 1;
1092 // FALL THROUGH
1093 case GADGETFS_CONNECT:
1094 dev->ev_next = 0;
1095 break;
1096 case GADGETFS_SETUP: /* previous request timed out */
1097 case GADGETFS_SUSPEND: /* same effect */
1098 /* these events can't be repeated */
1099 for (i = 0; i != dev->ev_next; i++) {
1100 if (dev->event [i].type != type)
1101 continue;
1102 DBG(dev, "discard old event[%d] %d\n", i, type);
1103 dev->ev_next--;
1104 if (i == dev->ev_next)
1105 break;
1106 /* indices start at zero, for simplicity */
1107 memmove (&dev->event [i], &dev->event [i + 1],
1108 sizeof (struct usb_gadgetfs_event)
1109 * (dev->ev_next - i));
1110 }
1111 break;
1112 default:
1113 BUG ();
1114 }
1115 VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
1116 event = &dev->event [dev->ev_next++];
1117 BUG_ON (dev->ev_next > N_EVENT);
1118 memset (event, 0, sizeof *event);
1119 event->type = type;
1120 return event;
1121 }
1122
1123 static ssize_t
1124 ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1125 {
1126 struct dev_data *dev = fd->private_data;
1127 ssize_t retval = -ESRCH;
1128
1129 /* report fd mode change before acting on it */
1130 if (dev->setup_abort) {
1131 dev->setup_abort = 0;
1132 retval = -EIDRM;
1133
1134 /* data and/or status stage for control request */
1135 } else if (dev->state == STATE_DEV_SETUP) {
1136
1137 len = min_t(size_t, len, dev->setup_wLength);
1138 if (dev->setup_in) {
1139 retval = setup_req (dev->gadget->ep0, dev->req, len);
1140 if (retval == 0) {
1141 dev->state = STATE_DEV_CONNECTED;
1142 ++dev->udc_usage;
1143 spin_unlock_irq (&dev->lock);
1144 if (copy_from_user (dev->req->buf, buf, len))
1145 retval = -EFAULT;
1146 else {
1147 if (len < dev->setup_wLength)
1148 dev->req->zero = 1;
1149 retval = usb_ep_queue (
1150 dev->gadget->ep0, dev->req,
1151 GFP_KERNEL);
1152 }
1153 spin_lock_irq(&dev->lock);
1154 --dev->udc_usage;
1155 if (retval < 0) {
1156 clean_req (dev->gadget->ep0, dev->req);
1157 } else
1158 retval = len;
1159
1160 return retval;
1161 }
1162
1163 /* can stall some OUT transfers */
1164 } else if (dev->setup_can_stall) {
1165 VDEBUG(dev, "ep0out stall\n");
1166 (void) usb_ep_set_halt (dev->gadget->ep0);
1167 retval = -EL2HLT;
1168 dev->state = STATE_DEV_CONNECTED;
1169 } else {
1170 DBG(dev, "bogus ep0out stall!\n");
1171 }
1172 } else
1173 DBG (dev, "fail %s, state %d\n", __func__, dev->state);
1174
1175 return retval;
1176 }
1177
1178 static int
1179 ep0_fasync (int f, struct file *fd, int on)
1180 {
1181 struct dev_data *dev = fd->private_data;
1182 // caller must F_SETOWN before signal delivery happens
1183 VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
1184 return fasync_helper (f, fd, on, &dev->fasync);
1185 }
1186
1187 static struct usb_gadget_driver gadgetfs_driver;
1188
1189 static int
1190 dev_release (struct inode *inode, struct file *fd)
1191 {
1192 struct dev_data *dev = fd->private_data;
1193
1194 /* closing ep0 === shutdown all */
1195
1196 if (dev->gadget_registered) {
1197 usb_gadget_unregister_driver (&gadgetfs_driver);
1198 dev->gadget_registered = false;
1199 }
1200
1201 /* at this point "good" hardware has disconnected the
1202 * device from USB; the host won't see it any more.
1203 * alternatively, all host requests will time out.
1204 */
1205
1206 kfree (dev->buf);
1207 dev->buf = NULL;
1208
1209 /* other endpoints were all decoupled from this device */
1210 spin_lock_irq(&dev->lock);
1211 dev->state = STATE_DEV_DISABLED;
1212 spin_unlock_irq(&dev->lock);
1213
1214 put_dev (dev);
1215 return 0;
1216 }
1217
1218 static unsigned int
1219 ep0_poll (struct file *fd, poll_table *wait)
1220 {
1221 struct dev_data *dev = fd->private_data;
1222 int mask = 0;
1223
1224 if (dev->state <= STATE_DEV_OPENED)
1225 return DEFAULT_POLLMASK;
1226
1227 poll_wait(fd, &dev->wait, wait);
1228
1229 spin_lock_irq (&dev->lock);
1230
1231 /* report fd mode change before acting on it */
1232 if (dev->setup_abort) {
1233 dev->setup_abort = 0;
1234 mask = POLLHUP;
1235 goto out;
1236 }
1237
1238 if (dev->state == STATE_DEV_SETUP) {
1239 if (dev->setup_in || dev->setup_can_stall)
1240 mask = POLLOUT;
1241 } else {
1242 if (dev->ev_next != 0)
1243 mask = POLLIN;
1244 }
1245 out:
1246 spin_unlock_irq(&dev->lock);
1247 return mask;
1248 }
1249
1250 static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1251 {
1252 struct dev_data *dev = fd->private_data;
1253 struct usb_gadget *gadget = dev->gadget;
1254 long ret = -ENOTTY;
1255
1256 spin_lock_irq(&dev->lock);
1257 if (dev->state == STATE_DEV_OPENED ||
1258 dev->state == STATE_DEV_UNBOUND) {
1259 /* Not bound to a UDC */
1260 } else if (gadget->ops->ioctl) {
1261 ++dev->udc_usage;
1262 spin_unlock_irq(&dev->lock);
1263
1264 ret = gadget->ops->ioctl (gadget, code, value);
1265
1266 spin_lock_irq(&dev->lock);
1267 --dev->udc_usage;
1268 }
1269 spin_unlock_irq(&dev->lock);
1270
1271 return ret;
1272 }
1273
1274 /*----------------------------------------------------------------------*/
1275
1276 /* The in-kernel gadget driver handles most ep0 issues, in particular
1277 * enumerating the single configuration (as provided from user space).
1278 *
1279 * Unrecognized ep0 requests may be handled in user space.
1280 */
1281
1282 static void make_qualifier (struct dev_data *dev)
1283 {
1284 struct usb_qualifier_descriptor qual;
1285 struct usb_device_descriptor *desc;
1286
1287 qual.bLength = sizeof qual;
1288 qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
1289 qual.bcdUSB = cpu_to_le16 (0x0200);
1290
1291 desc = dev->dev;
1292 qual.bDeviceClass = desc->bDeviceClass;
1293 qual.bDeviceSubClass = desc->bDeviceSubClass;
1294 qual.bDeviceProtocol = desc->bDeviceProtocol;
1295
1296 /* assumes ep0 uses the same value for both speeds ... */
1297 qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1298
1299 qual.bNumConfigurations = 1;
1300 qual.bRESERVED = 0;
1301
1302 memcpy (dev->rbuf, &qual, sizeof qual);
1303 }
1304
1305 static int
1306 config_buf (struct dev_data *dev, u8 type, unsigned index)
1307 {
1308 int len;
1309 int hs = 0;
1310
1311 /* only one configuration */
1312 if (index > 0)
1313 return -EINVAL;
1314
1315 if (gadget_is_dualspeed(dev->gadget)) {
1316 hs = (dev->gadget->speed == USB_SPEED_HIGH);
1317 if (type == USB_DT_OTHER_SPEED_CONFIG)
1318 hs = !hs;
1319 }
1320 if (hs) {
1321 dev->req->buf = dev->hs_config;
1322 len = le16_to_cpu(dev->hs_config->wTotalLength);
1323 } else {
1324 dev->req->buf = dev->config;
1325 len = le16_to_cpu(dev->config->wTotalLength);
1326 }
1327 ((u8 *)dev->req->buf) [1] = type;
1328 return len;
1329 }
1330
1331 static int
1332 gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1333 {
1334 struct dev_data *dev = get_gadget_data (gadget);
1335 struct usb_request *req = dev->req;
1336 int value = -EOPNOTSUPP;
1337 struct usb_gadgetfs_event *event;
1338 u16 w_value = le16_to_cpu(ctrl->wValue);
1339 u16 w_length = le16_to_cpu(ctrl->wLength);
1340
1341 if (w_length > RBUF_SIZE) {
1342 if (ctrl->bRequestType & USB_DIR_IN) {
1343 /* Cast away the const, we are going to overwrite on purpose. */
1344 __le16 *temp = (__le16 *)&ctrl->wLength;
1345
1346 *temp = cpu_to_le16(RBUF_SIZE);
1347 w_length = RBUF_SIZE;
1348 } else {
1349 return value;
1350 }
1351 }
1352
1353 spin_lock (&dev->lock);
1354 dev->setup_abort = 0;
1355 if (dev->state == STATE_DEV_UNCONNECTED) {
1356 if (gadget_is_dualspeed(gadget)
1357 && gadget->speed == USB_SPEED_HIGH
1358 && dev->hs_config == NULL) {
1359 spin_unlock(&dev->lock);
1360 ERROR (dev, "no high speed config??\n");
1361 return -EINVAL;
1362 }
1363
1364 dev->state = STATE_DEV_CONNECTED;
1365
1366 INFO (dev, "connected\n");
1367 event = next_event (dev, GADGETFS_CONNECT);
1368 event->u.speed = gadget->speed;
1369 ep0_readable (dev);
1370
1371 /* host may have given up waiting for response. we can miss control
1372 * requests handled lower down (device/endpoint status and features);
1373 * then ep0_{read,write} will report the wrong status. controller
1374 * driver will have aborted pending i/o.
1375 */
1376 } else if (dev->state == STATE_DEV_SETUP)
1377 dev->setup_abort = 1;
1378
1379 req->buf = dev->rbuf;
1380 req->context = NULL;
1381 value = -EOPNOTSUPP;
1382 switch (ctrl->bRequest) {
1383
1384 case USB_REQ_GET_DESCRIPTOR:
1385 if (ctrl->bRequestType != USB_DIR_IN)
1386 goto unrecognized;
1387 switch (w_value >> 8) {
1388
1389 case USB_DT_DEVICE:
1390 value = min (w_length, (u16) sizeof *dev->dev);
1391 dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
1392 req->buf = dev->dev;
1393 break;
1394 case USB_DT_DEVICE_QUALIFIER:
1395 if (!dev->hs_config)
1396 break;
1397 value = min (w_length, (u16)
1398 sizeof (struct usb_qualifier_descriptor));
1399 make_qualifier (dev);
1400 break;
1401 case USB_DT_OTHER_SPEED_CONFIG:
1402 // FALLTHROUGH
1403 case USB_DT_CONFIG:
1404 value = config_buf (dev,
1405 w_value >> 8,
1406 w_value & 0xff);
1407 if (value >= 0)
1408 value = min (w_length, (u16) value);
1409 break;
1410 case USB_DT_STRING:
1411 goto unrecognized;
1412
1413 default: // all others are errors
1414 break;
1415 }
1416 break;
1417
1418 /* currently one config, two speeds */
1419 case USB_REQ_SET_CONFIGURATION:
1420 if (ctrl->bRequestType != 0)
1421 goto unrecognized;
1422 if (0 == (u8) w_value) {
1423 value = 0;
1424 dev->current_config = 0;
1425 usb_gadget_vbus_draw(gadget, 8 /* mA */ );
1426 // user mode expected to disable endpoints
1427 } else {
1428 u8 config, power;
1429
1430 if (gadget_is_dualspeed(gadget)
1431 && gadget->speed == USB_SPEED_HIGH) {
1432 config = dev->hs_config->bConfigurationValue;
1433 power = dev->hs_config->bMaxPower;
1434 } else {
1435 config = dev->config->bConfigurationValue;
1436 power = dev->config->bMaxPower;
1437 }
1438
1439 if (config == (u8) w_value) {
1440 value = 0;
1441 dev->current_config = config;
1442 usb_gadget_vbus_draw(gadget, 2 * power);
1443 }
1444 }
1445
1446 /* report SET_CONFIGURATION like any other control request,
1447 * except that usermode may not stall this. the next
1448 * request mustn't be allowed to start until this finishes:
1449 * endpoints and threads set up, etc.
1450 *
1451 * NOTE: older PXA hardware (before PXA 255: without UDCCFR)
1452 * has bad/racey automagic that prevents synchronizing here.
1453 * even kernel mode drivers often miss them.
1454 */
1455 if (value == 0) {
1456 INFO (dev, "configuration #%d\n", dev->current_config);
1457 usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
1458 if (dev->usermode_setup) {
1459 dev->setup_can_stall = 0;
1460 goto delegate;
1461 }
1462 }
1463 break;
1464
1465 #ifndef CONFIG_USB_PXA25X
1466 /* PXA automagically handles this request too */
1467 case USB_REQ_GET_CONFIGURATION:
1468 if (ctrl->bRequestType != 0x80)
1469 goto unrecognized;
1470 *(u8 *)req->buf = dev->current_config;
1471 value = min (w_length, (u16) 1);
1472 break;
1473 #endif
1474
1475 default:
1476 unrecognized:
1477 VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
1478 dev->usermode_setup ? "delegate" : "fail",
1479 ctrl->bRequestType, ctrl->bRequest,
1480 w_value, le16_to_cpu(ctrl->wIndex), w_length);
1481
1482 /* if there's an ep0 reader, don't stall */
1483 if (dev->usermode_setup) {
1484 dev->setup_can_stall = 1;
1485 delegate:
1486 dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
1487 ? 1 : 0;
1488 dev->setup_wLength = w_length;
1489 dev->setup_out_ready = 0;
1490 dev->setup_out_error = 0;
1491 value = 0;
1492
1493 /* read DATA stage for OUT right away */
1494 if (unlikely (!dev->setup_in && w_length)) {
1495 value = setup_req (gadget->ep0, dev->req,
1496 w_length);
1497 if (value < 0)
1498 break;
1499
1500 ++dev->udc_usage;
1501 spin_unlock (&dev->lock);
1502 value = usb_ep_queue (gadget->ep0, dev->req,
1503 GFP_KERNEL);
1504 spin_lock (&dev->lock);
1505 --dev->udc_usage;
1506 if (value < 0) {
1507 clean_req (gadget->ep0, dev->req);
1508 break;
1509 }
1510
1511 /* we can't currently stall these */
1512 dev->setup_can_stall = 0;
1513 }
1514
1515 /* state changes when reader collects event */
1516 event = next_event (dev, GADGETFS_SETUP);
1517 event->u.setup = *ctrl;
1518 ep0_readable (dev);
1519 spin_unlock (&dev->lock);
1520 return 0;
1521 }
1522 }
1523
1524 /* proceed with data transfer and status phases? */
1525 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
1526 req->length = value;
1527 req->zero = value < w_length;
1528
1529 ++dev->udc_usage;
1530 spin_unlock (&dev->lock);
1531 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1532 spin_lock(&dev->lock);
1533 --dev->udc_usage;
1534 spin_unlock(&dev->lock);
1535 if (value < 0) {
1536 DBG (dev, "ep_queue --> %d\n", value);
1537 req->status = 0;
1538 }
1539 return value;
1540 }
1541
1542 /* device stalls when value < 0 */
1543 spin_unlock (&dev->lock);
1544 return value;
1545 }
1546
1547 static void destroy_ep_files (struct dev_data *dev)
1548 {
1549 DBG (dev, "%s %d\n", __func__, dev->state);
1550
1551 /* dev->state must prevent interference */
1552 spin_lock_irq (&dev->lock);
1553 while (!list_empty(&dev->epfiles)) {
1554 struct ep_data *ep;
1555 struct inode *parent;
1556 struct dentry *dentry;
1557
1558 /* break link to FS */
1559 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1560 list_del_init (&ep->epfiles);
1561 spin_unlock_irq (&dev->lock);
1562
1563 dentry = ep->dentry;
1564 ep->dentry = NULL;
1565 parent = d_inode(dentry->d_parent);
1566
1567 /* break link to controller */
1568 mutex_lock(&ep->lock);
1569 if (ep->state == STATE_EP_ENABLED)
1570 (void) usb_ep_disable (ep->ep);
1571 ep->state = STATE_EP_UNBOUND;
1572 usb_ep_free_request (ep->ep, ep->req);
1573 ep->ep = NULL;
1574 mutex_unlock(&ep->lock);
1575
1576 wake_up (&ep->wait);
1577 put_ep (ep);
1578
1579 /* break link to dcache */
1580 inode_lock(parent);
1581 d_delete (dentry);
1582 dput (dentry);
1583 inode_unlock(parent);
1584
1585 spin_lock_irq (&dev->lock);
1586 }
1587 spin_unlock_irq (&dev->lock);
1588 }
1589
1590
1591 static struct dentry *
1592 gadgetfs_create_file (struct super_block *sb, char const *name,
1593 void *data, const struct file_operations *fops);
1594
1595 static int activate_ep_files (struct dev_data *dev)
1596 {
1597 struct usb_ep *ep;
1598 struct ep_data *data;
1599
1600 gadget_for_each_ep (ep, dev->gadget) {
1601
1602 data = kzalloc(sizeof(*data), GFP_KERNEL);
1603 if (!data)
1604 goto enomem0;
1605 data->state = STATE_EP_DISABLED;
1606 mutex_init(&data->lock);
1607 init_waitqueue_head (&data->wait);
1608
1609 strncpy (data->name, ep->name, sizeof (data->name) - 1);
1610 refcount_set (&data->count, 1);
1611 data->dev = dev;
1612 get_dev (dev);
1613
1614 data->ep = ep;
1615 ep->driver_data = data;
1616
1617 data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
1618 if (!data->req)
1619 goto enomem1;
1620
1621 data->dentry = gadgetfs_create_file (dev->sb, data->name,
1622 data, &ep_io_operations);
1623 if (!data->dentry)
1624 goto enomem2;
1625 list_add_tail (&data->epfiles, &dev->epfiles);
1626 }
1627 return 0;
1628
1629 enomem2:
1630 usb_ep_free_request (ep, data->req);
1631 enomem1:
1632 put_dev (dev);
1633 kfree (data);
1634 enomem0:
1635 DBG (dev, "%s enomem\n", __func__);
1636 destroy_ep_files (dev);
1637 return -ENOMEM;
1638 }
1639
1640 static void
1641 gadgetfs_unbind (struct usb_gadget *gadget)
1642 {
1643 struct dev_data *dev = get_gadget_data (gadget);
1644
1645 DBG (dev, "%s\n", __func__);
1646
1647 spin_lock_irq (&dev->lock);
1648 dev->state = STATE_DEV_UNBOUND;
1649 while (dev->udc_usage > 0) {
1650 spin_unlock_irq(&dev->lock);
1651 usleep_range(1000, 2000);
1652 spin_lock_irq(&dev->lock);
1653 }
1654 spin_unlock_irq (&dev->lock);
1655
1656 destroy_ep_files (dev);
1657 gadget->ep0->driver_data = NULL;
1658 set_gadget_data (gadget, NULL);
1659
1660 /* we've already been disconnected ... no i/o is active */
1661 if (dev->req)
1662 usb_ep_free_request (gadget->ep0, dev->req);
1663 DBG (dev, "%s done\n", __func__);
1664 put_dev (dev);
1665 }
1666
1667 static struct dev_data *the_device;
1668
1669 static int gadgetfs_bind(struct usb_gadget *gadget,
1670 struct usb_gadget_driver *driver)
1671 {
1672 struct dev_data *dev = the_device;
1673
1674 if (!dev)
1675 return -ESRCH;
1676 if (0 != strcmp (CHIP, gadget->name)) {
1677 pr_err("%s expected %s controller not %s\n",
1678 shortname, CHIP, gadget->name);
1679 return -ENODEV;
1680 }
1681
1682 set_gadget_data (gadget, dev);
1683 dev->gadget = gadget;
1684 gadget->ep0->driver_data = dev;
1685
1686 /* preallocate control response and buffer */
1687 dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
1688 if (!dev->req)
1689 goto enomem;
1690 dev->req->context = NULL;
1691 dev->req->complete = epio_complete;
1692
1693 if (activate_ep_files (dev) < 0)
1694 goto enomem;
1695
1696 INFO (dev, "bound to %s driver\n", gadget->name);
1697 spin_lock_irq(&dev->lock);
1698 dev->state = STATE_DEV_UNCONNECTED;
1699 spin_unlock_irq(&dev->lock);
1700 get_dev (dev);
1701 return 0;
1702
1703 enomem:
1704 gadgetfs_unbind (gadget);
1705 return -ENOMEM;
1706 }
1707
1708 static void
1709 gadgetfs_disconnect (struct usb_gadget *gadget)
1710 {
1711 struct dev_data *dev = get_gadget_data (gadget);
1712 unsigned long flags;
1713
1714 spin_lock_irqsave (&dev->lock, flags);
1715 if (dev->state == STATE_DEV_UNCONNECTED)
1716 goto exit;
1717 dev->state = STATE_DEV_UNCONNECTED;
1718
1719 INFO (dev, "disconnected\n");
1720 next_event (dev, GADGETFS_DISCONNECT);
1721 ep0_readable (dev);
1722 exit:
1723 spin_unlock_irqrestore (&dev->lock, flags);
1724 }
1725
1726 static void
1727 gadgetfs_suspend (struct usb_gadget *gadget)
1728 {
1729 struct dev_data *dev = get_gadget_data (gadget);
1730 unsigned long flags;
1731
1732 INFO (dev, "suspended from state %d\n", dev->state);
1733 spin_lock_irqsave(&dev->lock, flags);
1734 switch (dev->state) {
1735 case STATE_DEV_SETUP: // VERY odd... host died??
1736 case STATE_DEV_CONNECTED:
1737 case STATE_DEV_UNCONNECTED:
1738 next_event (dev, GADGETFS_SUSPEND);
1739 ep0_readable (dev);
1740 /* FALLTHROUGH */
1741 default:
1742 break;
1743 }
1744 spin_unlock_irqrestore(&dev->lock, flags);
1745 }
1746
1747 static struct usb_gadget_driver gadgetfs_driver = {
1748 .function = (char *) driver_desc,
1749 .bind = gadgetfs_bind,
1750 .unbind = gadgetfs_unbind,
1751 .setup = gadgetfs_setup,
1752 .reset = gadgetfs_disconnect,
1753 .disconnect = gadgetfs_disconnect,
1754 .suspend = gadgetfs_suspend,
1755
1756 .driver = {
1757 .name = (char *) shortname,
1758 },
1759 };
1760
1761 /*----------------------------------------------------------------------*/
1762 /* DEVICE INITIALIZATION
1763 *
1764 * fd = open ("/dev/gadget/$CHIP", O_RDWR)
1765 * status = write (fd, descriptors, sizeof descriptors)
1766 *
1767 * That write establishes the device configuration, so the kernel can
1768 * bind to the controller ... guaranteeing it can handle enumeration
1769 * at all necessary speeds. Descriptor order is:
1770 *
1771 * . message tag (u32, host order) ... for now, must be zero; it
1772 * would change to support features like multi-config devices
1773 * . full/low speed config ... all wTotalLength bytes (with interface,
1774 * class, altsetting, endpoint, and other descriptors)
1775 * . high speed config ... all descriptors, for high speed operation;
1776 * this one's optional except for high-speed hardware
1777 * . device descriptor
1778 *
1779 * Endpoints are not yet enabled. Drivers must wait until device
1780 * configuration and interface altsetting changes create
1781 * the need to configure (or unconfigure) them.
1782 *
1783 * After initialization, the device stays active for as long as that
1784 * $CHIP file is open. Events must then be read from that descriptor,
1785 * such as configuration notifications.
1786 */
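/*
 * Illustrative user-mode sketch (not part of this driver) of the buffer
 * dev_config() below expects.  "config", "config_len", "device_desc"
 * and "ep0_fd" are stand-ins for a real driver's descriptors and its
 * open /dev/gadget/$CHIP file.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	__u32 tag = 0;
 *	char msg[4096];
 *	size_t off = 0;
 *
 *	memcpy(msg + off, &tag, 4);			off += 4;
 *	memcpy(msg + off, config, config_len);		off += config_len;
 *	// a high speed configuration, if any, would be appended here
 *	memcpy(msg + off, device_desc, USB_DT_DEVICE_SIZE); off += USB_DT_DEVICE_SIZE;
 *
 *	if (write(ep0_fd, msg, off) < 0)
 *		;	// descriptors rejected; see the validation below
 */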
1787
1788 static int is_valid_config(struct usb_config_descriptor *config,
1789 unsigned int total)
1790 {
1791 return config->bDescriptorType == USB_DT_CONFIG
1792 && config->bLength == USB_DT_CONFIG_SIZE
1793 && total >= USB_DT_CONFIG_SIZE
1794 && config->bConfigurationValue != 0
1795 && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
1796 && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
1797 /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
1798 /* FIXME check lengths: walk to end */
1799 }
1800
1801 static ssize_t
1802 dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1803 {
1804 struct dev_data *dev = fd->private_data;
1805 ssize_t value = len, length = len;
1806 unsigned total;
1807 u32 tag;
1808 char *kbuf;
1809
1810 spin_lock_irq(&dev->lock);
1811 if (dev->state > STATE_DEV_OPENED) {
1812 value = ep0_write(fd, buf, len, ptr);
1813 spin_unlock_irq(&dev->lock);
1814 return value;
1815 }
1816 spin_unlock_irq(&dev->lock);
1817
1818 if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
1819 (len > PAGE_SIZE * 4))
1820 return -EINVAL;
1821
1822 /* we might need to change message format someday */
1823 if (copy_from_user (&tag, buf, 4))
1824 return -EFAULT;
1825 if (tag != 0)
1826 return -EINVAL;
1827 buf += 4;
1828 length -= 4;
1829
1830 kbuf = memdup_user(buf, length);
1831 if (IS_ERR(kbuf))
1832 return PTR_ERR(kbuf);
1833
1834 spin_lock_irq (&dev->lock);
1835 value = -EINVAL;
1836 if (dev->buf) {
1837 spin_unlock_irq(&dev->lock);
1838 kfree(kbuf);
1839 return value;
1840 }
1841 dev->buf = kbuf;
1842
1843 /* full or low speed config */
1844 dev->config = (void *) kbuf;
1845 total = le16_to_cpu(dev->config->wTotalLength);
1846 if (!is_valid_config(dev->config, total) ||
1847 total > length - USB_DT_DEVICE_SIZE)
1848 goto fail;
1849 kbuf += total;
1850 length -= total;
1851
1852 /* optional high speed config */
1853 if (kbuf [1] == USB_DT_CONFIG) {
1854 dev->hs_config = (void *) kbuf;
1855 total = le16_to_cpu(dev->hs_config->wTotalLength);
1856 if (!is_valid_config(dev->hs_config, total) ||
1857 total > length - USB_DT_DEVICE_SIZE)
1858 goto fail;
1859 kbuf += total;
1860 length -= total;
1861 } else {
1862 dev->hs_config = NULL;
1863 }
1864
1865 /* could support multiple configs, using another encoding! */
1866
1867 /* device descriptor (tweaked for paranoia) */
1868 if (length != USB_DT_DEVICE_SIZE)
1869 goto fail;
1870 dev->dev = (void *)kbuf;
1871 if (dev->dev->bLength != USB_DT_DEVICE_SIZE
1872 || dev->dev->bDescriptorType != USB_DT_DEVICE
1873 || dev->dev->bNumConfigurations != 1)
1874 goto fail;
1875 dev->dev->bcdUSB = cpu_to_le16 (0x0200);
1876
1877 /* triggers gadgetfs_bind(); then we can enumerate. */
1878 spin_unlock_irq (&dev->lock);
1879 if (dev->hs_config)
1880 gadgetfs_driver.max_speed = USB_SPEED_HIGH;
1881 else
1882 gadgetfs_driver.max_speed = USB_SPEED_FULL;
1883
1884 value = usb_gadget_probe_driver(&gadgetfs_driver);
1885 if (value != 0) {
1886 kfree (dev->buf);
1887 dev->buf = NULL;
1888 } else {
1889 /* at this point "good" hardware has for the first time
1890 * let the USB host see us. alternatively, if users
1891 * unplug/replug that will clear all the error state.
1892 *
1893 * note: everything running before here was guaranteed
1894 * to choke driver model style diagnostics. from here
1895 * on, they can work ... except in cleanup paths that
1896 * kick in after the ep0 descriptor is closed.
1897 */
1898 value = len;
1899 dev->gadget_registered = true;
1900 }
1901 return value;
1902
1903 fail:
1904 spin_unlock_irq (&dev->lock);
1905 pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
1906 kfree (dev->buf);
1907 dev->buf = NULL;
1908 return value;
1909 }
1910
1911 static int
1912 dev_open (struct inode *inode, struct file *fd)
1913 {
1914 struct dev_data *dev = inode->i_private;
1915 int value = -EBUSY;
1916
1917 spin_lock_irq(&dev->lock);
1918 if (dev->state == STATE_DEV_DISABLED) {
1919 dev->ev_next = 0;
1920 dev->state = STATE_DEV_OPENED;
1921 fd->private_data = dev;
1922 get_dev (dev);
1923 value = 0;
1924 }
1925 spin_unlock_irq(&dev->lock);
1926 return value;
1927 }
1928
1929 static const struct file_operations ep0_operations = {
1930 .llseek = no_llseek,
1931
1932 .open = dev_open,
1933 .read = ep0_read,
1934 .write = dev_config,
1935 .fasync = ep0_fasync,
1936 .poll = ep0_poll,
1937 .unlocked_ioctl = dev_ioctl,
1938 .release = dev_release,
1939 };
1940
1941 /*----------------------------------------------------------------------*/
1942
1943 /* FILESYSTEM AND SUPERBLOCK OPERATIONS
1944 *
1945 * Mounting the filesystem creates a controller file, used first for
1946 * device configuration then later for event monitoring.
1947 */
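/*
 * Illustrative user-mode sketch (not part of this driver): mounting the
 * filesystem before opening the controller file.  /dev/gadget is only
 * a convention; any mount point works.
 *
 *	#include <sys/mount.h>
 *	#include <sys/stat.h>
 *
 *	mkdir("/dev/gadget", 0700);
 *	if (mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL) < 0)
 *		;	// e.g. gadgetfs not built, or no UDC registered
 */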
1948
1949
1950 /* FIXME PAM etc could set this security policy without mount options
1951 * if epfiles inherited ownership and permissions from ep0 ...
1952 */
1953
1954 static unsigned default_uid;
1955 static unsigned default_gid;
1956 static unsigned default_perm = S_IRUSR | S_IWUSR;
1957
1958 module_param (default_uid, uint, 0644);
1959 module_param (default_gid, uint, 0644);
1960 module_param (default_perm, uint, 0644);
1961
1962
1963 static struct inode *
1964 gadgetfs_make_inode (struct super_block *sb,
1965 void *data, const struct file_operations *fops,
1966 int mode)
1967 {
1968 struct inode *inode = new_inode (sb);
1969
1970 if (inode) {
1971 inode->i_ino = get_next_ino();
1972 inode->i_mode = mode;
1973 inode->i_uid = make_kuid(&init_user_ns, default_uid);
1974 inode->i_gid = make_kgid(&init_user_ns, default_gid);
1975 inode->i_atime = inode->i_mtime = inode->i_ctime
1976 = current_time(inode);
1977 inode->i_private = data;
1978 inode->i_fop = fops;
1979 }
1980 return inode;
1981 }
1982
1983 /* creates in fs root directory, so non-renamable and non-linkable.
1984 * so inode and dentry are paired, until device reconfig.
1985 */
1986 static struct dentry *
1987 gadgetfs_create_file (struct super_block *sb, char const *name,
1988 void *data, const struct file_operations *fops)
1989 {
1990 struct dentry *dentry;
1991 struct inode *inode;
1992
1993 dentry = d_alloc_name(sb->s_root, name);
1994 if (!dentry)
1995 return NULL;
1996
1997 inode = gadgetfs_make_inode (sb, data, fops,
1998 S_IFREG | (default_perm & S_IRWXUGO));
1999 if (!inode) {
2000 dput(dentry);
2001 return NULL;
2002 }
2003 d_add (dentry, inode);
2004 return dentry;
2005 }
2006
2007 static const struct super_operations gadget_fs_operations = {
2008 .statfs = simple_statfs,
2009 .drop_inode = generic_delete_inode,
2010 };
2011
2012 static int
2013 gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
2014 {
2015 struct inode *inode;
2016 struct dev_data *dev;
2017
2018 if (the_device)
2019 return -ESRCH;
2020
2021 CHIP = usb_get_gadget_udc_name();
2022 if (!CHIP)
2023 return -ENODEV;
2024
2025 /* superblock */
2026 sb->s_blocksize = PAGE_SIZE;
2027 sb->s_blocksize_bits = PAGE_SHIFT;
2028 sb->s_magic = GADGETFS_MAGIC;
2029 sb->s_op = &gadget_fs_operations;
2030 sb->s_time_gran = 1;
2031
2032 /* root inode */
2033 inode = gadgetfs_make_inode (sb,
2034 NULL, &simple_dir_operations,
2035 S_IFDIR | S_IRUGO | S_IXUGO);
2036 if (!inode)
2037 goto Enomem;
2038 inode->i_op = &simple_dir_inode_operations;
2039 if (!(sb->s_root = d_make_root (inode)))
2040 goto Enomem;
2041
2042 /* the ep0 file is named after the controller we expect;
2043 * user mode code can use it for sanity checks, like we do.
2044 */
2045 dev = dev_new ();
2046 if (!dev)
2047 goto Enomem;
2048
2049 dev->sb = sb;
2050 dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
2051 if (!dev->dentry) {
2052 put_dev(dev);
2053 goto Enomem;
2054 }
2055
2056 /* other endpoint files are available after hardware setup,
2057 * from binding to a controller.
2058 */
2059 the_device = dev;
2060 return 0;
2061
2062 Enomem:
2063 return -ENOMEM;
2064 }
2065
2066 /* "mount -t gadgetfs path /dev/gadget" ends up here */
2067 static struct dentry *
2068 gadgetfs_mount (struct file_system_type *t, int flags,
2069 const char *path, void *opts)
2070 {
2071 return mount_single (t, flags, opts, gadgetfs_fill_super);
2072 }
2073
2074 static void
2075 gadgetfs_kill_sb (struct super_block *sb)
2076 {
2077 kill_litter_super (sb);
2078 if (the_device) {
2079 put_dev (the_device);
2080 the_device = NULL;
2081 }
2082 kfree(CHIP);
2083 CHIP = NULL;
2084 }
2085
2086 /*----------------------------------------------------------------------*/
2087
2088 static struct file_system_type gadgetfs_type = {
2089 .owner = THIS_MODULE,
2090 .name = shortname,
2091 .mount = gadgetfs_mount,
2092 .kill_sb = gadgetfs_kill_sb,
2093 };
2094 MODULE_ALIAS_FS("gadgetfs");
2095
2096 /*----------------------------------------------------------------------*/
2097
2098 static int __init init (void)
2099 {
2100 int status;
2101
2102 status = register_filesystem (&gadgetfs_type);
2103 if (status == 0)
2104 pr_info ("%s: %s, version " DRIVER_VERSION "\n",
2105 shortname, driver_desc);
2106 return status;
2107 }
2108 module_init (init);
2109
2110 static void __exit cleanup (void)
2111 {
2112 pr_debug ("unregister %s\n", shortname);
2113 unregister_filesystem (&gadgetfs_type);
2114 }
2115 module_exit (cleanup);
2116