Merge tag 'usb-for-v3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi...
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Jul 2014 18:33:41 +0000 (11:33 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 21 Jul 2014 18:33:41 +0000 (11:33 -0700)
Felipe writes:

usb: patches for v3.17 merge window

Surprisingly enough, while a big set of patches, the majority is
composed of cleanups (using devm_*, fixing sparse errors, moving
code around, adding const, etc).

The highlights are addition of new support for PLX USB338x devices,
and support for USB 2.0-only configurations of the DWC3 IP core.

Signed-off-by: Felipe Balbi <balbi@ti.com>
16 files changed:
1  2 
Documentation/DocBook/gadget.tmpl
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_rndis.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/gr_udc.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/ux500.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/phy/phy-tegra-usb.c
include/uapi/linux/usb/functionfs.h

Simple merge
Simple merge
Simple merge
Simple merge
index 0000000000000000000000000000000000000000,fe45060e0a7a3e19df5b0383d88daefeae8e19a6..dc30adf15a01d22cbde3c3a195a146373e302ef0
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,3347 +1,3349 @@@
 -      for (lang = ffs->stringtabs; *lang; ++lang) {
 -              struct usb_string *str = (*lang)->strings;
 -              int id = first_id;
 -              for (; str->s; ++id, ++str)
 -                      str->id = id;
+ /*
+  * f_fs.c -- user mode file system API for USB composite function controllers
+  *
+  * Copyright (C) 2010 Samsung Electronics
+  * Author: Michal Nazarewicz <mina86@mina86.com>
+  *
+  * Based on inode.c (GadgetFS) which was:
+  * Copyright (C) 2003-2004 David Brownell
+  * Copyright (C) 2003 Agilent Technologies
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  */
+ /* #define DEBUG */
+ /* #define VERBOSE_DEBUG */
+ #include <linux/blkdev.h>
+ #include <linux/pagemap.h>
+ #include <linux/export.h>
+ #include <linux/hid.h>
+ #include <linux/module.h>
+ #include <asm/unaligned.h>
+ #include <linux/usb/composite.h>
+ #include <linux/usb/functionfs.h>
+ #include <linux/aio.h>
+ #include <linux/mmu_context.h>
+ #include <linux/poll.h>
+ #include "u_fs.h"
+ #include "u_f.h"
+ #include "u_os_desc.h"
+ #include "configfs.h"
+ #define FUNCTIONFS_MAGIC      0xa647361 /* Chosen by a honest dice roll ;) */
+ /* Reference counter handling */
+ static void ffs_data_get(struct ffs_data *ffs);
+ static void ffs_data_put(struct ffs_data *ffs);
+ /* Creates new ffs_data object. */
+ static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+ /* Opened counter handling. */
+ static void ffs_data_opened(struct ffs_data *ffs);
+ static void ffs_data_closed(struct ffs_data *ffs);
+ /* Called with ffs->mutex held; take over ownership of data. */
+ static int __must_check
+ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
+ static int __must_check
+ __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+ /* The function structure ***************************************************/
+ struct ffs_ep;
+ /* Per-bind instance of a FunctionFS USB function. */
+ struct ffs_function {
+       struct usb_configuration        *conf;
+       struct usb_gadget               *gadget;
+       struct ffs_data                 *ffs;
+       struct ffs_ep                   *eps;
+       u8                              eps_revmap[16];
+       short                           *interfaces_nums;
+       struct usb_function             function;
+ };
+ /* Map a struct usb_function back to its containing ffs_function. */
+ static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
+ {
+       return container_of(f, struct ffs_function, function);
+ }
+ /*
+  * Atomically clear a cancelled setup: if setup_state is
+  * FFS_SETUP_CANCELLED reset it to FFS_NO_SETUP; in either case return
+  * the state observed before the (possible) transition.
+  */
+ static inline enum ffs_setup_state
+ ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
+ {
+       return (enum ffs_setup_state)
+               cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
+ }
+ static void ffs_func_eps_disable(struct ffs_function *func);
+ static int __must_check ffs_func_eps_enable(struct ffs_function *func);
+ static int ffs_func_bind(struct usb_configuration *,
+                        struct usb_function *);
+ static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
+ static void ffs_func_disable(struct usb_function *);
+ static int ffs_func_setup(struct usb_function *,
+                         const struct usb_ctrlrequest *);
+ static void ffs_func_suspend(struct usb_function *);
+ static void ffs_func_resume(struct usb_function *);
+ static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
+ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
+ /* The endpoints structures *************************************************/
+ /* State for one endpoint; "P:" notes name the lock protecting a field. */
+ struct ffs_ep {
+       struct usb_ep                   *ep;    /* P: ffs->eps_lock */
+       struct usb_request              *req;   /* P: epfile->mutex */
+       /* [0]: full speed, [1]: high speed, [2]: super speed */
+       struct usb_endpoint_descriptor  *descs[3];
+       u8                              num;
+       int                             status; /* P: epfile->mutex */
+ };
+ /* One epN file exposed in the function filesystem. */
+ struct ffs_epfile {
+       /* Protects ep->ep and ep->req. */
+       struct mutex                    mutex;
+       wait_queue_head_t               wait;
+       struct ffs_data                 *ffs;
+       struct ffs_ep                   *ep;    /* P: ffs->eps_lock */
+       struct dentry                   *dentry;
+       /* 5 bytes: "ep" + up to two digits + NUL */
+       char                            name[5];
+       unsigned char                   in;     /* P: ffs->eps_lock */
+       unsigned char                   isoc;   /* P: ffs->eps_lock */
+       unsigned char                   _pad;
+ };
+ /*  ffs_io_data structure ***************************************************/
+ /* Bookkeeping for one (possibly asynchronous) I/O request on an epfile. */
+ struct ffs_io_data {
+       bool aio;
+       bool read;
+       struct kiocb *kiocb;
+       const struct iovec *iovec;
+       unsigned long nr_segs;
+       char __user *buf;
+       size_t len;
+       /* submitter's mm, borrowed by ffs_user_copy_worker() via use_mm() */
+       struct mm_struct *mm;
+       struct work_struct work;
+       struct usb_ep *ep;
+       struct usb_request *req;
+ };
+ static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
+ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
+ static struct inode *__must_check
+ ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
+                  const struct file_operations *fops,
+                  struct dentry **dentry_p);
+ /* Devices management *******************************************************/
+ DEFINE_MUTEX(ffs_lock);
+ EXPORT_SYMBOL_GPL(ffs_lock);
+ static struct ffs_dev *_ffs_find_dev(const char *name);
+ static struct ffs_dev *_ffs_alloc_dev(void);
+ static int _ffs_name_dev(struct ffs_dev *dev, const char *name);
+ static void _ffs_free_dev(struct ffs_dev *dev);
+ static void *ffs_acquire_dev(const char *dev_name);
+ static void ffs_release_dev(struct ffs_data *ffs_data);
+ static int ffs_ready(struct ffs_data *ffs);
+ static void ffs_closed(struct ffs_data *ffs);
+ /* Misc helper functions ****************************************************/
+ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+       __attribute__((warn_unused_result, nonnull));
+ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
+       __attribute__((warn_unused_result, nonnull));
+ /* Control file aka ep0 *****************************************************/
+ /* ep0 request completion: wake the waiter in __ffs_ep0_queue_wait(). */
+ static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+       struct ffs_data *ffs = req->context;
+       complete_all(&ffs->ep0req_completion);
+ }
+ /*
+  * Queue the shared ep0 request and sleep until it completes.
+  * Entered with ffs->ev.waitq.lock held; drops it before queueing.
+  * Returns -EINTR if interrupted (request is dequeued first), otherwise
+  * req->status on error or the number of bytes actually transferred.
+  */
+ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+ {
+       struct usb_request *req = ffs->ep0req;
+       int ret;
+       /* short writes need a ZLP; decide while setup data is still locked */
+       req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
+       spin_unlock_irq(&ffs->ev.waitq.lock);
+       req->buf      = data;
+       req->length   = len;
+       /*
+        * UDC layer requires to provide a buffer even for ZLP, but should
+        * not use it at all. Let's provide some poisoned pointer to catch
+        * possible bug in the driver.
+        */
+       if (req->buf == NULL)
+               req->buf = (void *)0xDEADBABE;
+       reinit_completion(&ffs->ep0req_completion);
+       ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
+       if (unlikely(ret < 0))
+               return ret;
+       ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
+       if (unlikely(ret)) {
+               usb_ep_dequeue(ffs->gadget->ep0, req);
+               return -EINTR;
+       }
+       ffs->setup_state = FFS_NO_SETUP;
+       return req->status ? req->status : req->actual;
+ }
+ /*
+  * Protocol-stall ep0 in response to the pending setup, if the UDC can
+  * stall at this point.  Returns -EL2HLT after stalling, -ESRCH if a
+  * stall was requested when none is possible.
+  */
+ static int __ffs_ep0_stall(struct ffs_data *ffs)
+ {
+       if (ffs->ev.can_stall) {
+               pr_vdebug("ep0 stall\n");
+               usb_ep_set_halt(ffs->gadget->ep0);
+               ffs->setup_state = FFS_NO_SETUP;
+               return -EL2HLT;
+       } else {
+               pr_debug("bogus ep0 stall!\n");
+               return -ESRCH;
+       }
+ }
+ /*
+  * write() on ep0.  In FFS_READ_DESCRIPTORS/FFS_READ_STRINGS state the
+  * payload is the descriptors/strings blob handed over during setup; in
+  * FFS_ACTIVE state it supplies the data stage for a pending IN setup
+  * request (non-IN requests are stalled).
+  */
+ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
+                            size_t len, loff_t *ptr)
+ {
+       struct ffs_data *ffs = file->private_data;
+       ssize_t ret;
+       char *data;
+       ENTER();
+       /* Fast check if setup was canceled */
+       if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
+               return -EIDRM;
+       /* Acquire mutex */
+       ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+       if (unlikely(ret < 0))
+               return ret;
+       /* Check state */
+       switch (ffs->state) {
+       case FFS_READ_DESCRIPTORS:
+       case FFS_READ_STRINGS:
+               /* Copy data */
+               if (unlikely(len < 16)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               data = ffs_prepare_buffer(buf, len);
+               if (IS_ERR(data)) {
+                       ret = PTR_ERR(data);
+                       break;
+               }
+               /* Handle data */
+               if (ffs->state == FFS_READ_DESCRIPTORS) {
+                       pr_info("read descriptors\n");
+                       ret = __ffs_data_got_descs(ffs, data, len);
+                       if (unlikely(ret < 0))
+                               break;
+                       ffs->state = FFS_READ_STRINGS;
+                       ret = len;
+               } else {
+                       pr_info("read strings\n");
+                       ret = __ffs_data_got_strings(ffs, data, len);
+                       if (unlikely(ret < 0))
+                               break;
+                       ret = ffs_epfiles_create(ffs);
+                       if (unlikely(ret)) {
+                               ffs->state = FFS_CLOSING;
+                               break;
+                       }
+                       ffs->state = FFS_ACTIVE;
+                       /* drop mutex before calling out to ffs_ready() */
+                       mutex_unlock(&ffs->mutex);
+                       ret = ffs_ready(ffs);
+                       if (unlikely(ret < 0)) {
+                               ffs->state = FFS_CLOSING;
+                               return ret;
+                       }
+                       set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
+                       return len;
+               }
+               break;
+       case FFS_ACTIVE:
+               data = NULL;
+               /*
+                * We're called from user space, we can use _irq
+                * rather then _irqsave
+                */
+               spin_lock_irq(&ffs->ev.waitq.lock);
+               switch (ffs_setup_state_clear_cancelled(ffs)) {
+               case FFS_SETUP_CANCELLED:
+                       ret = -EIDRM;
+                       goto done_spin;
+               case FFS_NO_SETUP:
+                       ret = -ESRCH;
+                       goto done_spin;
+               case FFS_SETUP_PENDING:
+                       break;
+               }
+               /* FFS_SETUP_PENDING */
+               if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
+                       spin_unlock_irq(&ffs->ev.waitq.lock);
+                       ret = __ffs_ep0_stall(ffs);
+                       break;
+               }
+               /* FFS_SETUP_PENDING and not stall */
+               len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+               spin_unlock_irq(&ffs->ev.waitq.lock);
+               data = ffs_prepare_buffer(buf, len);
+               if (IS_ERR(data)) {
+                       ret = PTR_ERR(data);
+                       break;
+               }
+               spin_lock_irq(&ffs->ev.waitq.lock);
+               /*
+                * We are guaranteed to be still in FFS_ACTIVE state
+                * but the state of setup could have changed from
+                * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
+                * to check for that.  If that happened we copied data
+                * from user space in vain but it's unlikely.
+                *
+                * For sure we are not in FFS_NO_SETUP since this is
+                * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
+                * transition can be performed and it's protected by
+                * mutex.
+                */
+               if (ffs_setup_state_clear_cancelled(ffs) ==
+                   FFS_SETUP_CANCELLED) {
+                       ret = -EIDRM;
+ done_spin:
+                       spin_unlock_irq(&ffs->ev.waitq.lock);
+               } else {
+                       /* unlocks spinlock */
+                       ret = __ffs_ep0_queue_wait(ffs, data, len);
+               }
+               kfree(data);
+               break;
+       default:
+               ret = -EBADFD;
+               break;
+       }
+       mutex_unlock(&ffs->mutex);
+       return ret;
+ }
+ /*
+  * Copy up to n queued events out to user space and dequeue them.
+  * Entered with ffs->ev.waitq.lock and ffs->mutex held; releases both.
+  * Returns the number of bytes copied, or -EFAULT.
+  */
+ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
+                                    size_t n)
+ {
+       /*
+        * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
+        * to release them.
+        */
+       /*
+        * NOTE(review): on-stack VLA; presumably n is bounded by the size
+        * of ffs->ev.types (caller passes min(n, ffs->ev.count)) — confirm
+        * the bound is small.
+        */
+       struct usb_functionfs_event events[n];
+       unsigned i = 0;
+       memset(events, 0, sizeof events);
+       do {
+               events[i].type = ffs->ev.types[i];
+               if (events[i].type == FUNCTIONFS_SETUP) {
+                       events[i].u.setup = ffs->ev.setup;
+                       /* a SETUP event arms the ep0 data-stage state */
+                       ffs->setup_state = FFS_SETUP_PENDING;
+               }
+       } while (++i < n);
+       /* shift any remaining events to the front of the queue */
+       if (n < ffs->ev.count) {
+               ffs->ev.count -= n;
+               memmove(ffs->ev.types, ffs->ev.types + n,
+                       ffs->ev.count * sizeof *ffs->ev.types);
+       } else {
+               ffs->ev.count = 0;
+       }
+       spin_unlock_irq(&ffs->ev.waitq.lock);
+       mutex_unlock(&ffs->mutex);
+       return unlikely(__copy_to_user(buf, events, sizeof events))
+               ? -EFAULT : sizeof events;
+ }
+ /*
+  * read() on ep0.  With no setup pending it returns queued functionfs
+  * events (blocking unless O_NONBLOCK); with an OUT setup pending it
+  * returns the data stage of that request (IN requests are stalled).
+  */
+ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
+                           size_t len, loff_t *ptr)
+ {
+       struct ffs_data *ffs = file->private_data;
+       char *data = NULL;
+       size_t n;
+       int ret;
+       ENTER();
+       /* Fast check if setup was canceled */
+       if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
+               return -EIDRM;
+       /* Acquire mutex */
+       ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+       if (unlikely(ret < 0))
+               return ret;
+       /* Check state */
+       if (ffs->state != FFS_ACTIVE) {
+               ret = -EBADFD;
+               goto done_mutex;
+       }
+       /*
+        * We're called from user space, we can use _irq rather then
+        * _irqsave
+        */
+       spin_lock_irq(&ffs->ev.waitq.lock);
+       switch (ffs_setup_state_clear_cancelled(ffs)) {
+       case FFS_SETUP_CANCELLED:
+               ret = -EIDRM;
+               break;
+       case FFS_NO_SETUP:
+               n = len / sizeof(struct usb_functionfs_event);
+               if (unlikely(!n)) {
+                       ret = -EINVAL;
+                       break;
+               }
+               if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
+                       ret = -EAGAIN;
+                       break;
+               }
+               if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
+                                                       ffs->ev.count)) {
+                       ret = -EINTR;
+                       break;
+               }
+               /* releases both the spinlock and the mutex */
+               return __ffs_ep0_read_events(ffs, buf,
+                                            min(n, (size_t)ffs->ev.count));
+       case FFS_SETUP_PENDING:
+               if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
+                       spin_unlock_irq(&ffs->ev.waitq.lock);
+                       ret = __ffs_ep0_stall(ffs);
+                       goto done_mutex;
+               }
+               len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+               spin_unlock_irq(&ffs->ev.waitq.lock);
+               if (likely(len)) {
+                       data = kmalloc(len, GFP_KERNEL);
+                       if (unlikely(!data)) {
+                               ret = -ENOMEM;
+                               goto done_mutex;
+                       }
+               }
+               spin_lock_irq(&ffs->ev.waitq.lock);
+               /* See ffs_ep0_write() */
+               if (ffs_setup_state_clear_cancelled(ffs) ==
+                   FFS_SETUP_CANCELLED) {
+                       ret = -EIDRM;
+                       break;
+               }
+               /* unlocks spinlock */
+               ret = __ffs_ep0_queue_wait(ffs, data, len);
+               if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+                       ret = -EFAULT;
+               goto done_mutex;
+       default:
+               ret = -EBADFD;
+               break;
+       }
+       spin_unlock_irq(&ffs->ev.waitq.lock);
+ done_mutex:
+       mutex_unlock(&ffs->mutex);
+       kfree(data);
+       return ret;
+ }
+ /* open() on ep0: refuse while tearing down, else bump the opened count. */
+ static int ffs_ep0_open(struct inode *inode, struct file *file)
+ {
+       struct ffs_data *ffs = inode->i_private;
+       ENTER();
+       if (unlikely(ffs->state == FFS_CLOSING))
+               return -EBUSY;
+       file->private_data = ffs;
+       ffs_data_opened(ffs);
+       return 0;
+ }
+ /* release() on ep0: drop the opened count taken in ffs_ep0_open(). */
+ static int ffs_ep0_release(struct inode *inode, struct file *file)
+ {
+       struct ffs_data *ffs = file->private_data;
+       ENTER();
+       ffs_data_closed(ffs);
+       return 0;
+ }
+ /*
+  * ioctl() on ep0: handle FUNCTIONFS_INTERFACE_REVMAP locally, forward
+  * anything else to the UDC's ioctl hook if it has one.
+  */
+ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
+ {
+       struct ffs_data *ffs = file->private_data;
+       struct usb_gadget *gadget = ffs->gadget;
+       long ret;
+       ENTER();
+       if (code == FUNCTIONFS_INTERFACE_REVMAP) {
+               struct ffs_function *func = ffs->func;
+               ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
+       } else if (gadget && gadget->ops->ioctl) {
+               ret = gadget->ops->ioctl(gadget, code, value);
+       } else {
+               ret = -ENOTTY;
+       }
+       return ret;
+ }
+ /*
+  * poll() on ep0: writable while descriptors/strings are expected or a
+  * setup is pending; readable when events are queued or a data stage is
+  * pending.  POLLWRNORM is always reported.
+  */
+ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait)
+ {
+       struct ffs_data *ffs = file->private_data;
+       unsigned int mask = POLLWRNORM;
+       int ret;
+       poll_wait(file, &ffs->ev.waitq, wait);
+       ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+       if (unlikely(ret < 0))
+               return mask;
+       switch (ffs->state) {
+       case FFS_READ_DESCRIPTORS:
+       case FFS_READ_STRINGS:
+               mask |= POLLOUT;
+               break;
+       case FFS_ACTIVE:
+               switch (ffs->setup_state) {
+               case FFS_NO_SETUP:
+                       if (ffs->ev.count)
+                               mask |= POLLIN;
+                       break;
+               case FFS_SETUP_PENDING:
+               case FFS_SETUP_CANCELLED:
+                       mask |= (POLLIN | POLLOUT);
+                       break;
+               }
+       /*
+        * NOTE(review): FFS_ACTIVE falls through to FFS_CLOSING here —
+        * harmless since the next case only breaks, but confirm it is
+        * intentional and consider an explicit break/fallthrough marker.
+        */
+       case FFS_CLOSING:
+               break;
+       }
+       mutex_unlock(&ffs->mutex);
+       return mask;
+ }
+ /* File operations for the ep0 control file. */
+ static const struct file_operations ffs_ep0_operations = {
+       .llseek =       no_llseek,
+       .open =         ffs_ep0_open,
+       .write =        ffs_ep0_write,
+       .read =         ffs_ep0_read,
+       .release =      ffs_ep0_release,
+       .unlocked_ioctl =       ffs_ep0_ioctl,
+       .poll =         ffs_ep0_poll,
+ };
+ /* "Normal" endpoints operations ********************************************/
+ /*
+  * Synchronous-I/O completion: record status/actual in the ffs_ep and
+  * wake the completion the submitter is waiting on (req->context).
+  */
+ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
+ {
+       ENTER();
+       if (likely(req->context)) {
+               struct ffs_ep *ep = _ep->driver_data;
+               ep->status = req->status ? req->status : req->actual;
+               complete(req->context);
+       }
+ }
+ /*
+  * Workqueue half of AIO completion: for reads, borrow the submitter's
+  * mm via use_mm() and scatter the kernel buffer into the user iovec,
+  * then complete the kiocb and free everything the request owned.
+  */
+ static void ffs_user_copy_worker(struct work_struct *work)
+ {
+       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+                                                  work);
+       int ret = io_data->req->status ? io_data->req->status :
+                                        io_data->req->actual;
+       if (io_data->read && ret > 0) {
+               int i;
+               size_t pos = 0;
+               use_mm(io_data->mm);
+               for (i = 0; i < io_data->nr_segs; i++) {
+                       if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
+                                                &io_data->buf[pos],
+                                                io_data->iovec[i].iov_len))) {
+                               ret = -EFAULT;
+                               break;
+                       }
+                       pos += io_data->iovec[i].iov_len;
+               }
+               unuse_mm(io_data->mm);
+       }
+       aio_complete(io_data->kiocb, ret, ret);
+       usb_ep_free_request(io_data->ep, io_data->req);
+       io_data->kiocb->private = NULL;
+       /* the read path owns a private iovec copy (see ffs_epfile_aio_read) */
+       if (io_data->read)
+               kfree(io_data->iovec);
+       kfree(io_data->buf);
+       kfree(io_data);
+ }
+ /*
+  * AIO completion callback (atomic context): defer the user-space copy
+  * and kiocb completion to process context via a workqueue.
+  */
+ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
+                                        struct usb_request *req)
+ {
+       struct ffs_io_data *io_data = req->context;
+       ENTER();
+       INIT_WORK(&io_data->work, ffs_user_copy_worker);
+       schedule_work(&io_data->work);
+ }
+ /*
+  * Common read/write engine for epfiles.  Handles both synchronous and
+  * AIO paths described by io_data: waits for the endpoint to be enabled,
+  * halts it on a direction mismatch, copies data in/out, and queues the
+  * USB request (blocking for completion in the sync case).
+  */
+ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+ {
+       struct ffs_epfile *epfile = file->private_data;
+       struct ffs_ep *ep;
+       char *data = NULL;
+       ssize_t ret, data_len;
+       int halt;
+       /* Are we still active? */
+       if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+               ret = -ENODEV;
+               goto error;
+       }
+       /* Wait for endpoint to be enabled */
+       ep = epfile->ep;
+       if (!ep) {
+               if (file->f_flags & O_NONBLOCK) {
+                       ret = -EAGAIN;
+                       goto error;
+               }
+               ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep));
+               if (ret) {
+                       ret = -EINTR;
+                       goto error;
+               }
+       }
+       /* Do we halt? */
+       /* direction of the file and of the request disagree -> stall */
+       halt = (!io_data->read == !epfile->in);
+       if (halt && epfile->isoc) {
+               ret = -EINVAL;
+               goto error;
+       }
+       /* Allocate & copy */
+       if (!halt) {
+               /*
+                * if we _do_ wait above, the epfile->ffs->gadget might be NULL
+                * before the waiting completes, so do not assign to 'gadget' earlier
+                */
+               struct usb_gadget *gadget = epfile->ffs->gadget;
+               spin_lock_irq(&epfile->ffs->eps_lock);
+               /* In the meantime, endpoint got disabled or changed. */
+               if (epfile->ep != ep) {
+                       spin_unlock_irq(&epfile->ffs->eps_lock);
+                       return -ESHUTDOWN;
+               }
+               /*
+                * Controller may require buffer size to be aligned to
+                * maxpacketsize of an out endpoint.
+                */
+               data_len = io_data->read ?
+                          usb_ep_align_maybe(gadget, ep->ep, io_data->len) :
+                          io_data->len;
+               spin_unlock_irq(&epfile->ffs->eps_lock);
+               data = kmalloc(data_len, GFP_KERNEL);
+               if (unlikely(!data))
+                       return -ENOMEM;
+               if (io_data->aio && !io_data->read) {
+                       /* AIO write: gather the user iovec into the buffer now */
+                       int i;
+                       size_t pos = 0;
+                       for (i = 0; i < io_data->nr_segs; i++) {
+                               if (unlikely(copy_from_user(&data[pos],
+                                            io_data->iovec[i].iov_base,
+                                            io_data->iovec[i].iov_len))) {
+                                       ret = -EFAULT;
+                                       goto error;
+                               }
+                               pos += io_data->iovec[i].iov_len;
+                       }
+               } else {
+                       if (!io_data->read &&
+                           unlikely(__copy_from_user(data, io_data->buf,
+                                                     io_data->len))) {
+                               ret = -EFAULT;
+                               goto error;
+                       }
+               }
+       }
+       /* We will be using request */
+       ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
+       if (unlikely(ret))
+               goto error;
+       spin_lock_irq(&epfile->ffs->eps_lock);
+       if (epfile->ep != ep) {
+               /* In the meantime, endpoint got disabled or changed. */
+               ret = -ESHUTDOWN;
+               spin_unlock_irq(&epfile->ffs->eps_lock);
+       } else if (halt) {
+               /* Halt */
+               if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
+                       usb_ep_set_halt(ep->ep);
+               spin_unlock_irq(&epfile->ffs->eps_lock);
+               ret = -EBADMSG;
+       } else {
+               /* Fire the request */
+               struct usb_request *req;
+               if (io_data->aio) {
+                       req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
+                       /*
+                        * NOTE(review): ret is still 0 here (success value
+                        * from ffs_mutex_lock), so this failure path returns
+                        * 0 instead of -ENOMEM — confirm/fix.
+                        */
+                       if (unlikely(!req))
+                               goto error_lock;
+                       req->buf      = data;
+                       req->length   = io_data->len;
+                       /* ownership of data/req moves to io_data; the
+                        * completion worker frees them */
+                       io_data->buf = data;
+                       io_data->ep = ep->ep;
+                       io_data->req = req;
+                       req->context  = io_data;
+                       req->complete = ffs_epfile_async_io_complete;
+                       ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+                       if (unlikely(ret)) {
+                               usb_ep_free_request(ep->ep, req);
+                               goto error_lock;
+                       }
+                       ret = -EIOCBQUEUED;
+                       spin_unlock_irq(&epfile->ffs->eps_lock);
+               } else {
+                       DECLARE_COMPLETION_ONSTACK(done);
+                       req = ep->req;
+                       req->buf      = data;
+                       req->length   = io_data->len;
+                       req->context  = &done;
+                       req->complete = ffs_epfile_io_complete;
+                       ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+                       spin_unlock_irq(&epfile->ffs->eps_lock);
+                       if (unlikely(ret < 0)) {
+                               /* nop */
+                       } else if (unlikely(
+                                  wait_for_completion_interruptible(&done))) {
+                               ret = -EINTR;
+                               usb_ep_dequeue(ep->ep, req);
+                       } else {
+                               /*
+                                * XXX We may end up silently droping data
+                                * here.  Since data_len (i.e. req->length) may
+                                * be bigger than len (after being rounded up
+                                * to maxpacketsize), we may end up with more
+                                * data then user space has space for.
+                                */
+                               ret = ep->status;
+                               if (io_data->read && ret > 0) {
+                                       ret = min_t(size_t, ret, io_data->len);
+                                       if (unlikely(copy_to_user(io_data->buf,
+                                               data, ret)))
+                                               ret = -EFAULT;
+                               }
+                       }
+                       kfree(data);
+               }
+       }
+       mutex_unlock(&epfile->mutex);
+       return ret;
+ error_lock:
+       spin_unlock_irq(&epfile->ffs->eps_lock);
+       mutex_unlock(&epfile->mutex);
+ error:
+       kfree(data);
+       return ret;
+ }
+ /* Synchronous write(): wrap the arguments and hand off to ffs_epfile_io(). */
+ static ssize_t
+ ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
+                loff_t *ptr)
+ {
+       struct ffs_io_data io_data;
+       ENTER();
+       io_data.aio = false;
+       io_data.read = false;
+       /* NOTE(review): cast should spell (char __user *) for sparse — confirm */
+       io_data.buf = (char * __user)buf;
+       io_data.len = len;
+       return ffs_epfile_io(file, &io_data);
+ }
+ /* Synchronous read(): wrap the arguments and hand off to ffs_epfile_io(). */
+ static ssize_t
+ ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct ffs_io_data io_data;
+       ENTER();
+       io_data.aio = false;
+       io_data.read = true;
+       io_data.buf = buf;
+       io_data.len = len;
+       return ffs_epfile_io(file, &io_data);
+ }
+ /* open() on an epfile: only valid while the function is FFS_ACTIVE. */
+ static int
+ ffs_epfile_open(struct inode *inode, struct file *file)
+ {
+       struct ffs_epfile *epfile = inode->i_private;
+       ENTER();
+       if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+               return -ENODEV;
+       file->private_data = epfile;
+       ffs_data_opened(epfile->ffs);
+       return 0;
+ }
+ /*
+  * kiocb cancel hook: dequeue the in-flight USB request, if the AIO
+  * bookkeeping is still attached to the kiocb.
+  */
+ static int ffs_aio_cancel(struct kiocb *kiocb)
+ {
+       struct ffs_io_data *io_data = kiocb->private;
+       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+       int value;
+       ENTER();
+       spin_lock_irq(&epfile->ffs->eps_lock);
+       if (likely(io_data && io_data->ep && io_data->req))
+               value = usb_ep_dequeue(io_data->ep, io_data->req);
+       else
+               value = -EINVAL;
+       spin_unlock_irq(&epfile->ffs->eps_lock);
+       return value;
+ }
+ /*
+  * aio_write(): the iovec is consumed synchronously inside
+  * ffs_epfile_io() (data is gathered before the request is queued), so
+  * unlike the read path no private iovec copy is needed.
+  */
+ static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb,
+                                   const struct iovec *iovec,
+                                   unsigned long nr_segs, loff_t loff)
+ {
+       struct ffs_io_data *io_data;
+       ENTER();
+       io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
+       if (unlikely(!io_data))
+               return -ENOMEM;
+       io_data->aio = true;
+       io_data->read = false;
+       io_data->kiocb = kiocb;
+       io_data->iovec = iovec;
+       io_data->nr_segs = nr_segs;
+       io_data->len = kiocb->ki_nbytes;
+       io_data->mm = current->mm;
+       kiocb->private = io_data;
+       kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+       return ffs_epfile_io(kiocb->ki_filp, io_data);
+ }
/*
 * Asynchronous read on an endpoint file (aio_read hook).
 *
 * The iovec array is duplicated into kernel memory so it remains valid
 * after this call returns; the copy travels with io_data and is freed
 * by the ffs_io_data machinery (not visible in this block).
 */
static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb,
				   const struct iovec *iovec,
				   unsigned long nr_segs, loff_t loff)
{
	struct ffs_io_data *io_data;
	struct iovec *iovec_copy;

	ENTER();

	iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL);
	if (unlikely(!iovec_copy))
		return -ENOMEM;

	memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs);

	io_data = kmalloc(sizeof(*io_data), GFP_KERNEL);
	if (unlikely(!io_data)) {
		/* undo the iovec duplication on the error path */
		kfree(iovec_copy);
		return -ENOMEM;
	}

	io_data->aio = true;
	io_data->read = true;
	io_data->kiocb = kiocb;
	io_data->iovec = iovec_copy;
	io_data->nr_segs = nr_segs;
	io_data->len = kiocb->ki_nbytes;
	/* submitter's mm, for completion work done in another context */
	io_data->mm = current->mm;

	/* let ffs_aio_cancel() find this request via kiocb->private */
	kiocb->private = io_data;

	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);

	return ffs_epfile_io(kiocb->ki_filp, io_data);
}
+ static int
+ ffs_epfile_release(struct inode *inode, struct file *file)
+ {
+       struct ffs_epfile *epfile = inode->i_private;
+       ENTER();
+       ffs_data_closed(epfile->ffs);
+       return 0;
+ }
/*
 * ioctl(2) on an endpoint file: FIFO status/flush, clear-halt and
 * endpoint-number reverse mapping.  eps_lock protects epfile->ep,
 * which may be torn down concurrently by ffs_func_eps_disable().
 */
static long ffs_epfile_ioctl(struct file *file, unsigned code,
			     unsigned long value)
{
	struct ffs_epfile *epfile = file->private_data;
	int ret;

	ENTER();

	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
		return -ENODEV;

	spin_lock_irq(&epfile->ffs->eps_lock);
	if (likely(epfile->ep)) {
		switch (code) {
		case FUNCTIONFS_FIFO_STATUS:
			ret = usb_ep_fifo_status(epfile->ep->ep);
			break;
		case FUNCTIONFS_FIFO_FLUSH:
			usb_ep_fifo_flush(epfile->ep->ep);
			ret = 0;
			break;
		case FUNCTIONFS_CLEAR_HALT:
			ret = usb_ep_clear_halt(epfile->ep->ep);
			break;
		case FUNCTIONFS_ENDPOINT_REVMAP:
			/* report the logical endpoint number */
			ret = epfile->ep->num;
			break;
		default:
			ret = -ENOTTY;
		}
	} else {
		/* endpoint currently disabled */
		ret = -ENODEV;
	}
	spin_unlock_irq(&epfile->ffs->eps_lock);

	return ret;
}
/* file_operations for the per-endpoint "epN" files created by
 * ffs_epfiles_create() */
static const struct file_operations ffs_epfile_operations = {
	.llseek =	no_llseek,
	.open =		ffs_epfile_open,
	.write =	ffs_epfile_write,
	.read =		ffs_epfile_read,
	.aio_write =	ffs_epfile_aio_write,
	.aio_read =	ffs_epfile_aio_read,
	.release =	ffs_epfile_release,
	.unlocked_ioctl =	ffs_epfile_ioctl,
};
+ /* File system and super block operations ***********************************/
+ /*
+  * Mounting the file system creates a controller file, used first for
+  * function configuration then later for event monitoring.
+  */
/*
 * Allocate and initialise an inode on @sb.  @data becomes i_private,
 * @perms supplies mode/uid/gid; @fops and @iops are optional.
 * Returns the inode or NULL on allocation failure.
 */
static struct inode *__must_check
ffs_sb_make_inode(struct super_block *sb, void *data,
		  const struct file_operations *fops,
		  const struct inode_operations *iops,
		  struct ffs_file_perms *perms)
{
	struct inode *inode;

	ENTER();

	inode = new_inode(sb);

	if (likely(inode)) {
		struct timespec current_time = CURRENT_TIME;

		inode->i_ino	 = get_next_ino();
		inode->i_mode	 = perms->mode;
		inode->i_uid	 = perms->uid;
		inode->i_gid	 = perms->gid;
		/* stamp all three times with the same snapshot */
		inode->i_atime	 = current_time;
		inode->i_mtime	 = current_time;
		inode->i_ctime	 = current_time;
		inode->i_private = data;
		if (fops)
			inode->i_fop = fops;
		if (iops)
			inode->i_op  = iops;
	}

	return inode;
}
/* Create "regular" file named @name in the superblock root, using the
 * filesystem-wide file permissions.  Returns the new inode (optionally
 * storing the dentry in *@dentry_p) or NULL on failure. */
static struct inode *ffs_sb_create_file(struct super_block *sb,
					const char *name, void *data,
					const struct file_operations *fops,
					struct dentry **dentry_p)
{
	struct ffs_data *ffs = sb->s_fs_info;
	struct dentry	*dentry;
	struct inode	*inode;

	ENTER();

	dentry = d_alloc_name(sb->s_root, name);
	if (unlikely(!dentry))
		return NULL;

	inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
	if (unlikely(!inode)) {
		/* drop the dentry reference we just took */
		dput(dentry);
		return NULL;
	}

	d_add(dentry, inode);
	if (dentry_p)
		*dentry_p = dentry;

	return inode;
}
/* Super block */

/* minimal super_operations: library statfs, delete inodes on last use */
static const struct super_operations ffs_sb_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};
/* Options gathered by ffs_fs_parse_opts(), consumed by ffs_sb_fill(). */
struct ffs_sb_fill_data {
	struct ffs_file_perms perms;	/* mode/uid/gid for created files */
	umode_t root_mode;		/* mode of the root directory */
	const char *dev_name;		/* device name given at mount time */
	struct ffs_data *ffs_data;	/* instance to attach; NULLed once consumed */
};
/*
 * mount_nodev() callback: populate the superblock with a root
 * directory and the "ep0" control file.  On success the ffs_data
 * pointer is moved from @data into sb->s_fs_info (data->ffs_data is
 * cleared so the mount error path does not double-release it).
 */
static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
{
	struct ffs_sb_fill_data *data = _data;
	struct inode	*inode;
	struct ffs_data	*ffs = data->ffs_data;

	ENTER();

	ffs->sb              = sb;
	data->ffs_data       = NULL;	/* ownership transferred to sb */
	sb->s_fs_info        = ffs;
	sb->s_blocksize      = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic          = FUNCTIONFS_MAGIC;
	sb->s_op             = &ffs_sb_operations;
	sb->s_time_gran      = 1;

	/* Root inode */
	data->perms.mode = data->root_mode;
	inode = ffs_sb_make_inode(sb, NULL,
				  &simple_dir_operations,
				  &simple_dir_inode_operations,
				  &data->perms);
	/* d_make_root() tolerates a NULL inode and returns NULL */
	sb->s_root = d_make_root(inode);
	if (unlikely(!sb->s_root))
		return -ENOMEM;

	/* EP0 file */
	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
					 &ffs_ep0_operations, NULL)))
		return -ENOMEM;

	return 0;
}
/*
 * Parse comma-separated "key=value" mount options into @data.
 * Recognised keys: rmode (root dir mode), fmode (ep file mode),
 * mode (both at once), uid, gid.  The options string is modified in
 * place (',' and '=' are overwritten with NULs).  Returns 0 or -EINVAL.
 */
static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
{
	ENTER();

	if (!opts || !*opts)
		return 0;

	for (;;) {
		unsigned long value;
		char *eq, *comma;

		/* Option limit */
		comma = strchr(opts, ',');
		if (comma)
			*comma = 0;

		/* Value limit */
		eq = strchr(opts, '=');
		if (unlikely(!eq)) {
			pr_err("'=' missing in %s\n", opts);
			return -EINVAL;
		}
		*eq = 0;

		/* Parse value */
		if (kstrtoul(eq + 1, 0, &value)) {
			pr_err("%s: invalid value: %s\n", opts, eq + 1);
			return -EINVAL;
		}

		/* Interpret option -- dispatch on the key's length first */
		switch (eq - opts) {
		case 5:
			if (!memcmp(opts, "rmode", 5))
				data->root_mode  = (value & 0555) | S_IFDIR;
			else if (!memcmp(opts, "fmode", 5))
				data->perms.mode = (value & 0666) | S_IFREG;
			else
				goto invalid;
			break;

		case 4:
			/* "mode" sets the root dir and file modes together */
			if (!memcmp(opts, "mode", 4)) {
				data->root_mode  = (value & 0555) | S_IFDIR;
				data->perms.mode = (value & 0666) | S_IFREG;
			} else {
				goto invalid;
			}
			break;

		case 3:
			/* uid/gid are mapped through the current user ns */
			if (!memcmp(opts, "uid", 3)) {
				data->perms.uid = make_kuid(current_user_ns(), value);
				if (!uid_valid(data->perms.uid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
					return -EINVAL;
				}
			} else if (!memcmp(opts, "gid", 3)) {
				data->perms.gid = make_kgid(current_user_ns(), value);
				if (!gid_valid(data->perms.gid)) {
					pr_err("%s: unmapped value: %lu\n", opts, value);
					return -EINVAL;
				}
			} else {
				goto invalid;
			}
			break;

		default:
invalid:
			pr_err("%s: invalid option\n", opts);
			return -EINVAL;
		}

		/* Next iteration */
		if (!comma)
			break;
		opts = comma + 1;
	}

	return 0;
}
+ /* "mount -t functionfs dev_name /dev/function" ends up here */
+ static struct dentry *
+ ffs_fs_mount(struct file_system_type *t, int flags,
+             const char *dev_name, void *opts)
+ {
+       struct ffs_sb_fill_data data = {
+               .perms = {
+                       .mode = S_IFREG | 0600,
+                       .uid = GLOBAL_ROOT_UID,
+                       .gid = GLOBAL_ROOT_GID,
+               },
+               .root_mode = S_IFDIR | 0500,
+       };
+       struct dentry *rv;
+       int ret;
+       void *ffs_dev;
+       struct ffs_data *ffs;
+       ENTER();
+       ret = ffs_fs_parse_opts(&data, opts);
+       if (unlikely(ret < 0))
+               return ERR_PTR(ret);
+       ffs = ffs_data_new();
+       if (unlikely(!ffs))
+               return ERR_PTR(-ENOMEM);
+       ffs->file_perms = data.perms;
+       ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
+       if (unlikely(!ffs->dev_name)) {
+               ffs_data_put(ffs);
+               return ERR_PTR(-ENOMEM);
+       }
+       ffs_dev = ffs_acquire_dev(dev_name);
+       if (IS_ERR(ffs_dev)) {
+               ffs_data_put(ffs);
+               return ERR_CAST(ffs_dev);
+       }
+       ffs->private_data = ffs_dev;
+       data.ffs_data = ffs;
+       rv = mount_nodev(t, flags, &data, ffs_sb_fill);
+       if (IS_ERR(rv) && data.ffs_data) {
+               ffs_release_dev(data.ffs_data);
+               ffs_data_put(data.ffs_data);
+       }
+       return rv;
+ }
/* Unmount: tear down the superblock, then release the ffs device and
 * drop the reference the mount held on the ffs_data. */
static void
ffs_fs_kill_sb(struct super_block *sb)
{
	ENTER();

	kill_litter_super(sb);
	if (sb->s_fs_info) {
		ffs_release_dev(sb->s_fs_info);
		ffs_data_put(sb->s_fs_info);
	}
}
/* the "functionfs" filesystem type registered in functionfs_init() */
static struct file_system_type ffs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "functionfs",
	.mount		= ffs_fs_mount,
	.kill_sb	= ffs_fs_kill_sb,
};
MODULE_ALIAS_FS("functionfs");
+ /* Driver's main init/cleanup functions *************************************/
+ static int functionfs_init(void)
+ {
+       int ret;
+       ENTER();
+       ret = register_filesystem(&ffs_fs_type);
+       if (likely(!ret))
+               pr_info("file system registered\n");
+       else
+               pr_err("failed registering file system (%d)\n", ret);
+       return ret;
+ }
/* Module exit helper: unregister the "functionfs" filesystem type. */
static void functionfs_cleanup(void)
{
	ENTER();

	pr_info("unloading\n");
	unregister_filesystem(&ffs_fs_type);
}
+ /* ffs_data and ffs_function construction and destruction code **************/
+ static void ffs_data_clear(struct ffs_data *ffs);
+ static void ffs_data_reset(struct ffs_data *ffs);
/* Take a reference on @ffs (paired with ffs_data_put()). */
static void ffs_data_get(struct ffs_data *ffs)
{
	ENTER();

	atomic_inc(&ffs->ref);
}
/* Take both a plain reference and an "opened" count on @ffs
 * (paired with ffs_data_closed()). */
static void ffs_data_opened(struct ffs_data *ffs)
{
	ENTER();

	atomic_inc(&ffs->ref);
	atomic_inc(&ffs->opened);
}
/* Drop a reference on @ffs; the last put tears everything down and
 * frees the structure itself. */
static void ffs_data_put(struct ffs_data *ffs)
{
	ENTER();

	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
		pr_info("%s(): freeing\n", __func__);
		ffs_data_clear(ffs);
		/* nobody may still be sleeping on us at this point */
		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
		       waitqueue_active(&ffs->ep0req_completion.wait));
		kfree(ffs->dev_name);
		kfree(ffs);
	}
}
/* Drop an "opened" count; when the last opener goes away the instance
 * is reset to its pristine state.  Also drops the paired reference. */
static void ffs_data_closed(struct ffs_data *ffs)
{
	ENTER();

	if (atomic_dec_and_test(&ffs->opened)) {
		ffs->state = FFS_CLOSING;
		ffs_data_reset(ffs);
	}

	ffs_data_put(ffs);
}
/* Allocate a zeroed ffs_data with one reference, in
 * FFS_READ_DESCRIPTORS state.  Returns NULL on allocation failure. */
static struct ffs_data *ffs_data_new(void)
{
	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
	if (unlikely(!ffs))
		return NULL;

	ENTER();

	atomic_set(&ffs->ref, 1);
	atomic_set(&ffs->opened, 0);
	ffs->state = FFS_READ_DESCRIPTORS;
	mutex_init(&ffs->mutex);
	spin_lock_init(&ffs->eps_lock);
	init_waitqueue_head(&ffs->ev.waitq);
	init_completion(&ffs->ep0req_completion);

	/* XXX REVISIT need to update it in some places, or do we? */
	ffs->ev.can_stall = 1;

	return ffs;
}
/* Release everything hanging off @ffs: user callback, epfiles and the
 * raw descriptor/string buffers.  Pointers are left dangling -- the
 * callers either free @ffs right after or NULL them (ffs_data_reset). */
static void ffs_data_clear(struct ffs_data *ffs)
{
	ENTER();

	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
		ffs_closed(ffs);

	/* must have been unbound before we get here */
	BUG_ON(ffs->gadget);

	if (ffs->epfiles)
		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);

	kfree(ffs->raw_descs_data);
	kfree(ffs->raw_strings);
	kfree(ffs->stringtabs);
}
+ static void ffs_data_reset(struct ffs_data *ffs)
+ {
+       ENTER();
+       ffs_data_clear(ffs);
+       ffs->epfiles = NULL;
+       ffs->raw_descs_data = NULL;
+       ffs->raw_descs = NULL;
+       ffs->raw_strings = NULL;
+       ffs->stringtabs = NULL;
+       ffs->raw_descs_length = 0;
+       ffs->fs_descs_count = 0;
+       ffs->hs_descs_count = 0;
+       ffs->ss_descs_count = 0;
+       ffs->strings_count = 0;
+       ffs->interfaces_count = 0;
+       ffs->eps_count = 0;
+       ffs->ev.count = 0;
+       ffs->state = FFS_READ_DESCRIPTORS;
+       ffs->setup_state = FFS_NO_SETUP;
+       ffs->flags = 0;
+ }
/*
 * Bind @ffs to the composite device: allocate USB string ids and the
 * ep0 request, fix up the string tables and record the gadget.
 * Returns 0, -EBADFD if not active / already bound, or a negative
 * errno from the string/request allocation.
 */
static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
{
	struct usb_gadget_strings **lang;
	int first_id;

	ENTER();

	if (WARN_ON(ffs->state != FFS_ACTIVE
		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
		return -EBADFD;

	first_id = usb_string_ids_n(cdev, ffs->strings_count);
	if (unlikely(first_id < 0))
		return first_id;

	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
	if (unlikely(!ffs->ep0req))
		return -ENOMEM;
	ffs->ep0req->complete = ffs_ep0_complete;
	ffs->ep0req->context = ffs;

	/* stringtabs is NULL when the user supplied no strings */
	lang = ffs->stringtabs;
	if (lang) {
		for (; *lang; ++lang) {
			struct usb_string *str = (*lang)->strings;
			/* every language table reuses the same id range */
			int id = first_id;
			for (; str->s; ++id, ++str)
				str->id = id;
		}
	}

	ffs->gadget = cdev->gadget;
	/* reference dropped again in functionfs_unbind() */
	ffs_data_get(ffs);
	return 0;
}
/* Undo functionfs_bind(): free the ep0 request, forget the gadget,
 * clear the bound flag and drop the bind-time reference. */
static void functionfs_unbind(struct ffs_data *ffs)
{
	ENTER();

	if (!WARN_ON(!ffs->gadget)) {
		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
		ffs->ep0req = NULL;
		ffs->gadget = NULL;
		clear_bit(FFS_FL_BOUND, &ffs->flags);
		ffs_data_put(ffs);
	}
}
+ static int ffs_epfiles_create(struct ffs_data *ffs)
+ {
+       struct ffs_epfile *epfile, *epfiles;
+       unsigned i, count;
+       ENTER();
+       count = ffs->eps_count;
+       epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
+       if (!epfiles)
+               return -ENOMEM;
+       epfile = epfiles;
+       for (i = 1; i <= count; ++i, ++epfile) {
+               epfile->ffs = ffs;
+               mutex_init(&epfile->mutex);
+               init_waitqueue_head(&epfile->wait);
+               sprintf(epfiles->name, "ep%u",  i);
+               if (!unlikely(ffs_sb_create_file(ffs->sb, epfiles->name, epfile,
+                                                &ffs_epfile_operations,
+                                                &epfile->dentry))) {
+                       ffs_epfiles_destroy(epfiles, i - 1);
+                       return -ENOMEM;
+               }
+       }
+       ffs->epfiles = epfiles;
+       return 0;
+ }
/* Remove @count endpoint files and free the epfiles array.  Callers
 * guarantee nobody holds the mutex or sleeps on the waitqueue. */
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
{
	struct ffs_epfile *epfile = epfiles;

	ENTER();

	for (; count; --count, ++epfile) {
		BUG_ON(mutex_is_locked(&epfile->mutex) ||
		       waitqueue_active(&epfile->wait));
		if (epfile->dentry) {
			d_delete(epfile->dentry);
			dput(epfile->dentry);
			epfile->dentry = NULL;
		}
	}

	kfree(epfiles);
}
/* Disable every endpoint of @func and detach it from its epfile.
 * Note: the do/while assumes eps_count is non-zero here. */
static void ffs_func_eps_disable(struct ffs_function *func)
{
	struct ffs_ep *ep         = func->eps;
	struct ffs_epfile *epfile = func->ffs->epfiles;
	unsigned count            = func->ffs->eps_count;
	unsigned long flags;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	do {
		/* pending requests get nuked */
		if (likely(ep->ep))
			usb_ep_disable(ep->ep);
		epfile->ep = NULL;

		++ep;
		++epfile;
	} while (--count);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
/*
 * Enable every endpoint of @func for the gadget's current speed and
 * attach each to its epfile, waking readers/writers blocked on it.
 * Descriptor index: 0 = full speed, 1 = high speed, 2 = super speed;
 * falls back to the next lower speed's descriptor if one is missing.
 * Stops at the first failure and returns that error (earlier endpoints
 * stay enabled; note the do/while assumes eps_count is non-zero).
 */
static int ffs_func_eps_enable(struct ffs_function *func)
{
	struct ffs_data *ffs      = func->ffs;
	struct ffs_ep *ep         = func->eps;
	struct ffs_epfile *epfile = ffs->epfiles;
	unsigned count            = ffs->eps_count;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&func->ffs->eps_lock, flags);
	do {
		struct usb_endpoint_descriptor *ds;
		int desc_idx;

		if (ffs->gadget->speed == USB_SPEED_SUPER)
			desc_idx = 2;
		else if (ffs->gadget->speed == USB_SPEED_HIGH)
			desc_idx = 1;
		else
			desc_idx = 0;

		/* fall-back to lower speed if desc missing for current speed */
		do {
			ds = ep->descs[desc_idx];
		} while (!ds && --desc_idx >= 0);

		if (!ds) {
			ret = -EINVAL;
			break;
		}

		ep->ep->driver_data = ep;
		ep->ep->desc = ds;
		ret = usb_ep_enable(ep->ep);
		if (likely(!ret)) {
			epfile->ep = ep;
			epfile->in = usb_endpoint_dir_in(ds);
			epfile->isoc = usb_endpoint_xfer_isoc(ds);
		} else {
			break;
		}

		wake_up(&epfile->wait);

		++ep;
		++epfile;
	} while (--count);
	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);

	return ret;
}
/* Parsing and building descriptors and strings *****************************/

/*
 * This validates if data pointed by data is a valid USB descriptor as
 * well as record how many interfaces, endpoints and strings are
 * required by given configuration.  Returns address after the
 * descriptor or NULL if data is invalid.
 */

/* entity kinds reported to ffs_entity_callback during descriptor walk */
enum ffs_entity_type {
	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
};

/* kinds of Microsoft OS descriptors handled by the OS-desc walk */
enum ffs_os_desc_type {
	FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP
};

/* invoked once per entity found while walking standard descriptors */
typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
				   u8 *valuep,
				   struct usb_descriptor_header *desc,
				   void *priv);

/* invoked once per ext-compat/ext-prop entry while walking OS descriptors */
typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
				    struct usb_os_desc_header *h, void *data,
				    unsigned len, void *priv);
/*
 * Validate a single USB descriptor at @data (at most @len bytes) and
 * report the entities it contains (interface/string/endpoint numbers)
 * through @entity.  Returns the descriptor's length on success or a
 * negative errno on malformed input.
 */
static int __must_check ffs_do_single_desc(char *data, unsigned len,
					   ffs_entity_callback entity,
					   void *priv)
{
	struct usb_descriptor_header *_ds = (void *)data;
	u8 length;
	int ret;

	ENTER();

	/* At least two bytes are required: length and type */
	if (len < 2) {
		pr_vdebug("descriptor too short\n");
		return -EINVAL;
	}

	/* If we have at least as many bytes as the descriptor takes? */
	length = _ds->bLength;
	if (len < length) {
		pr_vdebug("descriptor longer then available data\n");
		return -EINVAL;
	}

/*
 * Per-entity sanity checks: interface numbers are unrestricted, string
 * indices must be non-zero, endpoint addresses must carry a non-zero
 * endpoint number.  __entity() validates the value and forwards it to
 * the caller's callback, propagating any error.
 */
#define __entity_check_INTERFACE(val)  1
#define __entity_check_STRING(val)     (val)
#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
#define __entity(type, val) do {					\
		pr_vdebug("entity " #type "(%02x)\n", (val));		\
		if (unlikely(!__entity_check_ ##type(val))) {		\
			pr_vdebug("invalid entity's value\n");		\
			return -EINVAL;					\
		}							\
		ret = entity(FFS_ ##type, &val, _ds, priv);		\
		if (unlikely(ret < 0)) {				\
			pr_debug("entity " #type "(%02x); ret = %d\n",	\
				 (val), ret);				\
			return ret;					\
		}							\
	} while (0)

	/* Parse descriptor depending on type. */
	switch (_ds->bDescriptorType) {

	case USB_DT_DEVICE:
	case USB_DT_CONFIG:
	case USB_DT_STRING:
	case USB_DT_DEVICE_QUALIFIER:
		/* function can't have any of those */
		pr_vdebug("descriptor reserved for gadget: %d\n",
		      _ds->bDescriptorType);
		return -EINVAL;

	case USB_DT_INTERFACE: {
		struct usb_interface_descriptor *ds = (void *)_ds;
		pr_vdebug("interface descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;

		__entity(INTERFACE, ds->bInterfaceNumber);
		if (ds->iInterface)
			__entity(STRING, ds->iInterface);
	}
		break;

	case USB_DT_ENDPOINT: {
		struct usb_endpoint_descriptor *ds = (void *)_ds;
		pr_vdebug("endpoint descriptor\n");
		if (length != USB_DT_ENDPOINT_SIZE &&
		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
			goto inv_length;
		__entity(ENDPOINT, ds->bEndpointAddress);
	}
		break;

	case HID_DT_HID:
		pr_vdebug("hid descriptor\n");
		if (length != sizeof(struct hid_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTG:
		if (length != sizeof(struct usb_otg_descriptor))
			goto inv_length;
		break;

	case USB_DT_INTERFACE_ASSOCIATION: {
		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
		pr_vdebug("interface association descriptor\n");
		if (length != sizeof *ds)
			goto inv_length;
		if (ds->iFunction)
			__entity(STRING, ds->iFunction);
	}
		break;

	case USB_DT_SS_ENDPOINT_COMP:
		pr_vdebug("EP SS companion descriptor\n");
		if (length != sizeof(struct usb_ss_ep_comp_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTHER_SPEED_CONFIG:
	case USB_DT_INTERFACE_POWER:
	case USB_DT_DEBUG:
	case USB_DT_SECURITY:
	case USB_DT_CS_RADIO_CONTROL:
		/* TODO */
		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
		return -EINVAL;

	default:
		/* We should never be here */
		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
		return -EINVAL;

inv_length:
		pr_vdebug("invalid length: %d (descriptor %d)\n",
			  _ds->bLength, _ds->bDescriptorType);
		return -EINVAL;
	}

/* the DESCRIPTOR undef is harmless: that checker was never defined */
#undef __entity
#undef __entity_check_DESCRIPTOR
#undef __entity_check_INTERFACE
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT

	return length;
}
/*
 * Walk @count descriptors in @data (up to @len bytes), invoking
 * @entity for each descriptor boundary (FFS_DESCRIPTOR) and for the
 * entities inside each one.  Returns the number of bytes consumed or
 * a negative errno.
 */
static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
				     ffs_entity_callback entity, void *priv)
{
	const unsigned _len = len;
	unsigned long num = 0;

	ENTER();

	for (;;) {
		int ret;

		/* after the last descriptor signal "end of walk" below */
		if (num == count)
			data = NULL;

		/* Record "descriptor" entity -- the running index is
		 * smuggled through the u8* argument, not dereferenced */
		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
		if (unlikely(ret < 0)) {
			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}

		if (!data)
			return _len - len;

		ret = ffs_do_single_desc(data, len, entity, priv);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		len -= ret;
		data += ret;
		++num;
	}
}
/* Counting pass callback: accumulate how many interfaces, strings and
 * endpoints the uploaded descriptors require into the ffs_data. */
static int __ffs_data_do_entity(enum ffs_entity_type type,
				u8 *valuep, struct usb_descriptor_header *desc,
				void *priv)
{
	struct ffs_data *ffs = priv;

	ENTER();

	switch (type) {
	case FFS_DESCRIPTOR:
		/* nothing to count per descriptor */
		break;

	case FFS_INTERFACE:
		/*
		 * Interfaces are indexed from zero so if we
		 * encountered interface "n" then there are at least
		 * "n+1" interfaces.
		 */
		if (*valuep >= ffs->interfaces_count)
			ffs->interfaces_count = *valuep + 1;
		break;

	case FFS_STRING:
		/*
		 * Strings are indexed from 1 (0 is magic ;) reserved
		 * for languages list or some such)
		 */
		if (*valuep > ffs->strings_count)
			ffs->strings_count = *valuep;
		break;

	case FFS_ENDPOINT:
		/* Endpoints are indexed from 1 as well. */
		if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
			ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
		break;
	}

	return 0;
}
/*
 * Validate a Microsoft OS feature-descriptor header: only version 1 is
 * accepted, and wIndex selects extended compatibility (0x4) or
 * extended properties (0x5), reported via @next_type.  Returns the
 * header size consumed, or -EINVAL.
 */
static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
				   struct usb_os_desc_header *desc)
{
	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
	u16 w_index = le16_to_cpu(desc->wIndex);

	if (bcd_version != 1) {
		pr_vdebug("unsupported os descriptors version: %d",
			  bcd_version);
		return -EINVAL;
	}
	switch (w_index) {
	case 0x4:
		*next_type = FFS_OS_DESC_EXT_COMPAT;
		break;
	case 0x5:
		*next_type = FFS_OS_DESC_EXT_PROP;
		break;
	default:
		pr_vdebug("unsupported os descriptor type: %d", w_index);
		return -EINVAL;
	}

	return sizeof(*desc);
}
/*
 * Process all extended compatibility/extended property descriptors
 * of a feature descriptor
 */
/* Returns bytes consumed, or the first negative errno from @entity. */
static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
					      enum ffs_os_desc_type type,
					      u16 feature_count,
					      ffs_os_desc_callback entity,
					      void *priv,
					      struct usb_os_desc_header *h)
{
	int ret;
	const unsigned _len = len;

	ENTER();

	/* loop over all ext compat/ext prop descriptors */
	while (feature_count--) {
		ret = entity(type, h, data, len, priv);
		if (unlikely(ret < 0)) {
			pr_debug("bad OS descriptor, type: %d\n", type);
			return ret;
		}
		/* entity() reports how many bytes it consumed */
		data += ret;
		len -= ret;
	}
	return _len - len;
}
/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
/*
 * Walk @count feature descriptors at @data (up to @len bytes): parse
 * each header, then hand its ext-compat/ext-prop entries to @entity.
 * Returns bytes consumed or a negative errno.
 */
static int __must_check ffs_do_os_descs(unsigned count,
					char *data, unsigned len,
					ffs_os_desc_callback entity, void *priv)
{
	const unsigned _len = len;
	unsigned long num = 0;

	ENTER();

	for (num = 0; num < count; ++num) {
		int ret;
		enum ffs_os_desc_type type;
		u16 feature_count;
		struct usb_os_desc_header *desc = (void *)data;

		if (len < sizeof(*desc))
			return -EINVAL;

		/*
		 * Record "descriptor" entity.
		 * Process dwLength, bcdVersion, wIndex, get b/wCount.
		 * Move the data pointer to the beginning of extended
		 * compatibilities proper or extended properties proper
		 * portions of the data
		 */
		if (le32_to_cpu(desc->dwLength) > len)
			return -EINVAL;

		ret = __ffs_do_os_desc_header(&type, desc);
		if (unlikely(ret < 0)) {
			pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
				 num, ret);
			return ret;
		}
		/*
		 * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??"
		 */
		feature_count = le16_to_cpu(desc->wCount);
		/* ext-compat's bCount is a byte; Reserved must be zero */
		if (type == FFS_OS_DESC_EXT_COMPAT &&
		    (feature_count > 255 || desc->Reserved))
				return -EINVAL;
		len -= ret;
		data += ret;

		/*
		 * Process all function/property descriptors
		 * of this Feature Descriptor
		 */
		ret = ffs_do_single_os_desc(data, len, type,
					    feature_count, entity, priv, desc);
		if (unlikely(ret < 0)) {
			pr_debug("%s returns %d\n", __func__, ret);
			return ret;
		}

		len -= ret;
		data += ret;
	}
	return _len - len;
}
+ /**
+  * Validate one OS descriptor (Ext Compat or Ext Prop) from the
+  * user-supplied blob and, for Ext Prop, account for the space it will
+  * need at bind time.  Returns the number of bytes consumed from @data,
+  * or -EINVAL on malformed input.
+  *
+  * Fixes vs the previous version: @length was a u8, so a dwSize with a
+  * multiple-of-256 component truncated and could slip past the
+  * "length != 14 + pnl + pdl" check; and pdl was read at offset
+  * 10 + pnl without verifying the buffer is that big (OOB read of an
+  * untrusted blob).
+  */
+ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+                                struct usb_os_desc_header *h, void *data,
+                                unsigned len, void *priv)
+ {
+       struct ffs_data *ffs = priv;
+       unsigned length;
+       ENTER();
+       switch (type) {
+       case FFS_OS_DESC_EXT_COMPAT: {
+               struct usb_ext_compat_desc *d = data;
+               int i;
+               if (len < sizeof(*d) ||
+                   d->bFirstInterfaceNumber >= ffs->interfaces_count ||
+                   d->Reserved1)
+                       return -EINVAL;
+               /* all reserved bytes must be zero */
+               for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
+                       if (d->Reserved2[i])
+                               return -EINVAL;
+               length = sizeof(struct usb_ext_compat_desc);
+       }
+               break;
+       case FFS_OS_DESC_EXT_PROP: {
+               struct usb_ext_prop_desc *d = data;
+               u32 type, pdl;
+               u16 pnl;
+               if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
+                       return -EINVAL;
+               length = le32_to_cpu(d->dwSize);
+               /* dwSize must not claim more bytes than we were given */
+               if (len < length)
+                       return -EINVAL;
+               type = le32_to_cpu(d->dwPropertyDataType);
+               if (type < USB_EXT_PROP_UNICODE ||
+                   type > USB_EXT_PROP_UNICODE_MULTI) {
+                       pr_vdebug("unsupported os descriptor property type: %d",
+                                 type);
+                       return -EINVAL;
+               }
+               pnl = le16_to_cpu(d->wPropertyNameLength);
+               /* the 4-byte property-data-length field must be in bounds */
+               if (length < 14 + pnl) {
+                       pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+                                 length, pnl, type);
+                       return -EINVAL;
+               }
+               pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
+               if (length != 14 + pnl + pdl) {
+                       pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
+                                 length, pnl, pdl, type);
+                       return -EINVAL;
+               }
+               ++ffs->ms_os_descs_ext_prop_count;
+               /* property name reported to the host as "WCHAR"s */
+               ffs->ms_os_descs_ext_prop_name_len += pnl * 2;
+               ffs->ms_os_descs_ext_prop_data_len += pdl;
+       }
+               break;
+       default:
+               pr_vdebug("unknown descriptor: %d\n", type);
+               return -EINVAL;
+       }
+       return length;
+ }
+ /*
+  * Parse and validate the descriptors blob userspace wrote to ep0.
+  * On success takes ownership of @_data (kept as ffs->raw_descs_data);
+  * on failure frees it.  Returns 0 or a negative errno.
+  *
+  * Fix vs the previous version: os_descs_count was read without
+  * checking that 4 bytes remain, allowing a read past the end of the
+  * user-controlled buffer; also drops a stray ';' after the if block.
+  */
+ static int __ffs_data_got_descs(struct ffs_data *ffs,
+                               char *const _data, size_t len)
+ {
+       char *data = _data, *raw_descs;
+       unsigned os_descs_count = 0, counts[3], flags;
+       int ret = -EINVAL, i;
+       ENTER();
+       if (get_unaligned_le32(data + 4) != len)
+               goto error;
+       switch (get_unaligned_le32(data)) {
+       case FUNCTIONFS_DESCRIPTORS_MAGIC:
+               /* legacy format always carries fs and hs counts */
+               flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC;
+               data += 8;
+               len  -= 8;
+               break;
+       case FUNCTIONFS_DESCRIPTORS_MAGIC_V2:
+               flags = get_unaligned_le32(data + 8);
+               if (flags & ~(FUNCTIONFS_HAS_FS_DESC |
+                             FUNCTIONFS_HAS_HS_DESC |
+                             FUNCTIONFS_HAS_SS_DESC |
+                             FUNCTIONFS_HAS_MS_OS_DESC)) {
+                       ret = -ENOSYS;
+                       goto error;
+               }
+               data += 12;
+               len  -= 12;
+               break;
+       default:
+               goto error;
+       }
+       /* Read fs_count, hs_count and ss_count (if present) */
+       for (i = 0; i < 3; ++i) {
+               if (!(flags & (1 << i))) {
+                       counts[i] = 0;
+               } else if (len < 4) {
+                       goto error;
+               } else {
+                       counts[i] = get_unaligned_le32(data);
+                       data += 4;
+                       len  -= 4;
+               }
+       }
+       /* after the loop i == 3, i.e. the FUNCTIONFS_HAS_MS_OS_DESC bit */
+       if (flags & (1 << i)) {
+               if (len < 4)
+                       goto error;
+               os_descs_count = get_unaligned_le32(data);
+               data += 4;
+               len -= 4;
+       }
+       /* Read descriptors */
+       raw_descs = data;
+       for (i = 0; i < 3; ++i) {
+               if (!counts[i])
+                       continue;
+               ret = ffs_do_descs(counts[i], data, len,
+                                  __ffs_data_do_entity, ffs);
+               if (ret < 0)
+                       goto error;
+               data += ret;
+               len  -= ret;
+       }
+       if (os_descs_count) {
+               ret = ffs_do_os_descs(os_descs_count, data, len,
+                                     __ffs_data_do_os_desc, ffs);
+               if (ret < 0)
+                       goto error;
+               data += ret;
+               len -= ret;
+       }
+       /* no descriptors at all, or trailing garbage, is an error */
+       if (raw_descs == data || len) {
+               ret = -EINVAL;
+               goto error;
+       }
+       ffs->raw_descs_data     = _data;
+       ffs->raw_descs          = raw_descs;
+       ffs->raw_descs_length   = data - raw_descs;
+       ffs->fs_descs_count     = counts[0];
+       ffs->hs_descs_count     = counts[1];
+       ffs->ss_descs_count     = counts[2];
+       ffs->ms_os_descs_count  = os_descs_count;
+       return 0;
+ error:
+       kfree(_data);
+       return ret;
+ }
+ /*
+  * Parse and validate the strings blob userspace wrote to ep0.
+  * On success takes ownership of @_data (kept as ffs->raw_strings; the
+  * usb_string entries point into it); on failure frees it.
+  *
+  * Fix vs the previous version: str_count is the number of strings *per
+  * language*, but it was used directly as the inner loop counter and
+  * hence hit zero after the first language; with lang_count >= 2 the
+  * second language's do-while then underflowed the u32 counter.  Use a
+  * per-language copy (str_per_lang) instead.
+  */
+ static int __ffs_data_got_strings(struct ffs_data *ffs,
+                                 char *const _data, size_t len)
+ {
+       u32 str_count, needed_count, lang_count;
+       struct usb_gadget_strings **stringtabs, *t;
+       struct usb_string *strings, *s;
+       const char *data = _data;
+       ENTER();
+       if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+                    get_unaligned_le32(data + 4) != len))
+               goto error;
+       str_count  = get_unaligned_le32(data + 8);
+       lang_count = get_unaligned_le32(data + 12);
+       /* if one is zero the other must be zero */
+       if (unlikely(!str_count != !lang_count))
+               goto error;
+       /* Do we have at least as many strings as descriptors need? */
+       needed_count = ffs->strings_count;
+       if (unlikely(str_count < needed_count))
+               goto error;
+       /*
+        * If we don't need any strings just return and free all
+        * memory.
+        */
+       if (!needed_count) {
+               kfree(_data);
+               return 0;
+       }
+       /* Allocate everything in one chunk so there's less maintenance. */
+       {
+               unsigned i = 0;
+               vla_group(d);
+               vla_item(d, struct usb_gadget_strings *, stringtabs,
+                       lang_count + 1);
+               vla_item(d, struct usb_gadget_strings, stringtab, lang_count);
+               vla_item(d, struct usb_string, strings,
+                       lang_count*(needed_count+1));
+               char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL);
+               if (unlikely(!vlabuf)) {
+                       kfree(_data);
+                       return -ENOMEM;
+               }
+               /* Initialize the VLA pointers */
+               stringtabs = vla_ptr(vlabuf, d, stringtabs);
+               t = vla_ptr(vlabuf, d, stringtab);
+               i = lang_count;
+               do {
+                       *stringtabs++ = t++;
+               } while (--i);
+               *stringtabs = NULL;
+               /* stringtabs = vlabuf = d_stringtabs for later kfree */
+               stringtabs = vla_ptr(vlabuf, d, stringtabs);
+               t = vla_ptr(vlabuf, d, stringtab);
+               s = vla_ptr(vlabuf, d, strings);
+               strings = s;
+       }
+       /* For each language */
+       data += 16;
+       len -= 16;
+       do { /* lang_count > 0 so we can use do-while */
+               unsigned needed = needed_count;
+               u32 str_per_lang = str_count;
+               if (unlikely(len < 3))
+                       goto error_free;
+               t->language = get_unaligned_le16(data);
+               t->strings  = s;
+               ++t;
+               data += 2;
+               len -= 2;
+               /* For each string */
+               do { /* str_count > 0 so we can use do-while */
+                       size_t length = strnlen(data, len);
+                       if (unlikely(length == len))
+                               goto error_free;
+                       /*
+                        * User may provide more strings then we need,
+                        * if that's the case we simply ignore the
+                        * rest
+                        */
+                       if (likely(needed)) {
+                               /*
+                                * s->id will be set while adding
+                                * function to configuration so for
+                                * now just leave garbage here.
+                                */
+                               s->s = data;
+                               --needed;
+                               ++s;
+                       }
+                       data += length + 1;
+                       len -= length + 1;
+               } while (--str_per_lang);
+               s->id = 0;   /* terminator */
+               s->s = NULL;
+               ++s;
+       } while (--lang_count);
+       /* Some garbage left? */
+       if (unlikely(len))
+               goto error_free;
+       /* Done! */
+       ffs->stringtabs = stringtabs;
+       ffs->raw_strings = _data;
+       return 0;
+ error_free:
+       kfree(stringtabs);
+ error:
+       kfree(_data);
+       return -EINVAL;
+ }
+ /* Events handling and management *******************************************/
+ /*
+  * Queue event @type for delivery to userspace, first purging queued
+  * events that it supersedes.  Caller must hold ffs->ev.waitq.lock
+  * (see the ffs_event_add() wrapper below).
+  */
+ static void __ffs_event_add(struct ffs_data *ffs,
+                           enum usb_functionfs_event_type type)
+ {
+       enum usb_functionfs_event_type rem_type1, rem_type2 = type;
+       int neg = 0;
+       /*
+        * Abort any unhandled setup
+        *
+        * We do not need to worry about some cmpxchg() changing value
+        * of ffs->setup_state without holding the lock because when
+        * state is FFS_SETUP_PENDING cmpxchg() in several places in
+        * the source does nothing.
+        */
+       if (ffs->setup_state == FFS_SETUP_PENDING)
+               ffs->setup_state = FFS_SETUP_CANCELLED;
+       switch (type) {
+       case FUNCTIONFS_RESUME:
+               rem_type2 = FUNCTIONFS_SUSPEND;
+               /* FALL THROUGH */
+       case FUNCTIONFS_SUSPEND:
+       case FUNCTIONFS_SETUP:
+               rem_type1 = type;
+               /* Discard all similar events */
+               break;
+       case FUNCTIONFS_BIND:
+       case FUNCTIONFS_UNBIND:
+       case FUNCTIONFS_DISABLE:
+       case FUNCTIONFS_ENABLE:
+               /* Discard everything other then power management. */
+               rem_type1 = FUNCTIONFS_SUSPEND;
+               rem_type2 = FUNCTIONFS_RESUME;
+               neg = 1;
+               break;
+       default:
+               BUG();
+       }
+       {
+               /*
+                * Compact the queue in place: keep an event iff
+                * (it matches rem_type1/rem_type2) == neg, i.e. drop
+                * matches normally, or keep only matches when neg.
+                */
+               u8 *ev  = ffs->ev.types, *out = ev;
+               unsigned n = ffs->ev.count;
+               for (; n; --n, ++ev)
+                       if ((*ev == rem_type1 || *ev == rem_type2) == neg)
+                               *out++ = *ev;
+                       else
+                               pr_vdebug("purging event %d\n", *ev);
+               ffs->ev.count = out - ffs->ev.types;
+       }
+       pr_vdebug("adding event %d\n", type);
+       ffs->ev.types[ffs->ev.count++] = type;
+       wake_up_locked(&ffs->ev.waitq);
+ }
+ /* Lock-taking wrapper around __ffs_event_add(). */
+ static void ffs_event_add(struct ffs_data *ffs,
+                         enum usb_functionfs_event_type type)
+ {
+       unsigned long irqflags;
+       spin_lock_irqsave(&ffs->ev.waitq.lock, irqflags);
+       __ffs_event_add(ffs, type);
+       spin_unlock_irqrestore(&ffs->ev.waitq.lock, irqflags);
+ }
+ /* Bind/unbind USB function hooks *******************************************/
+ /*
+  * ffs_do_descs() callback, first bind pass: store each descriptor
+  * pointer into the per-speed descriptor array and, for endpoint
+  * descriptors, claim a usb_ep via autoconfig and preallocate its
+  * request.  @valuep carries the descriptor index within the array.
+  */
+ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
+                                   struct usb_descriptor_header *desc,
+                                   void *priv)
+ {
+       struct usb_endpoint_descriptor *ds = (void *)desc;
+       struct ffs_function *func = priv;
+       struct ffs_ep *ffs_ep;
+       unsigned ep_desc_id, idx;
+       static const char *speed_names[] = { "full", "high", "super" };
+       if (type != FFS_DESCRIPTOR)
+               return 0;
+       /*
+        * If ss_descriptors is not NULL, we are reading super speed
+        * descriptors; if hs_descriptors is not NULL, we are reading high
+        * speed descriptors; otherwise, we are reading full speed
+        * descriptors.
+        */
+       if (func->function.ss_descriptors) {
+               ep_desc_id = 2;
+               func->function.ss_descriptors[(long)valuep] = desc;
+       } else if (func->function.hs_descriptors) {
+               ep_desc_id = 1;
+               func->function.hs_descriptors[(long)valuep] = desc;
+       } else {
+               ep_desc_id = 0;
+               func->function.fs_descriptors[(long)valuep]    = desc;
+       }
+       if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+               return 0;
+       /* user-blob endpoints are numbered from 1; eps[] is 0-based */
+       idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+       ffs_ep = func->eps + idx;
+       if (unlikely(ffs_ep->descs[ep_desc_id])) {
+               pr_err("two %sspeed descriptors for EP %d\n",
+                         speed_names[ep_desc_id],
+                         ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+               return -EINVAL;
+       }
+       ffs_ep->descs[ep_desc_id] = ds;
+       ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
+       if (ffs_ep->ep) {
+               /* already claimed in an earlier speed pass: reuse address */
+               ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
+               if (!ds->wMaxPacketSize)
+                       ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
+       } else {
+               struct usb_request *req;
+               struct usb_ep *ep;
+               pr_vdebug("autoconfig\n");
+               ep = usb_ep_autoconfig(func->gadget, ds);
+               if (unlikely(!ep))
+                       return -ENOTSUPP;
+               ep->driver_data = func->eps + idx;
+               req = usb_ep_alloc_request(ep, GFP_KERNEL);
+               if (unlikely(!req))
+                       return -ENOMEM;
+               ffs_ep->ep  = ep;
+               ffs_ep->req = req;
+               /* remember idx+1 so 0 can mean "unmapped" in revmap */
+               func->eps_revmap[ds->bEndpointAddress &
+                                USB_ENDPOINT_NUMBER_MASK] = idx + 1;
+       }
+       ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+       return 0;
+ }
+ /*
+  * ffs_do_descs() callback, second bind pass: rewrite interface, string
+  * and endpoint numbers in the copied descriptors to the values
+  * allocated by the composite framework / autoconfig.
+  */
+ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
+                                  struct usb_descriptor_header *desc,
+                                  void *priv)
+ {
+       struct ffs_function *func = priv;
+       unsigned idx;
+       u8 newValue;
+       switch (type) {
+       default:
+       case FFS_DESCRIPTOR:
+               /* Handled in previous pass by __ffs_func_bind_do_descs() */
+               return 0;
+       case FFS_INTERFACE:
+               idx = *valuep;
+               /* allocate a host interface number on first sight */
+               if (func->interfaces_nums[idx] < 0) {
+                       int id = usb_interface_id(func->conf, &func->function);
+                       if (unlikely(id < 0))
+                               return id;
+                       func->interfaces_nums[idx] = id;
+               }
+               newValue = func->interfaces_nums[idx];
+               break;
+       case FFS_STRING:
+               /* String' IDs are allocated when fsf_data is bound to cdev */
+               newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
+               break;
+       case FFS_ENDPOINT:
+               /*
+                * USB_DT_ENDPOINT are handled in
+                * __ffs_func_bind_do_descs().
+                */
+               if (desc->bDescriptorType == USB_DT_ENDPOINT)
+                       return 0;
+               idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
+               if (unlikely(!func->eps[idx].ep))
+                       return -EINVAL;
+               {
+                       /* take the address from whichever speed desc exists */
+                       struct usb_endpoint_descriptor **descs;
+                       descs = func->eps[idx].descs;
+                       newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
+               }
+               break;
+       }
+       pr_vdebug("%02x -> %02x\n", *valuep, newValue);
+       *valuep = newValue;
+       return 0;
+ }
+ /*
+  * ffs_do_os_descs() callback during bind: copy Ext Compat IDs and Ext
+  * Prop entries into the os_desc tables allocated by _ffs_func_bind(),
+  * carving name/data storage out of the ms_os_descs_*_avail cursors.
+  * Returns the number of bytes of the source descriptor consumed.
+  */
+ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
+                                     struct usb_os_desc_header *h, void *data,
+                                     unsigned len, void *priv)
+ {
+       struct ffs_function *func = priv;
+       u8 length = 0;
+       switch (type) {
+       case FFS_OS_DESC_EXT_COMPAT: {
+               struct usb_ext_compat_desc *desc = data;
+               struct usb_os_desc_table *t;
+               t = &func->function.os_desc_table[desc->bFirstInterfaceNumber];
+               t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber];
+               /* CompatibleID and SubCompatibleID are copied as one run */
+               memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID,
+                      ARRAY_SIZE(desc->CompatibleID) +
+                      ARRAY_SIZE(desc->SubCompatibleID));
+               length = sizeof(*desc);
+       }
+               break;
+       case FFS_OS_DESC_EXT_PROP: {
+               struct usb_ext_prop_desc *desc = data;
+               struct usb_os_desc_table *t;
+               struct usb_os_desc_ext_prop *ext_prop;
+               char *ext_prop_name;
+               char *ext_prop_data;
+               t = &func->function.os_desc_table[h->interface];
+               t->if_id = func->interfaces_nums[h->interface];
+               /* carve one ext_prop entry off the preallocated pool */
+               ext_prop = func->ffs->ms_os_descs_ext_prop_avail;
+               func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop);
+               ext_prop->type = le32_to_cpu(desc->dwPropertyDataType);
+               ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength);
+               ext_prop->data_len = le32_to_cpu(*(u32 *)
+                       usb_ext_prop_data_len_ptr(data, ext_prop->name_len));
+               length = ext_prop->name_len + ext_prop->data_len + 14;
+               ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail;
+               func->ffs->ms_os_descs_ext_prop_name_avail +=
+                       ext_prop->name_len;
+               ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail;
+               func->ffs->ms_os_descs_ext_prop_data_avail +=
+                       ext_prop->data_len;
+               memcpy(ext_prop_data,
+                      usb_ext_prop_data_ptr(data, ext_prop->name_len),
+                      ext_prop->data_len);
+               /* unicode data reported to the host as "WCHAR"s */
+               switch (ext_prop->type) {
+               case USB_EXT_PROP_UNICODE:
+               case USB_EXT_PROP_UNICODE_ENV:
+               case USB_EXT_PROP_UNICODE_LINK:
+               case USB_EXT_PROP_UNICODE_MULTI:
+                       ext_prop->data_len *= 2;
+                       break;
+               }
+               ext_prop->data = ext_prop_data;
+               memcpy(ext_prop_name, usb_ext_prop_name_ptr(data),
+                      ext_prop->name_len);
+               /* property name reported to the host as "WCHAR"s */
+               ext_prop->name_len *= 2;
+               ext_prop->name = ext_prop_name;
+               t->os_desc->ext_prop_len +=
+                       ext_prop->name_len + ext_prop->data_len + 14;
+               ++t->os_desc->ext_prop_count;
+               list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop);
+       }
+               break;
+       default:
+               pr_vdebug("unknown descriptor: %d\n", type);
+       }
+       return length;
+ }
+ /*
+  * Resolve the f_fs_opts instance for @f, take a reference on its
+  * ffs_data and, for the first function of this instance, call
+  * functionfs_bind().  Returns the opts pointer or an ERR_PTR.
+  */
+ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
+                                               struct usb_configuration *c)
+ {
+       struct ffs_function *func = ffs_func_from_usb(f);
+       struct f_fs_opts *ffs_opts =
+               container_of(f->fi, struct f_fs_opts, func_inst);
+       int ret;
+       ENTER();
+       /*
+        * Legacy gadget triggers binding in functionfs_ready_callback,
+        * which already uses locking; taking the same lock here would
+        * cause a deadlock.
+        *
+        * Configfs-enabled gadgets however do need ffs_dev_lock.
+        */
+       if (!ffs_opts->no_configfs)
+               ffs_dev_lock();
+       ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV;
+       func->ffs = ffs_opts->dev->ffs_data;
+       if (!ffs_opts->no_configfs)
+               ffs_dev_unlock();
+       if (ret)
+               return ERR_PTR(ret);
+       func->conf = c;
+       func->gadget = c->cdev->gadget;
+       ffs_data_get(func->ffs);
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to ffs_opts->bound access
+        */
+       if (!ffs_opts->refcnt) {
+               ret = functionfs_bind(func->ffs, c->cdev);
+               if (ret)
+                       /*
+                        * NOTE(review): the reference taken by
+                        * ffs_data_get() above does not appear to be
+                        * dropped on this path — verify against the
+                        * unbind/put paths.
+                        */
+                       return ERR_PTR(ret);
+       }
+       ffs_opts->refcnt++;
+       func->function.strings = func->ffs->stringtabs;
+       return ffs_opts;
+ }
+ /*
+  * Do the real work of binding: allocate one chunk holding endpoint
+  * state, per-speed descriptor arrays, the interface-number map, OS
+  * descriptor tables and a private copy of the raw descriptors, then
+  * run the ffs_do_descs() passes to claim endpoints and rewrite
+  * interface/string/endpoint numbers.
+  *
+  * Fix vs the previous version: the endpoint-init loop indexed
+  * ptr[ret] for ret = eps_count..1 into the 0-based, eps_count-element
+  * eps array — writing one element past the end and never marking
+  * slot 0 as unclaimed.  Iterate 0..eps_count-1 instead.
+  */
+ static int _ffs_func_bind(struct usb_configuration *c,
+                         struct usb_function *f)
+ {
+       struct ffs_function *func = ffs_func_from_usb(f);
+       struct ffs_data *ffs = func->ffs;
+       const int full = !!func->ffs->fs_descs_count;
+       const int high = gadget_is_dualspeed(func->gadget) &&
+               func->ffs->hs_descs_count;
+       const int super = gadget_is_superspeed(func->gadget) &&
+               func->ffs->ss_descs_count;
+       int fs_len, hs_len, ss_len, ret, i;
+       struct ffs_ep *eps_ptr;
+       /* Make it a single chunk, less management later on */
+       vla_group(d);
+       vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count);
+       vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs,
+               full ? ffs->fs_descs_count + 1 : 0);
+       vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs,
+               high ? ffs->hs_descs_count + 1 : 0);
+       vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs,
+               super ? ffs->ss_descs_count + 1 : 0);
+       vla_item_with_sz(d, short, inums, ffs->interfaces_count);
+       vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table,
+                        c->cdev->use_os_string ? ffs->interfaces_count : 0);
+       vla_item_with_sz(d, char[16], ext_compat,
+                        c->cdev->use_os_string ? ffs->interfaces_count : 0);
+       vla_item_with_sz(d, struct usb_os_desc, os_desc,
+                        c->cdev->use_os_string ? ffs->interfaces_count : 0);
+       vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop,
+                        ffs->ms_os_descs_ext_prop_count);
+       vla_item_with_sz(d, char, ext_prop_name,
+                        ffs->ms_os_descs_ext_prop_name_len);
+       vla_item_with_sz(d, char, ext_prop_data,
+                        ffs->ms_os_descs_ext_prop_data_len);
+       vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length);
+       char *vlabuf;
+       ENTER();
+       /* Has descriptors only for speeds gadget does not support */
+       if (unlikely(!(full | high | super)))
+               return -ENOTSUPP;
+       /* Allocate a single chunk, less management later on */
+       vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL);
+       if (unlikely(!vlabuf))
+               return -ENOMEM;
+       ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop);
+       ffs->ms_os_descs_ext_prop_name_avail =
+               vla_ptr(vlabuf, d, ext_prop_name);
+       ffs->ms_os_descs_ext_prop_data_avail =
+               vla_ptr(vlabuf, d, ext_prop_data);
+       /* Copy descriptors  */
+       memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs,
+              ffs->raw_descs_length);
+       memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
+       /* mark every endpoint slot (0-based) as unclaimed */
+       eps_ptr = vla_ptr(vlabuf, d, eps);
+       for (i = 0; i < ffs->eps_count; ++i)
+               eps_ptr[i].num = -1;
+       /* Save pointers
+        * d_eps == vlabuf, func->eps used to kfree vlabuf later
+       */
+       func->eps             = vla_ptr(vlabuf, d, eps);
+       func->interfaces_nums = vla_ptr(vlabuf, d, inums);
+       /*
+        * Go through all the endpoint descriptors and allocate
+        * endpoints first, so that later we can rewrite the endpoint
+        * numbers without worrying that it may be described later on.
+        */
+       if (likely(full)) {
+               func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
+               fs_len = ffs_do_descs(ffs->fs_descs_count,
+                                     vla_ptr(vlabuf, d, raw_descs),
+                                     d_raw_descs__sz,
+                                     __ffs_func_bind_do_descs, func);
+               if (unlikely(fs_len < 0)) {
+                       ret = fs_len;
+                       goto error;
+               }
+       } else {
+               fs_len = 0;
+       }
+       if (likely(high)) {
+               func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
+               hs_len = ffs_do_descs(ffs->hs_descs_count,
+                                     vla_ptr(vlabuf, d, raw_descs) + fs_len,
+                                     d_raw_descs__sz - fs_len,
+                                     __ffs_func_bind_do_descs, func);
+               if (unlikely(hs_len < 0)) {
+                       ret = hs_len;
+                       goto error;
+               }
+       } else {
+               hs_len = 0;
+       }
+       if (likely(super)) {
+               func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
+               ss_len = ffs_do_descs(ffs->ss_descs_count,
+                               vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
+                               d_raw_descs__sz - fs_len - hs_len,
+                               __ffs_func_bind_do_descs, func);
+               if (unlikely(ss_len < 0)) {
+                       ret = ss_len;
+                       goto error;
+               }
+       } else {
+               ss_len = 0;
+       }
+       /*
+        * Now handle interface numbers allocation and interface and
+        * endpoint numbers rewriting.  We can do that in one go
+        * now.
+        */
+       ret = ffs_do_descs(ffs->fs_descs_count +
+                          (high ? ffs->hs_descs_count : 0) +
+                          (super ? ffs->ss_descs_count : 0),
+                          vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
+                          __ffs_func_bind_do_nums, func);
+       if (unlikely(ret < 0))
+               goto error;
+       func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
+       if (c->cdev->use_os_string)
+               for (i = 0; i < ffs->interfaces_count; ++i) {
+                       struct usb_os_desc *desc;
+                       desc = func->function.os_desc_table[i].os_desc =
+                               vla_ptr(vlabuf, d, os_desc) +
+                               i * sizeof(struct usb_os_desc);
+                       desc->ext_compat_id =
+                               vla_ptr(vlabuf, d, ext_compat) + i * 16;
+                       INIT_LIST_HEAD(&desc->ext_prop);
+               }
+       ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+                             vla_ptr(vlabuf, d, raw_descs) +
+                             fs_len + hs_len + ss_len,
+                             d_raw_descs__sz - fs_len - hs_len - ss_len,
+                             __ffs_func_bind_do_os_desc, func);
+       if (unlikely(ret < 0))
+               goto error;
+       func->function.os_desc_n =
+               c->cdev->use_os_string ? ffs->interfaces_count : 0;
+       /* And we're done */
+       ffs_event_add(ffs, FUNCTIONFS_BIND);
+       return 0;
+ error:
+       /* XXX Do we need to release all claimed endpoints here? */
+       return ret;
+ }
+ /* usb_function ->bind: resolve the f_fs instance, then do the real bind. */
+ static int ffs_func_bind(struct usb_configuration *c,
+                        struct usb_function *f)
+ {
+       struct f_fs_opts *opts;
+       opts = ffs_do_functionfs_bind(f, c);
+       return IS_ERR(opts) ? PTR_ERR(opts) : _ffs_func_bind(c, f);
+ }
+ /* Other USB function hooks *************************************************/
+ /*
+  * ->set_alt() hook.  alt == (unsigned)-1 is an internal convention
+  * (used by ffs_func_disable()) meaning "shut the function down".
+  */
+ static int ffs_func_set_alt(struct usb_function *f,
+                           unsigned interface, unsigned alt)
+ {
+       struct ffs_function *func = ffs_func_from_usb(f);
+       struct ffs_data *ffs = func->ffs;
+       int ret = 0, intf;
+       if (alt != (unsigned)-1) {
+               intf = ffs_func_revmap_intf(func, interface);
+               if (unlikely(intf < 0))
+                       return intf;
+       }
+       /* tear down whatever was previously enabled */
+       if (ffs->func)
+               ffs_func_eps_disable(ffs->func);
+       if (ffs->state != FFS_ACTIVE)
+               return -ENODEV;
+       if (alt == (unsigned)-1) {
+               ffs->func = NULL;
+               ffs_event_add(ffs, FUNCTIONFS_DISABLE);
+               return 0;
+       }
+       ffs->func = func;
+       ret = ffs_func_eps_enable(func);
+       if (likely(ret >= 0))
+               ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+       return ret;
+ }
+ /* ->disable() hook: implemented as set_alt with the internal -1 value. */
+ static void ffs_func_disable(struct usb_function *f)
+ {
+       const unsigned no_alt = (unsigned)-1;
+       ffs_func_set_alt(f, 0, no_alt);
+ }
+ /*
+  * ->setup() hook: forward control requests aimed at one of our
+  * interfaces or endpoints to userspace as a FUNCTIONFS_SETUP event,
+  * with wIndex rewritten to the function-local number.
+  */
+ static int ffs_func_setup(struct usb_function *f,
+                         const struct usb_ctrlrequest *creq)
+ {
+       struct ffs_function *func = ffs_func_from_usb(f);
+       struct ffs_data *ffs = func->ffs;
+       unsigned long flags;
+       int ret;
+       ENTER();
+       pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
+       pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
+       pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
+       pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
+       pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
+       /*
+        * Most requests directed to interface go through here
+        * (notable exceptions are set/get interface) so we need to
+        * handle them.  All other either handled by composite or
+        * passed to usb_configuration->setup() (if one is set).  No
+        * matter, we will handle requests directed to endpoint here
+        * as well (as it's straightforward) but what to do with any
+        * other request?
+        */
+       if (ffs->state != FFS_ACTIVE)
+               return -ENODEV;
+       switch (creq->bRequestType & USB_RECIP_MASK) {
+       case USB_RECIP_INTERFACE:
+               ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
+               if (unlikely(ret < 0))
+                       return ret;
+               break;
+       case USB_RECIP_ENDPOINT:
+               ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
+               if (unlikely(ret < 0))
+                       return ret;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       /* stash the request (with remapped wIndex) and queue the event */
+       spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+       ffs->ev.setup = *creq;
+       ffs->ev.setup.wIndex = cpu_to_le16(ret);
+       __ffs_event_add(ffs, FUNCTIONFS_SETUP);
+       spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+       return 0;
+ }
+ static void ffs_func_suspend(struct usb_function *f)
+ {
+       ENTER();
+       ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+ }
+ static void ffs_func_resume(struct usb_function *f)
+ {
+       ENTER();
+       ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+ }
+ /* Endpoint and interface numbers reverse mapping ***************************/
+ static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
+ {
+       num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
+       return num ? num : -EDOM;
+ }
+ static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
+ {
+       short *nums = func->interfaces_nums;
+       unsigned count = func->ffs->interfaces_count;
+       for (; count; --count, ++nums) {
+               if (*nums >= 0 && *nums == intf)
+                       return nums - func->interfaces_nums;
+       }
+       return -EDOM;
+ }
+ /* Devices management *******************************************************/
+ static LIST_HEAD(ffs_devices);
+ static struct ffs_dev *_ffs_do_find_dev(const char *name)
+ {
+       struct ffs_dev *dev;
+       list_for_each_entry(dev, &ffs_devices, entry) {
+               if (!dev->name || !name)
+                       continue;
+               if (strcmp(dev->name, name) == 0)
+                       return dev;
+       }
+       return NULL;
+ }
+ /*
+  * ffs_lock must be taken by the caller of this function
+  */
+ static struct ffs_dev *_ffs_get_single_dev(void)
+ {
+       struct ffs_dev *dev;
+       if (list_is_singular(&ffs_devices)) {
+               dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
+               if (dev->single)
+                       return dev;
+       }
+       return NULL;
+ }
+ /*
+  * ffs_lock must be taken by the caller of this function
+  */
+ static struct ffs_dev *_ffs_find_dev(const char *name)
+ {
+       struct ffs_dev *dev;
+       dev = _ffs_get_single_dev();
+       if (dev)
+               return dev;
+       return _ffs_do_find_dev(name);
+ }
+ /* Configfs support *********************************************************/
+ static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
+ {
+       return container_of(to_config_group(item), struct f_fs_opts,
+                           func_inst.group);
+ }
+ static void ffs_attr_release(struct config_item *item)
+ {
+       struct f_fs_opts *opts = to_ffs_opts(item);
+       usb_put_function_instance(&opts->func_inst);
+ }
+ static struct configfs_item_operations ffs_item_ops = {
+       .release        = ffs_attr_release,
+ };
+ static struct config_item_type ffs_func_type = {
+       .ct_item_ops    = &ffs_item_ops,
+       .ct_owner       = THIS_MODULE,
+ };
+ /* Function registration interface ******************************************/
+ static void ffs_free_inst(struct usb_function_instance *f)
+ {
+       struct f_fs_opts *opts;
+       opts = to_f_fs_opts(f);
+       ffs_dev_lock();
+       _ffs_free_dev(opts->dev);
+       ffs_dev_unlock();
+       kfree(opts);
+ }
+ #define MAX_INST_NAME_LEN     40
+ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name)
+ {
+       struct f_fs_opts *opts;
+       char *ptr;
+       const char *tmp;
+       int name_len, ret;
+       name_len = strlen(name) + 1;
+       if (name_len > MAX_INST_NAME_LEN)
+               return -ENAMETOOLONG;
+       ptr = kstrndup(name, name_len, GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+       opts = to_f_fs_opts(fi);
+       tmp = NULL;
+       ffs_dev_lock();
+       tmp = opts->dev->name_allocated ? opts->dev->name : NULL;
+       ret = _ffs_name_dev(opts->dev, ptr);
+       if (ret) {
+               kfree(ptr);
+               ffs_dev_unlock();
+               return ret;
+       }
+       opts->dev->name_allocated = true;
+       ffs_dev_unlock();
+       kfree(tmp);
+       return 0;
+ }
+ static struct usb_function_instance *ffs_alloc_inst(void)
+ {
+       struct f_fs_opts *opts;
+       struct ffs_dev *dev;
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       opts->func_inst.set_inst_name = ffs_set_inst_name;
+       opts->func_inst.free_func_inst = ffs_free_inst;
+       ffs_dev_lock();
+       dev = _ffs_alloc_dev();
+       ffs_dev_unlock();
+       if (IS_ERR(dev)) {
+               kfree(opts);
+               return ERR_CAST(dev);
+       }
+       opts->dev = dev;
+       dev->opts = opts;
+       config_group_init_type_name(&opts->func_inst.group, "",
+                                   &ffs_func_type);
+       return &opts->func_inst;
+ }
+ static void ffs_free(struct usb_function *f)
+ {
+       kfree(ffs_func_from_usb(f));
+ }
+ static void ffs_func_unbind(struct usb_configuration *c,
+                           struct usb_function *f)
+ {
+       struct ffs_function *func = ffs_func_from_usb(f);
+       struct ffs_data *ffs = func->ffs;
+       struct f_fs_opts *opts =
+               container_of(f->fi, struct f_fs_opts, func_inst);
+       struct ffs_ep *ep = func->eps;
+       unsigned count = ffs->eps_count;
+       unsigned long flags;
+       ENTER();
+       if (ffs->func == func) {
+               ffs_func_eps_disable(func);
+               ffs->func = NULL;
+       }
+       if (!--opts->refcnt)
+               functionfs_unbind(ffs);
+       /* cleanup after autoconfig */
+       spin_lock_irqsave(&func->ffs->eps_lock, flags);
+       do {
+               if (ep->ep && ep->req)
+                       usb_ep_free_request(ep->ep, ep->req);
+               ep->req = NULL;
+               ++ep;
+       } while (--count);
+       spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+       kfree(func->eps);
+       func->eps = NULL;
+       /*
+        * eps, descriptors and interfaces_nums are allocated in the
+        * same chunk so only one free is required.
+        */
+       func->function.fs_descriptors = NULL;
+       func->function.hs_descriptors = NULL;
+       func->function.ss_descriptors = NULL;
+       func->interfaces_nums = NULL;
+       ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+ }
+ static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
+ {
+       struct ffs_function *func;
+       ENTER();
+       func = kzalloc(sizeof(*func), GFP_KERNEL);
+       if (unlikely(!func))
+               return ERR_PTR(-ENOMEM);
+       func->function.name    = "Function FS Gadget";
+       func->function.bind    = ffs_func_bind;
+       func->function.unbind  = ffs_func_unbind;
+       func->function.set_alt = ffs_func_set_alt;
+       func->function.disable = ffs_func_disable;
+       func->function.setup   = ffs_func_setup;
+       func->function.suspend = ffs_func_suspend;
+       func->function.resume  = ffs_func_resume;
+       func->function.free_func = ffs_free;
+       return &func->function;
+ }
+ /*
+  * ffs_lock must be taken by the caller of this function
+  */
+ static struct ffs_dev *_ffs_alloc_dev(void)
+ {
+       struct ffs_dev *dev;
+       int ret;
+       if (_ffs_get_single_dev())
+                       return ERR_PTR(-EBUSY);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return ERR_PTR(-ENOMEM);
+       if (list_empty(&ffs_devices)) {
+               ret = functionfs_init();
+               if (ret) {
+                       kfree(dev);
+                       return ERR_PTR(ret);
+               }
+       }
+       list_add(&dev->entry, &ffs_devices);
+       return dev;
+ }
+ /*
+  * ffs_lock must be taken by the caller of this function
+  * The caller is responsible for "name" being available whenever f_fs needs it
+  */
+ static int _ffs_name_dev(struct ffs_dev *dev, const char *name)
+ {
+       struct ffs_dev *existing;
+       existing = _ffs_do_find_dev(name);
+       if (existing)
+               return -EBUSY;
+       dev->name = name;
+       return 0;
+ }
+ /*
+  * The caller is responsible for "name" being available whenever f_fs needs it
+  */
+ int ffs_name_dev(struct ffs_dev *dev, const char *name)
+ {
+       int ret;
+       ffs_dev_lock();
+       ret = _ffs_name_dev(dev, name);
+       ffs_dev_unlock();
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(ffs_name_dev);
+ int ffs_single_dev(struct ffs_dev *dev)
+ {
+       int ret;
+       ret = 0;
+       ffs_dev_lock();
+       if (!list_is_singular(&ffs_devices))
+               ret = -EBUSY;
+       else
+               dev->single = true;
+       ffs_dev_unlock();
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(ffs_single_dev);
+ /*
+  * ffs_lock must be taken by the caller of this function
+  */
+ static void _ffs_free_dev(struct ffs_dev *dev)
+ {
+       list_del(&dev->entry);
+       if (dev->name_allocated)
+               kfree(dev->name);
+       kfree(dev);
+       if (list_empty(&ffs_devices))
+               functionfs_cleanup();
+ }
+ static void *ffs_acquire_dev(const char *dev_name)
+ {
+       struct ffs_dev *ffs_dev;
+       ENTER();
+       ffs_dev_lock();
+       ffs_dev = _ffs_find_dev(dev_name);
+       if (!ffs_dev)
+               ffs_dev = ERR_PTR(-ENOENT);
+       else if (ffs_dev->mounted)
+               ffs_dev = ERR_PTR(-EBUSY);
+       else if (ffs_dev->ffs_acquire_dev_callback &&
+           ffs_dev->ffs_acquire_dev_callback(ffs_dev))
+               ffs_dev = ERR_PTR(-ENOENT);
+       else
+               ffs_dev->mounted = true;
+       ffs_dev_unlock();
+       return ffs_dev;
+ }
+ static void ffs_release_dev(struct ffs_data *ffs_data)
+ {
+       struct ffs_dev *ffs_dev;
+       ENTER();
+       ffs_dev_lock();
+       ffs_dev = ffs_data->private_data;
+       if (ffs_dev) {
+               ffs_dev->mounted = false;
+               if (ffs_dev->ffs_release_dev_callback)
+                       ffs_dev->ffs_release_dev_callback(ffs_dev);
+       }
+       ffs_dev_unlock();
+ }
+ static int ffs_ready(struct ffs_data *ffs)
+ {
+       struct ffs_dev *ffs_obj;
+       int ret = 0;
+       ENTER();
+       ffs_dev_lock();
+       ffs_obj = ffs->private_data;
+       if (!ffs_obj) {
+               ret = -EINVAL;
+               goto done;
+       }
+       if (WARN_ON(ffs_obj->desc_ready)) {
+               ret = -EBUSY;
+               goto done;
+       }
+       ffs_obj->desc_ready = true;
+       ffs_obj->ffs_data = ffs;
+       if (ffs_obj->ffs_ready_callback)
+               ret = ffs_obj->ffs_ready_callback(ffs);
+ done:
+       ffs_dev_unlock();
+       return ret;
+ }
+ static void ffs_closed(struct ffs_data *ffs)
+ {
+       struct ffs_dev *ffs_obj;
+       ENTER();
+       ffs_dev_lock();
+       ffs_obj = ffs->private_data;
+       if (!ffs_obj)
+               goto done;
+       ffs_obj->desc_ready = false;
+       if (ffs_obj->ffs_closed_callback)
+               ffs_obj->ffs_closed_callback(ffs);
+       if (!ffs_obj->opts || ffs_obj->opts->no_configfs
+           || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
+               goto done;
+       unregister_gadget_item(ffs_obj->opts->
+                              func_inst.group.cg_item.ci_parent->ci_parent);
+ done:
+       ffs_dev_unlock();
+ }
+ /* Misc helper functions ****************************************************/
+ static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+ {
+       return nonblock
+               ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
+               : mutex_lock_interruptible(mutex);
+ }
+ static char *ffs_prepare_buffer(const char __user *buf, size_t len)
+ {
+       char *data;
+       if (unlikely(!len))
+               return NULL;
+       data = kmalloc(len, GFP_KERNEL);
+       if (unlikely(!data))
+               return ERR_PTR(-ENOMEM);
+       if (unlikely(__copy_from_user(data, buf, len))) {
+               kfree(data);
+               return ERR_PTR(-EFAULT);
+       }
+       pr_vdebug("Buffer from user space:\n");
+       ffs_dump_mem("", data, len);
+       return data;
+ }
+ DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Michal Nazarewicz");
index 0000000000000000000000000000000000000000,a7b6bbbd697d49f74219e095a6922c41e382a056..ddb09dc6d1f2382f556ce378dada0b13df1f8462
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1033 +1,1035 @@@
 -                      return PTR_ERR(f->os_desc_table);
+ /*
+  * f_rndis.c -- RNDIS link function driver
+  *
+  * Copyright (C) 2003-2005,2008 David Brownell
+  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+  * Copyright (C) 2008 Nokia Corporation
+  * Copyright (C) 2009 Samsung Electronics
+  *                    Author: Michal Nazarewicz (mina86@mina86.com)
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  */
+ /* #define VERBOSE_DEBUG */
+ #include <linux/slab.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/device.h>
+ #include <linux/etherdevice.h>
+ #include <linux/atomic.h>
+ #include "u_ether.h"
+ #include "u_ether_configfs.h"
+ #include "u_rndis.h"
+ #include "rndis.h"
+ #include "configfs.h"
+ /*
+  * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+  * been promoted instead of the standard CDC Ethernet.  The published RNDIS
+  * spec is ambiguous, incomplete, and needlessly complex.  Variants such as
+  * ActiveSync have even worse status in terms of specification.
+  *
+  * In short:  it's a protocol controlled by (and for) Microsoft, not for an
+  * Open ecosystem or markets.  Linux supports it *only* because Microsoft
+  * doesn't support the CDC Ethernet standard.
+  *
+  * The RNDIS data transfer model is complex, with multiple Ethernet packets
+  * per USB message, and out of band data.  The control model is built around
+  * what's essentially an "RNDIS RPC" protocol.  It's all wrapped in a CDC ACM
+  * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+  * useless (they're ignored).  RNDIS expects to be the only function in its
+  * configuration, so it's no real help if you need composite devices; and
+  * it expects to be the first configuration too.
+  *
+  * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+  * discount the fluff that its RPC can be made to deliver: it doesn't need
+  * a NOP altsetting for the data interface.  That lets it work on some of the
+  * "so smart it's stupid" hardware which takes over configuration changes
+  * from the software, and adds restrictions like "no altsettings".
+  *
+  * Unfortunately MSFT's RNDIS drivers are buggy.  They hang or oops, and
+  * have all sorts of contrary-to-specification oddities that can prevent
+  * them from working sanely.  Since bugfixes (or accurate specs, letting
+  * Linux work around those bugs) are unlikely to ever come from MSFT, you
+  * may want to avoid using RNDIS on purely operational grounds.
+  *
+  * Omissions from the RNDIS 1.0 specification include:
+  *
+  *   - Power management ... references data that's scattered around lots
+  *     of other documentation, which is incorrect/incomplete there too.
+  *
+  *   - There are various undocumented protocol requirements, like the need
+  *     to send garbage in some control-OUT messages.
+  *
+  *   - MS-Windows drivers sometimes emit undocumented requests.
+  */
+ struct f_rndis {
+       struct gether                   port;
+       u8                              ctrl_id, data_id;
+       u8                              ethaddr[ETH_ALEN];
+       u32                             vendorID;
+       const char                      *manufacturer;
+       int                             config;
+       struct usb_ep                   *notify;
+       struct usb_request              *notify_req;
+       atomic_t                        notify_count;
+ };
+ static inline struct f_rndis *func_to_rndis(struct usb_function *f)
+ {
+       return container_of(f, struct f_rndis, port.func);
+ }
+ /* peak (theoretical) bulk transfer rate in bits-per-second */
+ static unsigned int bitrate(struct usb_gadget *g)
+ {
+       if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+               return 13 * 1024 * 8 * 1000 * 8;
+       else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+               return 13 * 512 * 8 * 1000 * 8;
+       else
+               return 19 * 64 * 1 * 1000 * 8;
+ }
+ /*-------------------------------------------------------------------------*/
+ /*
+  */
+ #define RNDIS_STATUS_INTERVAL_MS      32
+ #define STATUS_BYTECOUNT              8       /* 8 bytes data */
+ /* interface descriptor: */
+ static struct usb_interface_descriptor rndis_control_intf = {
+       .bLength =              sizeof rndis_control_intf,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       /* .bInterfaceNumber = DYNAMIC */
+       /* status endpoint is optional; this could be patched later */
+       .bNumEndpoints =        1,
+       .bInterfaceClass =      USB_CLASS_COMM,
+       .bInterfaceSubClass =   USB_CDC_SUBCLASS_ACM,
+       .bInterfaceProtocol =   USB_CDC_ACM_PROTO_VENDOR,
+       /* .iInterface = DYNAMIC */
+ };
+ static struct usb_cdc_header_desc header_desc = {
+       .bLength =              sizeof header_desc,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubType =   USB_CDC_HEADER_TYPE,
+       .bcdCDC =               cpu_to_le16(0x0110),
+ };
+ static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
+       .bLength =              sizeof call_mgmt_descriptor,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubType =   USB_CDC_CALL_MANAGEMENT_TYPE,
+       .bmCapabilities =       0x00,
+       .bDataInterface =       0x01,
+ };
+ static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
+       .bLength =              sizeof rndis_acm_descriptor,
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubType =   USB_CDC_ACM_TYPE,
+       .bmCapabilities =       0x00,
+ };
+ static struct usb_cdc_union_desc rndis_union_desc = {
+       .bLength =              sizeof(rndis_union_desc),
+       .bDescriptorType =      USB_DT_CS_INTERFACE,
+       .bDescriptorSubType =   USB_CDC_UNION_TYPE,
+       /* .bMasterInterface0 = DYNAMIC */
+       /* .bSlaveInterface0 =  DYNAMIC */
+ };
+ /* the data interface has two bulk endpoints */
+ static struct usb_interface_descriptor rndis_data_intf = {
+       .bLength =              sizeof rndis_data_intf,
+       .bDescriptorType =      USB_DT_INTERFACE,
+       /* .bInterfaceNumber = DYNAMIC */
+       .bNumEndpoints =        2,
+       .bInterfaceClass =      USB_CLASS_CDC_DATA,
+       .bInterfaceSubClass =   0,
+       .bInterfaceProtocol =   0,
+       /* .iInterface = DYNAMIC */
+ };
+ static struct usb_interface_assoc_descriptor
+ rndis_iad_descriptor = {
+       .bLength =              sizeof rndis_iad_descriptor,
+       .bDescriptorType =      USB_DT_INTERFACE_ASSOCIATION,
+       .bFirstInterface =      0, /* XXX, hardcoded */
+       .bInterfaceCount =      2,      // control + data
+       .bFunctionClass =       USB_CLASS_COMM,
+       .bFunctionSubClass =    USB_CDC_SUBCLASS_ETHERNET,
+       .bFunctionProtocol =    USB_CDC_PROTO_NONE,
+       /* .iFunction = DYNAMIC */
+ };
+ /* full speed support: */
+ static struct usb_endpoint_descriptor fs_notify_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_XFER_INT,
+       .wMaxPacketSize =       cpu_to_le16(STATUS_BYTECOUNT),
+       .bInterval =            RNDIS_STATUS_INTERVAL_MS,
+ };
+ static struct usb_endpoint_descriptor fs_in_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_XFER_BULK,
+ };
+ static struct usb_endpoint_descriptor fs_out_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_OUT,
+       .bmAttributes =         USB_ENDPOINT_XFER_BULK,
+ };
+ static struct usb_descriptor_header *eth_fs_function[] = {
+       (struct usb_descriptor_header *) &rndis_iad_descriptor,
+       /* control interface matches ACM, not Ethernet */
+       (struct usb_descriptor_header *) &rndis_control_intf,
+       (struct usb_descriptor_header *) &header_desc,
+       (struct usb_descriptor_header *) &call_mgmt_descriptor,
+       (struct usb_descriptor_header *) &rndis_acm_descriptor,
+       (struct usb_descriptor_header *) &rndis_union_desc,
+       (struct usb_descriptor_header *) &fs_notify_desc,
+       /* data interface has no altsetting */
+       (struct usb_descriptor_header *) &rndis_data_intf,
+       (struct usb_descriptor_header *) &fs_in_desc,
+       (struct usb_descriptor_header *) &fs_out_desc,
+       NULL,
+ };
+ /* high speed support: */
+ static struct usb_endpoint_descriptor hs_notify_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_XFER_INT,
+       .wMaxPacketSize =       cpu_to_le16(STATUS_BYTECOUNT),
+       .bInterval =            USB_MS_TO_HS_INTERVAL(RNDIS_STATUS_INTERVAL_MS)
+ };
+ static struct usb_endpoint_descriptor hs_in_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize =       cpu_to_le16(512),
+ };
+ static struct usb_endpoint_descriptor hs_out_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_OUT,
+       .bmAttributes =         USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize =       cpu_to_le16(512),
+ };
+ static struct usb_descriptor_header *eth_hs_function[] = {
+       (struct usb_descriptor_header *) &rndis_iad_descriptor,
+       /* control interface matches ACM, not Ethernet */
+       (struct usb_descriptor_header *) &rndis_control_intf,
+       (struct usb_descriptor_header *) &header_desc,
+       (struct usb_descriptor_header *) &call_mgmt_descriptor,
+       (struct usb_descriptor_header *) &rndis_acm_descriptor,
+       (struct usb_descriptor_header *) &rndis_union_desc,
+       (struct usb_descriptor_header *) &hs_notify_desc,
+       /* data interface has no altsetting */
+       (struct usb_descriptor_header *) &rndis_data_intf,
+       (struct usb_descriptor_header *) &hs_in_desc,
+       (struct usb_descriptor_header *) &hs_out_desc,
+       NULL,
+ };
+ /* super speed support: */
+ static struct usb_endpoint_descriptor ss_notify_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_XFER_INT,
+       .wMaxPacketSize =       cpu_to_le16(STATUS_BYTECOUNT),
+       .bInterval =            USB_MS_TO_HS_INTERVAL(RNDIS_STATUS_INTERVAL_MS)
+ };
+ static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+       .bLength =              sizeof ss_intr_comp_desc,
+       .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+       /* the following 3 values can be tweaked if necessary */
+       /* .bMaxBurst =         0, */
+       /* .bmAttributes =      0, */
+       .wBytesPerInterval =    cpu_to_le16(STATUS_BYTECOUNT),
+ };
+ static struct usb_endpoint_descriptor ss_in_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_IN,
+       .bmAttributes =         USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize =       cpu_to_le16(1024),
+ };
+ static struct usb_endpoint_descriptor ss_out_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     USB_DIR_OUT,
+       .bmAttributes =         USB_ENDPOINT_XFER_BULK,
+       .wMaxPacketSize =       cpu_to_le16(1024),
+ };
+ static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+       .bLength =              sizeof ss_bulk_comp_desc,
+       .bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+       /* the following 2 values can be tweaked if necessary */
+       /* .bMaxBurst =         0, */
+       /* .bmAttributes =      0, */
+ };
+ static struct usb_descriptor_header *eth_ss_function[] = {
+       (struct usb_descriptor_header *) &rndis_iad_descriptor,
+       /* control interface matches ACM, not Ethernet */
+       (struct usb_descriptor_header *) &rndis_control_intf,
+       (struct usb_descriptor_header *) &header_desc,
+       (struct usb_descriptor_header *) &call_mgmt_descriptor,
+       (struct usb_descriptor_header *) &rndis_acm_descriptor,
+       (struct usb_descriptor_header *) &rndis_union_desc,
+       (struct usb_descriptor_header *) &ss_notify_desc,
+       (struct usb_descriptor_header *) &ss_intr_comp_desc,
+       /* data interface has no altsetting */
+       (struct usb_descriptor_header *) &rndis_data_intf,
+       (struct usb_descriptor_header *) &ss_in_desc,
+       (struct usb_descriptor_header *) &ss_bulk_comp_desc,
+       (struct usb_descriptor_header *) &ss_out_desc,
+       (struct usb_descriptor_header *) &ss_bulk_comp_desc,
+       NULL,
+ };
+ /* string descriptors: */
+ static struct usb_string rndis_string_defs[] = {
+       [0].s = "RNDIS Communications Control",
+       [1].s = "RNDIS Ethernet Data",
+       [2].s = "RNDIS",
+       {  } /* end of list */
+ };
+ static struct usb_gadget_strings rndis_string_table = {
+       .language =             0x0409, /* en-us */
+       .strings =              rndis_string_defs,
+ };
+ static struct usb_gadget_strings *rndis_strings[] = {
+       &rndis_string_table,
+       NULL,
+ };
+ /*-------------------------------------------------------------------------*/
+ static struct sk_buff *rndis_add_header(struct gether *port,
+                                       struct sk_buff *skb)
+ {
+       struct sk_buff *skb2;
+       skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+       if (skb2)
+               rndis_add_hdr(skb2);
+       dev_kfree_skb(skb);
+       return skb2;
+ }
+ static void rndis_response_available(void *_rndis)
+ {
+       struct f_rndis                  *rndis = _rndis;
+       struct usb_request              *req = rndis->notify_req;
+       struct usb_composite_dev        *cdev = rndis->port.func.config->cdev;
+       __le32                          *data = req->buf;
+       int                             status;
+       if (atomic_inc_return(&rndis->notify_count) != 1)
+               return;
+       /* Send RNDIS RESPONSE_AVAILABLE notification; a
+        * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+        *
+        * This is the only notification defined by RNDIS.
+        */
+       data[0] = cpu_to_le32(1);
+       data[1] = cpu_to_le32(0);
+       status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+       if (status) {
+               atomic_dec(&rndis->notify_count);
+               DBG(cdev, "notify/0 --> %d\n", status);
+       }
+ }
+ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+       struct f_rndis                  *rndis = req->context;
+       struct usb_composite_dev        *cdev = rndis->port.func.config->cdev;
+       int                             status = req->status;
+       /* after TX:
+        *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+        *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+        */
+       switch (status) {
+       case -ECONNRESET:
+       case -ESHUTDOWN:
+               /* connection gone */
+               atomic_set(&rndis->notify_count, 0);
+               break;
+       default:
+               DBG(cdev, "RNDIS %s response error %d, %d/%d\n",
+                       ep->name, status,
+                       req->actual, req->length);
+               /* FALLTHROUGH */
+       case 0:
+               if (ep != rndis->notify)
+                       break;
+               /* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+                * notifications by resending until we're done
+                */
+               if (atomic_dec_and_test(&rndis->notify_count))
+                       break;
+               status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+               if (status) {
+                       atomic_dec(&rndis->notify_count);
+                       DBG(cdev, "notify/1 --> %d\n", status);
+               }
+               break;
+       }
+ }
+ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+       struct f_rndis                  *rndis = req->context;
+       int                             status;
+       /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+ //    spin_lock(&dev->lock);
+       status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+       if (status < 0)
+               pr_err("RNDIS command error %d, %d/%d\n",
+                       status, req->actual, req->length);
+ //    spin_unlock(&dev->lock);
+ }
+ static int
+ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+ {
+       struct f_rndis          *rndis = func_to_rndis(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       struct usb_request      *req = cdev->req;
+       int                     value = -EOPNOTSUPP;
+       u16                     w_index = le16_to_cpu(ctrl->wIndex);
+       u16                     w_value = le16_to_cpu(ctrl->wValue);
+       u16                     w_length = le16_to_cpu(ctrl->wLength);
+       /* composite driver infrastructure handles everything except
+        * CDC class messages; interface activation uses set_alt().
+        */
+       switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+       /* RNDIS uses the CDC command encapsulation mechanism to implement
+        * an RPC scheme, with much getting/setting of attributes by OID.
+        */
+       case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+                       | USB_CDC_SEND_ENCAPSULATED_COMMAND:
+               if (w_value || w_index != rndis->ctrl_id)
+                       goto invalid;
+               /* read the request; process it later */
+               value = w_length;
+               req->complete = rndis_command_complete;
+               req->context = rndis;
+               /* later, rndis_response_available() sends a notification */
+               break;
+       case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+                       | USB_CDC_GET_ENCAPSULATED_RESPONSE:
+               if (w_value || w_index != rndis->ctrl_id)
+                       goto invalid;
+               else {
+                       u8 *buf;
+                       u32 n;
+                       /* return the result */
+                       buf = rndis_get_next_response(rndis->config, &n);
+                       if (buf) {
+                               memcpy(req->buf, buf, n);
+                               req->complete = rndis_response_complete;
+                               req->context = rndis;
+                               rndis_free_response(rndis->config, buf);
+                               value = n;
+                       }
+                       /* else stalls ... spec says to avoid that */
+               }
+               break;
+       default:
+ invalid:
+               VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+       }
+       /* respond with data transfer or status phase? */
+       if (value >= 0) {
+               DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, w_index, w_length);
+               req->zero = (value < w_length);
+               req->length = value;
+               value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0)
+                       ERROR(cdev, "rndis response on err %d\n", value);
+       }
+       /* device either stalls (value < 0) or reports success */
+       return value;
+ }
+ /* set_alt() callback: activate (or reset) the control or data interface.
+  * RNDIS exposes only altsetting 0 on both interfaces, so "set_alt" here
+  * means (re)enable the endpoints for whichever interface @intf names.
+  * Returns 0 on success, -EINVAL on an unknown interface or if endpoint
+  * configuration fails, or the gether_connect() error for the data path.
+  */
+ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ {
+       struct f_rndis          *rndis = func_to_rndis(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       /* we know alt == 0 */
+       if (intf == rndis->ctrl_id) {
+               /* driver_data non-NULL means the notify ep is live: tear it
+                * down before re-enabling at the (possibly new) speed.
+                */
+               if (rndis->notify->driver_data) {
+                       VDBG(cdev, "reset rndis control %d\n", intf);
+                       usb_ep_disable(rndis->notify);
+               }
+               if (!rndis->notify->desc) {
+                       VDBG(cdev, "init rndis ctrl %d\n", intf);
+                       if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+                               goto fail;
+               }
+               usb_ep_enable(rndis->notify);
+               rndis->notify->driver_data = rndis;
+       } else if (intf == rndis->data_id) {
+               struct net_device       *net;
+               if (rndis->port.in_ep->driver_data) {
+                       DBG(cdev, "reset rndis\n");
+                       gether_disconnect(&rndis->port);
+               }
+               if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+                       DBG(cdev, "init rndis\n");
+                       if (config_ep_by_speed(cdev->gadget, f,
+                                              rndis->port.in_ep) ||
+                           config_ep_by_speed(cdev->gadget, f,
+                                              rndis->port.out_ep)) {
+                               rndis->port.in_ep->desc = NULL;
+                               rndis->port.out_ep->desc = NULL;
+                               goto fail;
+                       }
+               }
+               /* Avoid ZLPs; they can be troublesome. */
+               rndis->port.is_zlp_ok = false;
+               /* RNDIS should be in the "RNDIS uninitialized" state,
+                * either never activated or after rndis_uninit().
+                *
+                * We don't want data to flow here until a nonzero packet
+                * filter is set, at which point it enters "RNDIS data
+                * initialized" state ... but we do want the endpoints
+                * to be activated.  It's a strange little state.
+                *
+                * REVISIT the RNDIS gadget code has done this wrong for a
+                * very long time.  We need another call to the link layer
+                * code -- gether_updown(...bool) maybe -- to do it right.
+                */
+               rndis->port.cdc_filter = 0;
+               DBG(cdev, "RNDIS RX/TX early activation ... \n");
+               net = gether_connect(&rndis->port);
+               if (IS_ERR(net))
+                       return PTR_ERR(net);
+               rndis_set_param_dev(rndis->config, net,
+                               &rndis->port.cdc_filter);
+       } else
+               goto fail;
+       return 0;
+ fail:
+       return -EINVAL;
+ }
+ /* disable() callback: quiesce the function on disconnect/reset.
+  * Uses notify->driver_data as the "was active" flag; tears down the RNDIS
+  * state machine, the data link, and the notification endpoint in turn.
+  */
+ static void rndis_disable(struct usb_function *f)
+ {
+       struct f_rndis          *rndis = func_to_rndis(f);
+       struct usb_composite_dev *cdev = f->config->cdev;
+       /* already inactive: nothing to do */
+       if (!rndis->notify->driver_data)
+               return;
+       DBG(cdev, "rndis deactivated\n");
+       rndis_uninit(rndis->config);
+       gether_disconnect(&rndis->port);
+       usb_ep_disable(rndis->notify);
+       rndis->notify->driver_data = NULL;
+ }
+ /*-------------------------------------------------------------------------*/
+ /*
+  * This isn't quite the same mechanism as CDC Ethernet, since the
+  * notification scheme passes less data, but the same set of link
+  * states must be tested.  A key difference is that altsettings are
+  * not used to tell whether the link should send packets or not.
+  */
+ /* Link-up hook from u_ether: report medium speed to the RNDIS state
+  * machine and signal "connected" to the host via the notify channel.
+  * bitrate() is scaled by 100 because RNDIS speed is in 100bps units.
+  */
+ static void rndis_open(struct gether *geth)
+ {
+       struct f_rndis          *rndis = func_to_rndis(&geth->func);
+       struct usb_composite_dev *cdev = geth->func.config->cdev;
+       DBG(cdev, "%s\n", __func__);
+       rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
+                               bitrate(cdev->gadget) / 100);
+       rndis_signal_connect(rndis->config);
+ }
+ /* Link-down hook from u_ether: zero the reported medium speed and
+  * signal "disconnected" to the host.
+  */
+ static void rndis_close(struct gether *geth)
+ {
+       struct f_rndis          *rndis = func_to_rndis(&geth->func);
+       DBG(geth->func.config->cdev, "%s\n", __func__);
+       rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
+       rndis_signal_disconnect(rndis->config);
+ }
+ /*-------------------------------------------------------------------------*/
+ /* Some controllers can't support RNDIS ... */
+ /* Controller capability gate for RNDIS; currently always true (the
+  * historical hardware checks have been removed), kept as a hook.
+  */
+ static inline bool can_support_rndis(struct usb_configuration *c)
+ {
+       /* everything else is *presumably* fine */
+       return true;
+ }
+ /* ethernet function driver setup/binding */
+ /* bind() callback: allocate interface IDs, endpoints, the notification
+  * request/buffer, and the OS descriptor table (when the host requests
+  * Microsoft OS strings), then register descriptors for all speeds.
+  * On any failure everything allocated so far is released and the
+  * negative errno is returned; on success returns 0.
+  */
+ static int
+ rndis_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+       struct usb_composite_dev *cdev = c->cdev;
+       struct f_rndis          *rndis = func_to_rndis(f);
+       struct usb_string       *us;
+       int                     status;
+       struct usb_ep           *ep;
+       struct f_rndis_opts *rndis_opts;
+       if (!can_support_rndis(c))
+               return -EINVAL;
+       rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst);
+       if (cdev->use_os_string) {
+               f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+                                          GFP_KERNEL);
+               if (!f->os_desc_table)
 -                                     THIS_MODULE);
++                      return -ENOMEM;
+               f->os_desc_n = 1;
+               f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
+       }
+       /*
+        * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
+        * configurations are bound in sequence with list_for_each_entry,
+        * in each configuration its functions are bound in sequence
+        * with list_for_each_entry, so we assume no race condition
+        * with regard to rndis_opts->bound access
+        */
+       if (!rndis_opts->bound) {
+               gether_set_gadget(rndis_opts->net, cdev->gadget);
+               status = gether_register_netdev(rndis_opts->net);
+               if (status)
+                       goto fail;
+               rndis_opts->bound = true;
+       }
+       us = usb_gstrings_attach(cdev, rndis_strings,
+                                ARRAY_SIZE(rndis_string_defs));
+       if (IS_ERR(us)) {
+               status = PTR_ERR(us);
+               goto fail;
+       }
+       rndis_control_intf.iInterface = us[0].id;
+       rndis_data_intf.iInterface = us[1].id;
+       rndis_iad_descriptor.iFunction = us[2].id;
+       /* allocate instance-specific interface IDs */
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       rndis->ctrl_id = status;
+       rndis_iad_descriptor.bFirstInterface = status;
+       rndis_control_intf.bInterfaceNumber = status;
+       rndis_union_desc.bMasterInterface0 = status;
+       if (cdev->use_os_string)
+               f->os_desc_table[0].if_id =
+                       rndis_iad_descriptor.bFirstInterface;
+       status = usb_interface_id(c, f);
+       if (status < 0)
+               goto fail;
+       rndis->data_id = status;
+       rndis_data_intf.bInterfaceNumber = status;
+       rndis_union_desc.bSlaveInterface0 = status;
+       status = -ENODEV;
+       /* allocate instance-specific endpoints */
+       ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
+       if (!ep)
+               goto fail;
+       rndis->port.in_ep = ep;
+       ep->driver_data = cdev; /* claim */
+       ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
+       if (!ep)
+               goto fail;
+       rndis->port.out_ep = ep;
+       ep->driver_data = cdev; /* claim */
+       /* NOTE:  a status/notification endpoint is, strictly speaking,
+        * optional.  We don't treat it that way though!  It's simpler,
+        * and some newer profiles don't treat it as optional.
+        */
+       ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
+       if (!ep)
+               goto fail;
+       rndis->notify = ep;
+       ep->driver_data = cdev; /* claim */
+       status = -ENOMEM;
+       /* allocate notification request and buffer */
+       rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+       if (!rndis->notify_req)
+               goto fail;
+       rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
+       if (!rndis->notify_req->buf)
+               goto fail;
+       rndis->notify_req->length = STATUS_BYTECOUNT;
+       rndis->notify_req->context = rndis;
+       rndis->notify_req->complete = rndis_response_complete;
+       /* support all relevant hardware speeds... we expect that when
+        * hardware is dual speed, all bulk-capable endpoints work at
+        * both speeds
+        */
+       hs_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress;
+       hs_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress;
+       hs_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
+       ss_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress;
+       ss_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress;
+       ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress;
+       status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function,
+                       eth_ss_function);
+       if (status)
+               goto fail;
+       rndis->port.open = rndis_open;
+       rndis->port.close = rndis_close;
+       rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
+       rndis_set_host_mac(rndis->config, rndis->ethaddr);
+       if (rndis->manufacturer && rndis->vendorID &&
+                       rndis_set_param_vendor(rndis->config, rndis->vendorID,
+                                              rndis->manufacturer))
+               goto fail;
+       /* NOTE:  all that is done without knowing or caring about
+        * the network link ... which is unavailable to this code
+        * until we're activated via set_alt().
+        */
+       DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+                       gadget_is_superspeed(c->cdev->gadget) ? "super" :
+                       gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+                       rndis->port.in_ep->name, rndis->port.out_ep->name,
+                       rndis->notify->name);
+       return 0;
+ fail:
+       /* unified unwind: frees are safe on NULL / not-yet-allocated state */
+       kfree(f->os_desc_table);
+       f->os_desc_n = 0;
+       usb_free_all_descriptors(f);
+       if (rndis->notify_req) {
+               kfree(rndis->notify_req->buf);
+               usb_ep_free_request(rndis->notify, rndis->notify_req);
+       }
+       /* we might as well release our claims on endpoints */
+       if (rndis->notify)
+               rndis->notify->driver_data = NULL;
+       if (rndis->port.out_ep)
+               rndis->port.out_ep->driver_data = NULL;
+       if (rndis->port.in_ep)
+               rndis->port.in_ep->driver_data = NULL;
+       ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+       return status;
+ }
+ /* Hand this function instance an externally owned net_device (@net),
+  * discarding the one created by rndis_alloc_inst().  Marking the
+  * instance "borrowed" stops rndis_free_inst() from freeing @net later;
+  * ownership stays with the caller.
+  */
+ void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
+ {
+       struct f_rndis_opts *opts;
+       opts = container_of(f, struct f_rndis_opts, func_inst);
+       /* dispose of the default netdev by whichever path applies */
+       if (opts->bound)
+               gether_cleanup(netdev_priv(opts->net));
+       else
+               free_netdev(opts->net);
+       opts->borrowed_net = opts->bound = true;
+       opts->net = net;
+ }
+ EXPORT_SYMBOL_GPL(rndis_borrow_net);
+ /* configfs item -> f_rndis_opts accessor used by the attribute macros */
+ static inline struct f_rndis_opts *to_f_rndis_opts(struct config_item *item)
+ {
+       return container_of(to_config_group(item), struct f_rndis_opts,
+                           func_inst.group);
+ }
+ /* configfs plumbing: the USB_ETHERNET_CONFIGFS_* macros (from u_ether
+  * configfs helpers) expand to the item ops and the show/store attribute
+  * definitions referenced in rndis_attrs below.
+  */
+ /* f_rndis_item_ops */
+ USB_ETHERNET_CONFIGFS_ITEM(rndis);
+ /* f_rndis_opts_dev_addr */
+ USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(rndis);
+ /* f_rndis_opts_host_addr */
+ USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(rndis);
+ /* f_rndis_opts_qmult */
+ USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(rndis);
+ /* f_rndis_opts_ifname */
+ USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis);
+ static struct configfs_attribute *rndis_attrs[] = {
+       &f_rndis_opts_dev_addr.attr,
+       &f_rndis_opts_host_addr.attr,
+       &f_rndis_opts_qmult.attr,
+       &f_rndis_opts_ifname.attr,
+       NULL,
+ };
+ static struct config_item_type rndis_func_type = {
+       .ct_item_ops    = &rndis_item_ops,
+       .ct_attrs       = rndis_attrs,
+       .ct_owner       = THIS_MODULE,
+ };
+ /* free_func_inst callback: release the opts and, unless the netdev was
+  * borrowed (see rndis_borrow_net), the net_device too -- via
+  * gether_cleanup() if it was registered, plain free_netdev() otherwise.
+  */
+ static void rndis_free_inst(struct usb_function_instance *f)
+ {
+       struct f_rndis_opts *opts;
+       opts = container_of(f, struct f_rndis_opts, func_inst);
+       if (!opts->borrowed_net) {
+               if (opts->bound)
+                       gether_cleanup(netdev_priv(opts->net));
+               else
+                       free_netdev(opts->net);
+       }
+       kfree(opts->rndis_os_desc.group.default_groups); /* single VLA chunk */
+       kfree(opts);
+ }
+ /* Allocate one RNDIS function instance: opts struct, default net_device,
+  * OS-descriptor "interface.rndis" configfs subtree, and the configfs
+  * group itself.  Returns the embedded usb_function_instance or ERR_PTR.
+  */
+ static struct usb_function_instance *rndis_alloc_inst(void)
+ {
+       struct f_rndis_opts *opts;
+       struct usb_os_desc *descs[1];
++      char *names[1];
+       opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+       if (!opts)
+               return ERR_PTR(-ENOMEM);
+       opts->rndis_os_desc.ext_compat_id = opts->rndis_ext_compat_id;
+       mutex_init(&opts->lock);
+       opts->func_inst.free_func_inst = rndis_free_inst;
+       opts->net = gether_setup_default();
+       if (IS_ERR(opts->net)) {
+               struct net_device *net = opts->net;
+               kfree(opts);
+               return ERR_CAST(net);
+       }
+       INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop);
+       descs[0] = &opts->rndis_os_desc;
++      names[0] = "rndis";
+       usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
++                                     names, THIS_MODULE);
+       config_group_init_type_name(&opts->func_inst.group, "",
+                                   &rndis_func_type);
+       return &opts->func_inst;
+ }
+ /* free_func callback: deregister the RNDIS config, free the function
+  * struct, and drop the per-instance refcount under opts->lock.
+  */
+ static void rndis_free(struct usb_function *f)
+ {
+       struct f_rndis *rndis;
+       struct f_rndis_opts *opts;
+       rndis = func_to_rndis(f);
+       rndis_deregister(rndis->config);
+       opts = container_of(f->fi, struct f_rndis_opts, func_inst);
+       kfree(rndis);
+       mutex_lock(&opts->lock);
+       opts->refcnt--;
+       mutex_unlock(&opts->lock);
+ }
+ /* unbind() callback: mirror image of rndis_bind() -- free the OS
+  * descriptor table, all speed descriptors, and the notify request/buffer.
+  */
+ static void rndis_unbind(struct usb_configuration *c, struct usb_function *f)
+ {
+       struct f_rndis          *rndis = func_to_rndis(f);
+       kfree(f->os_desc_table);
+       f->os_desc_n = 0;
+       usb_free_all_descriptors(f);
+       kfree(rndis->notify_req->buf);
+       usb_ep_free_request(rndis->notify, rndis->notify_req);
+ }
+ /* alloc callback: build one struct f_rndis bound to @fi's net_device,
+  * snapshot the instance parameters under opts->lock, wire up the
+  * usb_function ops, and register with the RNDIS state machine.
+  * Returns the embedded usb_function or ERR_PTR on failure.
+  */
+ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
+ {
+       struct f_rndis  *rndis;
+       struct f_rndis_opts *opts;
+       int status;
+       /* allocate and initialize one new instance */
+       rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
+       if (!rndis)
+               return ERR_PTR(-ENOMEM);
+       opts = container_of(fi, struct f_rndis_opts, func_inst);
+       mutex_lock(&opts->lock);
+       opts->refcnt++;
+       gether_get_host_addr_u8(opts->net, rndis->ethaddr);
+       rndis->vendorID = opts->vendor_id;
+       rndis->manufacturer = opts->manufacturer;
+       rndis->port.ioport = netdev_priv(opts->net);
+       mutex_unlock(&opts->lock);
+       /* RNDIS activates when the host changes this filter */
+       rndis->port.cdc_filter = 0;
+       /* RNDIS has special (and complex) framing */
+       rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+       rndis->port.wrap = rndis_add_header;
+       rndis->port.unwrap = rndis_rm_hdr;
+       rndis->port.func.name = "rndis";
+       /* descriptors are per-instance copies */
+       rndis->port.func.bind = rndis_bind;
+       rndis->port.func.unbind = rndis_unbind;
+       rndis->port.func.set_alt = rndis_set_alt;
+       rndis->port.func.setup = rndis_setup;
+       rndis->port.func.disable = rndis_disable;
+       rndis->port.func.free_func = rndis_free;
+       /* rndis_register() returns a config number (>= 0) or an error */
+       status = rndis_register(rndis_response_available, rndis);
+       if (status < 0) {
+               kfree(rndis);
+               return ERR_PTR(status);
+       }
+       rndis->config = status;
+       return &rndis->port.func;
+ }
+ /* Module glue: DECLARE_USB_FUNCTION creates "rndisusb_func" from the
+  * alloc_inst/alloc pair; init also brings up the shared RNDIS protocol
+  * core (rndis_init) before registering the function with the gadget
+  * framework, and exit undoes both in reverse order.
+  */
+ DECLARE_USB_FUNCTION(rndis, rndis_alloc_inst, rndis_alloc);
+ static int __init rndis_mod_init(void)
+ {
+       int ret;
+       ret = rndis_init();
+       if (ret)
+               return ret;
+       return usb_function_register(&rndisusb_func);
+ }
+ module_init(rndis_mod_init);
+ static void __exit rndis_mod_exit(void)
+ {
+       usb_function_unregister(&rndisusb_func);
+       rndis_exit();
+ }
+ module_exit(rndis_mod_exit);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("David Brownell");
index 0000000000000000000000000000000000000000,6e6f87656e7b0f5795caa17f387aa6cf89de2d41..d50adda913cfa6d57e1e6191ab5444d6981af47a
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1179 +1,1182 @@@
+ /*
+  * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+  *
+  * Copyright (C) 2003-2005,2008 David Brownell
+  * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+  * Copyright (C) 2008 Nokia Corporation
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  */
+ /* #define VERBOSE_DEBUG */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/gfp.h>
+ #include <linux/device.h>
+ #include <linux/ctype.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+ #include <linux/if_vlan.h>
+ #include "u_ether.h"
+ /*
+  * This component encapsulates the Ethernet link glue needed to provide
+  * one (!) network link through the USB gadget stack, normally "usb0".
+  *
+  * The control and data models are handled by the function driver which
+  * connects to this code; such as CDC Ethernet (ECM or EEM),
+  * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
+  * management.
+  *
+  * Link level addressing is handled by this component using module
+  * parameters; if no such parameters are provided, random link level
+  * addresses are used.  Each end of the link uses one address.  The
+  * host end address is exported in various ways, and is often recorded
+  * in configuration databases.
+  *
+  * The driver which assembles each configuration using such a link is
+  * responsible for ensuring that each configuration includes at most one
+  * instance of this network link.  (The network layer provides ways for
+  * this single "physical" link to be used by multiple virtual links.)
+  */
+ #define UETH__VERSION "29-May-2008"
+ /* Per-link state for one Ethernet-over-USB network device ("usb0").
+  * Lifetime: allocated by the setup path, torn down by gether_cleanup().
+  */
+ struct eth_dev {
+       /* lock is held while accessing port_usb
+        */
+       spinlock_t              lock;
+       struct gether           *port_usb;      /* NULL while link is down */
+       struct net_device       *net;
+       struct usb_gadget       *gadget;
+       spinlock_t              req_lock;       /* guard {rx,tx}_reqs */
+       struct list_head        tx_reqs, rx_reqs;       /* free request pools */
+       atomic_t                tx_qlen;        /* in-flight TX requests */
+       struct sk_buff_head     rx_frames;      /* unwrapped, pending netif_rx */
+       unsigned                qmult;          /* queue depth multiplier */
+       unsigned                header_len;     /* function framing overhead */
+       /* optional framing hooks supplied by the function driver (RNDIS etc.) */
+       struct sk_buff          *(*wrap)(struct gether *, struct sk_buff *skb);
+       int                     (*unwrap)(struct gether *,
+                                               struct sk_buff *skb,
+                                               struct sk_buff_head *list);
+       struct work_struct      work;           /* runs eth_work() */
+       unsigned long           todo;           /* deferred-work flag bits */
+ #define       WORK_RX_MEMORY          0
+       bool                    zlp;
+       u8                      host_mac[ETH_ALEN];
+       u8                      dev_mac[ETH_ALEN];
+ };
+ /*-------------------------------------------------------------------------*/
+ #define RX_EXTRA      20      /* bytes guarding against rx overflows */
+ #define DEFAULT_QLEN  2       /* double buffering by default */
+ /* for dual-speed hardware, use deeper queues at high/super speed */
+ /* Request-queue depth for one direction: scale the double-buffer
+  * baseline by qmult when running at high/super speed on dual-speed
+  * hardware; full speed always uses DEFAULT_QLEN.
+  */
+ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
+ {
+       if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+                                           gadget->speed == USB_SPEED_SUPER))
+               return qmult * DEFAULT_QLEN;
+       else
+               return DEFAULT_QLEN;
+ }
+ /*-------------------------------------------------------------------------*/
+ /* REVISIT there must be a better way than having two sets
+  * of debug calls ...
+  */
+ /* Local printk wrappers keyed on the netdev name; the #undefs shadow the
+  * composite-framework macros of the same names that take a cdev.
+  */
+ #undef DBG
+ #undef VDBG
+ #undef ERROR
+ #undef INFO
+ #define xprintk(d, level, fmt, args...) \
+       printk(level "%s: " fmt , (d)->net->name , ## args)
+ #ifdef DEBUG
+ #undef DEBUG
+ #define DBG(dev, fmt, args...) \
+       xprintk(dev , KERN_DEBUG , fmt , ## args)
+ #else
+ #define DBG(dev, fmt, args...) \
+       do { } while (0)
+ #endif /* DEBUG */
+ #ifdef VERBOSE_DEBUG
+ #define VDBG  DBG
+ #else
+ #define VDBG(dev, fmt, args...) \
+       do { } while (0)
+ #endif /* VERBOSE_DEBUG */
+ #define ERROR(dev, fmt, args...) \
+       xprintk(dev , KERN_ERR , fmt , ## args)
+ #define INFO(dev, fmt, args...) \
+       xprintk(dev , KERN_INFO , fmt , ## args)
+ /*-------------------------------------------------------------------------*/
+ /* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+ /* ndo_change_mtu: accept a new MTU in (ETH_HLEN, ETH_FRAME_LEN] but only
+  * while the USB link is down -- a live peer has no way to learn the new
+  * size.  Returns 0, -EBUSY (link up) or -ERANGE (bad value).
+  */
+ static int ueth_change_mtu(struct net_device *net, int new_mtu)
+ {
+       struct eth_dev  *dev = netdev_priv(net);
+       unsigned long   flags;
+       int             status = 0;
+       /* don't change MTU on "live" link (peer won't know) */
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->port_usb)
+               status = -EBUSY;
+       else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+               status = -ERANGE;
+       else
+               net->mtu = new_mtu;
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return status;
+ }
+ /* ethtool get_drvinfo: identify the driver; "fw_version" is reused to
+  * expose the UDC (gadget controller) name.
+  */
+ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+ {
+       struct eth_dev *dev = netdev_priv(net);
+       strlcpy(p->driver, "g_ether", sizeof(p->driver));
+       strlcpy(p->version, UETH__VERSION, sizeof(p->version));
+       strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version));
+       strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info));
+ }
+ /* REVISIT can also support:
+  *   - WOL (by tracking suspends and issuing remote wakeup)
+  *   - msglevel (implies updated messaging)
+  *   - ... probably more ethtool ops
+  */
+ /* minimal ethtool ops; link state is maintained via netif_carrier_*() */
+ static const struct ethtool_ops ops = {
+       .get_drvinfo = eth_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+ };
+ /* Schedule eth_work() for @flag unless that flag is already pending;
+  * the test_and_set_bit makes repeated calls from atomic context cheap.
+  */
+ static void defer_kevent(struct eth_dev *dev, int flag)
+ {
+       if (test_and_set_bit(flag, &dev->todo))
+               return;
+       if (!schedule_work(&dev->work))
+               ERROR(dev, "kevent %d may have been dropped\n", flag);
+       else
+               DBG(dev, "kevent %d scheduled\n", flag);
+ }
+ static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+ /* Post one RX usb_request with a freshly allocated skb sized for the
+  * link's framing.  On failure the request is returned to dev->rx_reqs
+  * and a negative errno is returned; -ENOMEM additionally schedules the
+  * deferred refill worker.
+  *
+  * NOTE(review): dev->port_usb is re-dereferenced (header_len, is_fixed,
+  * fixed_out_len) after dev->lock is dropped -- looks racy against
+  * gether_disconnect(); confirm against later upstream fixes.
+  */
+ static int
+ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+ {
+       struct sk_buff  *skb;
+       int             retval = -ENOMEM;
+       size_t          size = 0;
+       struct usb_ep   *out;
+       unsigned long   flags;
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->port_usb)
+               out = dev->port_usb->out_ep;
+       else
+               out = NULL;
+       spin_unlock_irqrestore(&dev->lock, flags);
+       if (!out)
+               return -ENOTCONN;
+       /* Padding up to RX_EXTRA handles minor disagreements with host.
+        * Normally we use the USB "terminate on short read" convention;
+        * so allow up to (N*maxpacket), since that memory is normally
+        * already allocated.  Some hardware doesn't deal well with short
+        * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
+        * byte off the end (to force hardware errors on overflow).
+        *
+        * RNDIS uses internal framing, and explicitly allows senders to
+        * pad to end-of-packet.  That's potentially nice for speed, but
+        * means receivers can't recover lost synch on their own (because
+        * new packets don't only start after a short RX).
+        */
+       size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
+       size += dev->port_usb->header_len;
+       /* round the buffer up to a whole number of USB packets */
+       size += out->maxpacket - 1;
+       size -= size % out->maxpacket;
+       if (dev->port_usb->is_fixed)
+               size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+       skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+       if (skb == NULL) {
+               DBG(dev, "no rx skb\n");
+               goto enomem;
+       }
+       /* Some platforms perform better when IP packets are aligned,
+        * but on at least one, checksumming fails otherwise.  Note:
+        * RNDIS headers involve variable numbers of LE32 values.
+        */
+       skb_reserve(skb, NET_IP_ALIGN);
+       req->buf = skb->data;
+       req->length = size;
+       req->complete = rx_complete;
+       req->context = skb;
+       retval = usb_ep_queue(out, req, gfp_flags);
+       if (retval == -ENOMEM)
+ enomem:
+               defer_kevent(dev, WORK_RX_MEMORY);
+       if (retval) {
+               DBG(dev, "rx submit --> %d\n", retval);
+               if (skb)
+                       dev_kfree_skb_any(skb);
+               spin_lock_irqsave(&dev->req_lock, flags);
+               list_add(&req->list, &dev->rx_reqs);
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+       }
+       return retval;
+ }
+ /* RX completion handler (may run in hard-irq context).  On success the
+  * skb is unwrapped (when the function driver supplies ->unwrap) into
+  * dev->rx_frames, each valid frame is length-checked and handed to
+  * netif_rx(); the request is then either recycled via rx_submit() or,
+  * if the interface is down or the transfer was shut down, parked back
+  * on dev->rx_reqs.
+  */
+ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+       struct sk_buff  *skb = req->context, *skb2;
+       struct eth_dev  *dev = ep->driver_data;
+       int             status = req->status;
+       switch (status) {
+       /* normal completion */
+       case 0:
+               skb_put(skb, req->actual);
+               if (dev->unwrap) {
+                       unsigned long   flags;
+                       spin_lock_irqsave(&dev->lock, flags);
+                       if (dev->port_usb) {
+                               /* unwrap consumes skb either way */
+                               status = dev->unwrap(dev->port_usb,
+                                                       skb,
+                                                       &dev->rx_frames);
+                       } else {
+                               dev_kfree_skb_any(skb);
+                               status = -ENOTCONN;
+                       }
+                       spin_unlock_irqrestore(&dev->lock, flags);
+               } else {
+                       skb_queue_tail(&dev->rx_frames, skb);
+               }
+               skb = NULL;
+               skb2 = skb_dequeue(&dev->rx_frames);
+               while (skb2) {
+                       if (status < 0
+                                       || ETH_HLEN > skb2->len
+                                       || skb2->len > VLAN_ETH_FRAME_LEN) {
+                               dev->net->stats.rx_errors++;
+                               dev->net->stats.rx_length_errors++;
+                               DBG(dev, "rx length %d\n", skb2->len);
+                               dev_kfree_skb_any(skb2);
+                               goto next_frame;
+                       }
+                       skb2->protocol = eth_type_trans(skb2, dev->net);
+                       dev->net->stats.rx_packets++;
+                       dev->net->stats.rx_bytes += skb2->len;
+                       /* no buffer copies needed, unless hardware can't
+                        * use skb buffers.
+                        */
+                       status = netif_rx(skb2);
+ next_frame:
+                       skb2 = skb_dequeue(&dev->rx_frames);
+               }
+               break;
+       /* software-driven interface shutdown */
+       case -ECONNRESET:               /* unlink */
+       case -ESHUTDOWN:                /* disconnect etc */
+               VDBG(dev, "rx shutdown, code %d\n", status);
+               goto quiesce;
+       /* for hardware automagic (such as pxa) */
+       case -ECONNABORTED:             /* endpoint reset */
+               DBG(dev, "rx %s reset\n", ep->name);
+               defer_kevent(dev, WORK_RX_MEMORY);
+ quiesce:
+               dev_kfree_skb_any(skb);
+               goto clean;
+       /* data overrun */
+       case -EOVERFLOW:
+               dev->net->stats.rx_over_errors++;
+               /* FALLTHROUGH */
+       default:
+               dev->net->stats.rx_errors++;
+               DBG(dev, "rx status %d\n", status);
+               break;
+       }
+       if (skb)
+               dev_kfree_skb_any(skb);
+       if (!netif_running(dev->net)) {
+ clean:
+               spin_lock(&dev->req_lock);
+               list_add(&req->list, &dev->rx_reqs);
+               spin_unlock(&dev->req_lock);
+               req = NULL;
+       }
+       if (req)
+               rx_submit(dev, req, GFP_ATOMIC);
+ }
+ /* Grow or shrink @list to exactly @n usb_requests for @ep.  Called with
+  * dev->req_lock held (hence GFP_ATOMIC).  Returns 0 on success, or
+  * -ENOMEM when @n is zero or when nothing at all could be allocated;
+  * a partial allocation with a non-empty list is treated as success.
+  * NOTE(review): -ENOMEM for n == 0 is an odd errno choice but callers
+  * only test for negative, so it is harmless here.
+  */
+ static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
+ {
+       unsigned                i;
+       struct usb_request      *req;
+       if (!n)
+               return -ENOMEM;
+       /* queue/recycle up to N requests */
+       i = n;
+       list_for_each_entry(req, list, list) {
+               if (i-- == 0)
+                       goto extra;
+       }
+       while (i--) {
+               req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+               if (!req)
+                       return list_empty(list) ? -ENOMEM : 0;
+               list_add(&req->list, list);
+       }
+       return 0;
+ extra:
+       /* free extras */
+       for (;;) {
+               struct list_head        *next;
+               next = req->list.next;
+               list_del(&req->list);
+               usb_ep_free_request(ep, req);
+               if (next == list)
+                       break;
+               req = container_of(next, struct usb_request, list);
+       }
+       return 0;
+ }
+ /* Size both request pools (TX on in_ep, RX on out_ep) to @n entries
+  * under req_lock.  Returns 0 or the first prealloc() error.
+  */
+ static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
+ {
+       int     status;
+       spin_lock(&dev->req_lock);
+       status = prealloc(&dev->tx_reqs, link->in_ep, n);
+       if (status < 0)
+               goto fail;
+       status = prealloc(&dev->rx_reqs, link->out_ep, n);
+       if (status < 0)
+               goto fail;
+       goto done;
+ fail:
+       DBG(dev, "can't alloc requests\n");
+ done:
+       spin_unlock(&dev->req_lock);
+       return status;
+ }
+ /* Drain dev->rx_reqs by submitting each free request via rx_submit().
+  * req_lock is dropped around each submit (rx_submit may sleep with
+  * GFP_KERNEL); a submit failure defers the rest to the memory worker.
+  */
+ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+ {
+       struct usb_request      *req;
+       unsigned long           flags;
+       /* fill unused rxq slots with some skb */
+       spin_lock_irqsave(&dev->req_lock, flags);
+       while (!list_empty(&dev->rx_reqs)) {
+               req = container_of(dev->rx_reqs.next,
+                               struct usb_request, list);
+               list_del_init(&req->list);
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+               if (rx_submit(dev, req, gfp_flags) < 0) {
+                       defer_kevent(dev, WORK_RX_MEMORY);
+                       return;
+               }
+               spin_lock_irqsave(&dev->req_lock, flags);
+       }
+       spin_unlock_irqrestore(&dev->req_lock, flags);
+ }
+ static void eth_work(struct work_struct *work)
+ {
+       struct eth_dev  *dev = container_of(work, struct eth_dev, work);
+       if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+               if (netif_running(dev->net))
+                       rx_fill(dev, GFP_KERNEL);
+       }
+       if (dev->todo)
+               DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
+ }
+ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+       struct sk_buff  *skb = req->context;
+       struct eth_dev  *dev = ep->driver_data;
+       switch (req->status) {
+       default:
+               dev->net->stats.tx_errors++;
+               VDBG(dev, "tx err %d\n", req->status);
+               /* FALLTHROUGH */
+       case -ECONNRESET:               /* unlink */
+       case -ESHUTDOWN:                /* disconnect etc */
+               break;
+       case 0:
+               dev->net->stats.tx_bytes += skb->len;
+       }
+       dev->net->stats.tx_packets++;
+       spin_lock(&dev->req_lock);
+       list_add(&req->list, &dev->tx_reqs);
+       spin_unlock(&dev->req_lock);
+       dev_kfree_skb_any(skb);
+       atomic_dec(&dev->tx_qlen);
+       if (netif_carrier_ok(dev->net))
+               netif_wake_queue(dev->net);
+ }
+ static inline int is_promisc(u16 cdc_filter)
+ {
+       return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+ }
+ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+                                       struct net_device *net)
+ {
+       struct eth_dev          *dev = netdev_priv(net);
+       int                     length = 0;
+       int                     retval;
+       struct usb_request      *req = NULL;
+       unsigned long           flags;
+       struct usb_ep           *in;
+       u16                     cdc_filter;
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->port_usb) {
+               in = dev->port_usb->in_ep;
+               cdc_filter = dev->port_usb->cdc_filter;
+       } else {
+               in = NULL;
+               cdc_filter = 0;
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+       if (skb && !in) {
+               dev_kfree_skb_any(skb);
+               return NETDEV_TX_OK;
+       }
+       /* apply outgoing CDC or RNDIS filters */
+       if (skb && !is_promisc(cdc_filter)) {
+               u8              *dest = skb->data;
+               if (is_multicast_ether_addr(dest)) {
+                       u16     type;
+                       /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+                        * SET_ETHERNET_MULTICAST_FILTERS requests
+                        */
+                       if (is_broadcast_ether_addr(dest))
+                               type = USB_CDC_PACKET_TYPE_BROADCAST;
+                       else
+                               type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+                       if (!(cdc_filter & type)) {
+                               dev_kfree_skb_any(skb);
+                               return NETDEV_TX_OK;
+                       }
+               }
+               /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+       }
+       spin_lock_irqsave(&dev->req_lock, flags);
+       /*
+        * this freelist can be empty if an interrupt triggered disconnect()
+        * and reconfigured the gadget (shutting down this queue) after the
+        * network stack decided to xmit but before we got the spinlock.
+        */
+       if (list_empty(&dev->tx_reqs)) {
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+               return NETDEV_TX_BUSY;
+       }
+       req = container_of(dev->tx_reqs.next, struct usb_request, list);
+       list_del(&req->list);
+       /* temporarily stop TX queue when the freelist empties */
+       if (list_empty(&dev->tx_reqs))
+               netif_stop_queue(net);
+       spin_unlock_irqrestore(&dev->req_lock, flags);
+       /* no buffer copies needed, unless the network stack did it
+        * or the hardware can't use skb buffers.
+        * or there's not enough space for extra headers we need
+        */
+       if (dev->wrap) {
+               unsigned long   flags;
+               spin_lock_irqsave(&dev->lock, flags);
+               if (dev->port_usb)
+                       skb = dev->wrap(dev->port_usb, skb);
+               spin_unlock_irqrestore(&dev->lock, flags);
+               if (!skb) {
+                       /* Multi frame CDC protocols may store the frame for
+                        * later which is not a dropped frame.
+                        */
+                       if (dev->port_usb->supports_multi_frame)
+                               goto multiframe;
+                       goto drop;
+               }
+       }
+       length = skb->len;
+       req->buf = skb->data;
+       req->context = skb;
+       req->complete = tx_complete;
+       /* NCM requires no zlp if transfer is dwNtbInMaxSize */
+       if (dev->port_usb->is_fixed &&
+           length == dev->port_usb->fixed_in_len &&
+           (length % in->maxpacket) == 0)
+               req->zero = 0;
+       else
+               req->zero = 1;
+       /* use zlp framing on tx for strict CDC-Ether conformance,
+        * though any robust network rx path ignores extra padding.
+        * and some hardware doesn't like to write zlps.
+        */
+       if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+               length++;
+       req->length = length;
+       /* throttle high/super speed IRQ rate back slightly */
+       if (gadget_is_dualspeed(dev->gadget))
+               req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+                                    dev->gadget->speed == USB_SPEED_SUPER)
+                       ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
+                       : 0;
+       retval = usb_ep_queue(in, req, GFP_ATOMIC);
+       switch (retval) {
+       default:
+               DBG(dev, "tx queue err %d\n", retval);
+               break;
+       case 0:
+               net->trans_start = jiffies;
+               atomic_inc(&dev->tx_qlen);
+       }
+       if (retval) {
+               dev_kfree_skb_any(skb);
+ drop:
+               dev->net->stats.tx_dropped++;
+ multiframe:
+               spin_lock_irqsave(&dev->req_lock, flags);
+               if (list_empty(&dev->tx_reqs))
+                       netif_start_queue(net);
+               list_add(&req->list, &dev->tx_reqs);
+               spin_unlock_irqrestore(&dev->req_lock, flags);
+       }
+       return NETDEV_TX_OK;
+ }
+ /*-------------------------------------------------------------------------*/
+ static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+ {
+       DBG(dev, "%s\n", __func__);
+       /* fill the rx queue */
+       rx_fill(dev, gfp_flags);
+       /* and open the tx floodgates */
+       atomic_set(&dev->tx_qlen, 0);
+       netif_wake_queue(dev->net);
+ }
+ static int eth_open(struct net_device *net)
+ {
+       struct eth_dev  *dev = netdev_priv(net);
+       struct gether   *link;
+       DBG(dev, "%s\n", __func__);
+       if (netif_carrier_ok(dev->net))
+               eth_start(dev, GFP_KERNEL);
+       spin_lock_irq(&dev->lock);
+       link = dev->port_usb;
+       if (link && link->open)
+               link->open(link);
+       spin_unlock_irq(&dev->lock);
+       return 0;
+ }
+ static int eth_stop(struct net_device *net)
+ {
+       struct eth_dev  *dev = netdev_priv(net);
+       unsigned long   flags;
+       VDBG(dev, "%s\n", __func__);
+       netif_stop_queue(net);
+       DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+               dev->net->stats.rx_packets, dev->net->stats.tx_packets,
+               dev->net->stats.rx_errors, dev->net->stats.tx_errors
+               );
+       /* ensure there are no more active requests */
+       spin_lock_irqsave(&dev->lock, flags);
+       if (dev->port_usb) {
+               struct gether   *link = dev->port_usb;
+               const struct usb_endpoint_descriptor *in;
+               const struct usb_endpoint_descriptor *out;
+               if (link->close)
+                       link->close(link);
+               /* NOTE:  we have no abort-queue primitive we could use
+                * to cancel all pending I/O.  Instead, we disable then
+                * reenable the endpoints ... this idiom may leave toggle
+                * wrong, but that's a self-correcting error.
+                *
+                * REVISIT:  we *COULD* just let the transfers complete at
+                * their own pace; the network stack can handle old packets.
+                * For the moment we leave this here, since it works.
+                */
+               in = link->in_ep->desc;
+               out = link->out_ep->desc;
+               usb_ep_disable(link->in_ep);
+               usb_ep_disable(link->out_ep);
+               if (netif_carrier_ok(net)) {
+                       DBG(dev, "host still using in/out endpoints\n");
+                       link->in_ep->desc = in;
+                       link->out_ep->desc = out;
+                       usb_ep_enable(link->in_ep);
+                       usb_ep_enable(link->out_ep);
+               }
+       }
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return 0;
+ }
+ /*-------------------------------------------------------------------------*/
+ static int get_ether_addr(const char *str, u8 *dev_addr)
+ {
+       if (str) {
+               unsigned        i;
+               for (i = 0; i < 6; i++) {
+                       unsigned char num;
+                       if ((*str == '.') || (*str == ':'))
+                               str++;
+                       num = hex_to_bin(*str++) << 4;
+                       num |= hex_to_bin(*str++);
+                       dev_addr [i] = num;
+               }
+               if (is_valid_ether_addr(dev_addr))
+                       return 0;
+       }
+       eth_random_addr(dev_addr);
+       return 1;
+ }
+ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
+ {
+       if (len < 18)
+               return -EINVAL;
+       snprintf(str, len, "%02x:%02x:%02x:%02x:%02x:%02x",
+                dev_addr[0], dev_addr[1], dev_addr[2],
+                dev_addr[3], dev_addr[4], dev_addr[5]);
+       return 18;
+ }
+ static const struct net_device_ops eth_netdev_ops = {
+       .ndo_open               = eth_open,
+       .ndo_stop               = eth_stop,
+       .ndo_start_xmit         = eth_start_xmit,
+       .ndo_change_mtu         = ueth_change_mtu,
+       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+ };
+ static struct device_type gadget_type = {
+       .name   = "gadget",
+ };
+ /**
+  * gether_setup_name - initialize one ethernet-over-usb link
+  * @g: gadget to be associated with these links
+  * @ethaddr: NULL, or a buffer in which the ethernet address of the
+  *    host side of the link is recorded
+  * @netname: name for network device (for example, "usb")
+  * Context: may sleep
+  *
+  * This sets up the single network link that may be exported by a
+  * gadget driver using this framework.  The link layer addresses are
+  * set up using module parameters.
+  *
+  * Returns an eth_dev pointer on success, or an ERR_PTR on failure.
+  */
+ struct eth_dev *gether_setup_name(struct usb_gadget *g,
+               const char *dev_addr, const char *host_addr,
+               u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname)
+ {
+       struct eth_dev          *dev;
+       struct net_device       *net;
+       int                     status;
+       net = alloc_etherdev(sizeof *dev);
+       if (!net)
+               return ERR_PTR(-ENOMEM);
+       dev = netdev_priv(net);
+       spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->req_lock);
+       INIT_WORK(&dev->work, eth_work);
+       INIT_LIST_HEAD(&dev->tx_reqs);
+       INIT_LIST_HEAD(&dev->rx_reqs);
+       skb_queue_head_init(&dev->rx_frames);
+       /* network device setup */
+       dev->net = net;
+       dev->qmult = qmult;
+       snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+       if (get_ether_addr(dev_addr, net->dev_addr))
+               dev_warn(&g->dev,
+                       "using random %s ethernet address\n", "self");
+       if (get_ether_addr(host_addr, dev->host_mac))
+               dev_warn(&g->dev,
+                       "using random %s ethernet address\n", "host");
+       if (ethaddr)
+               memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+       net->netdev_ops = &eth_netdev_ops;
+       net->ethtool_ops = &ops;
+       dev->gadget = g;
+       SET_NETDEV_DEV(net, &g->dev);
+       SET_NETDEV_DEVTYPE(net, &gadget_type);
+       status = register_netdev(net);
+       if (status < 0) {
+               dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+               free_netdev(net);
+               dev = ERR_PTR(status);
+       } else {
+               INFO(dev, "MAC %pM\n", net->dev_addr);
+               INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+               /*
+                * two kinds of host-initiated state changes:
+                *  - iff DATA transfer is active, carrier is "on"
+                *  - tx queueing enabled if open *and* carrier is "on"
+                */
+               netif_carrier_off(net);
+       }
+       return dev;
+ }
+ EXPORT_SYMBOL_GPL(gether_setup_name);
+ struct net_device *gether_setup_name_default(const char *netname)
+ {
+       struct net_device       *net;
+       struct eth_dev          *dev;
+       net = alloc_etherdev(sizeof(*dev));
+       if (!net)
+               return ERR_PTR(-ENOMEM);
+       dev = netdev_priv(net);
+       spin_lock_init(&dev->lock);
+       spin_lock_init(&dev->req_lock);
+       INIT_WORK(&dev->work, eth_work);
+       INIT_LIST_HEAD(&dev->tx_reqs);
+       INIT_LIST_HEAD(&dev->rx_reqs);
+       skb_queue_head_init(&dev->rx_frames);
+       /* network device setup */
+       dev->net = net;
+       dev->qmult = QMULT_DEFAULT;
+       snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+       eth_random_addr(dev->dev_mac);
+       pr_warn("using random %s ethernet address\n", "self");
+       eth_random_addr(dev->host_mac);
+       pr_warn("using random %s ethernet address\n", "host");
+       net->netdev_ops = &eth_netdev_ops;
+       net->ethtool_ops = &ops;
+       SET_NETDEV_DEVTYPE(net, &gadget_type);
+       return net;
+ }
+ EXPORT_SYMBOL_GPL(gether_setup_name_default);
+ int gether_register_netdev(struct net_device *net)
+ {
+       struct eth_dev *dev;
+       struct usb_gadget *g;
+       struct sockaddr sa;
+       int status;
+       if (!net->dev.parent)
+               return -EINVAL;
+       dev = netdev_priv(net);
+       g = dev->gadget;
+       status = register_netdev(net);
+       if (status < 0) {
+               dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+               return status;
+       } else {
+               INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+               /* two kinds of host-initiated state changes:
+                *  - iff DATA transfer is active, carrier is "on"
+                *  - tx queueing enabled if open *and* carrier is "on"
+                */
+               netif_carrier_off(net);
+       }
+       sa.sa_family = net->type;
+       memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN);
+       rtnl_lock();
+       status = dev_set_mac_address(net, &sa);
+       rtnl_unlock();
+       if (status)
+               pr_warn("cannot set self ethernet address: %d\n", status);
+       else
+               INFO(dev, "MAC %pM\n", dev->dev_mac);
+       return status;
+ }
+ EXPORT_SYMBOL_GPL(gether_register_netdev);
+ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
+ {
+       struct eth_dev *dev;
+       dev = netdev_priv(net);
+       dev->gadget = g;
+       SET_NETDEV_DEV(net, &g->dev);
+ }
+ EXPORT_SYMBOL_GPL(gether_set_gadget);
+ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
+ {
+       struct eth_dev *dev;
+       u8 new_addr[ETH_ALEN];
+       dev = netdev_priv(net);
+       if (get_ether_addr(dev_addr, new_addr))
+               return -EINVAL;
+       memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_dev_addr);
+ int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len)
+ {
+       struct eth_dev *dev;
+       dev = netdev_priv(net);
+       return get_ether_addr_str(dev->dev_mac, dev_addr, len);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_dev_addr);
+ int gether_set_host_addr(struct net_device *net, const char *host_addr)
+ {
+       struct eth_dev *dev;
+       u8 new_addr[ETH_ALEN];
+       dev = netdev_priv(net);
+       if (get_ether_addr(host_addr, new_addr))
+               return -EINVAL;
+       memcpy(dev->host_mac, new_addr, ETH_ALEN);
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_host_addr);
+ int gether_get_host_addr(struct net_device *net, char *host_addr, int len)
+ {
+       struct eth_dev *dev;
+       dev = netdev_priv(net);
+       return get_ether_addr_str(dev->host_mac, host_addr, len);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_host_addr);
+ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
+ {
+       struct eth_dev *dev;
+       if (len < 13)
+               return -EINVAL;
+       dev = netdev_priv(net);
+       snprintf(host_addr, len, "%pm", dev->host_mac);
+       return strlen(host_addr);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
+ void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN])
+ {
+       struct eth_dev *dev;
+       dev = netdev_priv(net);
+       memcpy(host_mac, dev->host_mac, ETH_ALEN);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_host_addr_u8);
+ void gether_set_qmult(struct net_device *net, unsigned qmult)
+ {
+       struct eth_dev *dev;
+       dev = netdev_priv(net);
+       dev->qmult = qmult;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_qmult);
+ unsigned gether_get_qmult(struct net_device *net)
+ {
+       struct eth_dev *dev;
+       dev = netdev_priv(net);
+       return dev->qmult;
+ }
+ EXPORT_SYMBOL_GPL(gether_get_qmult);
+ int gether_get_ifname(struct net_device *net, char *name, int len)
+ {
+       rtnl_lock();
+       strlcpy(name, netdev_name(net), len);
+       rtnl_unlock();
+       return strlen(name);
+ }
+ EXPORT_SYMBOL_GPL(gether_get_ifname);
+ /**
+  * gether_cleanup - remove Ethernet-over-USB device
+  * Context: may sleep
+  *
+  * This is called to free all resources allocated by gether_setup().
+  */
+ void gether_cleanup(struct eth_dev *dev)
+ {
+       if (!dev)
+               return;
+       unregister_netdev(dev->net);
+       flush_work(&dev->work);
+       free_netdev(dev->net);
+ }
+ EXPORT_SYMBOL_GPL(gether_cleanup);
+ /**
+  * gether_connect - notify network layer that USB link is active
+  * @link: the USB link, set up with endpoints, descriptors matching
+  *    current device speed, and any framing wrapper(s) set up.
+  * Context: irqs blocked
+  *
+  * This is called to activate endpoints and let the network layer know
+  * the connection is active ("carrier detect").  It may cause the I/O
+  * queues to open and start letting network packets flow, but will in
+  * any case activate the endpoints so that they respond properly to the
+  * USB host.
+  *
+  * Verify net_device pointer returned using IS_ERR().  If it doesn't
+  * indicate some error code (negative errno), ep->driver_data values
+  * have been overwritten.
+  */
+ struct net_device *gether_connect(struct gether *link)
+ {
+       struct eth_dev          *dev = link->ioport;
+       int                     result = 0;
+       if (!dev)
+               return ERR_PTR(-EINVAL);
+       link->in_ep->driver_data = dev;
+       result = usb_ep_enable(link->in_ep);
+       if (result != 0) {
+               DBG(dev, "enable %s --> %d\n",
+                       link->in_ep->name, result);
+               goto fail0;
+       }
+       link->out_ep->driver_data = dev;
+       result = usb_ep_enable(link->out_ep);
+       if (result != 0) {
+               DBG(dev, "enable %s --> %d\n",
+                       link->out_ep->name, result);
+               goto fail1;
+       }
+       if (result == 0)
+               result = alloc_requests(dev, link, qlen(dev->gadget,
+                                       dev->qmult));
+       if (result == 0) {
+               dev->zlp = link->is_zlp_ok;
+               DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult));
+               dev->header_len = link->header_len;
+               dev->unwrap = link->unwrap;
+               dev->wrap = link->wrap;
+               spin_lock(&dev->lock);
+               dev->port_usb = link;
+               if (netif_running(dev->net)) {
+                       if (link->open)
+                               link->open(link);
+               } else {
+                       if (link->close)
+                               link->close(link);
+               }
+               spin_unlock(&dev->lock);
+               netif_carrier_on(dev->net);
+               if (netif_running(dev->net))
+                       eth_start(dev, GFP_ATOMIC);
+       /* on error, disable any endpoints  */
+       } else {
+               (void) usb_ep_disable(link->out_ep);
+ fail1:
+               (void) usb_ep_disable(link->in_ep);
+       }
+ fail0:
+       /* caller is responsible for cleanup on error */
+       if (result < 0)
+               return ERR_PTR(result);
+       return dev->net;
+ }
+ EXPORT_SYMBOL_GPL(gether_connect);
+ /**
+  * gether_disconnect - notify network layer that USB link is inactive
+  * @link: the USB link, on which gether_connect() was called
+  * Context: irqs blocked
+  *
+  * This is called to deactivate endpoints and let the network layer know
+  * the connection went inactive ("no carrier").
+  *
+  * On return, the state is as if gether_connect() had never been called.
+  * The endpoints are inactive, and accordingly without active USB I/O.
+  * Pointers to endpoint descriptors and endpoint private data are nulled.
+  */
+ void gether_disconnect(struct gether *link)
+ {
+       struct eth_dev          *dev = link->ioport;
+       struct usb_request      *req;
+       WARN_ON(!dev);
+       if (!dev)
+               return;
+       DBG(dev, "%s\n", __func__);
++      netif_tx_lock(dev->net);
+       netif_stop_queue(dev->net);
++      netif_tx_unlock(dev->net);
++
+       netif_carrier_off(dev->net);
+       /* disable endpoints, forcing (synchronous) completion
+        * of all pending i/o.  then free the request objects
+        * and forget about the endpoints.
+        */
+       usb_ep_disable(link->in_ep);
+       spin_lock(&dev->req_lock);
+       while (!list_empty(&dev->tx_reqs)) {
+               req = container_of(dev->tx_reqs.next,
+                                       struct usb_request, list);
+               list_del(&req->list);
+               spin_unlock(&dev->req_lock);
+               usb_ep_free_request(link->in_ep, req);
+               spin_lock(&dev->req_lock);
+       }
+       spin_unlock(&dev->req_lock);
+       link->in_ep->driver_data = NULL;
+       link->in_ep->desc = NULL;
+       usb_ep_disable(link->out_ep);
+       spin_lock(&dev->req_lock);
+       while (!list_empty(&dev->rx_reqs)) {
+               req = container_of(dev->rx_reqs.next,
+                                       struct usb_request, list);
+               list_del(&req->list);
+               spin_unlock(&dev->req_lock);
+               usb_ep_free_request(link->out_ep, req);
+               spin_lock(&dev->req_lock);
+       }
+       spin_unlock(&dev->req_lock);
+       link->out_ep->driver_data = NULL;
+       link->out_ep->desc = NULL;
+       /* finish forgetting about this USB link episode */
+       dev->header_len = 0;
+       dev->unwrap = NULL;
+       dev->wrap = NULL;
+       spin_lock(&dev->lock);
+       dev->port_usb = NULL;
+       spin_unlock(&dev->lock);
+ }
+ EXPORT_SYMBOL_GPL(gether_disconnect);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("David Brownell");
index 0000000000000000000000000000000000000000,ee6c16416c300121aad92cca9479fb0613b96af9..2e4ce7704908bc78e4ed2385842a1e6dbec1d59c
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,2142 +1,2147 @@@
 -      put_dev (dev);
+ /*
+  * inode.c -- user mode filesystem api for usb gadget controllers
+  *
+  * Copyright (C) 2003-2004 David Brownell
+  * Copyright (C) 2003 Agilent Technologies
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License as published by
+  * the Free Software Foundation; either version 2 of the License, or
+  * (at your option) any later version.
+  */
+ /* #define VERBOSE_DEBUG */
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/fs.h>
+ #include <linux/pagemap.h>
+ #include <linux/uts.h>
+ #include <linux/wait.h>
+ #include <linux/compiler.h>
+ #include <asm/uaccess.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
+ #include <linux/mmu_context.h>
+ #include <linux/aio.h>
+ #include <linux/device.h>
+ #include <linux/moduleparam.h>
+ #include <linux/usb/gadgetfs.h>
+ #include <linux/usb/gadget.h>
+ /*
+  * The gadgetfs API maps each endpoint to a file descriptor so that you
+  * can use standard synchronous read/write calls for I/O.  There's some
+  * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
+  * drivers show how this works in practice.  You can also use AIO to
+  * eliminate I/O gaps between requests, to help when streaming data.
+  *
+  * Key parts that must be USB-specific are protocols defining how the
+  * read/write operations relate to the hardware state machines.  There
+  * are two types of files.  One type is for the device, implementing ep0.
+  * The other type is for each IN or OUT endpoint.  In both cases, the
+  * user mode driver must configure the hardware before using it.
+  *
+  * - First, dev_config() is called when /dev/gadget/$CHIP is configured
+  *   (by writing configuration and device descriptors).  Afterwards it
+  *   may serve as a source of device events, used to handle all control
+  *   requests other than basic enumeration.
+  *
+  * - Then, after a SET_CONFIGURATION control request, ep_config() is
+  *   called when each /dev/gadget/ep* file is configured (by writing
+  *   endpoint descriptors).  Afterwards these files are used to write()
+  *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
+  *   direction" request is issued (like reading an IN endpoint).
+  *
+  * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
+  * not possible on all hardware.  For example, precise fault handling with
+  * respect to data left in endpoint fifos after aborted operations; or
+  * selective clearing of endpoint halts, to implement SET_INTERFACE.
+  */
+ #define       DRIVER_DESC     "USB Gadget filesystem"
+ #define       DRIVER_VERSION  "24 Aug 2004"
+ static const char driver_desc [] = DRIVER_DESC;
+ static const char shortname [] = "gadgetfs";
+ MODULE_DESCRIPTION (DRIVER_DESC);
+ MODULE_AUTHOR ("David Brownell");
+ MODULE_LICENSE ("GPL");
+ /*----------------------------------------------------------------------*/
+ #define GADGETFS_MAGIC                0xaee71ee7
+ /* /dev/gadget/$CHIP represents ep0 and the whole device */
+ enum ep0_state {
+       /* DISABLED is the initial state.
+        */
+       STATE_DEV_DISABLED = 0,
+       /* Only one open() of /dev/gadget/$CHIP; only one file tracks
+        * ep0/device i/o modes and binding to the controller.  Driver
+        * must always write descriptors to initialize the device, then
+        * the device becomes UNCONNECTED until enumeration.
+        */
+       STATE_DEV_OPENED,
+       /* From then on, ep0 fd is in either of two basic modes:
+        * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
+        * - SETUP: read/write will transfer control data and succeed;
+        *   or if "wrong direction", performs protocol stall
+        */
+       STATE_DEV_UNCONNECTED,
+       STATE_DEV_CONNECTED,
+       STATE_DEV_SETUP,
+       /* UNBOUND means the driver closed ep0, so the device won't be
+        * accessible again (DEV_DISABLED) until all fds are closed.
+        */
+       STATE_DEV_UNBOUND,
+ };
+ /* enough for the whole queue: most events invalidate others */
+ #define       N_EVENT                 5
+ struct dev_data {
+       spinlock_t                      lock;
+       atomic_t                        count;
+       enum ep0_state                  state;          /* P: lock */
+       struct usb_gadgetfs_event       event [N_EVENT];
+       unsigned                        ev_next;
+       struct fasync_struct            *fasync;
+       u8                              current_config;
+       /* drivers reading ep0 MUST handle control requests (SETUP)
+        * reported that way; else the host will time out.
+        */
+       unsigned                        usermode_setup : 1,
+                                       setup_in : 1,
+                                       setup_can_stall : 1,
+                                       setup_out_ready : 1,
+                                       setup_out_error : 1,
+                                       setup_abort : 1;
+       unsigned                        setup_wLength;
+       /* the rest is basically write-once */
+       struct usb_config_descriptor    *config, *hs_config;
+       struct usb_device_descriptor    *dev;
+       struct usb_request              *req;
+       struct usb_gadget               *gadget;
+       struct list_head                epfiles;
+       void                            *buf;
+       wait_queue_head_t               wait;
+       struct super_block              *sb;
+       struct dentry                   *dentry;
+       /* except this scratch i/o buffer for ep0 */
+       u8                              rbuf [256];
+ };
+ static inline void get_dev (struct dev_data *data)
+ {
+       atomic_inc (&data->count);
+ }
+ static void put_dev (struct dev_data *data)
+ {
+       if (likely (!atomic_dec_and_test (&data->count)))
+               return;
+       /* needs no more cleanup */
+       BUG_ON (waitqueue_active (&data->wait));
+       kfree (data);
+ }
+ static struct dev_data *dev_new (void)
+ {
+       struct dev_data         *dev;
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+       dev->state = STATE_DEV_DISABLED;
+       atomic_set (&dev->count, 1);
+       spin_lock_init (&dev->lock);
+       INIT_LIST_HEAD (&dev->epfiles);
+       init_waitqueue_head (&dev->wait);
+       return dev;
+ }
+ /*----------------------------------------------------------------------*/
+ /* other /dev/gadget/$ENDPOINT files represent endpoints */
+ enum ep_state {
+       STATE_EP_DISABLED = 0,
+       STATE_EP_READY,
+       STATE_EP_ENABLED,
+       STATE_EP_UNBOUND,
+ };
+ /* per-endpoint-file state; refcounted via get_ep()/put_ep() */
+ struct ep_data {
+       struct mutex                    lock;           /* serializes i/o and (re)config on this ep */
+       enum ep_state                   state;
+       atomic_t                        count;          /* file/aio references */
+       struct dev_data                 *dev;           /* owning device; ref held until last put_ep() */
+       /* must hold dev->lock before accessing ep or req */
+       struct usb_ep                   *ep;            /* NULL once the host/gadget goes away */
+       struct usb_request              *req;           /* preallocated request for synchronous i/o */
+       ssize_t                         status;         /* result recorded by epio_complete() */
+       char                            name [16];
+       struct usb_endpoint_descriptor  desc, hs_desc;  /* full/low speed, then high speed */
+       struct list_head                epfiles;
+       wait_queue_head_t               wait;
+       struct dentry                   *dentry;
+       struct inode                    *inode;
+ };
+ /* take a reference on an endpoint file; paired with put_ep() */
+ static inline void get_ep (struct ep_data *data)
+ {
+       atomic_inc(&data->count);
+ }
+ /*
+  * Drop a reference; the final put releases the parent device reference
+  * and frees the endpoint state.
+  */
+ static void put_ep (struct ep_data *data)
+ {
+       if (atomic_dec_and_test(&data->count)) {
+               put_dev(data->dev);
+               /* nothing may still be queued or sleeping here */
+               BUG_ON(!list_empty(&data->epfiles));
+               BUG_ON(waitqueue_active(&data->wait));
+               kfree(data);
+       }
+ }
+ /*----------------------------------------------------------------------*/
+ /* most "how to use the hardware" policy choices are in userspace:
+  * mapping endpoint roles (which the driver needs) to the capabilities
+  * which the usb controller has.  most of those capabilities are exposed
+  * implicitly, starting with the driver name and then endpoint names.
+  */
+ /* controller (UDC) name; presumably set when binding to the gadget —
+  * assignment is not visible in this chunk, TODO confirm against full file */
+ static const char *CHIP;
+ /*----------------------------------------------------------------------*/
+ /* NOTE:  don't use dev_printk calls before binding to the gadget
+  * at the end of ep0 configuration, or after unbind.
+  */
+ /* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
+ #define xprintk(d,level,fmt,args...) \
+       printk(level "%s: " fmt , shortname , ## args)
+ /* DBG() compiles away entirely unless DEBUG is defined */
+ #ifdef DEBUG
+ #define DBG(dev,fmt,args...) \
+       xprintk(dev , KERN_DEBUG , fmt , ## args)
+ #else
+ #define DBG(dev,fmt,args...) \
+       do { } while (0)
+ #endif /* DEBUG */
+ /* VDEBUG() additionally requires VERBOSE_DEBUG */
+ #ifdef VERBOSE_DEBUG
+ #define VDEBUG        DBG
+ #else
+ #define VDEBUG(dev,fmt,args...) \
+       do { } while (0)
+ #endif /* VERBOSE_DEBUG */
+ #define ERROR(dev,fmt,args...) \
+       xprintk(dev , KERN_ERR , fmt , ## args)
+ #define INFO(dev,fmt,args...) \
+       xprintk(dev , KERN_INFO , fmt , ## args)
+ /*----------------------------------------------------------------------*/
+ /* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
+  *
+  * After opening, configure non-control endpoints.  Then use normal
+  * stream read() and write() requests; and maybe ioctl() to get more
+  * precise FIFO status when recovering from cancellation.
+  */
+ /*
+  * Completion callback for the synchronous i/o paths: record the outcome
+  * (negative errno, else bytes transferred) in ep_data->status and wake
+  * the sleeping submitter.  A NULL context means nobody is waiting.
+  */
+ static void epio_complete (struct usb_ep *ep, struct usb_request *req)
+ {
+       struct ep_data          *epdata = ep->driver_data;
+       struct completion       *done = req->context;
+
+       if (done) {
+               if (req->status)
+                       epdata->status = req->status;
+               else
+                       epdata->status = req->actual;
+               complete(done);
+       }
+ }
+ /*
+  * Lock the endpoint and check it is usable.  On success (0) the caller
+  * holds epdata->lock; dev->lock is still needed to touch epdata->ep.
+  * O_NONBLOCK callers get -EAGAIN instead of sleeping on the mutex or
+  * on a not-yet-enabled endpoint; a clean disconnect (or any other
+  * unusable state) yields -ENODEV with the mutex released.
+  */
+ static int
+ get_ready_ep (unsigned f_flags, struct ep_data *epdata)
+ {
+       int     ret;
+
+       if (f_flags & O_NONBLOCK) {
+               if (!mutex_trylock(&epdata->lock))
+                       return -EAGAIN;
+               if (epdata->state == STATE_EP_ENABLED)
+                       return 0;
+               mutex_unlock(&epdata->lock);
+               return -EAGAIN;
+       }
+
+       ret = mutex_lock_interruptible(&epdata->lock);
+       if (ret < 0)
+               return ret;
+
+       if (epdata->state == STATE_EP_ENABLED)
+               return 0;
+
+       /* STATE_EP_DISABLED / STATE_EP_READY "can't happen" here;
+        * STATE_EP_UNBOUND is the quiet, expected disconnect case.
+        */
+       if (epdata->state != STATE_EP_UNBOUND)
+               pr_debug ("%s: ep %p not available, state %d\n",
+                               shortname, epdata, epdata->state);
+       mutex_unlock(&epdata->lock);
+       return -ENODEV;
+ }
+ /*
+  * Perform one synchronous transfer on a non-control endpoint: queue
+  * buf/len on the endpoint's preallocated request and sleep until the
+  * completion callback fires.  Returns bytes transferred or a negative
+  * errno.  If the wait is interrupted, the request is dequeued and we
+  * still wait for the (now forced) completion before returning, so the
+  * controller never completes into a stale buffer.
+  */
+ static ssize_t
+ ep_io (struct ep_data *epdata, void *buf, unsigned len)
+ {
+       DECLARE_COMPLETION_ONSTACK (done);
+       int value;
+       /* dev->lock guards epdata->ep against concurrent disconnect */
+       spin_lock_irq (&epdata->dev->lock);
+       if (likely (epdata->ep != NULL)) {
+               struct usb_request      *req = epdata->req;
+               req->context = &done;
+               req->complete = epio_complete;
+               req->buf = buf;
+               req->length = len;
+               value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
+       } else
+               value = -ENODEV;
+       spin_unlock_irq (&epdata->dev->lock);
+       if (likely (value == 0)) {
+               value = wait_event_interruptible (done.wait, done.done);
+               if (value != 0) {
+                       /* interrupted: cancel the request, then wait for
+                        * epio_complete() to run for real
+                        */
+                       spin_lock_irq (&epdata->dev->lock);
+                       if (likely (epdata->ep != NULL)) {
+                               DBG (epdata->dev, "%s i/o interrupted\n",
+                                               epdata->name);
+                               usb_ep_dequeue (epdata->ep, epdata->req);
+                               spin_unlock_irq (&epdata->dev->lock);
+                               wait_event (done.wait, done.done);
+                               /* report dequeue-induced reset as -EINTR */
+                               if (epdata->status == -ECONNRESET)
+                                       epdata->status = -EINTR;
+                       } else {
+                               spin_unlock_irq (&epdata->dev->lock);
+                               DBG (epdata->dev, "endpoint gone\n");
+                               epdata->status = -ENODEV;
+                       }
+               }
+               /* outcome recorded by epio_complete() */
+               return epdata->status;
+       }
+       return value;
+ }
+ /* handle a synchronous OUT bulk/intr/iso transfer */
+ static ssize_t
+ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct ep_data          *data = fd->private_data;
+       void                    *kbuf;
+       ssize_t                 value;
+       /* on success this returns with data->lock held */
+       if ((value = get_ready_ep (fd->f_flags, data)) < 0)
+               return value;
+       /* halt any endpoint by doing a "wrong direction" i/o call */
+       if (usb_endpoint_dir_in(&data->desc)) {
+               /* iso endpoints can't halt; reject outright */
+               if (usb_endpoint_xfer_isoc(&data->desc)) {
+                       mutex_unlock(&data->lock);
+                       return -EINVAL;
+               }
+               DBG (data->dev, "%s halt\n", data->name);
+               spin_lock_irq (&data->dev->lock);
+               if (likely (data->ep != NULL))
+                       usb_ep_set_halt (data->ep);
+               spin_unlock_irq (&data->dev->lock);
+               mutex_unlock(&data->lock);
+               return -EBADMSG;
+       }
+       /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */
+       value = -ENOMEM;
+       kbuf = kmalloc (len, GFP_KERNEL);
+       if (unlikely (!kbuf))
+               goto free1;
+       /* bounce through a kernel buffer; ep_io() blocks until done */
+       value = ep_io (data, kbuf, len);
+       VDEBUG (data->dev, "%s read %zu OUT, status %d\n",
+               data->name, len, (int) value);
+       if (value >= 0 && copy_to_user (buf, kbuf, value))
+               value = -EFAULT;
+ free1:
+       mutex_unlock(&data->lock);
+       kfree (kbuf);   /* kfree(NULL) is a no-op on the alloc-failure path */
+       return value;
+ }
+ /* handle a synchronous IN bulk/intr/iso transfer */
+ static ssize_t
+ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct ep_data          *data = fd->private_data;
+       void                    *kbuf;
+       ssize_t                 value;
+       /* on success this returns with data->lock held */
+       if ((value = get_ready_ep (fd->f_flags, data)) < 0)
+               return value;
+       /* halt any endpoint by doing a "wrong direction" i/o call */
+       if (!usb_endpoint_dir_in(&data->desc)) {
+               /* iso endpoints can't halt; reject outright */
+               if (usb_endpoint_xfer_isoc(&data->desc)) {
+                       mutex_unlock(&data->lock);
+                       return -EINVAL;
+               }
+               DBG (data->dev, "%s halt\n", data->name);
+               spin_lock_irq (&data->dev->lock);
+               if (likely (data->ep != NULL))
+                       usb_ep_set_halt (data->ep);
+               spin_unlock_irq (&data->dev->lock);
+               mutex_unlock(&data->lock);
+               return -EBADMSG;
+       }
+       /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */
+       kbuf = memdup_user(buf, len);
+       /* memdup_user() returns an ERR_PTR on failure, never NULL;
+        * the old "if (!kbuf)" test could pass an ERR_PTR to ep_io()
+        */
+       if (IS_ERR(kbuf)) {
+               value = PTR_ERR(kbuf);
+               kbuf = NULL;            /* so the shared exit can kfree() safely */
+               goto free1;
+       }
+       value = ep_io (data, kbuf, len);
+       VDEBUG (data->dev, "%s write %zu IN, status %d\n",
+               data->name, len, (int) value);
+ free1:
+       mutex_unlock(&data->lock);
+       kfree (kbuf);   /* was leaked before; kfree(NULL) is a no-op */
+       return value;
+ }
+ /*
+  * release(): return the endpoint file to its unconfigured state
+  * (unless the gadget already unbound) and drop the open's reference.
+  */
+ static int
+ ep_release (struct inode *inode, struct file *fd)
+ {
+       struct ep_data          *data = fd->private_data;
+       int                     ret;
+
+       ret = mutex_lock_interruptible(&data->lock);
+       if (ret < 0)
+               return ret;
+
+       /* clean up if this can be reopened */
+       if (data->state != STATE_EP_UNBOUND) {
+               data->state = STATE_EP_DISABLED;
+               /* forget the descriptors written via ep_config() */
+               data->desc.bDescriptorType = 0;
+               data->hs_desc.bDescriptorType = 0;
+               usb_ep_disable(data->ep);
+       }
+       mutex_unlock(&data->lock);
+       put_ep (data);
+       return 0;
+ }
+ /*
+  * ioctl(): FIFO status/flush and CLEAR_HALT on an enabled endpoint.
+  * Returns -ENODEV once the endpoint is gone, -ENOTTY for unknown codes.
+  */
+ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
+ {
+       struct ep_data          *data = fd->private_data;
+       int                     ret;
+
+       ret = get_ready_ep (fd->f_flags, data);
+       if (ret < 0)
+               return ret;
+
+       spin_lock_irq (&data->dev->lock);
+       if (data->ep == NULL) {
+               ret = -ENODEV;
+       } else {
+               switch (code) {
+               case GADGETFS_FIFO_STATUS:
+                       ret = usb_ep_fifo_status (data->ep);
+                       break;
+               case GADGETFS_FIFO_FLUSH:
+                       /* flush reports nothing; keep the 0 from get_ready_ep() */
+                       usb_ep_fifo_flush (data->ep);
+                       break;
+               case GADGETFS_CLEAR_HALT:
+                       ret = usb_ep_clear_halt (data->ep);
+                       break;
+               default:
+                       ret = -ENOTTY;
+               }
+       }
+       spin_unlock_irq (&data->dev->lock);
+       mutex_unlock(&data->lock);
+       return ret;
+ }
+ /*----------------------------------------------------------------------*/
+ /* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
+ /* per-iocb bookkeeping linking a kiocb to one usb_request */
+ struct kiocb_priv {
+       struct usb_request      *req;           /* in-flight request; NULLed on completion */
+       struct ep_data          *epdata;        /* ref held while queued; NULLed on completion */
+       struct kiocb            *iocb;
+       struct mm_struct        *mm;            /* submitter's mm, for the copy-up worker */
+       struct work_struct      work;           /* ep_user_copy_worker */
+       void                    *buf;           /* kernel bounce buffer (reads: filled on completion) */
+       const struct iovec      *iv;            /* NULL for writes (nothing to copy back) */
+       unsigned long           nr_segs;
+       unsigned                actual;         /* bytes the controller transferred */
+ };
+ /*
+  * aio cancel callback: dequeue the request if it is still owned by the
+  * controller.  NOTE(review): this relies on local_irq_disable() rather
+  * than taking dev->lock (see the commented-out spin_lock calls) —
+  * presumably to avoid a lock-order issue; confirm this is safe on SMP.
+  */
+ static int ep_aio_cancel(struct kiocb *iocb)
+ {
+       struct kiocb_priv       *priv = iocb->private;
+       struct ep_data          *epdata;
+       int                     value;
+       local_irq_disable();
+       epdata = priv->epdata;
+       // spin_lock(&epdata->dev->lock);
+       /* both pointers are NULLed by ep_aio_complete(), so a completed
+        * iocb can no longer be cancelled
+        */
+       if (likely(epdata && epdata->ep && priv->req))
+               value = usb_ep_dequeue (epdata->ep, priv->req);
+       else
+               value = -EINVAL;
+       // spin_unlock(&epdata->dev->lock);
+       local_irq_enable();
+       return value;
+ }
+ /*
+  * Scatter priv->buf (priv->actual bytes) back into the user iovecs.
+  * Returns the number of bytes copied; returns -EFAULT only if the very
+  * first copy faults — a later fault silently truncates the result.
+  */
+ static ssize_t ep_copy_to_user(struct kiocb_priv *priv)
+ {
+       ssize_t                 len, total;
+       void                    *to_copy;
+       int                     i;
+       /* copy stuff into user buffers */
+       total = priv->actual;
+       len = 0;
+       to_copy = priv->buf;
+       for (i=0; i < priv->nr_segs; i++) {
+               /* last segment may be partially filled */
+               ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total);
+               if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) {
+                       if (len == 0)
+                               len = -EFAULT;
+                       break;
+               }
+               total -= this;
+               len += this;
+               to_copy += this;
+               if (total == 0)
+                       break;
+       }
+       return len;
+ }
+ /*
+  * Deferred copy-up for aio reads: runs in process context so it can
+  * borrow the submitter's mm and copy_to_user() into its address space,
+  * then completes the iocb and frees the per-iocb state.
+  */
+ static void ep_user_copy_worker(struct work_struct *work)
+ {
+       struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
+       struct mm_struct *mm = priv->mm;
+       struct kiocb *iocb = priv->iocb;
+       size_t ret;
+       use_mm(mm);
+       ret = ep_copy_to_user(priv);
+       unuse_mm(mm);
+       /* completing the iocb can drop the ctx and mm, don't touch mm after */
+       aio_complete(iocb, ret, ret);
+       kfree(priv->buf);
+       kfree(priv);
+ }
+ /*
+  * Controller-side completion for aio requests.  Writes (iv == NULL)
+  * and empty reads complete the iocb immediately; reads with data defer
+  * the user-space copy to ep_user_copy_worker(), which takes ownership
+  * of req->buf via priv->buf.  Drops the reference ep_aio_rwtail() took.
+  */
+ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
+ {
+       struct kiocb            *iocb = req->context;
+       struct kiocb_priv       *priv = iocb->private;
+       struct ep_data          *epdata = priv->epdata;
+       /* lock against disconnect (and ideally, cancel) */
+       spin_lock(&epdata->dev->lock);
+       /* from here on the iocb can no longer be cancelled */
+       priv->req = NULL;
+       priv->epdata = NULL;
+       /* if this was a write or a read returning no data then we
+        * don't need to copy anything to userspace, so we can
+        * complete the aio request immediately.
+        */
+       if (priv->iv == NULL || unlikely(req->actual == 0)) {
+               kfree(req->buf);
+               kfree(priv);
+               iocb->private = NULL;
+               /* aio_complete() reports bytes-transferred _and_ faults */
+               aio_complete(iocb, req->actual ? req->actual : req->status,
+                               req->status);
+       } else {
+               /* ep_copy_to_user() won't report both; we hide some faults */
+               if (unlikely(0 != req->status))
+                       DBG(epdata->dev, "%s fault %d len %d\n",
+                               ep->name, req->status, req->actual);
+               priv->buf = req->buf;
+               priv->actual = req->actual;
+               schedule_work(&priv->work);
+       }
+       spin_unlock(&epdata->dev->lock);
+       usb_ep_free_request(ep, req);
+       put_ep(epdata);
+ }
+ /*
+  * Common tail of the aio read/write paths: wrap the bounce buffer in a
+  * kiocb_priv, allocate a usb_request, and queue it.  On success returns
+  * -EIOCBQUEUED (the iocb completes later via ep_aio_complete()); on any
+  * failure the bounce buffer @buf is freed here.  For reads @iv/@nr_segs
+  * describe where the completion path copies data back; writes pass NULL/0.
+  */
+ static ssize_t
+ ep_aio_rwtail(
+       struct kiocb    *iocb,
+       char            *buf,
+       size_t          len,
+       struct ep_data  *epdata,
+       const struct iovec *iv,
+       unsigned long   nr_segs
+ )
+ {
+       struct kiocb_priv       *priv;
+       struct usb_request      *req;
+       ssize_t                 value;
+       priv = kmalloc(sizeof *priv, GFP_KERNEL);
+       if (!priv) {
+               value = -ENOMEM;
+ fail:
+               kfree(buf);
+               return value;
+       }
+       iocb->private = priv;
+       priv->iocb = iocb;
+       priv->iv = iv;
+       priv->nr_segs = nr_segs;
+       INIT_WORK(&priv->work, ep_user_copy_worker);
+       /* on success this returns with epdata->lock held */
+       value = get_ready_ep(iocb->ki_filp->f_flags, epdata);
+       if (unlikely(value < 0)) {
+               /* NOTE(review): iocb->private still points at the freed
+                * priv here — presumably callers never touch it after a
+                * synchronous error return; confirm
+                */
+               kfree(priv);
+               goto fail;
+       }
+       kiocb_set_cancel_fn(iocb, ep_aio_cancel);
+       /* reference dropped by ep_aio_complete() */
+       get_ep(epdata);
+       priv->epdata = epdata;
+       priv->actual = 0;
+       priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */
+       /* each kiocb is coupled to one usb_request, but we can't
+        * allocate or submit those if the host disconnected.
+        */
+       spin_lock_irq(&epdata->dev->lock);
+       if (likely(epdata->ep)) {
+               req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
+               if (likely(req)) {
+                       priv->req = req;
+                       req->buf = buf;
+                       req->length = len;
+                       req->complete = ep_aio_complete;
+                       req->context = iocb;
+                       value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
+                       if (unlikely(0 != value))
+                               usb_ep_free_request(epdata->ep, req);
+               } else
+                       value = -EAGAIN;
+       } else
+               value = -ENODEV;
+       spin_unlock_irq(&epdata->dev->lock);
+       mutex_unlock(&epdata->lock);
+       if (unlikely(value)) {
+               kfree(priv);
+               put_ep(epdata);
+       } else
+               value = -EIOCBQUEUED;
+       return value;
+ }
+ /*
+  * aio read: queue one OUT transfer; the completion path copies the
+  * data up to the user iovecs.  Reading an IN endpoint is rejected.
+  */
+ static ssize_t
+ ep_aio_read(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t o)
+ {
+       struct ep_data  *epdata = iocb->ki_filp->private_data;
+       char            *buf;
+
+       if (usb_endpoint_dir_in(&epdata->desc))
+               return -EINVAL;
+
+       /* bounce buffer sized for the whole request */
+       buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs);
+ }
+ /*
+  * aio write: gather all user iovecs into one kernel bounce buffer and
+  * queue a single IN transfer.  Writing an OUT endpoint is rejected.
+  */
+ static ssize_t
+ ep_aio_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t o)
+ {
+       struct ep_data  *epdata = iocb->ki_filp->private_data;
+       char            *buf;
+       size_t          total = 0;
+       unsigned long   seg;
+
+       if (!usb_endpoint_dir_in(&epdata->desc))
+               return -EINVAL;
+
+       buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       for (seg = 0; seg < nr_segs; seg++) {
+               if (copy_from_user(buf + total, iov[seg].iov_base,
+                               iov[seg].iov_len)) {
+                       kfree(buf);
+                       return -EFAULT;
+               }
+               total += iov[seg].iov_len;
+       }
+       return ep_aio_rwtail(iocb, buf, total, epdata, NULL, 0);
+ }
+ /*----------------------------------------------------------------------*/
+ /* used after endpoint configuration */
+ /* ep_config() swaps fd->f_op to this table once descriptors are accepted */
+ static const struct file_operations ep_io_operations = {
+       .owner =        THIS_MODULE,
+       .llseek =       no_llseek,
+       .read =         ep_read,
+       .write =        ep_write,
+       .unlocked_ioctl = ep_ioctl,
+       .release =      ep_release,
+       .aio_read =     ep_aio_read,
+       .aio_write =    ep_aio_write,
+ };
+ /* ENDPOINT INITIALIZATION
+  *
+  *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
+  *     status = write (fd, descriptors, sizeof descriptors)
+  *
+  * That write establishes the endpoint configuration, configuring
+  * the controller to process bulk, interrupt, or isochronous transfers
+  * at the right maxpacket size, and so on.
+  *
+  * The descriptors are message type 1, identified by a host order u32
+  * at the beginning of what's written.  Descriptor order is: full/low
+  * speed descriptor, then optional high speed descriptor.
+  */
+ static ssize_t
+ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct ep_data          *data = fd->private_data;
+       struct usb_ep           *ep;
+       u32                     tag;
+       /* length keeps the original write size; len is consumed below */
+       int                     value, length = len;
+       value = mutex_lock_interruptible(&data->lock);
+       if (value < 0)
+               return value;
+       /* only a freshly opened (ep_open) endpoint may be configured */
+       if (data->state != STATE_EP_READY) {
+               value = -EL2HLT;
+               goto fail;
+       }
+       value = len;
+       if (len < USB_DT_ENDPOINT_SIZE + 4)
+               goto fail0;
+       /* we might need to change message format someday */
+       if (copy_from_user (&tag, buf, 4)) {
+               goto fail1;
+       }
+       if (tag != 1) {
+               DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
+               goto fail0;
+       }
+       buf += 4;
+       len -= 4;
+       /* NOTE:  audio endpoint extensions not accepted here;
+        * just don't include the extra bytes.
+        */
+       /* full/low speed descriptor, then high speed */
+       if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) {
+               goto fail1;
+       }
+       if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
+                       || data->desc.bDescriptorType != USB_DT_ENDPOINT)
+               goto fail0;
+       if (len != USB_DT_ENDPOINT_SIZE) {
+               /* high speed descriptor is optional, but nothing else
+                * may follow the full/low speed one
+                */
+               if (len != 2 * USB_DT_ENDPOINT_SIZE)
+                       goto fail0;
+               if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
+                                       USB_DT_ENDPOINT_SIZE)) {
+                       goto fail1;
+               }
+               if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
+                               || data->hs_desc.bDescriptorType
+                                       != USB_DT_ENDPOINT) {
+                       DBG(data->dev, "config %s, bad hs length or type\n",
+                                       data->name);
+                       goto fail0;
+               }
+       }
+       /* descriptors parsed; now try to enable the hardware endpoint
+        * at the speed the gadget is currently running
+        */
+       spin_lock_irq (&data->dev->lock);
+       if (data->dev->state == STATE_DEV_UNBOUND) {
+               value = -ENOENT;
+               goto gone;
+       } else if ((ep = data->ep) == NULL) {
+               value = -ENODEV;
+               goto gone;
+       }
+       switch (data->dev->gadget->speed) {
+       case USB_SPEED_LOW:
+       case USB_SPEED_FULL:
+               ep->desc = &data->desc;
+               value = usb_ep_enable(ep);
+               if (value == 0)
+                       data->state = STATE_EP_ENABLED;
+               break;
+       case USB_SPEED_HIGH:
+               /* fails if caller didn't provide that descriptor... */
+               ep->desc = &data->hs_desc;
+               value = usb_ep_enable(ep);
+               if (value == 0)
+                       data->state = STATE_EP_ENABLED;
+               break;
+       default:
+               DBG(data->dev, "unconnected, %s init abandoned\n",
+                               data->name);
+               value = -EINVAL;
+       }
+       if (value == 0) {
+               /* switch the fd over to the normal i/o fops and report
+                * the full number of bytes originally written
+                */
+               fd->f_op = &ep_io_operations;
+               value = length;
+       }
+ gone:
+       spin_unlock_irq (&data->dev->lock);
+       if (value < 0) {
+ fail:
+               /* invalidate the half-written descriptors */
+               data->desc.bDescriptorType = 0;
+               data->hs_desc.bDescriptorType = 0;
+       }
+       mutex_unlock(&data->lock);
+       return value;
+ fail0:
+       value = -EINVAL;
+       goto fail;
+ fail1:
+       value = -EFAULT;
+       goto fail;
+ }
+ /*
+  * open(): claim a not-yet-configured endpoint file.  Only one opener
+  * may own it at a time (-EBUSY otherwise); an unbound gadget yields
+  * -ENOENT.  Success moves the endpoint to STATE_EP_READY.
+  */
+ static int
+ ep_open (struct inode *inode, struct file *fd)
+ {
+       struct ep_data          *data = inode->i_private;
+       int                     ret = -EBUSY;
+
+       if (mutex_lock_interruptible(&data->lock) != 0)
+               return -EINTR;
+       spin_lock_irq (&data->dev->lock);
+       if (data->dev->state == STATE_DEV_UNBOUND) {
+               ret = -ENOENT;
+       } else if (data->state == STATE_EP_DISABLED) {
+               ret = 0;
+               data->state = STATE_EP_READY;
+               /* reference released in ep_release() */
+               get_ep (data);
+               fd->private_data = data;
+               VDEBUG (data->dev, "%s ready\n", data->name);
+       } else {
+               DBG (data->dev, "%s state %d\n",
+                       data->name, data->state);
+       }
+       spin_unlock_irq (&data->dev->lock);
+       mutex_unlock(&data->lock);
+       return ret;
+ }
+ /* used before endpoint configuration */
+ /* the first successful ep_config() write replaces these with ep_io_operations */
+ static const struct file_operations ep_config_operations = {
+       .llseek =       no_llseek,
+       .open =         ep_open,
+       .write =        ep_config,
+       .release =      ep_release,
+ };
+ /*----------------------------------------------------------------------*/
+ /* EP0 IMPLEMENTATION can be partly in userspace.
+  *
+  * Drivers that use this facility receive various events, including
+  * control requests the kernel doesn't handle.  Drivers that don't
+  * use this facility may be too simple-minded for real applications.
+  */
+ /* ep0 has something for its reader: wake sleepers and signal async users */
+ static inline void ep0_readable (struct dev_data *dev)
+ {
+       wake_up(&dev->wait);
+       kill_fasync(&dev->fasync, SIGIO, POLL_IN);
+ }
+ /*
+  * Return ep0's request to its quiescent state: free any oversize
+  * buffer and point req->buf back at the scratch rbuf, restore the
+  * default completion handler, and clear the pending-OUT-data flag.
+  */
+ static void clean_req (struct usb_ep *ep, struct usb_request *req)
+ {
+       struct dev_data *dev = ep->driver_data;
+
+       if (req->buf != dev->rbuf) {
+               /* setup_req() kmalloc'd a larger buffer */
+               kfree(req->buf);
+               req->buf = dev->rbuf;
+       }
+       req->complete = epio_complete;
+       dev->setup_out_ready = 0;
+ }
+ /*
+  * Completion handler installed by setup_req() for ep0 transfers.
+  * For control OUT, successful data must survive until ep0_read()
+  * collects it (free stays 0); errors and control IN are cleaned up
+  * immediately.  Either way the reader is woken for the OUT case.
+  */
+ static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
+ {
+       struct dev_data         *dev = ep->driver_data;
+       unsigned long           flags;
+       int                     free = 1;
+       /* for control OUT, data must still get to userspace */
+       spin_lock_irqsave(&dev->lock, flags);
+       if (!dev->setup_in) {
+               dev->setup_out_error = (req->status != 0);
+               if (!dev->setup_out_error)
+                       free = 0;
+               dev->setup_out_ready = 1;
+               ep0_readable (dev);
+       }
+       /* clean up as appropriate */
+       /* &dev->rbuf == dev->rbuf here (array address); the test just
+        * skips clean_req() when req already uses the scratch buffer
+        */
+       if (free && req->buf != &dev->rbuf)
+               clean_req (ep, req);
+       req->complete = epio_complete;
+       spin_unlock_irqrestore(&dev->lock, flags);
+ }
+ /*
+  * Prepare ep0's request for a transfer of @len bytes: use the scratch
+  * rbuf when it fits, else kmalloc a larger buffer (freed later by
+  * clean_req()).  Called under dev->lock, hence GFP_ATOMIC.  Fails with
+  * -EBUSY while uncollected OUT data is pending.
+  * NOTE(review): the NULL test below relies on req->buf being dev->rbuf
+  * (never NULL) whenever len fits in rbuf — clean_req() appears to
+  * guarantee that; confirm against the full file.
+  */
+ static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
+ {
+       struct dev_data *dev = ep->driver_data;
+       if (dev->setup_out_ready) {
+               DBG (dev, "ep0 request busy!\n");
+               return -EBUSY;
+       }
+       if (len > sizeof (dev->rbuf))
+               req->buf = kmalloc(len, GFP_ATOMIC);
+       if (req->buf == NULL) {
+               /* kmalloc failed: fall back to the scratch buffer */
+               req->buf = dev->rbuf;
+               return -ENOMEM;
+       }
+       req->complete = ep0_complete;
+       req->length = len;
+       req->zero = 0;
+       return 0;
+ }
+ /*
+  * read() on the ep0 file has two roles, selected by device state:
+  * during STATE_DEV_SETUP it services the control transfer's DATA/STATUS
+  * stage (zero-length read acks, e.g. SET_CONFIGURATION; nonzero read
+  * collects control-OUT data; reading an IN-direction setup stalls ep0);
+  * otherwise it returns queued usb_gadgetfs_event records.
+  */
+ static ssize_t
+ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct dev_data                 *dev = fd->private_data;
+       ssize_t                         retval;
+       enum ep0_state                  state;
+       spin_lock_irq (&dev->lock);
+       /* report fd mode change before acting on it */
+       if (dev->setup_abort) {
+               dev->setup_abort = 0;
+               retval = -EIDRM;
+               goto done;
+       }
+       /* control DATA stage */
+       if ((state = dev->state) == STATE_DEV_SETUP) {
+               if (dev->setup_in) {            /* stall IN */
+                       VDEBUG(dev, "ep0in stall\n");
+                       (void) usb_ep_set_halt (dev->gadget->ep0);
+                       retval = -EL2HLT;
+                       dev->state = STATE_DEV_CONNECTED;
+               } else if (len == 0) {          /* ack SET_CONFIGURATION etc */
+                       struct usb_ep           *ep = dev->gadget->ep0;
+                       struct usb_request      *req = dev->req;
+                       /* queue a zero-length status-stage response */
+                       if ((retval = setup_req (ep, req, 0)) == 0)
+                               retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+                       dev->state = STATE_DEV_CONNECTED;
+                       /* assume that was SET_CONFIGURATION */
+                       if (dev->current_config) {
+                               unsigned power;
+                               if (gadget_is_dualspeed(dev->gadget)
+                                               && (dev->gadget->speed
+                                                       == USB_SPEED_HIGH))
+                                       power = dev->hs_config->bMaxPower;
+                               else
+                                       power = dev->config->bMaxPower;
+                               /* bMaxPower is in 2mA units */
+                               usb_gadget_vbus_draw(dev->gadget, 2 * power);
+                       }
+               } else {                        /* collect OUT data */
+                       if ((fd->f_flags & O_NONBLOCK) != 0
+                                       && !dev->setup_out_ready) {
+                               retval = -EAGAIN;
+                               goto done;
+                       }
+                       spin_unlock_irq (&dev->lock);
+                       /* ep0_complete() sets setup_out_ready */
+                       retval = wait_event_interruptible (dev->wait,
+                                       dev->setup_out_ready != 0);
+                       /* FIXME state could change from under us */
+                       spin_lock_irq (&dev->lock);
+                       if (retval)
+                               goto done;
+                       if (dev->state != STATE_DEV_SETUP) {
+                               retval = -ECANCELED;
+                               goto done;
+                       }
+                       dev->state = STATE_DEV_CONNECTED;
+                       if (dev->setup_out_error)
+                               retval = -EIO;
+                       else {
+                               len = min (len, (size_t)dev->req->actual);
+ // FIXME don't call this with the spinlock held ...
+                               if (copy_to_user (buf, dev->req->buf, len))
+                                       retval = -EFAULT;
+                               else
+                                       retval = len;
+                               clean_req (dev->gadget->ep0, dev->req);
+                               /* NOTE userspace can't yet choose to stall */
+                       }
+               }
+               goto done;
+       }
+       /* else normal: return event data */
+       if (len < sizeof dev->event [0]) {
+               retval = -EINVAL;
+               goto done;
+       }
+       /* only whole events are returned */
+       len -= len % sizeof (struct usb_gadgetfs_event);
+       dev->usermode_setup = 1;
+ scan:
+       /* return queued events right away */
+       if (dev->ev_next != 0) {
+               unsigned                i, n;
+               n = len / sizeof (struct usb_gadgetfs_event);
+               if (dev->ev_next < n)
+                       n = dev->ev_next;
+               /* ep0 i/o has special semantics during STATE_DEV_SETUP */
+               /* stop delivery after a SETUP event so the next read
+                * services the control transfer it announced
+                */
+               for (i = 0; i < n; i++) {
+                       if (dev->event [i].type == GADGETFS_SETUP) {
+                               dev->state = STATE_DEV_SETUP;
+                               n = i + 1;
+                               break;
+                       }
+               }
+               spin_unlock_irq (&dev->lock);
+               len = n * sizeof (struct usb_gadgetfs_event);
+               if (copy_to_user (buf, &dev->event, len))
+                       retval = -EFAULT;
+               else
+                       retval = len;
+               if (len > 0) {
+                       /* NOTE this doesn't guard against broken drivers;
+                        * concurrent ep0 readers may lose events.
+                        */
+                       spin_lock_irq (&dev->lock);
+                       /* shift any undelivered events to the front */
+                       if (dev->ev_next > n) {
+                               memmove(&dev->event[0], &dev->event[n],
+                                       sizeof (struct usb_gadgetfs_event)
+                                               * (dev->ev_next - n));
+                       }
+                       dev->ev_next -= n;
+                       spin_unlock_irq (&dev->lock);
+               }
+               return retval;
+       }
+       if (fd->f_flags & O_NONBLOCK) {
+               retval = -EAGAIN;
+               goto done;
+       }
+       switch (state) {
+       default:
+               DBG (dev, "fail %s, state %d\n", __func__, state);
+               retval = -ESRCH;
+               break;
+       case STATE_DEV_UNCONNECTED:
+       case STATE_DEV_CONNECTED:
+               spin_unlock_irq (&dev->lock);
+               DBG (dev, "%s wait\n", __func__);
+               /* wait for events */
+               retval = wait_event_interruptible (dev->wait,
+                               dev->ev_next != 0);
+               if (retval < 0)
+                       return retval;
+               spin_lock_irq (&dev->lock);
+               goto scan;
+       }
+ done:
+       spin_unlock_irq (&dev->lock);
+       return retval;
+ }
+ /*
+  * Reserve the next slot in dev->event[] for an event of @type and
+  * return it zeroed (with type filled in).  Caller holds dev->lock.
+  * DISCONNECT/CONNECT flush the whole queue; SETUP/SUSPEND first remove
+  * any stale event of the same type so each appears at most once.
+  */
+ static struct usb_gadgetfs_event *
+ next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
+ {
+       struct usb_gadgetfs_event       *event;
+       unsigned                        i;
+       switch (type) {
+       /* these events purge the queue */
+       case GADGETFS_DISCONNECT:
+               if (dev->state == STATE_DEV_SETUP)
+                       dev->setup_abort = 1;
+               // FALL THROUGH
+       case GADGETFS_CONNECT:
+               dev->ev_next = 0;
+               break;
+       case GADGETFS_SETUP:            /* previous request timed out */
+       case GADGETFS_SUSPEND:          /* same effect */
+               /* these events can't be repeated */
+               for (i = 0; i != dev->ev_next; i++) {
+                       if (dev->event [i].type != type)
+                               continue;
+                       DBG(dev, "discard old event[%d] %d\n", i, type);
+                       dev->ev_next--;
+                       /* discarding the last entry needs no copying */
+                       if (i == dev->ev_next)
+                               break;
+                       /* indices start at zero, for simplicity */
+                       memmove (&dev->event [i], &dev->event [i + 1],
+                               sizeof (struct usb_gadgetfs_event)
+                                       * (dev->ev_next - i));
+               }
+               break;
+       default:
+               BUG ();
+       }
+       VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
+       event = &dev->event [dev->ev_next++];
+       BUG_ON (dev->ev_next > N_EVENT);
+       memset (event, 0, sizeof *event);
+       event->type = type;
+       return event;
+ }
+ /* Userspace write() on the ep0 file.  During an IN control transfer it
+  * supplies the DATA stage (and implicitly the status stage); for an OUT
+  * transfer it may stall instead.  Returns bytes queued or negative errno
+  * (-EIDRM after an aborted setup, -ESRCH in the wrong state).
+  */
+ static ssize_t
+ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct dev_data         *dev = fd->private_data;
+       ssize_t                 retval = -ESRCH;
+       spin_lock_irq (&dev->lock);
+       /* report fd mode change before acting on it */
+       if (dev->setup_abort) {
+               dev->setup_abort = 0;
+               retval = -EIDRM;
+       /* data and/or status stage for control request */
+       } else if (dev->state == STATE_DEV_SETUP) {
+               /* IN DATA+STATUS caller makes len <= wLength */
+               if (dev->setup_in) {
+                       retval = setup_req (dev->gadget->ep0, dev->req, len);
+                       if (retval == 0) {
+                               dev->state = STATE_DEV_CONNECTED;
+                               /* copy_from_user() may sleep: drop the lock */
+                               spin_unlock_irq (&dev->lock);
+                               if (copy_from_user (dev->req->buf, buf, len))
+                                       retval = -EFAULT;
+                               else {
+                                       /* short write: terminate with ZLP */
+                                       if (len < dev->setup_wLength)
+                                               dev->req->zero = 1;
+                                       retval = usb_ep_queue (
+                                               dev->gadget->ep0, dev->req,
+                                               GFP_KERNEL);
+                               }
+                               if (retval < 0) {
+                                       spin_lock_irq (&dev->lock);
+                                       clean_req (dev->gadget->ep0, dev->req);
+                                       spin_unlock_irq (&dev->lock);
+                               } else
+                                       retval = len;
+                               return retval;
+                       }
+               /* can stall some OUT transfers */
+               } else if (dev->setup_can_stall) {
+                       VDEBUG(dev, "ep0out stall\n");
+                       (void) usb_ep_set_halt (dev->gadget->ep0);
+                       retval = -EL2HLT;
+                       dev->state = STATE_DEV_CONNECTED;
+               } else {
+                       DBG(dev, "bogus ep0out stall!\n");
+               }
+       } else
+               DBG (dev, "fail %s, state %d\n", __func__, dev->state);
+       spin_unlock_irq (&dev->lock);
+       return retval;
+ }
+ /* fasync hook for ep0: arm/disarm SIGIO delivery for queued events. */
+ static int
+ ep0_fasync (int f, struct file *fd, int on)
+ {
+       struct dev_data         *dev = fd->private_data;
+       // caller must F_SETOWN before signal delivery happens
+       VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
+       return fasync_helper (f, fd, on, &dev->fasync);
+ }
+ static struct usb_gadget_driver gadgetfs_driver;
+ /* Last close of the ep0/$CHIP file: tears the whole gadget down by
+  * unregistering the gadget driver, frees the descriptor buffer, marks
+  * the device DISABLED so it can be reopened, and drops the reference
+  * taken at open time.
+  */
+ static int
+ dev_release (struct inode *inode, struct file *fd)
+ {
+       struct dev_data         *dev = fd->private_data;
+       /* closing ep0 === shutdown all */
+       usb_gadget_unregister_driver (&gadgetfs_driver);
+       /* at this point "good" hardware has disconnected the
+        * device from USB; the host won't see it any more.
+        * alternatively, all host requests will time out.
+        */
+       kfree (dev->buf);
+       dev->buf = NULL;
++      /* other endpoints were all decoupled from this device */
++      spin_lock_irq(&dev->lock);
++      dev->state = STATE_DEV_DISABLED;
++      spin_unlock_irq(&dev->lock);
++
++      put_dev (dev);
+       return 0;
+ }
+ /* poll() on ep0: POLLOUT while a delegated SETUP awaits a write (or can
+  * be stalled), POLLIN when events are queued, POLLHUP after an aborted
+  * setup (reported once, then cleared).
+  */
+ static unsigned int
+ ep0_poll (struct file *fd, poll_table *wait)
+ {
+        struct dev_data         *dev = fd->private_data;
+        int                     mask = 0;
+        poll_wait(fd, &dev->wait, wait);
+        spin_lock_irq (&dev->lock);
+        /* report fd mode change before acting on it */
+        if (dev->setup_abort) {
+                dev->setup_abort = 0;
+                mask = POLLHUP;
+                goto out;
+        }
+        if (dev->state == STATE_DEV_SETUP) {
+                if (dev->setup_in || dev->setup_can_stall)
+                        mask = POLLOUT;
+        } else {
+                if (dev->ev_next != 0)
+                        mask = POLLIN;
+        }
+ out:
+        spin_unlock_irq(&dev->lock);
+        return mask;
+ }
+ /* Pass ioctls straight through to the UDC driver, if it has a hook;
+  * -ENOTTY otherwise.
+  */
+ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
+ {
+       struct dev_data         *dev = fd->private_data;
+       struct usb_gadget       *gadget = dev->gadget;
+       long ret = -ENOTTY;
+       if (gadget->ops->ioctl)
+               ret = gadget->ops->ioctl (gadget, code, value);
+       return ret;
+ }
+ /* used after device configuration */
+ /* fops installed on the $CHIP file by dev_config() once descriptors
+  * have been written; event I/O replaces the one-shot config write. */
+ static const struct file_operations ep0_io_operations = {
+       .owner =        THIS_MODULE,
+       .llseek =       no_llseek,
+       .read =         ep0_read,
+       .write =        ep0_write,
+       .fasync =       ep0_fasync,
+       .poll =         ep0_poll,
+       .unlocked_ioctl =       dev_ioctl,
+       .release =      dev_release,
+ };
+ /*----------------------------------------------------------------------*/
+ /* The in-kernel gadget driver handles most ep0 issues, in particular
+  * enumerating the single configuration (as provided from user space).
+  *
+  * Unrecognized ep0 requests may be handled in user space.
+  */
+ /* Build a DEVICE_QUALIFIER descriptor from the user-supplied device
+  * descriptor and leave it in dev->rbuf, ready to answer GET_DESCRIPTOR.
+  */
+ static void make_qualifier (struct dev_data *dev)
+ {
+       struct usb_qualifier_descriptor         qual;
+       struct usb_device_descriptor            *desc;
+       qual.bLength = sizeof qual;
+       qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+       qual.bcdUSB = cpu_to_le16 (0x0200);
+       desc = dev->dev;
+       qual.bDeviceClass = desc->bDeviceClass;
+       qual.bDeviceSubClass = desc->bDeviceSubClass;
+       qual.bDeviceProtocol = desc->bDeviceProtocol;
+       /* assumes ep0 uses the same value for both speeds ... */
+       qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
+       qual.bNumConfigurations = 1;
+       qual.bRESERVED = 0;
+       memcpy (dev->rbuf, &qual, sizeof qual);
+ }
+ /* Point dev->req->buf at the config descriptor matching @type and the
+  * current speed (OTHER_SPEED_CONFIG selects the opposite speed's copy),
+  * patch the descriptor type byte in place, and return wTotalLength.
+  * Only config index 0 exists; others yield -EINVAL.
+  */
+ static int
+ config_buf (struct dev_data *dev, u8 type, unsigned index)
+ {
+       int             len;
+       int             hs = 0;
+       /* only one configuration */
+       if (index > 0)
+               return -EINVAL;
+       if (gadget_is_dualspeed(dev->gadget)) {
+               hs = (dev->gadget->speed == USB_SPEED_HIGH);
+               if (type == USB_DT_OTHER_SPEED_CONFIG)
+                       hs = !hs;
+       }
+       if (hs) {
+               dev->req->buf = dev->hs_config;
+               len = le16_to_cpu(dev->hs_config->wTotalLength);
+       } else {
+               dev->req->buf = dev->config;
+               len = le16_to_cpu(dev->config->wTotalLength);
+       }
+       ((u8 *)dev->req->buf) [1] = type;
+       return len;
+ }
+ /* ep0 SETUP dispatcher, called by the UDC in interrupt context with no
+  * lock held.  Handles GET_DESCRIPTOR / SET_CONFIGURATION (and, except on
+  * PXA25x, GET_CONFIGURATION) in-kernel; everything else is delegated to
+  * userspace as a GADGETFS_SETUP event when an ep0 reader exists, else
+  * stalled.  Returns 0/queued length on success, negative to stall.
+  */
+ static int
+ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ {
+       struct dev_data                 *dev = get_gadget_data (gadget);
+       struct usb_request              *req = dev->req;
+       int                             value = -EOPNOTSUPP;
+       struct usb_gadgetfs_event       *event;
+       u16                             w_value = le16_to_cpu(ctrl->wValue);
+       u16                             w_length = le16_to_cpu(ctrl->wLength);
+       spin_lock (&dev->lock);
+       dev->setup_abort = 0;
+       if (dev->state == STATE_DEV_UNCONNECTED) {
+               if (gadget_is_dualspeed(gadget)
+                               && gadget->speed == USB_SPEED_HIGH
+                               && dev->hs_config == NULL) {
+                       spin_unlock(&dev->lock);
+                       ERROR (dev, "no high speed config??\n");
+                       return -EINVAL;
+               }
+               /* first SETUP after (re)connect: tell userspace */
+               dev->state = STATE_DEV_CONNECTED;
+               INFO (dev, "connected\n");
+               event = next_event (dev, GADGETFS_CONNECT);
+               event->u.speed = gadget->speed;
+               ep0_readable (dev);
+       /* host may have given up waiting for response.  we can miss control
+        * requests handled lower down (device/endpoint status and features);
+        * then ep0_{read,write} will report the wrong status. controller
+        * driver will have aborted pending i/o.
+        */
+       } else if (dev->state == STATE_DEV_SETUP)
+               dev->setup_abort = 1;
+       req->buf = dev->rbuf;
+       req->context = NULL;
+       value = -EOPNOTSUPP;
+       switch (ctrl->bRequest) {
+       case USB_REQ_GET_DESCRIPTOR:
+               if (ctrl->bRequestType != USB_DIR_IN)
+                       goto unrecognized;
+               switch (w_value >> 8) {
+               case USB_DT_DEVICE:
+                       value = min (w_length, (u16) sizeof *dev->dev);
+                       dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
+                       req->buf = dev->dev;
+                       break;
+               case USB_DT_DEVICE_QUALIFIER:
+                       if (!dev->hs_config)
+                               break;
+                       value = min (w_length, (u16)
+                               sizeof (struct usb_qualifier_descriptor));
+                       make_qualifier (dev);
+                       break;
+               case USB_DT_OTHER_SPEED_CONFIG:
+                       // FALLTHROUGH
+               case USB_DT_CONFIG:
+                       value = config_buf (dev,
+                                       w_value >> 8,
+                                       w_value & 0xff);
+                       if (value >= 0)
+                               value = min (w_length, (u16) value);
+                       break;
+               case USB_DT_STRING:
+                       goto unrecognized;
+               default:                // all others are errors
+                       break;
+               }
+               break;
+       /* currently one config, two speeds */
+       case USB_REQ_SET_CONFIGURATION:
+               if (ctrl->bRequestType != 0)
+                       goto unrecognized;
+               if (0 == (u8) w_value) {
+                       value = 0;
+                       dev->current_config = 0;
+                       usb_gadget_vbus_draw(gadget, 8 /* mA */ );
+                       // user mode expected to disable endpoints
+               } else {
+                       u8      config, power;
+                       if (gadget_is_dualspeed(gadget)
+                                       && gadget->speed == USB_SPEED_HIGH) {
+                               config = dev->hs_config->bConfigurationValue;
+                               power = dev->hs_config->bMaxPower;
+                       } else {
+                               config = dev->config->bConfigurationValue;
+                               power = dev->config->bMaxPower;
+                       }
+                       if (config == (u8) w_value) {
+                               value = 0;
+                               dev->current_config = config;
+                               /* bMaxPower is in 2 mA units */
+                               usb_gadget_vbus_draw(gadget, 2 * power);
+                       }
+               }
+               /* report SET_CONFIGURATION like any other control request,
+                * except that usermode may not stall this.  the next
+                * request mustn't be allowed start until this finishes:
+                * endpoints and threads set up, etc.
+                *
+                * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
+                * has bad/racey automagic that prevents synchronizing here.
+                * even kernel mode drivers often miss them.
+                */
+               if (value == 0) {
+                       INFO (dev, "configuration #%d\n", dev->current_config);
+                       usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
+                       if (dev->usermode_setup) {
+                               dev->setup_can_stall = 0;
+                               goto delegate;
+                       }
+               }
+               break;
+ #ifndef       CONFIG_USB_PXA25X
+       /* PXA automagically handles this request too */
+       case USB_REQ_GET_CONFIGURATION:
+               if (ctrl->bRequestType != 0x80)
+                       goto unrecognized;
+               *(u8 *)req->buf = dev->current_config;
+               value = min (w_length, (u16) 1);
+               break;
+ #endif
+       default:
+ unrecognized:
+               VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
+                       dev->usermode_setup ? "delegate" : "fail",
+                       ctrl->bRequestType, ctrl->bRequest,
+                       w_value, le16_to_cpu(ctrl->wIndex), w_length);
+               /* if there's an ep0 reader, don't stall */
+               if (dev->usermode_setup) {
+                       dev->setup_can_stall = 1;
+ delegate:
+                       dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
+                                               ? 1 : 0;
+                       dev->setup_wLength = w_length;
+                       dev->setup_out_ready = 0;
+                       dev->setup_out_error = 0;
+                       value = 0;
+                       /* read DATA stage for OUT right away */
+                       if (unlikely (!dev->setup_in && w_length)) {
+                               value = setup_req (gadget->ep0, dev->req,
+                                                       w_length);
+                               if (value < 0)
+                                       break;
+                               value = usb_ep_queue (gadget->ep0, dev->req,
+                                                       GFP_ATOMIC);
+                               if (value < 0) {
+                                       clean_req (gadget->ep0, dev->req);
+                                       break;
+                               }
+                               /* we can't currently stall these */
+                               dev->setup_can_stall = 0;
+                       }
+                       /* state changes when reader collects event */
+                       event = next_event (dev, GADGETFS_SETUP);
+                       event->u.setup = *ctrl;
+                       ep0_readable (dev);
+                       spin_unlock (&dev->lock);
+                       return 0;
+               }
+       }
+       /* proceed with data transfer and status phases? */
+       if (value >= 0 && dev->state != STATE_DEV_SETUP) {
+               req->length = value;
+               req->zero = value < w_length;
+               value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+               if (value < 0) {
+                       DBG (dev, "ep_queue --> %d\n", value);
+                       req->status = 0;
+               }
+       }
+       /* device stalls when value < 0 */
+       spin_unlock (&dev->lock);
+       return value;
+ }
+ /* Remove every per-endpoint file: unlink it from dev->epfiles, disable
+  * the hardware endpoint, free its request, wake any waiter, then drop
+  * the dentry.  The lock is released around the dcache work because
+  * i_mutex may sleep; dev->state keeps new users out meanwhile.
+  */
+ static void destroy_ep_files (struct dev_data *dev)
+ {
+       DBG (dev, "%s %d\n", __func__, dev->state);
+       /* dev->state must prevent interference */
+       spin_lock_irq (&dev->lock);
+       while (!list_empty(&dev->epfiles)) {
+               struct ep_data  *ep;
+               struct inode    *parent;
+               struct dentry   *dentry;
+               /* break link to FS */
+               ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
+               list_del_init (&ep->epfiles);
+               dentry = ep->dentry;
+               ep->dentry = NULL;
+               parent = dentry->d_parent->d_inode;
+               /* break link to controller */
+               if (ep->state == STATE_EP_ENABLED)
+                       (void) usb_ep_disable (ep->ep);
+               ep->state = STATE_EP_UNBOUND;
+               usb_ep_free_request (ep->ep, ep->req);
+               ep->ep = NULL;
+               wake_up (&ep->wait);
+               put_ep (ep);
+               spin_unlock_irq (&dev->lock);
+               /* break link to dcache */
+               mutex_lock (&parent->i_mutex);
+               d_delete (dentry);
+               dput (dentry);
+               mutex_unlock (&parent->i_mutex);
+               spin_lock_irq (&dev->lock);
+       }
+       spin_unlock_irq (&dev->lock);
+ }
+ static struct inode *
+ gadgetfs_create_file (struct super_block *sb, char const *name,
+               void *data, const struct file_operations *fops,
+               struct dentry **dentry_p);
+ /* Create one gadgetfs file per hardware endpoint: allocate its ep_data
+  * (refcounted, holding a dev reference), preallocate a request, and link
+  * it on dev->epfiles.  On any allocation failure, unwind the partially
+  * built entry and tear down the ones already created.  Returns 0 or
+  * -ENOMEM.
+  */
+ static int activate_ep_files (struct dev_data *dev)
+ {
+       struct usb_ep   *ep;
+       struct ep_data  *data;
+       gadget_for_each_ep (ep, dev->gadget) {
+               data = kzalloc(sizeof(*data), GFP_KERNEL);
+               if (!data)
+                       goto enomem0;
+               data->state = STATE_EP_DISABLED;
+               mutex_init(&data->lock);
+               init_waitqueue_head (&data->wait);
+               strncpy (data->name, ep->name, sizeof (data->name) - 1);
+               atomic_set (&data->count, 1);
+               data->dev = dev;
+               get_dev (dev);
+               data->ep = ep;
+               ep->driver_data = data;
+               data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
+               if (!data->req)
+                       goto enomem1;
+               data->inode = gadgetfs_create_file (dev->sb, data->name,
+                               data, &ep_config_operations,
+                               &data->dentry);
+               if (!data->inode)
+                       goto enomem2;
+               list_add_tail (&data->epfiles, &dev->epfiles);
+       }
+       return 0;
+ enomem2:
+       usb_ep_free_request (ep, data->req);
+ enomem1:
+       put_dev (dev);
+       kfree (data);
+ enomem0:
+       DBG (dev, "%s enomem\n", __func__);
+       destroy_ep_files (dev);
+       return -ENOMEM;
+ }
+ /* Gadget-driver unbind callback: mark the device UNBOUND, destroy the
+  * endpoint files, detach from the gadget, free the preallocated ep0
+  * request, and drop the reference taken in gadgetfs_bind().
+  */
+ static void
+ gadgetfs_unbind (struct usb_gadget *gadget)
+ {
+       struct dev_data         *dev = get_gadget_data (gadget);
+       DBG (dev, "%s\n", __func__);
+       spin_lock_irq (&dev->lock);
+       dev->state = STATE_DEV_UNBOUND;
+       spin_unlock_irq (&dev->lock);
+       destroy_ep_files (dev);
+       gadget->ep0->driver_data = NULL;
+       set_gadget_data (gadget, NULL);
+       /* we've already been disconnected ... no i/o is active */
+       if (dev->req)
+               usb_ep_free_request (gadget->ep0, dev->req);
+       DBG (dev, "%s done\n", __func__);
+       put_dev (dev);
+ }
+ static struct dev_data                *the_device;
+ /* Gadget-driver bind callback: attach the singleton dev_data to the
+  * controller whose name matches $CHIP, preallocate the ep0 request,
+  * create the per-endpoint files, and move to UNCONNECTED.  Takes a dev
+  * reference that gadgetfs_unbind() releases.
+  */
+ static int gadgetfs_bind(struct usb_gadget *gadget,
+               struct usb_gadget_driver *driver)
+ {
+       struct dev_data         *dev = the_device;
+       if (!dev)
+               return -ESRCH;
+       if (0 != strcmp (CHIP, gadget->name)) {
+               pr_err("%s expected %s controller not %s\n",
+                       shortname, CHIP, gadget->name);
+               return -ENODEV;
+       }
+       set_gadget_data (gadget, dev);
+       dev->gadget = gadget;
+       gadget->ep0->driver_data = dev;
+       /* preallocate control response and buffer */
+       dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
+       if (!dev->req)
+               goto enomem;
+       dev->req->context = NULL;
+       dev->req->complete = epio_complete;
+       if (activate_ep_files (dev) < 0)
+               goto enomem;
+       INFO (dev, "bound to %s driver\n", gadget->name);
+       spin_lock_irq(&dev->lock);
+       dev->state = STATE_DEV_UNCONNECTED;
+       spin_unlock_irq(&dev->lock);
+       get_dev (dev);
+       return 0;
+ enomem:
+       /* unbind cleans up everything set up so far */
+       gadgetfs_unbind (gadget);
+       return -ENOMEM;
+ }
+ /* Host disconnect callback (may run in irq context, hence irqsave):
+  * move back to UNCONNECTED and queue a DISCONNECT event, waking any
+  * ep0 reader.  A repeat disconnect is a no-op.
+  */
+ static void
+ gadgetfs_disconnect (struct usb_gadget *gadget)
+ {
+       struct dev_data         *dev = get_gadget_data (gadget);
+       unsigned long           flags;
+       spin_lock_irqsave (&dev->lock, flags);
+       if (dev->state == STATE_DEV_UNCONNECTED)
+               goto exit;
+       dev->state = STATE_DEV_UNCONNECTED;
+       INFO (dev, "disconnected\n");
+       next_event (dev, GADGETFS_DISCONNECT);
+       ep0_readable (dev);
+ exit:
+       spin_unlock_irqrestore (&dev->lock, flags);
+ }
+ /* Bus suspend callback: queue a SUSPEND event for userspace, but only
+  * from states where the device is live; otherwise ignore it.
+  */
+ static void
+ gadgetfs_suspend (struct usb_gadget *gadget)
+ {
+       struct dev_data         *dev = get_gadget_data (gadget);
+       INFO (dev, "suspended from state %d\n", dev->state);
+       spin_lock (&dev->lock);
+       switch (dev->state) {
+       case STATE_DEV_SETUP:           // VERY odd... host died??
+       case STATE_DEV_CONNECTED:
+       case STATE_DEV_UNCONNECTED:
+               next_event (dev, GADGETFS_SUSPEND);
+               ep0_readable (dev);
+               /* FALLTHROUGH */
+       default:
+               break;
+       }
+       spin_unlock (&dev->lock);
+ }
+ /* The real gadget driver, registered by dev_config() once userspace has
+  * supplied valid descriptors; max_speed is filled in at that point. */
+ static struct usb_gadget_driver gadgetfs_driver = {
+       .function       = (char *) driver_desc,
+       .bind           = gadgetfs_bind,
+       .unbind         = gadgetfs_unbind,
+       .setup          = gadgetfs_setup,
+       .disconnect     = gadgetfs_disconnect,
+       .suspend        = gadgetfs_suspend,
+       .driver = {
+               .name           = (char *) shortname,
+       },
+ };
+ /*----------------------------------------------------------------------*/
+ /* Throwaway driver used only to discover the controller's name: its
+  * bind() records gadget->name in CHIP and fails with -EISNAM so the
+  * core never keeps it registered.
+  */
+ static void gadgetfs_nop(struct usb_gadget *arg) { }
+ static int gadgetfs_probe(struct usb_gadget *gadget,
+               struct usb_gadget_driver *driver)
+ {
+       CHIP = gadget->name;
+       return -EISNAM;
+ }
+ static struct usb_gadget_driver probe_driver = {
+       .max_speed      = USB_SPEED_HIGH,
+       .bind           = gadgetfs_probe,
+       .unbind         = gadgetfs_nop,
+       .setup          = (void *)gadgetfs_nop,
+       .disconnect     = gadgetfs_nop,
+       .driver = {
+               .name           = "nop",
+       },
+ };
+ /* DEVICE INITIALIZATION
+  *
+  *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
+  *     status = write (fd, descriptors, sizeof descriptors)
+  *
+  * That write establishes the device configuration, so the kernel can
+  * bind to the controller ... guaranteeing it can handle enumeration
+  * at all necessary speeds.  Descriptor order is:
+  *
+  * . message tag (u32, host order) ... for now, must be zero; it
+  *    would change to support features like multi-config devices
+  * . full/low speed config ... all wTotalLength bytes (with interface,
+  *    class, altsetting, endpoint, and other descriptors)
+  * . high speed config ... all descriptors, for high speed operation;
+  *    this one's optional except for high-speed hardware
+  * . device descriptor
+  *
+  * Endpoints are not yet enabled. Drivers must wait until device
+  * configuration and interface altsetting changes create
+  * the need to configure (or unconfigure) them.
+  *
+  * After initialization, the device stays active for as long as that
+  * $CHIP file is open.  Events must then be read from that descriptor,
+  * such as configuration notifications.
+  */
+ /* Sanity-check a user-supplied config descriptor header: right type and
+  * size, nonzero value, ATT_ONE set, remote wakeup not requested.  Does
+  * not walk the embedded interface/endpoint descriptors (see FIXMEs).
+  */
+ static int is_valid_config (struct usb_config_descriptor *config)
+ {
+       return config->bDescriptorType == USB_DT_CONFIG
+               && config->bLength == USB_DT_CONFIG_SIZE
+               && config->bConfigurationValue != 0
+               && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
+               && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
+       /* FIXME if gadget->is_otg, _must_ include an otg descriptor */
+       /* FIXME check lengths: walk to end */
+ }
+ /* One-shot write() on the fresh $CHIP file: parses the userspace blob
+  * (u32 tag 0, full/low-speed config, optional high-speed config, device
+  * descriptor), keeps it in dev->buf with dev->config/hs_config/dev
+  * pointing into it, then registers gadgetfs_driver — which triggers
+  * gadgetfs_bind() and enumeration.  On success the file's fops are
+  * switched to ep0_io_operations for event I/O.
+  */
+ static ssize_t
+ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ {
+       struct dev_data         *dev = fd->private_data;
+       ssize_t                 value = len, length = len;
+       unsigned                total;
+       u32                     tag;
+       char                    *kbuf;
+       if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
+               return -EINVAL;
+       /* we might need to change message format someday */
+       if (copy_from_user (&tag, buf, 4))
+               return -EFAULT;
+       if (tag != 0)
+               return -EINVAL;
+       buf += 4;
+       length -= 4;
+       kbuf = memdup_user(buf, length);
+       if (IS_ERR(kbuf))
+               return PTR_ERR(kbuf);
+       spin_lock_irq (&dev->lock);
+       value = -EINVAL;
+       /* dev->buf non-NULL means descriptors were already written */
+       if (dev->buf)
+               goto fail;
+       dev->buf = kbuf;
+       /* full or low speed config */
+       dev->config = (void *) kbuf;
+       total = le16_to_cpu(dev->config->wTotalLength);
+       if (!is_valid_config (dev->config) || total >= length)
+               goto fail;
+       kbuf += total;
+       length -= total;
+       /* optional high speed config */
+       if (kbuf [1] == USB_DT_CONFIG) {
+               dev->hs_config = (void *) kbuf;
+               total = le16_to_cpu(dev->hs_config->wTotalLength);
+               if (!is_valid_config (dev->hs_config) || total >= length)
+                       goto fail;
+               kbuf += total;
+               length -= total;
+       }
+       /* could support multiple configs, using another encoding! */
+       /* device descriptor (tweaked for paranoia) */
+       if (length != USB_DT_DEVICE_SIZE)
+               goto fail;
+       dev->dev = (void *)kbuf;
+       if (dev->dev->bLength != USB_DT_DEVICE_SIZE
+                       || dev->dev->bDescriptorType != USB_DT_DEVICE
+                       || dev->dev->bNumConfigurations != 1)
+               goto fail;
+       dev->dev->bNumConfigurations = 1;
+       dev->dev->bcdUSB = cpu_to_le16 (0x0200);
+       /* triggers gadgetfs_bind(); then we can enumerate. */
+       spin_unlock_irq (&dev->lock);
+       if (dev->hs_config)
+               gadgetfs_driver.max_speed = USB_SPEED_HIGH;
+       else
+               gadgetfs_driver.max_speed = USB_SPEED_FULL;
+       value = usb_gadget_probe_driver(&gadgetfs_driver);
+       if (value != 0) {
+               kfree (dev->buf);
+               dev->buf = NULL;
+       } else {
+               /* at this point "good" hardware has for the first time
+                * let the USB the host see us.  alternatively, if users
+                * unplug/replug that will clear all the error state.
+                *
+                * note:  everything running before here was guaranteed
+                * to choke driver model style diagnostics.  from here
+                * on, they can work ... except in cleanup paths that
+                * kick in after the ep0 descriptor is closed.
+                */
+               fd->f_op = &ep0_io_operations;
+               value = len;
+       }
+       return value;
+ fail:
+       spin_unlock_irq (&dev->lock);
+       pr_debug ("%s: %s fail %Zd, %p\n", shortname, __func__, value, dev);
+       kfree (dev->buf);
+       dev->buf = NULL;
+       return value;
+ }
+ /* open() on the $CHIP file: only one opener at a time — succeeds solely
+  * from the DISABLED state, resetting the event queue and taking a dev
+  * reference (released in dev_release()).  -EBUSY otherwise.
+  */
+ static int
+ dev_open (struct inode *inode, struct file *fd)
+ {
+       struct dev_data         *dev = inode->i_private;
+       int                     value = -EBUSY;
+       spin_lock_irq(&dev->lock);
+       if (dev->state == STATE_DEV_DISABLED) {
+               dev->ev_next = 0;
+               dev->state = STATE_DEV_OPENED;
+               fd->private_data = dev;
+               get_dev (dev);
+               value = 0;
+       }
+       spin_unlock_irq(&dev->lock);
+       return value;
+ }
+ /* fops for the $CHIP file before configuration; dev_config() swaps in
+  * ep0_io_operations once descriptors are accepted. */
+ static const struct file_operations dev_init_operations = {
+       .llseek =       no_llseek,
+       .open =         dev_open,
+       .write =        dev_config,
+       .fasync =       ep0_fasync,
+       .unlocked_ioctl = dev_ioctl,
+       .release =      dev_release,
+ };
+ /*----------------------------------------------------------------------*/
+ /* FILESYSTEM AND SUPERBLOCK OPERATIONS
+  *
+  * Mounting the filesystem creates a controller file, used first for
+  * device configuration then later for event monitoring.
+  */
+ /* FIXME PAM etc could set this security policy without mount options
+  * if epfiles inherited ownership and permissons from ep0 ...
+  */
+ static unsigned default_uid;
+ static unsigned default_gid;
+ static unsigned default_perm = S_IRUSR | S_IWUSR;
+ module_param (default_uid, uint, 0644);
+ module_param (default_gid, uint, 0644);
+ module_param (default_perm, uint, 0644);
+ /* Allocate and initialize a gadgetfs inode with the module-parameter
+  * uid/gid, the given mode and fops, and @data in i_private.  Returns
+  * NULL on allocation failure.
+  */
+ static struct inode *
+ gadgetfs_make_inode (struct super_block *sb,
+               void *data, const struct file_operations *fops,
+               int mode)
+ {
+       struct inode *inode = new_inode (sb);
+       if (inode) {
+               inode->i_ino = get_next_ino();
+               inode->i_mode = mode;
+               inode->i_uid = make_kuid(&init_user_ns, default_uid);
+               inode->i_gid = make_kgid(&init_user_ns, default_gid);
+               inode->i_atime = inode->i_mtime = inode->i_ctime
+                               = CURRENT_TIME;
+               inode->i_private = data;
+               inode->i_fop = fops;
+       }
+       return inode;
+ }
+ /* creates in fs root directory, so non-renamable and non-linkable.
+  * so inode and dentry are paired, until device reconfig.
+  */
+ /* Create a regular file named @name in the fs root, wiring @data and
+  * @fops into its inode.  Stores the new dentry through @dentry_p and
+  * returns the inode, or NULL on failure (dentry released).
+  */
+ static struct inode *
+ gadgetfs_create_file (struct super_block *sb, char const *name,
+               void *data, const struct file_operations *fops,
+               struct dentry **dentry_p)
+ {
+       struct dentry   *dentry;
+       struct inode    *inode;
+       dentry = d_alloc_name(sb->s_root, name);
+       if (!dentry)
+               return NULL;
+       inode = gadgetfs_make_inode (sb, data, fops,
+                       S_IFREG | (default_perm & S_IRWXUGO));
+       if (!inode) {
+               dput(dentry);
+               return NULL;
+       }
+       d_add (dentry, inode);
+       *dentry_p = dentry;
+       return inode;
+ }
+ /* minimal superblock ops: stock statfs, inodes freed on last iput */
+ static const struct super_operations gadget_fs_operations = {
+       .statfs =       simple_statfs,
+       .drop_inode =   generic_delete_inode,
+ };
+ /* Mount-time superblock setup: probe for the controller name ($CHIP)
+  * with the throwaway probe_driver, build the root directory, allocate
+  * the singleton dev_data and create the $CHIP file for it.  Only one
+  * mount may exist at a time (the_device guards this).
+  */
+ static int
+ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
+ {
+       struct inode    *inode;
+       struct dev_data *dev;
+       if (the_device)
+               return -ESRCH;
+       /* fake probe to determine $CHIP */
+       CHIP = NULL;
+       usb_gadget_probe_driver(&probe_driver);
+       if (!CHIP)
+               return -ENODEV;
+       /* superblock */
+       sb->s_blocksize = PAGE_CACHE_SIZE;
+       sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+       sb->s_magic = GADGETFS_MAGIC;
+       sb->s_op = &gadget_fs_operations;
+       sb->s_time_gran = 1;
+       /* root inode */
+       inode = gadgetfs_make_inode (sb,
+                       NULL, &simple_dir_operations,
+                       S_IFDIR | S_IRUGO | S_IXUGO);
+       if (!inode)
+               goto Enomem;
+       inode->i_op = &simple_dir_inode_operations;
+       if (!(sb->s_root = d_make_root (inode)))
+               goto Enomem;
+       /* the ep0 file is named after the controller we expect;
+        * user mode code can use it for sanity checks, like we do.
+        */
+       dev = dev_new ();
+       if (!dev)
+               goto Enomem;
+       dev->sb = sb;
+       if (!gadgetfs_create_file (sb, CHIP,
+                               dev, &dev_init_operations,
+                               &dev->dentry)) {
+               put_dev(dev);
+               goto Enomem;
+       }
+       /* other endpoint files are available after hardware setup,
+        * from binding to a controller.
+        */
+       the_device = dev;
+       return 0;
+ Enomem:
+       return -ENOMEM;
+ }
+ /* "mount -t gadgetfs path /dev/gadget" ends up here */
+ /* single-instance mount: reuses the existing superblock if present */
+ static struct dentry *
+ gadgetfs_mount (struct file_system_type *t, int flags,
+               const char *path, void *opts)
+ {
+       return mount_single (t, flags, opts, gadgetfs_fill_super);
+ }
+ /* Unmount: tear down the superblock, then release the singleton device
+  * reference so a fresh mount can create a new one. */
+ static void
+ gadgetfs_kill_sb (struct super_block *sb)
+ {
+       kill_litter_super (sb);
+       if (the_device) {
+               put_dev (the_device);
+               the_device = NULL;
+       }
+ }
+ /*----------------------------------------------------------------------*/
+ /* filesystem registration record for "gadgetfs" */
+ static struct file_system_type gadgetfs_type = {
+       .owner          = THIS_MODULE,
+       .name           = shortname,
+       .mount          = gadgetfs_mount,
+       .kill_sb        = gadgetfs_kill_sb,
+ };
+ MODULE_ALIAS_FS("gadgetfs");
+ /*----------------------------------------------------------------------*/
+ /* Module init: register the gadgetfs filesystem type. */
+ static int __init init (void)
+ {
+       int status;
+       status = register_filesystem (&gadgetfs_type);
+       if (status == 0)
+               pr_info ("%s: %s, version " DRIVER_VERSION "\n",
+                       shortname, driver_desc);
+       return status;
+ }
+ module_init (init);
+ /* Module exit: unregister the filesystem type. */
+ static void __exit cleanup (void)
+ {
+       pr_debug ("unregister %s\n", shortname);
+       unregister_filesystem (&gadgetfs_type);
+ }
+ module_exit (cleanup);
index 0000000000000000000000000000000000000000,5d93f2b1e394b355ebbe93caf9ab5e103acf77bd..08df5c4f46ce4137f91a2c86727765850924c7b2
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,2235 +1,2236 @@@
 -      } else if (nt == 0x11) {
 -              dev_err(dev->dev, "Invalid value for trans./microframe\n");
+ /*
+  * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
+  *
+  * 2013 (c) Aeroflex Gaisler AB
+  *
+  * This driver supports GRUSBDC USB Device Controller cores available in the
+  * GRLIB VHDL IP core library.
+  *
+  * Full documentation of the GRUSBDC core can be found here:
+  * http://www.gaisler.com/products/grlib/grip.pdf
+  *
+  * This program is free software; you can redistribute it and/or modify it
+  * under the terms of the GNU General Public License as published by the
+  * Free Software Foundation; either version 2 of the License, or (at your
+  * option) any later version.
+  *
+  * Contributors:
+  * - Andreas Larsson <andreas@gaisler.com>
+  * - Marko Isomaki
+  */
+ /*
+  * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
+  * individually configurable to any of the four USB transfer types. This driver
+  * only supports cores in DMA mode.
+  */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/errno.h>
+ #include <linux/list.h>
+ #include <linux/interrupt.h>
+ #include <linux/device.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmapool.h>
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/of_platform.h>
+ #include <linux/of_irq.h>
+ #include <linux/of_address.h>
+ #include <asm/byteorder.h>
+ #include "gr_udc.h"
+ #define       DRIVER_NAME     "gr_udc"
+ #define       DRIVER_DESC     "Aeroflex Gaisler GRUSBDC USB Peripheral Controller"
+ static const char driver_name[] = DRIVER_NAME;
+ static const char driver_desc[] = DRIVER_DESC;
+ #define gr_read32(x) (ioread32be((x)))
+ #define gr_write32(x, v) (iowrite32be((v), (x)))
+ /* USB speed and corresponding string calculated from status register value */
+ #define GR_SPEED(status) \
+       ((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
+ #define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))
+ /* Size of hardware buffer calculated from epctrl register value */
+ #define GR_BUFFER_SIZE(epctrl)                                              \
+       ((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
+        GR_EPCTRL_BUFSZ_SCALER)
+ /* ---------------------------------------------------------------------- */
+ /* Debug printout functionality */
+ static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};
+ static const char *gr_ep0state_string(enum gr_ep0state state)
+ {
+       static const char *const names[] = {
+               [GR_EP0_DISCONNECT] = "disconnect",
+               [GR_EP0_SETUP] = "setup",
+               [GR_EP0_IDATA] = "idata",
+               [GR_EP0_ODATA] = "odata",
+               [GR_EP0_ISTATUS] = "istatus",
+               [GR_EP0_OSTATUS] = "ostatus",
+               [GR_EP0_STALL] = "stall",
+               [GR_EP0_SUSPEND] = "suspend",
+       };
+       if (state < 0 || state >= ARRAY_SIZE(names))
+               return "UNKNOWN";
+       return names[state];
+ }
#ifdef VERBOSE_DEBUG
/* Dump one 32 byte row of a request's data buffer for debugging */
static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	/* IN: length queued by the gadget; OUT: bytes actually received */
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug("   ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

/* Log the raw fields of a received control request */
static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}
#else /* !VERBOSE_DEBUG */
/* Empty stubs when verbose debugging is compiled out */
static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}
static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}
#endif /* VERBOSE_DEBUG */
+ /* ---------------------------------------------------------------------- */
+ /* Debugfs functionality */
+ #ifdef CONFIG_USB_GADGET_DEBUG_FS
/* Print the detailed state of one endpoint into the debugfs seq_file */
static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	/* Transfer type field doubles as an index into gr_modestring */
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, "  mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, "  halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, "  disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, "  valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, "  dma_start = %d\n", ep->dma_start);
	seq_printf(seq, "  stopped = %d\n", ep->stopped);
	seq_printf(seq, "  wedged = %d\n", ep->wedged);
	seq_printf(seq, "  callback = %d\n", ep->callback);
	seq_printf(seq, "  maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, "  maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, "  bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	/* The NT field is only printed for iso (1) and interrupt (3) modes */
	if (mode == 1 || mode == 3)
		seq_printf(seq, "  nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);
	/*
	 * NOTE(review): the "selected" markers for buffer 0/1 are mirrored on
	 * GR_EPSTAT_BS - presumably BS set means buffer 1 is selected; confirm
	 * against the GRUSBDC documentation.
	 */
	seq_printf(seq, "  Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, "  Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, "  Queue: empty\n\n");
		return;
	}

	seq_puts(seq, "  Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, "    0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		/* Walk the descriptor chain, flagging the current one ('c') */
		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, "    %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}

	seq_puts(seq, "\n");
}
/* debugfs show: device-wide state followed by the state of every endpoint */
static int gr_seq_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}
+ static int gr_dfs_open(struct inode *inode, struct file *file)
+ {
+       return single_open(file, gr_seq_show, inode->i_private);
+ }
/* File operations for the read-only gr_udc_state debugfs file */
static const struct file_operations gr_dfs_fops = {
	.owner		= THIS_MODULE,
	.open		= gr_dfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
+ static void gr_dfs_create(struct gr_udc *dev)
+ {
+       const char *name = "gr_udc_state";
+       dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
+       dev->dfs_state = debugfs_create_file(name, 0444, dev->dfs_root, dev,
+                                            &gr_dfs_fops);
+ }
+ static void gr_dfs_delete(struct gr_udc *dev)
+ {
+       /* Handles NULL and ERR pointers internally */
+       debugfs_remove(dev->dfs_state);
+       debugfs_remove(dev->dfs_root);
+ }
#else /* !CONFIG_USB_GADGET_DEBUG_FS */
/* Debugfs support compiled out: provide empty stubs */
static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}
#endif /* CONFIG_USB_GADGET_DEBUG_FS */
+ /* ---------------------------------------------------------------------- */
+ /* DMA and request handling */
+ /* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
+ static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
+ {
+       dma_addr_t paddr;
+       struct gr_dma_desc *dma_desc;
+       dma_desc = dma_pool_alloc(ep->dev->desc_pool, gfp_flags, &paddr);
+       if (!dma_desc) {
+               dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
+               return NULL;
+       }
+       memset(dma_desc, 0, sizeof(*dma_desc));
+       dma_desc->paddr = paddr;
+       return dma_desc;
+ }
/* Return a single descriptor to the device's DMA pool */
static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}
+ /* Frees the chain of struct gr_dma_desc for the given request */
+ static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
+ {
+       struct gr_dma_desc *desc;
+       struct gr_dma_desc *next;
+       next = req->first_desc;
+       if (!next)
+               return;
+       do {
+               desc = next;
+               next = desc->next_desc;
+               gr_free_dma_desc(dev, desc);
+       } while (desc != req->last_desc);
+       req->first_desc = NULL;
+       req->curr_desc = NULL;
+       req->last_desc = NULL;
+ }
+ static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
+ /*
+  * Frees allocated resources and calls the appropriate completion function/setup
+  * package handler for a finished request.
+  *
+  * Must be called with dev->lock held and irqs disabled.
+  */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	/* Only set the status if nobody (e.g. a dequeue) set one already */
	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) /* For OUT, actual gets updated bit by bit */
		req->req.actual = req->req.length;

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		/* Internal ep0out request: hand SETUP data to the ep0 logic */
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		/* Drop the lock around the gadget driver's completion call */
		spin_unlock(&dev->lock);

		req->req.complete(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}
+ static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+ {
+       struct gr_request *req;
+       req = kzalloc(sizeof(*req), gfp_flags);
+       if (!req)
+               return NULL;
+       INIT_LIST_HEAD(&req->queue);
+       return &req->req;
+ }
+ /*
+  * Starts DMA for endpoint ep if there are requests in the queue.
+  *
+  * Must be called with dev->lock held and with !ep->stopped.
+  */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	/* Nothing queued: record that DMA is idle and bail out */
	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}
+ /*
+  * Finishes the first request in the ep's queue and, if available, starts the
+  * next request in queue.
+  *
+  * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
+  */
+ static void gr_dma_advance(struct gr_ep *ep, int status)
+ {
+       struct gr_request *req;
+       req = list_first_entry(&ep->queue, struct gr_request, queue);
+       gr_finish_request(ep, req, status);
+       gr_start_dma(ep); /* Regardless of ep->dma_start */
+ }
+ /*
+  * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
+  * transfer to be canceled and clears GR_DMACTRL_DA.
+  *
+  * Must be called with dev->lock held.
+  */
+ static void gr_abort_dma(struct gr_ep *ep)
+ {
+       u32 dmactrl;
+       dmactrl = gr_read32(&ep->regs->dmactrl);
+       gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
+ }
+ /*
+  * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
+  * chain.
+  *
+  * Size is not used for OUT endpoints. Hardware can not be instructed to handle
+  * smaller buffer than MAXPL in the OUT direction.
+  */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		/* IN descriptors carry the payload length and start enabled */
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		/* OUT descriptors always interrupt; size is fixed by hardware */
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		/*
		 * Link the new descriptor after the current tail.
		 * NOTE(review): the NX bit is set via GR_DESC_OUT_CTRL_NX for
		 * both directions - presumably IN and OUT layouts share this
		 * bit position; confirm in gr_udc.h.
		 */
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}
+ /*
+  * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
+  * together covers req->req.length bytes of the buffer at DMA address
+  * req->req.dma for the OUT direction.
+  *
+  * The first descriptor in the chain is enabled, the rest disabled. The
+  * interrupt handler will later enable them one by one when needed so we can
+  * find out when the transfer is finished. For OUT endpoints, all descriptors
+  * therefore generate interrupts.
+  */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		/* Should not happen however - gr_queue stops such lengths */
		if (size < ep->bytes_per_buffer)
			dev_warn(ep->dev->dev,
				 "Buffer overrun risk: %u < %u bytes/buffer\n",
				 size, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	/* Arm only the head; the irq handler enables the rest one by one */
	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	/* Undo any partially built chain on allocation failure */
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
+ /*
+  * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
+  * together covers req->req.length bytes of the buffer at DMA address
+  * req->req.dma for the IN direction.
+  *
+  * When more data is provided than the maximum payload size, the hardware splits
+  * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
+  * is always set to a multiple of the maximum payload (restricted to the valid
+  * number of maximum payloads during high bandwidth isochronous or interrupt
+  * transfers)
+  *
+  * All descriptors are enabled from the beginning and we only generate an
+  * interrupt for the last one indicating that the entire request has been pushed
+  * to hardware.
+  */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is even
	 * multiples of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	/* Undo any partially built chain on allocation failure */
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
+ /* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	/* Non-ep0 endpoints must have been enabled with a descriptor */
	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	/*
	 * The DMA controller can not handle smaller OUT buffers than
	 * maxpacket. It could lead to buffer overruns if unexpectedly long
	 * packets are received.
	 */
	if (!ep->is_in && (req->req.length % ep->ep.maxpacket) != 0) {
		dev_err(dev->dev,
			"OUT request length %d is not multiple of maxpacket\n",
			req->req.length);
		return -EMSGSIZE;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	/* Build the descriptor chain before exposing the request */
	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}
+ /*
+  * Queue a request from within the driver.
+  *
+  * Must be called with dev->lock held.
+  */
+ static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
+                              gfp_t gfp_flags)
+ {
+       if (ep->is_in)
+               gr_dbgprint_request("RESP", ep, req);
+       return gr_queue(ep, req, gfp_flags);
+ }
+ /* ---------------------------------------------------------------------- */
+ /* General helper functions */
+ /*
+  * Dequeue ALL requests.
+  *
+  * Must be called with dev->lock held and irqs disabled.
+  */
+ static void gr_ep_nuke(struct gr_ep *ep)
+ {
+       struct gr_request *req;
+       ep->stopped = 1;
+       ep->dma_start = 0;
+       gr_abort_dma(ep);
+       while (!list_empty(&ep->queue)) {
+               req = list_first_entry(&ep->queue, struct gr_request, queue);
+               gr_finish_request(ep, req, -ESHUTDOWN);
+       }
+ }
+ /*
+  * Reset the hardware state of this endpoint.
+  *
+  * Must be called with dev->lock held.
+  */
static void gr_ep_reset(struct gr_ep *ep)
{
	/* Disable the endpoint and its DMA engine in hardware */
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	/* Back to control-sized maxpacket until the ep is enabled again */
	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}
+ /*
+  * Generate STALL on ep0in/out.
+  *
+  * Must be called with dev->lock held.
+  */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	/* Set the CS (control stall) bit on both ep0out and ep0in */
	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}
+ /*
+  * Halts, halts and wedges, or clears halt for an endpoint.
+  *
+  * Must be called with dev->lock held.
+  */
+ static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
+ {
+       u32 epctrl;
+       int retval = 0;
+       if (ep->num && !ep->ep.desc)
+               return -EINVAL;
+       if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
+               return -EOPNOTSUPP;
+       /* Never actually halt ep0, and therefore never clear halt for ep0 */
+       if (!ep->num) {
+               if (halt && !fromhost) {
+                       /* ep0 halt from gadget - generate protocol stall */
+                       gr_control_stall(ep->dev);
+                       dev_dbg(ep->dev->dev, "EP: stall ep0\n");
+                       return 0;
+               }
+               return -EINVAL;
+       }
+       dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
+               (halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);
+       epctrl = gr_read32(&ep->regs->epctrl);
+       if (halt) {
+               /* Set HALT */
+               gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
+               ep->stopped = 1;
+               if (wedge)
+                       ep->wedged = 1;
+       } else {
+               gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
+               ep->stopped = 0;
+               ep->wedged = 0;
+               /* Things might have been queued up in the meantime */
+               if (!ep->dma_start)
+                       gr_start_dma(ep);
+       }
+       return retval;
+ }
+ /* Must be called with dev->lock held */
+ static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
+ {
+       if (dev->ep0state != value)
+               dev_vdbg(dev->dev, "STATE:  ep0state=%s\n",
+                        gr_ep0state_string(value));
+       dev->ep0state = value;
+ }
+ /*
+  * Should only be called when endpoints can not generate interrupts.
+  *
+  * Must be called with dev->lock held.
+  */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	/* Writing 0 clears all control bits at once (per the function name,
	 * this includes the interrupt enables and the data line pullup) */
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}
+ /*
+  * Stop all device activity and disable data line pullup.
+  *
+  * Must be called with dev->lock held and irqs disabled.
+  */
+ static void gr_stop_activity(struct gr_udc *dev)
+ {
+       struct gr_ep *ep;
+       list_for_each_entry(ep, &dev->ep_list, ep_list)
+               gr_ep_nuke(ep);
+       gr_disable_interrupts_and_pullup(dev);
+       gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+       usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
+ }
+ /* ---------------------------------------------------------------------- */
+ /* ep0 setup packet handling */
/* Completion handler that switches the hardware into the requested test mode */
static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	/* Latch the test mode number and set the test mode enable bit */
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}
static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Intentionally empty: these responses need no completion work */
}
+ /*
+  * Queue a response on ep0in.
+  *
+  * Must be called with dev->lock held.
+  */
+ static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
+                         void (*complete)(struct usb_ep *ep,
+                                          struct usb_request *req))
+ {
+       u8 *reqbuf = dev->ep0reqi->req.buf;
+       int status;
+       int i;
+       for (i = 0; i < length; i++)
+               reqbuf[i] = buf[i];
+       dev->ep0reqi->req.length = length;
+       dev->ep0reqi->req.complete = complete;
+       status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
+       if (status < 0)
+               dev_err(dev->dev,
+                       "Could not queue ep0in setup response: %d\n", status);
+       return status;
+ }
+ /*
+  * Queue a 2 byte response on ep0in.
+  *
+  * Must be called with dev->lock held.
+  */
+ static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
+ {
+       __le16 le_response = cpu_to_le16(response);
+       return gr_ep0_respond(dev, (u8 *)&le_response, 2,
+                             gr_ep0_dummy_complete);
+ }
+ /*
+  * Queue a ZLP response on ep0in.
+  *
+  * Must be called with dev->lock held.
+  */
+ static inline int gr_ep0_respond_empty(struct gr_udc *dev)
+ {
+       return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
+ }
+ /*
+  * This is run when a SET_ADDRESS request is received. First writes
+  * the new address to the control register which is updated internally
+  * when the next IN packet is ACKED.
+  *
+  * Must be called with dev->lock held.
+  */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	/* NOTE(review): SU presumably arms the deferred address update that
	 * takes effect when the next IN packet is ACKED - confirm in the
	 * GRUSBDC documentation */
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}
+ /*
+  * Returns negative for STALL, 0 for successful handling and positive for
+  * delegation.
+  *
+  * Must be called with dev->lock held.
+  */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		/* Address 0 returns the device to the default state */
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				/* Test mode is entered from the completion
				 * handler, i.e. after the status stage */
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}
+ /*
+  * Returns negative for STALL, 0 for successful handling and positive for
+  * delegation.
+  *
+  * Must be called with dev->lock held.
+  */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	/* STALL interface requests arriving before the device is configured */
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */
	switch (request) {
	case USB_REQ_GET_STATUS:
		/* Interfaces have no status bits; always report zero */
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}
/*
 * Handles standard setup requests with recipient endpoint: GET_STATUS and
 * the SET/CLEAR_FEATURE(ENDPOINT_HALT) pair.
 *
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	/* STALL requests that address a non-existent endpoint */
	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	/* Only ep0 may be addressed before the device is configured */
	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		/* Report the hardware halt (EH) bit as the halt feature */
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);
	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			/* A wedged endpoint refuses the host's un-halt */
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}
+ /* Must be called with dev->lock held */
+ static void gr_ep0out_requeue(struct gr_udc *dev)
+ {
+       int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);
+       if (ret)
+               dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
+                       ret);
+ }
+ /*
+  * The main function dealing with setup requests on ep0.
+  *
+  * Must be called with dev->lock held and irqs disabled
+  */
+ static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
+       __releases(&dev->lock)
+       __acquires(&dev->lock)
+ {
+       union {
+               struct usb_ctrlrequest ctrl;
+               u8 raw[8];
+               u32 word[2];
+       } u;
+       u8 type;
+       u8 request;
+       u16 value;
+       u16 index;
+       u16 length;
+       int i;
+       int status;
+       /* Restore from ep0 halt */
+       if (dev->ep0state == GR_EP0_STALL) {
+               gr_set_ep0state(dev, GR_EP0_SETUP);
+               if (!req->req.actual)
+                       goto out;
+       }
+       if (dev->ep0state == GR_EP0_ISTATUS) {
+               gr_set_ep0state(dev, GR_EP0_SETUP);
+               if (req->req.actual > 0)
+                       dev_dbg(dev->dev,
+                               "Unexpected setup packet at state %s\n",
+                               gr_ep0state_string(GR_EP0_ISTATUS));
+               else
+                       goto out; /* Got expected ZLP */
+       } else if (dev->ep0state != GR_EP0_SETUP) {
+               dev_info(dev->dev,
+                        "Unexpected ep0out request at state %s - stalling\n",
+                        gr_ep0state_string(dev->ep0state));
+               gr_control_stall(dev);
+               gr_set_ep0state(dev, GR_EP0_SETUP);
+               goto out;
+       } else if (!req->req.actual) {
+               dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
+                       gr_ep0state_string(dev->ep0state));
+               goto out;
+       }
+       /* Handle SETUP packet */
+       for (i = 0; i < req->req.actual; i++)
+               u.raw[i] = ((u8 *)req->req.buf)[i];
+       type = u.ctrl.bRequestType;
+       request = u.ctrl.bRequest;
+       value = le16_to_cpu(u.ctrl.wValue);
+       index = le16_to_cpu(u.ctrl.wIndex);
+       length = le16_to_cpu(u.ctrl.wLength);
+       gr_dbgprint_devreq(dev, type, request, value, index, length);
+       /* Check for data stage */
+       if (length) {
+               if (type & USB_DIR_IN)
+                       gr_set_ep0state(dev, GR_EP0_IDATA);
+               else
+                       gr_set_ep0state(dev, GR_EP0_ODATA);
+       }
+       status = 1; /* Positive status flags delegation */
+       if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+               switch (type & USB_RECIP_MASK) {
+               case USB_RECIP_DEVICE:
+                       status = gr_device_request(dev, type, request,
+                                                  value, index);
+                       break;
+               case USB_RECIP_ENDPOINT:
+                       status =  gr_endpoint_request(dev, type, request,
+                                                     value, index);
+                       break;
+               case USB_RECIP_INTERFACE:
+                       status = gr_interface_request(dev, type, request,
+                                                     value, index);
+                       break;
+               }
+       }
+       if (status > 0) {
+               spin_unlock(&dev->lock);
+               dev_vdbg(dev->dev, "DELEGATE\n");
+               status = dev->driver->setup(&dev->gadget, &u.ctrl);
+               spin_lock(&dev->lock);
+       }
+       /* Generate STALL on both ep0out and ep0in if requested */
+       if (unlikely(status < 0)) {
+               dev_vdbg(dev->dev, "STALL\n");
+               gr_control_stall(dev);
+       }
+       if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
+           request == USB_REQ_SET_CONFIGURATION) {
+               if (!value) {
+                       dev_dbg(dev->dev, "STATUS: deconfigured\n");
+                       usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
+               } else if (status >= 0) {
+                       /* Not configured unless gadget OK:s it */
+                       dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
+                       usb_gadget_set_state(&dev->gadget,
+                                            USB_STATE_CONFIGURED);
+               }
+       }
+       /* Get ready for next stage */
+       if (dev->ep0state == GR_EP0_ODATA)
+               gr_set_ep0state(dev, GR_EP0_OSTATUS);
+       else if (dev->ep0state == GR_EP0_IDATA)
+               gr_set_ep0state(dev, GR_EP0_ISTATUS);
+       else
+               gr_set_ep0state(dev, GR_EP0_SETUP);
+ out:
+       gr_ep0out_requeue(dev);
+ }
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/*
 * Handles a host becoming attached (VBUS valid): records the detected speed
 * and enables the interface towards the host.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}
/*
 * Arms VBUS detection: enables the VBUS interrupt and, if VBUS is already
 * present, handles the attach immediately.
 *
 * Must be called with dev->lock held.
 */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}
/*
 * Handles the host detaching (VBUS no longer valid): stops all activity,
 * notifies the gadget driver and re-arms VBUS detection.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		/* The lock must not be held across the gadget callback */
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}
/*
 * Handles a USB bus reset: back to address 0 and the default state, with
 * both control endpoints flushed and ep0out re-armed for the next setup.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	/* Abort anything outstanding on ep0 in both directions */
	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}
/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	/* The descriptor ctrl word is updated by hardware - read it once */
	if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}
/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	/* The descriptor ctrl word is updated by hardware - read it once */
	ctrl = ACCESS_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	/* SE flags that this descriptor carried a setup packet */
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual == req->req.length) {
		/* Short packet or the expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}
/*
 * Handle state changes: VBUS attach/detach, USB reset, speed change and
 * suspend/resume. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	/* Whether we have previously seen VBUS power on this port */
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		/* Write-one-to-clear the reset status bit */
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		/* Remember where to return to on resume */
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			/* Gadget callbacks run without the lock held */
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			/* Gadget callbacks run without the lock held */
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}
/*
 * Non-interrupt context irq handler: the threaded half of the split handler
 * registered together with gr_irq. Does all the actual endpoint and status
 * processing under dev->lock.
 */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	/* Bail out if interrupts have been disabled meanwhile */
	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadgets reuse the request that might already be currently
	 * outstanding and needs to be completed (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
+ /* Interrupt context irq handler */
+ static irqreturn_t gr_irq(int irq, void *_dev)
+ {
+       struct gr_udc *dev = _dev;
+       if (!dev->irq_enabled)
+               return IRQ_NONE;
+       return IRQ_WAKE_THREAD;
+ }
/* ---------------------------------------------------------------------- */
/* USB ep ops */

/*
 * Enable endpoint. Not for ep0in and ep0out that are handled separately.
 *
 * Validates the descriptor against the hardware's capabilities, then
 * programs the endpoint control and DMA control registers.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int gr_ep_enable(struct usb_ep *_ep,
			const struct usb_endpoint_descriptor *desc)
{
	struct gr_udc *dev;
	struct gr_ep *ep;
	u8 mode;
	u8 nt;
	u16 max;
	u16 buffer_size = 0;
	u32 epctrl;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Make sure we are clear for enabling */
	epctrl = gr_read32(&ep->regs->epctrl);
	if (epctrl & GR_EPCTRL_EV)
		return -EBUSY;

	/* Check that directions match */
	if (!ep->is_in != !usb_endpoint_dir_in(desc))
		return -EINVAL;

	/* Check ep num */
	if ((!ep->is_in && ep->num >= dev->nepo) ||
	    (ep->is_in && ep->num >= dev->nepi))
		return -EINVAL;

	/* Map USB transfer type to the hardware's type encoding */
	if (usb_endpoint_xfer_control(desc)) {
		mode = 0;
	} else if (usb_endpoint_xfer_isoc(desc)) {
		mode = 1;
	} else if (usb_endpoint_xfer_bulk(desc)) {
		mode = 2;
	} else if (usb_endpoint_xfer_int(desc)) {
		mode = 3;
	} else {
		dev_err(dev->dev, "Unknown transfer type for %s\n",
			ep->ep.name);
		return -EINVAL;
	}

	/*
	 * Bits 10-0 set the max payload. 12-11 set the number of
	 * additional transactions.
	 */
	max = 0x7ff & usb_endpoint_maxp(desc);
	nt = 0x3 & (usb_endpoint_maxp(desc) >> 11);
	buffer_size = GR_BUFFER_SIZE(epctrl);
	if (nt && (mode == 0 || mode == 2)) {
		dev_err(dev->dev,
			"%s mode: multiple trans./microframe not valid\n",
			(mode == 2 ? "Bulk" : "Control"));
		return -EINVAL;
	} else if (nt == 0x3) {
		dev_err(dev->dev,
			"Invalid value 0x3 for additional trans./microframe\n");
		return -EINVAL;
	} else if ((nt + 1) * max > buffer_size) {
		dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
			buffer_size, (nt + 1), max);
		return -EINVAL;
	} else if (max == 0) {
		dev_err(dev->dev, "Max payload cannot be set to 0\n");
		return -EINVAL;
	} else if (max > ep->ep.maxpacket_limit) {
		dev_err(dev->dev, "Requested max payload %d > limit %d\n",
			max, ep->ep.maxpacket_limit);
		return -EINVAL;
	}

	spin_lock(&ep->dev->lock);

	/* An endpoint must be stopped (disabled) before it can be enabled */
	if (!ep->stopped) {
		spin_unlock(&ep->dev->lock);
		return -EBUSY;
	}

	ep->stopped = 0;
	ep->wedged = 0;
	ep->ep.desc = desc;
	ep->ep.maxpacket = max;
	ep->dma_start = 0;

	if (nt) {
		/*
		 * Maximum possible size of all payloads in one microframe
		 * regardless of direction when using high-bandwidth mode.
		 */
		ep->bytes_per_buffer = (nt + 1) * max;
	} else if (ep->is_in) {
		/*
		 * The biggest multiple of maximum packet size that fits into
		 * the buffer. The hardware will split up into many packets in
		 * the IN direction.
		 */
		ep->bytes_per_buffer = (buffer_size / max) * max;
	} else {
		/*
		 * Only single packets will be placed the buffers in the OUT
		 * direction.
		 */
		ep->bytes_per_buffer = max;
	}

	/* Program and enable (EV) the endpoint */
	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
+ /* Disable endpoint. Not for ep0in and ep0out that are handled separately. */
+ static int gr_ep_disable(struct usb_ep *_ep)
+ {
+       struct gr_ep *ep;
+       struct gr_udc *dev;
+       unsigned long flags;
+       ep = container_of(_ep, struct gr_ep, ep);
+       if (!_ep || !ep->ep.desc)
+               return -ENODEV;
+       dev = ep->dev;
+       /* 'ep0' IN and OUT are reserved */
+       if (ep == &dev->epo[0] || ep == &dev->epi[0])
+               return -EINVAL;
+       if (dev->ep0state == GR_EP0_SUSPEND)
+               return -EBUSY;
+       dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);
+       spin_lock_irqsave(&dev->lock, flags);
+       gr_ep_nuke(ep);
+       gr_ep_reset(ep);
+       ep->ep.desc = NULL;
+       spin_unlock_irqrestore(&dev->lock, flags);
+       return 0;
+ }
+ /*
+  * Frees a request, but not any DMA buffers associated with it
+  * (gr_finish_request should already have taken care of that).
+  */
+ static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
+ {
+       struct gr_request *req;
+       if (!_ep || !_req)
+               return;
+       req = container_of(_req, struct gr_request, req);
+       /* Leads to memory leak */
+       WARN(!list_empty(&req->queue),
+            "request not dequeued properly before freeing\n");
+       kfree(req);
+ }
/*
 * Queue a request from the gadget.
 *
 * NOTE(review): gfp_flags is not honoured - gr_queue is always called with
 * GFP_ATOMIC since the lock is held here; confirm this is intentional.
 */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
/*
 * Dequeue JUST ONE request.
 *
 * Returns 0 on success or a negative errno (-EINVAL if the request is not
 * queued on this endpoint).
 */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;

	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* Loop fell through without a match: not on this endpoint */
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}
+ /* Helper for gr_set_halt and gr_set_wedge */
+ static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+ {
+       int ret;
+       struct gr_ep *ep;
+       if (!_ep)
+               return -ENODEV;
+       ep = container_of(_ep, struct gr_ep, ep);
+       spin_lock(&ep->dev->lock);
+       /* Halting an IN endpoint should fail if queue is not empty */
+       if (halt && ep->is_in && !list_empty(&ep->queue)) {
+               ret = -EAGAIN;
+               goto out;
+       }
+       ret = gr_ep_halt_wedge(ep, halt, wedge, 0);
+ out:
+       spin_unlock(&ep->dev->lock);
+       return ret;
+ }
/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	/* wedge = 0: an ordinary halt that CLEAR_FEATURE can remove */
	return gr_set_halt_wedge(_ep, halt, 0);
}
/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	/* halt = 1, wedge = 1: host CLEAR_FEATURE(HALT) will be refused */
	return gr_set_halt_wedge(_ep, 1, 1);
}
+ /*
+  * Return the total number of bytes currently stored in the internal buffers of
+  * the endpoint.
+  */
+ static int gr_fifo_status(struct usb_ep *_ep)
+ {
+       struct gr_ep *ep;
+       u32 epstat;
+       u32 bytes = 0;
+       if (!_ep)
+               return -ENODEV;
+       ep = container_of(_ep, struct gr_ep, ep);
+       epstat = gr_read32(&ep->regs->epstat);
+       if (epstat & GR_EPSTAT_B0)
+               bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
+       if (epstat & GR_EPSTAT_B1)
+               bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;
+       return bytes;
+ }
+ /* Empty data from internal buffers of an endpoint. */
+ static void gr_fifo_flush(struct usb_ep *_ep)
+ {
+       struct gr_ep *ep;
+       u32 epctrl;
+       if (!_ep)
+               return;
+       ep = container_of(_ep, struct gr_ep, ep);
+       dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);
+       spin_lock(&ep->dev->lock);
+       epctrl = gr_read32(&ep->regs->epctrl);
+       epctrl |= GR_EPCTRL_CB;
+       gr_write32(&ep->regs->epctrl, epctrl);
+       spin_unlock(&ep->dev->lock);
+ }
+ static struct usb_ep_ops gr_ep_ops = {
+       .enable         = gr_ep_enable,
+       .disable        = gr_ep_disable,
+       .alloc_request  = gr_alloc_request,
+       .free_request   = gr_free_request,
+       .queue          = gr_queue_ext,
+       .dequeue        = gr_dequeue,
+       .set_halt       = gr_set_halt,
+       .set_wedge      = gr_set_wedge,
+       .fifo_status    = gr_fifo_status,
+       .fifo_flush     = gr_fifo_flush,
+ };
+ /* ---------------------------------------------------------------------- */
+ /* USB Gadget ops */
+ static int gr_get_frame(struct usb_gadget *_gadget)
+ {
+       struct gr_udc *dev;
+       if (!_gadget)
+               return -ENODEV;
+       dev = container_of(_gadget, struct gr_udc, gadget);
+       return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
+ }
+ static int gr_wakeup(struct usb_gadget *_gadget)
+ {
+       struct gr_udc *dev;
+       if (!_gadget)
+               return -ENODEV;
+       dev = container_of(_gadget, struct gr_udc, gadget);
+       /* Remote wakeup feature not enabled by host*/
+       if (!dev->remote_wakeup)
+               return -EINVAL;
+       spin_lock(&dev->lock);
+       gr_write32(&dev->regs->control,
+                  gr_read32(&dev->regs->control) | GR_CONTROL_RW);
+       spin_unlock(&dev->lock);
+       return 0;
+ }
+ static int gr_pullup(struct usb_gadget *_gadget, int is_on)
+ {
+       struct gr_udc *dev;
+       u32 control;
+       if (!_gadget)
+               return -ENODEV;
+       dev = container_of(_gadget, struct gr_udc, gadget);
+       spin_lock(&dev->lock);
+       control = gr_read32(&dev->regs->control);
+       if (is_on)
+               control |= GR_CONTROL_EP;
+       else
+               control &= ~GR_CONTROL_EP;
+       gr_write32(&dev->regs->control, control);
+       spin_unlock(&dev->lock);
+       return 0;
+ }
/* Hooks up the gadget driver and arms host (VBUS) detection */
static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	dev_info(dev->dev, "Started with gadget driver '%s'\n",
		 driver->driver.name);

	return 0;
}
/* Unhooks the gadget driver and stops all USB activity */
static int gr_udc_stop(struct usb_gadget *gadget,
		       struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	dev_info(dev->dev, "Stopped\n");

	return 0;
}
/* Gadget-level operations registered with the UDC core */
static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup         = gr_wakeup,
	.pullup         = gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};
/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

/* Endpoint names indexed by endpoint number, OUT direction */
static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

/* Endpoint names indexed by endpoint number, IN direction */
static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};
/*
 * Initializes one endpoint: name, register block, queue and ops. For
 * endpoint number 0, also allocates the dedicated control request and its
 * buffer used by the ep0 state machine.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		/* devm: the buffer is freed automatically on device detach */
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		/* Non-ep0 endpoints are exposed on the gadget's ep_list */
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	return 0;
}
+ /*
+  * One-time controller initialization: reset the device address, set up
+  * all IN and OUT endpoints (buffer sizes taken from the optional
+  * "epibufsizes"/"epobufsizes" DT properties, defaulting to 1024) and
+  * enable ep0 in both directions.
+  *
+  * Returns 0 on success or a negative errno from gr_ep_init().
+  *
+  * Must be called with dev->lock held
+  */
+ static int gr_udc_init(struct gr_udc *dev)
+ {
+       struct device_node *np = dev->dev->of_node;
+       u32 epctrl_val;
+       u32 dmactrl_val;
+       int i;
+       int ret = 0;
+       u32 bufsize;
+       gr_set_address(dev, 0);
+       INIT_LIST_HEAD(&dev->gadget.ep_list);
+       dev->gadget.speed = USB_SPEED_UNKNOWN;
+       dev->gadget.ep0 = &dev->epi[0].ep;
+       INIT_LIST_HEAD(&dev->ep_list);
+       gr_set_ep0state(dev, GR_EP0_DISCONNECT);
+       for (i = 0; i < dev->nepo; i++) {
+               if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
+                       bufsize = 1024;
+               ret = gr_ep_init(dev, i, 0, bufsize);
+               if (ret)
+                       return ret;
+       }
+       for (i = 0; i < dev->nepi; i++) {
+               if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
+                       bufsize = 1024;
+               ret = gr_ep_init(dev, i, 1, bufsize);
+               if (ret)
+                       return ret;
+       }
+       /* Must be disabled by default */
+       dev->remote_wakeup = 0;
+       /* Enable ep0out and ep0in */
+       epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
+       dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
+       gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
+       gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
+       gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
+       gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);
+       return 0;
+ }
+ /*
+  * Platform removal callback; also used by gr_probe() for error cleanup.
+  * Refuses to tear down while a gadget driver is still bound (-EBUSY).
+  */
+ static int gr_remove(struct platform_device *pdev)
+ {
+       struct gr_udc *dev = platform_get_drvdata(pdev);
+       if (dev->added)
+               usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
+       if (dev->driver)
+               return -EBUSY;
+       gr_dfs_delete(dev);
+       if (dev->desc_pool)
+               dma_pool_destroy(dev->desc_pool)+;
+       platform_set_drvdata(pdev, NULL);
+       /* ep0 requests were allocated in gr_ep_init(); free them here */
+       gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
+       gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);
+       return 0;
+ }
+ /*
+  * Request @irq with gr_irq as the hard handler and gr_irq_handler as the
+  * threaded handler; devm-managed, so no explicit free is needed.
+  */
+ static int gr_request_irq(struct gr_udc *dev, int irq)
+ {
+       return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
+                                        IRQF_SHARED, driver_name, dev);
+ }
+ /*
+  * Platform probe: map registers, discover the IRQ layout, read the core
+  * configuration (number of endpoints, DMA vs. slave mode), create the
+  * descriptor DMA pool, register the gadget UDC and initialize the
+  * endpoints. On any failure after the DMA pool exists, gr_remove() is
+  * invoked to unwind.
+  */
+ static int gr_probe(struct platform_device *pdev)
+ {
+       struct gr_udc *dev;
+       struct resource *res;
+       struct gr_regs __iomem *regs;
+       int retval;
+       u32 status;
+       dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+       dev->dev = &pdev->dev;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       regs = devm_ioremap_resource(dev->dev, res);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+       dev->irq = platform_get_irq(pdev, 0);
+       if (dev->irq <= 0) {
+               dev_err(dev->dev, "No irq found\n");
+               return -ENODEV;
+       }
+       /* Some core configurations have separate irqs for IN and OUT events */
+       dev->irqi = platform_get_irq(pdev, 1);
+       if (dev->irqi > 0) {
+               dev->irqo = platform_get_irq(pdev, 2);
+               if (dev->irqo <= 0) {
+                       dev_err(dev->dev, "Found irqi but not irqo\n");
+                       return -ENODEV;
+               }
+       } else {
+               dev->irqi = 0;
+       }
+       dev->gadget.name = driver_name;
+       dev->gadget.max_speed = USB_SPEED_HIGH;
+       dev->gadget.ops = &gr_ops;
+       dev->gadget.quirk_ep_out_aligned_size = true;
+       spin_lock_init(&dev->lock);
+       dev->regs = regs;
+       platform_set_drvdata(pdev, dev);
+       /* Determine number of endpoints and data interface mode */
+       status = gr_read32(&dev->regs->status);
+       dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
+       dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
+       if (!(status & GR_STATUS_DM)) {
+               dev_err(dev->dev, "Slave mode cores are not supported\n");
+               return -ENODEV;
+       }
+       /* --- Effects of the following calls might need explicit cleanup --- */
+       /* Create DMA pool for descriptors */
+       dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
+                                        sizeof(struct gr_dma_desc), 4, 0);
+       if (!dev->desc_pool) {
+               dev_err(dev->dev, "Could not allocate DMA pool");
+               return -ENOMEM;
+       }
+       spin_lock(&dev->lock);
+       /* Inside lock so that no gadget can use this udc until probe is done */
+       retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
+       if (retval) {
+               dev_err(dev->dev, "Could not add gadget udc");
+               goto out;
+       }
+       dev->added = 1;
+       retval = gr_udc_init(dev);
+       if (retval)
+               goto out;
+       gr_dfs_create(dev);
+       /* Clear all interrupt enables that might be left on since last boot */
+       gr_disable_interrupts_and_pullup(dev);
+       retval = gr_request_irq(dev, dev->irq);
+       if (retval) {
+               dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
+               goto out;
+       }
+       if (dev->irqi) {
+               retval = gr_request_irq(dev, dev->irqi);
+               if (retval) {
+                       dev_err(dev->dev, "Failed to request irqi %d\n",
+                               dev->irqi);
+                       goto out;
+               }
+               retval = gr_request_irq(dev, dev->irqo);
+               if (retval) {
+                       dev_err(dev->dev, "Failed to request irqo %d\n",
+                               dev->irqo);
+                       goto out;
+               }
+       }
+       if (dev->irqi)
+               dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
+                        dev->irq, dev->irqi, dev->irqo);
+       else
+               dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);
+ out:
+       spin_unlock(&dev->lock);
+       if (retval)
+               gr_remove(pdev);
+       return retval;
+ }
+ /* Device-tree match table; the core is matched by node name */
+ static const struct of_device_id gr_match[] = {
+       {.name = "GAISLER_USBDC"},
+       {.name = "01_021"},
+       {},
+ };
+ MODULE_DEVICE_TABLE(of, gr_match);
+ /* Platform driver glue and module registration */
+ static struct platform_driver gr_driver = {
+       .driver = {
+               .name = DRIVER_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = gr_match,
+       },
+       .probe = gr_probe,
+       .remove = gr_remove,
+ };
+ module_platform_driver(gr_driver);
+ MODULE_AUTHOR("Aeroflex Gaisler AB.");
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
index 0000000000000000000000000000000000000000,fcff3a571b45da76ff466bcb76bbe219d9d25e53..040fb169b162a1e381180bc92ab408bc9268170f
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,2423 +1,2423 @@@
 -      /* Ensure that updates to the QH will occure before priming. */
+ /*
+  * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+  * Author: Chao Xie <chao.xie@marvell.com>
+  *       Neil Zhang <zhangwm@marvell.com>
+  *
+  * This program is free software; you can redistribute  it and/or modify it
+  * under  the terms of  the GNU General  Public License as published by the
+  * Free Software Foundation;  either version 2 of the  License, or (at your
+  * option) any later version.
+  */
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmapool.h>
+ #include <linux/kernel.h>
+ #include <linux/delay.h>
+ #include <linux/ioport.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/timer.h>
+ #include <linux/list.h>
+ #include <linux/interrupt.h>
+ #include <linux/moduleparam.h>
+ #include <linux/device.h>
+ #include <linux/usb/ch9.h>
+ #include <linux/usb/gadget.h>
+ #include <linux/usb/otg.h>
+ #include <linux/pm.h>
+ #include <linux/io.h>
+ #include <linux/irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/clk.h>
+ #include <linux/platform_data/mv_usb.h>
+ #include <asm/unaligned.h>
+ #include "mv_udc.h"
+ #define DRIVER_DESC           "Marvell PXA USB Device Controller driver"
+ #define DRIVER_VERSION                "8 Nov 2010"
+ #define ep_dir(ep)    (((ep)->ep_num == 0) ? \
+                               ((ep)->udc->ep0_dir) : ((ep)->direction))
+ /* timeout value -- usec */
+ #define RESET_TIMEOUT         10000
+ #define FLUSH_TIMEOUT         10000
+ #define EPSTATUS_TIMEOUT      10000
+ #define PRIME_TIMEOUT         10000
+ #define READSAFE_TIMEOUT      1000
+ #define LOOPS_USEC_SHIFT      1
+ #define LOOPS_USEC            (1 << LOOPS_USEC_SHIFT)
+ #define LOOPS(timeout)                ((timeout) >> LOOPS_USEC_SHIFT)
+ static DECLARE_COMPLETION(release_done);
+ static const char driver_name[] = "mv_udc";
+ static const char driver_desc[] = DRIVER_DESC;
+ static void nuke(struct mv_ep *ep, int status);
+ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);
+ /* for endpoint 0 operations: fixed control-endpoint descriptor */
+ static const struct usb_endpoint_descriptor mv_ep0_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bEndpointAddress =     0,
+       .bmAttributes =         USB_ENDPOINT_XFER_CONTROL,
+       .wMaxPacketSize =       EP0_MAX_PKT_SIZE,
+ };
+ /*
+  * (Re)initialize both ep0 queue heads (OUT at index 0, IN at index 1)
+  * and enable ep0 for control transfers in both directions.
+  */
+ static void ep0_reset(struct mv_udc *udc)
+ {
+       struct mv_ep *ep;
+       u32 epctrlx;
+       int i = 0;
+       /* ep0 in and out */
+       for (i = 0; i < 2; i++) {
+               ep = &udc->eps[i];
+               ep->udc = udc;
+               /* ep0 dQH */
+               ep->dqh = &udc->ep_dqh[i];
+               /* configure ep0 endpoint capabilities in dQH */
+               ep->dqh->max_packet_length =
+                       (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+                       | EP_QUEUE_HEAD_IOS;
+               ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;
+               epctrlx = readl(&udc->op_regs->epctrlx[0]);
+               if (i) {        /* TX */
+                       epctrlx |= EPCTRL_TX_ENABLE
+                               | (USB_ENDPOINT_XFER_CONTROL
+                                       << EPCTRL_TX_EP_TYPE_SHIFT);
+               } else {        /* RX */
+                       epctrlx |= EPCTRL_RX_ENABLE
+                               | (USB_ENDPOINT_XFER_CONTROL
+                                       << EPCTRL_RX_EP_TYPE_SHIFT);
+               }
+               writel(epctrlx, &udc->op_regs->epctrlx[0]);
+       }
+ }
+ /* protocol ep0 stall, will automatically be cleared on new transaction */
+ static void ep0_stall(struct mv_udc *udc)
+ {
+       u32     epctrlx;
+       /* set TX and RX to stall */
+       epctrlx = readl(&udc->op_regs->epctrlx[0]);
+       epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
+       writel(epctrlx, &udc->op_regs->epctrlx[0]);
+       /* update ep0 state: wait for the next SETUP packet on ep0-out */
+       udc->ep0_state = WAIT_FOR_SETUP;
+       udc->ep0_dir = EP_DIR_OUT;
+ }
+ /*
+  * Walk the dTD chain of @curr_req and collect completion status.
+  *
+  * Returns 1 if a dTD is still active (request not complete yet), a
+  * negative errno on a transfer error, or 0 on success with
+  * curr_req->req.actual updated to the number of bytes transferred.
+  * @index selects the dQH (ep_num * 2 + direction).
+  */
+ static int process_ep_req(struct mv_udc *udc, int index,
+       struct mv_req *curr_req)
+ {
+       struct mv_dtd   *curr_dtd;
+       struct mv_dqh   *curr_dqh;
+       int td_complete, actual, remaining_length;
+       int i, direction;
+       int retval = 0;
+       u32 errors;
+       u32 bit_pos;
+       curr_dqh = &udc->ep_dqh[index];
+       direction = index % 2;
+       curr_dtd = curr_req->head;
+       td_complete = 0;
+       actual = curr_req->req.length;
+       for (i = 0; i < curr_req->dtd_count; i++) {
+               if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
+                       dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
+                               udc->eps[index].name);
+                       return 1;
+               }
+               errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
+               if (!errors) {
+                       /* subtract what the controller did not transfer */
+                       remaining_length =
+                               (curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
+                                       >> DTD_LENGTH_BIT_POS;
+                       actual -= remaining_length;
+                       if (remaining_length) {
+                               if (direction) {
+                                       dev_dbg(&udc->dev->dev,
+                                               "TX dTD remains data\n");
+                                       retval = -EPROTO;
+                                       break;
+                               } else
+                                       break;
+                       }
+               } else {
+                       dev_info(&udc->dev->dev,
+                               "complete_tr error: ep=%d %s: error = 0x%x\n",
+                               index >> 1, direction ? "SEND" : "RECV",
+                               errors);
+                       if (errors & DTD_STATUS_HALTED) {
+                               /* Clear the errors and Halt condition */
+                               curr_dqh->size_ioc_int_sts &= ~errors;
+                               retval = -EPIPE;
+                       } else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
+                               retval = -EPROTO;
+                       } else if (errors & DTD_STATUS_TRANSACTION_ERR) {
+                               retval = -EILSEQ;
+                       }
+               }
+               if (i != curr_req->dtd_count - 1)
+                       curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
+       }
+       if (retval)
+               return retval;
+       if (direction == EP_DIR_OUT)
+               bit_pos = 1 << curr_req->ep->ep_num;
+       else
+               bit_pos = 1 << (16 + curr_req->ep->ep_num);
+       /* busy-wait until the controller has moved past this dTD */
+       while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
+               if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
+                       while (readl(&udc->op_regs->epstatus) & bit_pos)
+                               udelay(1);
+                       break;
+               }
+               udelay(1);
+       }
+       curr_req->req.actual = actual;
+       return 0;
+ }
+ /*
+  * done() - retire a request; caller blocked irqs
+  * @status : request status to be set, only works when
+  * request is still in progress.
+  *
+  * Unlinks the request, frees its dTD chain back to the DMA pool, unmaps
+  * the buffer and invokes the gadget completion callback with the udc
+  * lock dropped (hence the __releases/__acquires annotations).
+  */
+ static void done(struct mv_ep *ep, struct mv_req *req, int status)
+       __releases(&ep->udc->lock)
+       __acquires(&ep->udc->lock)
+ {
+       struct mv_udc *udc = NULL;
+       unsigned char stopped = ep->stopped;
+       struct mv_dtd *curr_td, *next_td;
+       int j;
+       udc = (struct mv_udc *)ep->udc;
+       /* Removed the req from ep->queue */
+       list_del_init(&req->queue);
+       /* req.status should be set as -EINPROGRESS in ep_queue() */
+       if (req->req.status == -EINPROGRESS)
+               req->req.status = status;
+       else
+               status = req->req.status;
+       /* Free dtd for the request */
+       next_td = req->head;
+       for (j = 0; j < req->dtd_count; j++) {
+               curr_td = next_td;
+               if (j != req->dtd_count - 1)
+                       next_td = curr_td->next_dtd_virt;
+               dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
+       }
+       usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
+       if (status && (status != -ESHUTDOWN))
+               dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
+                       ep->ep.name, &req->req, status,
+                       req->req.actual, req->req.length);
+       ep->stopped = 1;
+       spin_unlock(&ep->udc->lock);
+       /*
+        * complete() is from gadget layer,
+        * eg fsg->bulk_in_complete()
+        */
+       if (req->req.complete)
+               req->req.complete(&ep->ep, &req->req);
+       spin_lock(&ep->udc->lock);
+       ep->stopped = stopped;
+ }
+ /*
+  * Link a request's dTD chain into the endpoint's hardware queue.
+  *
+  * If the queue is non-empty the new chain is appended to the last dTD
+  * and the ATDTW tripwire protocol is used to safely check whether the
+  * endpoint must be (re)primed; otherwise the dQH is pointed at the new
+  * chain and the endpoint is primed directly.
+  * Returns 0 on success or -ETIME if the tripwire never held.
+  */
+ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
+ {
+       struct mv_udc *udc;
+       struct mv_dqh *dqh;
+       u32 bit_pos, direction;
+       u32 usbcmd, epstatus;
+       unsigned int loops;
+       int retval = 0;
+       udc = ep->udc;
+       direction = ep_dir(ep);
+       dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
+       bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+       /* check if the pipe is empty */
+       if (!(list_empty(&ep->queue))) {
+               struct mv_req *lastreq;
+               lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
+               lastreq->tail->dtd_next =
+                       req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+               wmb();
+               if (readl(&udc->op_regs->epprime) & bit_pos)
+                       goto done;
+               loops = LOOPS(READSAFE_TIMEOUT);
+               while (1) {
+                       /* start with setting the semaphores */
+                       usbcmd = readl(&udc->op_regs->usbcmd);
+                       usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
+                       writel(usbcmd, &udc->op_regs->usbcmd);
+                       /* read the endpoint status */
+                       epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
+                       /*
+                        * Reread the ATDTW semaphore bit to check if it is
+                        * cleared. When hardware see a hazard, it will clear
+                        * the bit or else we remain set to 1 and we can
+                        * proceed with priming of endpoint if not already
+                        * primed.
+                        */
+                       if (readl(&udc->op_regs->usbcmd)
+                               & USBCMD_ATDTW_TRIPWIRE_SET)
+                               break;
+                       loops--;
+                       if (loops == 0) {
+                               dev_err(&udc->dev->dev,
+                                       "Timeout for ATDTW_TRIPWIRE...\n");
+                               retval = -ETIME;
+                               goto done;
+                       }
+                       udelay(LOOPS_USEC);
+               }
+               /* Clear the semaphore */
+               usbcmd = readl(&udc->op_regs->usbcmd);
+               usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+               writel(usbcmd, &udc->op_regs->usbcmd);
+               if (epstatus)
+                       goto done;
+       }
+       /* Write dQH next pointer and terminate bit to 0 */
+       dqh->next_dtd_ptr = req->head->td_dma
+                               & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+       /* clear active and halt bit, in case set from a previous error */
+       dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
 -      /* We process some stardard setup requests here */
++      /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+       /* Prime the Endpoint */
+       writel(bit_pos, &udc->op_regs->epprime);
+ done:
+       return retval;
+ }
+ /*
+  * Allocate and fill one dTD from the DMA pool for the next slice of
+  * @req (at most EP_MAX_LENGTH_TRANSFER bytes, or mult * maxpacket for
+  * isochronous endpoints). Advances req->req.actual by *length and sets
+  * *is_last when this dTD completes the request (taking req.zero into
+  * account). Returns NULL on allocation failure.
+  */
+ static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
+               dma_addr_t *dma, int *is_last)
+ {
+       struct mv_dtd *dtd;
+       struct mv_udc *udc;
+       struct mv_dqh *dqh;
+       u32 temp, mult = 0;
+       /* how big will this transfer be? */
+       if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
+               dqh = req->ep->dqh;
+               mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
+                               & 0x3;
+               *length = min(req->req.length - req->req.actual,
+                               (unsigned)(mult * req->ep->ep.maxpacket));
+       } else
+               *length = min(req->req.length - req->req.actual,
+                               (unsigned)EP_MAX_LENGTH_TRANSFER);
+       udc = req->ep->udc;
+       /*
+        * Be careful that no _GFP_HIGHMEM is set,
+        * or we can not use dma_to_virt
+        */
+       dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
+       if (dtd == NULL)
+               return dtd;
+       dtd->td_dma = *dma;
+       /* initialize buffer page pointers (5 x 4 KiB pages per dTD) */
+       temp = (u32)(req->req.dma + req->req.actual);
+       dtd->buff_ptr0 = cpu_to_le32(temp);
+       temp &= ~0xFFF;
+       dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
+       dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
+       dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
+       dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);
+       req->req.actual += *length;
+       /* zlp is needed if req->req.zero is set */
+       if (req->req.zero) {
+               if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
+                       *is_last = 1;
+               else
+                       *is_last = 0;
+       } else if (req->req.length == req->req.actual)
+               *is_last = 1;
+       else
+               *is_last = 0;
+       /* Fill in the transfer size; set active bit */
+       temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
+       /* Enable interrupt for the last dtd of a request */
+       if (*is_last && !req->req.no_interrupt)
+               temp |= DTD_IOC;
+       temp |= mult << 10;
+       dtd->size_ioc_sts = temp;
+       mb();
+       return dtd;
+ }
+ /*
+  * generate dTD linked list for a request
+  * Builds dTDs with build_dtd() until the whole request is covered,
+  * linking them by DMA address and virtual pointer. Returns 0 on
+  * success or -ENOMEM if a dTD allocation fails.
+  */
+ static int req_to_dtd(struct mv_req *req)
+ {
+       unsigned count;
+       int is_last, is_first = 1;
+       struct mv_dtd *dtd, *last_dtd = NULL;
+       struct mv_udc *udc;
+       dma_addr_t dma;
+       udc = req->ep->udc;
+       do {
+               dtd = build_dtd(req, &count, &dma, &is_last);
+               if (dtd == NULL)
+                       return -ENOMEM;
+               if (is_first) {
+                       is_first = 0;
+                       req->head = dtd;
+               } else {
+                       last_dtd->dtd_next = dma;
+                       last_dtd->next_dtd_virt = dtd;
+               }
+               last_dtd = dtd;
+               req->dtd_count++;
+       } while (!is_last);
+       /* set terminate bit to 1 for the last dTD */
+       dtd->dtd_next = DTD_NEXT_TERMINATE;
+       req->tail = dtd;
+       return 0;
+ }
+ /*
+  * usb_ep_ops.enable: configure the endpoint's dQH (max packet length,
+  * mult, ZLT, IOS) from @desc and enable it in the EPCTRLx register.
+  * Returns 0 on success, -EINVAL/-ESHUTDOWN on bad state.
+  */
+ static int mv_ep_enable(struct usb_ep *_ep,
+               const struct usb_endpoint_descriptor *desc)
+ {
+       struct mv_udc *udc;
+       struct mv_ep *ep;
+       struct mv_dqh *dqh;
+       u16 max = 0;
+       u32 bit_pos, epctrlx, direction;
+       unsigned char zlt = 0, ios = 0, mult = 0;
+       unsigned long flags;
+       /*
+        * NOTE(review): ep->udc is dereferenced (via container_of below)
+        * before the !_ep check; a NULL _ep would oops here - confirm
+        * callers never pass NULL.
+        */
+       ep = container_of(_ep, struct mv_ep, ep);
+       udc = ep->udc;
+       if (!_ep || !desc
+                       || desc->bDescriptorType != USB_DT_ENDPOINT)
+               return -EINVAL;
+       if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+               return -ESHUTDOWN;
+       direction = ep_dir(ep);
+       max = usb_endpoint_maxp(desc);
+       /*
+        * disable HW zero length termination select
+        * driver handles zero length packet through req->req.zero
+        */
+       zlt = 1;
+       bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+       /* Check if the Endpoint is Primed */
+       if ((readl(&udc->op_regs->epprime) & bit_pos)
+               || (readl(&udc->op_regs->epstatus) & bit_pos)) {
+               dev_info(&udc->dev->dev,
+                       "ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
+                       " ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+                       (unsigned)ep->ep_num, direction ? "SEND" : "RECV",
+                       (unsigned)readl(&udc->op_regs->epprime),
+                       (unsigned)readl(&udc->op_regs->epstatus),
+                       (unsigned)bit_pos);
+               goto en_done;
+       }
+       /* Set the max packet length, interrupt on Setup and Mult fields */
+       switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+       case USB_ENDPOINT_XFER_BULK:
+               zlt = 1;
+               mult = 0;
+               break;
+       case USB_ENDPOINT_XFER_CONTROL:
+               ios = 1;
+               /* fall through - control and interrupt both use mult 0 */
+       case USB_ENDPOINT_XFER_INT:
+               mult = 0;
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               /* Calculate transactions needed for high bandwidth iso */
+               mult = (unsigned char)(1 + ((max >> 11) & 0x03));
+               max = max & 0x7ff;      /* bit 0~10 */
+               /* 3 transactions at most */
+               if (mult > 3)
+                       goto en_done;
+               break;
+       default:
+               goto en_done;
+       }
+       spin_lock_irqsave(&udc->lock, flags);
+       /* Get the endpoint queue head address */
+       dqh = ep->dqh;
+       dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
+               | (mult << EP_QUEUE_HEAD_MULT_POS)
+               | (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
+               | (ios ? EP_QUEUE_HEAD_IOS : 0);
+       dqh->next_dtd_ptr = 1;
+       dqh->size_ioc_int_sts = 0;
+       ep->ep.maxpacket = max;
+       ep->ep.desc = desc;
+       ep->stopped = 0;
+       /* Enable the endpoint for Rx or Tx and set the endpoint type */
+       epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+       if (direction == EP_DIR_IN) {
+               epctrlx &= ~EPCTRL_TX_ALL_MASK;
+               epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
+                       | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+                               << EPCTRL_TX_EP_TYPE_SHIFT);
+       } else {
+               epctrlx &= ~EPCTRL_RX_ALL_MASK;
+               epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
+                       | ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+                               << EPCTRL_RX_EP_TYPE_SHIFT);
+       }
+       writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+       /*
+        * Implement Guideline (GL# USB-7) The unused endpoint type must
+        * be programmed to bulk.
+        */
+       epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+       if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
+               epctrlx |= (USB_ENDPOINT_XFER_BULK
+                               << EPCTRL_RX_EP_TYPE_SHIFT);
+               writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+       }
+       epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+       if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
+               epctrlx |= (USB_ENDPOINT_XFER_BULK
+                               << EPCTRL_TX_EP_TYPE_SHIFT);
+               writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+       }
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return 0;
+ en_done:
+       return -EINVAL;
+ }
+ /*
+  * usb_ep_ops.disable: clear the dQH, disable the endpoint in EPCTRLx
+  * and retire all pending requests with -ESHUTDOWN.
+  */
+ static int  mv_ep_disable(struct usb_ep *_ep)
+ {
+       struct mv_udc *udc;
+       struct mv_ep *ep;
+       struct mv_dqh *dqh;
+       u32 bit_pos, epctrlx, direction;
+       unsigned long flags;
+       ep = container_of(_ep, struct mv_ep, ep);
+       if ((_ep == NULL) || !ep->ep.desc)
+               return -EINVAL;
+       udc = ep->udc;
+       /* Get the endpoint queue head address */
+       dqh = ep->dqh;
+       spin_lock_irqsave(&udc->lock, flags);
+       direction = ep_dir(ep);
+       bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
+       /* Reset the max packet length and the interrupt on Setup */
+       dqh->max_packet_length = 0;
+       /* Disable the endpoint for Rx or Tx and reset the endpoint type */
+       epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+       epctrlx &= ~((direction == EP_DIR_IN)
+                       ? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
+                       : (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
+       writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+       /* nuke all pending requests (does flush) */
+       nuke(ep, -ESHUTDOWN);
+       ep->ep.desc = NULL;
+       ep->stopped = 1;
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return 0;
+ }
+ /*
+  * usb_ep_ops.alloc_request: allocate a request wrapper; the DMA address
+  * is marked invalid until the request is mapped in mv_ep_queue().
+  */
+ static struct usb_request *
+ mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
+ {
+       struct mv_req *req = NULL;
+       req = kzalloc(sizeof *req, gfp_flags);
+       if (!req)
+               return NULL;
+       req->req.dma = DMA_ADDR_INVALID;
+       INIT_LIST_HEAD(&req->queue);
+       return &req->req;
+ }
+ /* usb_ep_ops.free_request: release a request allocated above */
+ static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
+ {
+       struct mv_req *req = NULL;
+       /* container_of on NULL is only pointer arithmetic; kfree is guarded */
+       req = container_of(_req, struct mv_req, req);
+       if (_req)
+               kfree(req);
+ }
+ /*
+  * usb_ep_ops.fifo_flush: write ENDPTFLUSH for the endpoint (both
+  * directions for ep0) and poll until the controller reports the flush
+  * complete, with bounded timeouts on both the flush and status loops.
+  */
+ static void mv_ep_fifo_flush(struct usb_ep *_ep)
+ {
+       struct mv_udc *udc;
+       u32 bit_pos, direction;
+       struct mv_ep *ep;
+       unsigned int loops;
+       if (!_ep)
+               return;
+       ep = container_of(_ep, struct mv_ep, ep);
+       if (!ep->ep.desc)
+               return;
+       udc = ep->udc;
+       direction = ep_dir(ep);
+       if (ep->ep_num == 0)
+               bit_pos = (1 << 16) | 1;
+       else if (direction == EP_DIR_OUT)
+               bit_pos = 1 << ep->ep_num;
+       else
+               bit_pos = 1 << (16 + ep->ep_num);
+       loops = LOOPS(EPSTATUS_TIMEOUT);
+       do {
+               unsigned int inter_loops;
+               if (loops == 0) {
+                       dev_err(&udc->dev->dev,
+                               "TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
+                               (unsigned)readl(&udc->op_regs->epstatus),
+                               (unsigned)bit_pos);
+                       return;
+               }
+               /* Write 1 to the Flush register */
+               writel(bit_pos, &udc->op_regs->epflush);
+               /* Wait until flushing completed */
+               inter_loops = LOOPS(FLUSH_TIMEOUT);
+               while (readl(&udc->op_regs->epflush)) {
+                       /*
+                        * ENDPTFLUSH bit should be cleared to indicate this
+                        * operation is complete
+                        */
+                       if (inter_loops == 0) {
+                               dev_err(&udc->dev->dev,
+                                       "TIMEOUT for ENDPTFLUSH=0x%x,"
+                                       "bit_pos=0x%x\n",
+                                       (unsigned)readl(&udc->op_regs->epflush),
+                                       (unsigned)bit_pos);
+                               return;
+                       }
+                       inter_loops--;
+                       udelay(LOOPS_USEC);
+               }
+               loops--;
+       } while (readl(&udc->op_regs->epstatus) & bit_pos);
+ }
+ /*
+  * queues (submits) an I/O request to an endpoint
+  * Validates the request, DMA-maps its buffer, builds the dTD chain and
+  * hands it to the hardware via queue_dtd(). On any failure after the
+  * mapping succeeds the buffer is unmapped again (err_unmap_dma).
+  */
+ static int
+ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
+ {
+       struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+       struct mv_req *req = container_of(_req, struct mv_req, req);
+       struct mv_udc *udc = ep->udc;
+       unsigned long flags;
+       int retval;
+       /* catch various bogus parameters */
+       if (!_req || !req->req.complete || !req->req.buf
+                       || !list_empty(&req->queue)) {
+               dev_err(&udc->dev->dev, "%s, bad params", __func__);
+               return -EINVAL;
+       }
+       if (unlikely(!_ep || !ep->ep.desc)) {
+               dev_err(&udc->dev->dev, "%s, bad ep", __func__);
+               return -EINVAL;
+       }
+       udc = ep->udc;
+       if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
+               return -ESHUTDOWN;
+       req->ep = ep;
+       /* map virtual address to hardware */
+       retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
+       if (retval)
+               return retval;
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       req->dtd_count = 0;
+       spin_lock_irqsave(&udc->lock, flags);
+       /* build dtds and push them to device queue */
+       if (!req_to_dtd(req)) {
+               retval = queue_dtd(ep, req);
+               if (retval) {
+                       spin_unlock_irqrestore(&udc->lock, flags);
+                       dev_err(&udc->dev->dev, "Failed to queue dtd\n");
+                       goto err_unmap_dma;
+               }
+       } else {
+               spin_unlock_irqrestore(&udc->lock, flags);
+               dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
+               retval = -ENOMEM;
+               goto err_unmap_dma;
+       }
+       /* Update ep0 state */
+       if (ep->ep_num == 0)
+               udc->ep0_state = DATA_STATE_XMIT;
+       /* irq handler advances the queue */
+       list_add_tail(&req->queue, &ep->queue);
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return 0;
+ err_unmap_dma:
+       usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));
+       return retval;
+ }
+ /*
+  * mv_prime_ep - point the endpoint's queue head at @req and prime it.
+  * Used to restart transfers after the head of the dTD chain has been
+  * unlinked (see mv_ep_dequeue()); caller holds the udc lock.
+  */
+ static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
+ {
+       struct mv_dqh *dqh = ep->dqh;
+       u32 bit_pos;
+       /* Write dQH next pointer and terminate bit to 0 */
+       dqh->next_dtd_ptr = req->head->td_dma
+               & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+       /* clear active and halt bit, in case set from a previous error */
+       dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+       /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+       /* OUT endpoints use bits 0..15, IN endpoints bits 16..31 */
+       bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+       /* Prime the Endpoint */
+       writel(bit_pos, &ep->udc->op_regs->epprime);
+ }
+ /* dequeues (cancels, unlinks) an I/O request from an endpoint */
+ static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+ {
+       struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
+       struct mv_req *req;
+       struct mv_udc *udc = ep->udc;
+       unsigned long flags;
+       int stopped, ret = 0;
+       u32 epctrlx;
+       if (!_ep || !_req)
+               return -EINVAL;
+       spin_lock_irqsave(&ep->udc->lock, flags);
+       stopped = ep->stopped;
+       /* Stop the ep before we deal with the queue */
+       ep->stopped = 1;
+       /* disable the endpoint's direction while we unlink */
+       epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+       if (ep_dir(ep) == EP_DIR_IN)
+               epctrlx &= ~EPCTRL_TX_ENABLE;
+       else
+               epctrlx &= ~EPCTRL_RX_ENABLE;
+       writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+       /* make sure it's actually queued on this endpoint */
+       list_for_each_entry(req, &ep->queue, queue) {
+               if (&req->req == _req)
+                       break;
+       }
+       /* loop ran off the list head: _req was not queued on this ep */
+       if (&req->req != _req) {
+               ret = -EINVAL;
+               goto out;
+       }
+       /* The request is in progress, or completed but not dequeued */
+       if (ep->queue.next == &req->queue) {
+               _req->status = -ECONNRESET;
+               mv_ep_fifo_flush(_ep);  /* flush current transfer */
+               /* The request isn't the last request in this ep queue */
+               if (req->queue.next != &ep->queue) {
+                       struct mv_req *next_req;
+                       next_req = list_entry(req->queue.next,
+                               struct mv_req, queue);
+                       /* Point the QH to the first TD of next request */
+                       mv_prime_ep(ep, next_req);
+               } else {
+                       struct mv_dqh *qh;
+                       qh = ep->dqh;
+                       /* terminate bit set: nothing left on this queue */
+                       qh->next_dtd_ptr = 1;
+                       qh->size_ioc_int_sts = 0;
+               }
+               /* The request hasn't been processed, patch up the TD chain */
+       } else {
+               struct mv_req *prev_req;
+               prev_req = list_entry(req->queue.prev, struct mv_req, queue);
+               /* bypass req's dTDs: prev->tail now links to req's successor */
+               writel(readl(&req->tail->dtd_next),
+                               &prev_req->tail->dtd_next);
+       }
+       done(ep, req, -ECONNRESET);
+       /* Enable EP */
+ out:
+       /* re-enable the direction even on error paths */
+       epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
+       if (ep_dir(ep) == EP_DIR_IN)
+               epctrlx |= EPCTRL_TX_ENABLE;
+       else
+               epctrlx |= EPCTRL_RX_ENABLE;
+       writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
+       ep->stopped = stopped;
+       spin_unlock_irqrestore(&ep->udc->lock, flags);
+       return ret;
+ }
+ /*
+  * ep_set_stall - set or clear the hardware STALL bit for one direction
+  * of endpoint @ep_num; clearing also requests a data-toggle reset via
+  * EPCTRL_*_DATA_TOGGLE_RST.
+  */
+ static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
+ {
+       u32 epctrlx;
+       epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+       if (stall) {
+               if (direction == EP_DIR_IN)
+                       epctrlx |= EPCTRL_TX_EP_STALL;
+               else
+                       epctrlx |= EPCTRL_RX_EP_STALL;
+       } else {
+               if (direction == EP_DIR_IN) {
+                       epctrlx &= ~EPCTRL_TX_EP_STALL;
+                       epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
+               } else {
+                       epctrlx &= ~EPCTRL_RX_EP_STALL;
+                       epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
+               }
+       }
+       writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
+ }
+ /* returns 1 if the given endpoint/direction is currently stalled, else 0 */
+ static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
+ {
+       u32 epctrlx;
+       epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);
+       if (direction == EP_DIR_OUT)
+               return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
+       else
+               return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
+ }
+ /*
+  * mv_ep_set_halt_wedge - halt (stall) or un-halt an endpoint.
+  * @halt:  nonzero to stall, zero to clear the stall
+  * @wedge: nonzero (with @halt) to wedge the endpoint: only the host's
+  *         ClearFeature(HALT) path (ch9clearfeature()) clears the wedge
+  *
+  * Returns 0, -EINVAL for a bad/unconfigured ep, -EOPNOTSUPP for an
+  * isochronous ep, or -EAGAIN when an IN ep still has requests queued.
+  *
+  * Fixes: validate @_ep before dereferencing ep->udc, and detect ISO
+  * endpoints with usb_endpoint_xfer_isoc() -- the old bare '=='
+  * comparison of bmAttributes missed ISO eps with sync/usage bits set.
+  */
+ static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
+ {
+       struct mv_ep *ep;
+       unsigned long flags = 0;
+       int status = 0;
+       struct mv_udc *udc;
+       ep = container_of(_ep, struct mv_ep, ep);
+       if (!_ep || !ep->ep.desc) {
+               status = -EINVAL;
+               goto out;
+       }
+       udc = ep->udc;
+       if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
+               status = -EOPNOTSUPP;
+               goto out;
+       }
+       /*
+        * Attempt to halt IN ep will fail if any transfer requests
+        * are still queue
+        */
+       if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
+               status = -EAGAIN;
+               goto out;
+       }
+       spin_lock_irqsave(&ep->udc->lock, flags);
+       ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
+       if (halt && wedge)
+               ep->wedge = 1;
+       else if (!halt)
+               ep->wedge = 0;
+       spin_unlock_irqrestore(&ep->udc->lock, flags);
+       /* halting ep0 restarts the control state machine */
+       if (ep->ep_num == 0) {
+               udc->ep0_state = WAIT_FOR_SETUP;
+               udc->ep0_dir = EP_DIR_OUT;
+       }
+ out:
+       return status;
+ }
+ /* usb_ep_ops.set_halt: halt/un-halt without wedging */
+ static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
+ {
+       return mv_ep_set_halt_wedge(_ep, halt, 0);
+ }
+ /* usb_ep_ops.set_wedge: halt and wedge the endpoint */
+ static int mv_ep_set_wedge(struct usb_ep *_ep)
+ {
+       return mv_ep_set_halt_wedge(_ep, 1, 1);
+ }
+ /*
+  * Endpoint operations shared by every endpoint (wired up in eps_init()).
+  * Never modified at runtime and usb_ep.ops is a const pointer, so the
+  * table can be const and live in rodata.
+  */
+ static const struct usb_ep_ops mv_ep_ops = {
+       .enable         = mv_ep_enable,
+       .disable        = mv_ep_disable,
+       .alloc_request  = mv_alloc_request,
+       .free_request   = mv_free_request,
+       .queue          = mv_ep_queue,
+       .dequeue        = mv_ep_dequeue,
+       .set_wedge      = mv_ep_set_wedge,
+       .set_halt       = mv_ep_set_halt,
+       .fifo_flush     = mv_ep_fifo_flush,     /* flush fifo */
+ };
+ /* prepare and enable the controller clock */
+ static void udc_clock_enable(struct mv_udc *udc)
+ {
+       clk_prepare_enable(udc->clk);
+ }
+ /* disable and unprepare the controller clock */
+ static void udc_clock_disable(struct mv_udc *udc)
+ {
+       clk_disable_unprepare(udc->clk);
+ }
+ /* mask device interrupts and clear Run/Stop so the controller halts */
+ static void udc_stop(struct mv_udc *udc)
+ {
+       u32 tmp;
+       /* Disable interrupts */
+       tmp = readl(&udc->op_regs->usbintr);
+       tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
+               USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
+       writel(tmp, &udc->op_regs->usbintr);
+       udc->stopped = 1;
+       /* Reset the Run bit in the command register to stop VUSB */
+       tmp = readl(&udc->op_regs->usbcmd);
+       tmp &= ~USBCMD_RUN_STOP;
+       writel(tmp, &udc->op_regs->usbcmd);
+ }
+ /*
+  * Unmask device interrupts and set Run/Stop to start the controller.
+  * NOTE(review): USBINTR_DEVICE_SUSPEND is enabled here but udc_stop()
+  * does not mask it -- confirm that asymmetry is intentional.
+  */
+ static void udc_start(struct mv_udc *udc)
+ {
+       u32 usbintr;
+       usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
+               | USBINTR_PORT_CHANGE_DETECT_EN
+               | USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
+       /* Enable interrupts */
+       writel(usbintr, &udc->op_regs->usbintr);
+       udc->stopped = 0;
+       /* Set the Run bit in the command register */
+       writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
+ }
+ /*
+  * udc_reset - hard-reset the controller and re-program it for device
+  * mode: dQH list base address, port speed options and ep0 control bits.
+  * Returns 0 or -ETIMEDOUT if the controller never leaves reset.
+  */
+ static int udc_reset(struct mv_udc *udc)
+ {
+       unsigned int loops;
+       u32 tmp, portsc;
+       /* Stop the controller */
+       tmp = readl(&udc->op_regs->usbcmd);
+       tmp &= ~USBCMD_RUN_STOP;
+       writel(tmp, &udc->op_regs->usbcmd);
+       /* Reset the controller to get default values */
+       writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);
+       /* wait for reset to complete */
+       loops = LOOPS(RESET_TIMEOUT);
+       while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+               if (loops == 0) {
+                       dev_err(&udc->dev->dev,
+                               "Wait for RESET completed TIMEOUT\n");
+                       return -ETIMEDOUT;
+               }
+               loops--;
+               udelay(LOOPS_USEC);
+       }
+       /* set controller to device mode */
+       tmp = readl(&udc->op_regs->usbmode);
+       tmp |= USBMODE_CTRL_MODE_DEVICE;
+       /* turn setup lockout off, require setup tripwire in usbcmd */
+       tmp |= USBMODE_SETUP_LOCK_OFF;
+       writel(tmp, &udc->op_regs->usbmode);
+       writel(0x0, &udc->op_regs->epsetupstat);
+       /* Configure the Endpoint List Address */
+       writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
+               &udc->op_regs->eplistaddr);
+       portsc = readl(&udc->op_regs->portsc[0]);
+       /*
+        * NOTE(review): (~A | ~B) equals ~(A & B); unless the two masks
+        * share bits this is all-ones and the &= below is a no-op.
+        * ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER) looks intended --
+        * confirm against the controller manual before changing.
+        */
+       if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
+               portsc &= (~PORTSCX_W1C_BITS | ~PORTSCX_PORT_POWER);
+       if (udc->force_fs)
+               portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
+       else
+               portsc &= (~PORTSCX_FORCE_FULL_SPEED_CONNECT);
+       writel(portsc, &udc->op_regs->portsc[0]);
+       /* clear any stale ep0 stall bits */
+       tmp = readl(&udc->op_regs->epctrlx[0]);
+       tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
+       writel(tmp, &udc->op_regs->epctrlx[0]);
+       return 0;
+ }
+ /*
+  * Power up the controller: enable clocks and (re)init the PHY.
+  * No-op when already active; on PHY init failure the clock is
+  * re-gated and the error returned.
+  */
+ static int mv_udc_enable_internal(struct mv_udc *udc)
+ {
+       int retval;
+       if (udc->active)
+               return 0;
+       dev_dbg(&udc->dev->dev, "enable udc\n");
+       udc_clock_enable(udc);
+       if (udc->pdata->phy_init) {
+               retval = udc->pdata->phy_init(udc->phy_regs);
+               if (retval) {
+                       dev_err(&udc->dev->dev,
+                               "init phy error %d\n", retval);
+                       udc_clock_disable(udc);
+                       return retval;
+               }
+       }
+       udc->active = 1;
+       return 0;
+ }
+ /*
+  * With clock gating the controller is powered on demand; without it
+  * the controller stays enabled and this is a no-op.
+  */
+ static int mv_udc_enable(struct mv_udc *udc)
+ {
+       if (udc->clock_gating)
+               return mv_udc_enable_internal(udc);
+       return 0;
+ }
+ /* power down the controller: deinit PHY, gate clocks; no-op if inactive */
+ static void mv_udc_disable_internal(struct mv_udc *udc)
+ {
+       if (udc->active) {
+               dev_dbg(&udc->dev->dev, "disable udc\n");
+               if (udc->pdata->phy_deinit)
+                       udc->pdata->phy_deinit(udc->phy_regs);
+               udc_clock_disable(udc);
+               udc->active = 0;
+       }
+ }
+ /* counterpart of mv_udc_enable(): only powers down under clock gating */
+ static void mv_udc_disable(struct mv_udc *udc)
+ {
+       if (udc->clock_gating)
+               mv_udc_disable_internal(udc);
+ }
+ /* usb_gadget_ops.get_frame: current frame index from FRINDEX, masked */
+ static int mv_udc_get_frame(struct usb_gadget *gadget)
+ {
+       struct mv_udc *udc;
+       u16     retval;
+       if (!gadget)
+               return -ENODEV;
+       udc = container_of(gadget, struct mv_udc, gadget);
+       retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+       return retval;
+ }
+ /* Tries to wake up the host connected to this gadget */
+ static int mv_udc_wakeup(struct usb_gadget *gadget)
+ {
+       struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
+       u32 portsc;
+       /* Remote wakeup feature not enabled by host */
+       if (!udc->remote_wakeup)
+               return -ENOTSUPP;
+       portsc = readl(&udc->op_regs->portsc);
+       /* not suspended? */
+       if (!(portsc & PORTSCX_PORT_SUSPEND))
+               return 0;
+       /* trigger force resume */
+       portsc |= PORTSCX_PORT_FORCE_RESUME;
+       writel(portsc, &udc->op_regs->portsc[0]);
+       return 0;
+ }
+ /*
+  * usb_gadget_ops.vbus_session: VBUS presence changed.  With a bound,
+  * soft-connected driver and VBUS present, power up and (re)start the
+  * controller; on VBUS loss, quiesce transfers and power it down.
+  */
+ static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
+ {
+       struct mv_udc *udc;
+       unsigned long flags;
+       int retval = 0;
+       udc = container_of(gadget, struct mv_udc, gadget);
+       spin_lock_irqsave(&udc->lock, flags);
+       udc->vbus_active = (is_active != 0);
+       dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+               __func__, udc->softconnect, udc->vbus_active);
+       if (udc->driver && udc->softconnect && udc->vbus_active) {
+               retval = mv_udc_enable(udc);
+               if (retval == 0) {
+                       /* Clock is disabled, need re-init registers */
+                       udc_reset(udc);
+                       ep0_reset(udc);
+                       udc_start(udc);
+               }
+       } else if (udc->driver && udc->softconnect) {
+               /* nothing to tear down if the controller is powered off */
+               if (!udc->active)
+                       goto out;
+               /* stop all the transfer in queue*/
+               stop_activity(udc, udc->driver);
+               udc_stop(udc);
+               mv_udc_disable(udc);
+       }
+ out:
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return retval;
+ }
+ /*
+  * usb_gadget_ops.pullup: software connect/disconnect of the D+ pullup.
+  * Mirrors mv_udc_vbus_session() with softconnect as the changing input.
+  */
+ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
+ {
+       struct mv_udc *udc;
+       unsigned long flags;
+       int retval = 0;
+       udc = container_of(gadget, struct mv_udc, gadget);
+       spin_lock_irqsave(&udc->lock, flags);
+       udc->softconnect = (is_on != 0);
+       dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
+                       __func__, udc->softconnect, udc->vbus_active);
+       if (udc->driver && udc->softconnect && udc->vbus_active) {
+               retval = mv_udc_enable(udc);
+               if (retval == 0) {
+                       /* Clock is disabled, need re-init registers */
+                       udc_reset(udc);
+                       ep0_reset(udc);
+                       udc_start(udc);
+               }
+       } else if (udc->driver && udc->vbus_active) {
+               /* stop all the transfer in queue*/
+               stop_activity(udc, udc->driver);
+               udc_stop(udc);
+               mv_udc_disable(udc);
+       }
+       spin_unlock_irqrestore(&udc->lock, flags);
+       return retval;
+ }
+ /* forward declarations for the gadget-ops table; bodies are below */
+ static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
+ static int mv_udc_stop(struct usb_gadget *, struct usb_gadget_driver *);
+ /* device controller usb_gadget_ops structure */
+ static const struct usb_gadget_ops mv_ops = {
+       /* returns the current frame number */
+       .get_frame      = mv_udc_get_frame,
+       /* tries to wake up the host connected to this gadget */
+       .wakeup         = mv_udc_wakeup,
+       /* notify controller that VBUS is powered or not */
+       .vbus_session   = mv_udc_vbus_session,
+       /* D+ pullup, software-controlled connect/disconnect to USB host */
+       .pullup         = mv_udc_pullup,
+       .udc_start      = mv_udc_start,
+       .udc_stop       = mv_udc_stop,
+ };
+ /*
+  * eps_init - software initialisation of ep0 and all other endpoints.
+  * NOTE(review): the loop starts at i = 2, so eps[1] (the IN half of
+  * ep0) is skipped and ep0's dqh pointer is presumably set elsewhere --
+  * confirm against ep0_reset().
+  * NOTE(review): strncpy() does not guarantee NUL termination; assumes
+  * sizeof(ep->name) exceeds the generated name length.
+  */
+ static int eps_init(struct mv_udc *udc)
+ {
+       struct mv_ep    *ep;
+       char name[14];
+       int i;
+       /* initialize ep0 */
+       ep = &udc->eps[0];
+       ep->udc = udc;
+       strncpy(ep->name, "ep0", sizeof(ep->name));
+       ep->ep.name = ep->name;
+       ep->ep.ops = &mv_ep_ops;
+       ep->wedge = 0;
+       ep->stopped = 0;
+       usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
+       ep->ep_num = 0;
+       ep->ep.desc = &mv_ep0_desc;
+       INIT_LIST_HEAD(&ep->queue);
+       ep->ep_type = USB_ENDPOINT_XFER_CONTROL;
+       /* initialize other endpoints */
+       for (i = 2; i < udc->max_eps * 2; i++) {
+               ep = &udc->eps[i];
+               /* odd slots are IN, even slots are OUT, two per ep number */
+               if (i % 2) {
+                       snprintf(name, sizeof(name), "ep%din", i / 2);
+                       ep->direction = EP_DIR_IN;
+               } else {
+                       snprintf(name, sizeof(name), "ep%dout", i / 2);
+                       ep->direction = EP_DIR_OUT;
+               }
+               ep->udc = udc;
+               strncpy(ep->name, name, sizeof(ep->name));
+               ep->ep.name = ep->name;
+               ep->ep.ops = &mv_ep_ops;
+               ep->stopped = 0;
+               usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
+               ep->ep_num = i / 2;
+               INIT_LIST_HEAD(&ep->queue);
+               list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+               ep->dqh = &udc->ep_dqh[i];
+       }
+       return 0;
+ }
+ /* delete all endpoint requests, called with spinlock held */
+ static void nuke(struct mv_ep *ep, int status)
+ {
+       /* called with spinlock held */
+       ep->stopped = 1;
+       /* endpoint fifo flush */
+       mv_ep_fifo_flush(&ep->ep);
+       /* complete every queued request with @status (e.g. -ESHUTDOWN) */
+       while (!list_empty(&ep->queue)) {
+               struct mv_req *req = NULL;
+               req = list_entry(ep->queue.next, struct mv_req, queue);
+               done(ep, req, status);
+       }
+ }
+ /* stop all USB activities */
+ static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
+ {
+       struct mv_ep    *ep;
+       nuke(&udc->eps[0], -ESHUTDOWN);
+       list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
+               nuke(ep, -ESHUTDOWN);
+       }
+       /* report disconnect; the driver is already quiesced */
+       if (driver) {
+               /* lock dropped across the callback; driver may re-enter us */
+               spin_unlock(&udc->lock);
+               driver->disconnect(&udc->gadget);
+               spin_lock(&udc->lock);
+       }
+ }
+ /*
+  * usb_gadget_ops.udc_start: bind a gadget driver, register with the
+  * OTG transceiver if present, and enable the pullup.
+  * NOTE(review): udc->driver is tested outside the lock; a concurrent
+  * bind race looks possible -- confirm the udc core serialises this.
+  */
+ static int mv_udc_start(struct usb_gadget *gadget,
+               struct usb_gadget_driver *driver)
+ {
+       struct mv_udc *udc;
+       int retval = 0;
+       unsigned long flags;
+       udc = container_of(gadget, struct mv_udc, gadget);
+       if (udc->driver)
+               return -EBUSY;
+       spin_lock_irqsave(&udc->lock, flags);
+       /* hook up the driver ... */
+       driver->driver.bus = NULL;
+       udc->driver = driver;
+       udc->usb_state = USB_STATE_ATTACHED;
+       udc->ep0_state = WAIT_FOR_SETUP;
+       udc->ep0_dir = EP_DIR_OUT;
+       spin_unlock_irqrestore(&udc->lock, flags);
+       if (udc->transceiver) {
+               retval = otg_set_peripheral(udc->transceiver->otg,
+                                       &udc->gadget);
+               if (retval) {
+                       dev_err(&udc->dev->dev,
+                               "unable to register peripheral to otg\n");
+                       udc->driver = NULL;
+                       return retval;
+               }
+       }
+       /* pullup is always on */
+       mv_udc_pullup(&udc->gadget, 1);
+       /* When boot with cable attached, there will be no vbus irq occurred */
+       if (udc->qwork)
+               queue_work(udc->qwork, &udc->vbus_work);
+       return 0;
+ }
+ /*
+  * usb_gadget_ops.udc_stop: quiesce the hardware and unbind the driver.
+  * mv_udc_enable() first -- presumably so the register block is clocked
+  * while we stop it -- then all transfers are nuked and power removed.
+  */
+ static int mv_udc_stop(struct usb_gadget *gadget,
+               struct usb_gadget_driver *driver)
+ {
+       struct mv_udc *udc;
+       unsigned long flags;
+       udc = container_of(gadget, struct mv_udc, gadget);
+       spin_lock_irqsave(&udc->lock, flags);
+       mv_udc_enable(udc);
+       udc_stop(udc);
+       /* stop all usb activities */
+       udc->gadget.speed = USB_SPEED_UNKNOWN;
+       stop_activity(udc, driver);
+       mv_udc_disable(udc);
+       spin_unlock_irqrestore(&udc->lock, flags);
+       /* unbind gadget driver */
+       udc->driver = NULL;
+       return 0;
+ }
+ /* write @mode into the port test-control field (PORTSC, shifted to bit 16) */
+ static void mv_set_ptc(struct mv_udc *udc, u32 mode)
+ {
+       u32 portsc;
+       portsc = readl(&udc->op_regs->portsc[0]);
+       portsc |= mode << 16;
+       writel(portsc, &udc->op_regs->portsc[0]);
+ }
+ /*
+  * Completion handler for the ep0 status request when a USB test mode is
+  * pending: the test mode must only be entered after the status stage
+  * has gone out, so mv_set_ptc() is deferred to this callback.
+  */
+ static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
+ {
+       struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
+       struct mv_req *req = container_of(_req, struct mv_req, req);
+       struct mv_udc *udc;
+       unsigned long flags;
+       udc = mvep->udc;
+       dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);
+       spin_lock_irqsave(&udc->lock, flags);
+       if (req->test_mode) {
+               mv_set_ptc(udc, req->test_mode);
+               req->test_mode = 0;
+       }
+       spin_unlock_irqrestore(&udc->lock, flags);
+ }
+ /*
+  * udc_prime_status - queue the ep0 status (or 2-byte GET_STATUS data)
+  * stage using the shared udc->status_req.
+  * @direction: EP_DIR_IN or EP_DIR_OUT for the stage
+  * @status:    16-bit payload when @empty is false
+  * @empty:     true for a zero-length status packet
+  * Returns 0 or a negative errno.
+  */
+ static int
+ udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
+ {
+       int retval = 0;
+       struct mv_req *req;
+       struct mv_ep *ep;
+       ep = &udc->eps[0];
+       udc->ep0_dir = direction;
+       udc->ep0_state = WAIT_FOR_OUT_STATUS;
+       req = udc->status_req;
+       /* fill in the reqest structure */
+       if (empty == false) {
+               *((u16 *) req->req.buf) = cpu_to_le16(status);
+               req->req.length = 2;
+       } else
+               req->req.length = 0;
+       req->ep = ep;
+       req->req.status = -EINPROGRESS;
+       req->req.actual = 0;
+       /* defer test-mode entry until this status stage completes */
+       if (udc->test_mode) {
+               req->req.complete = prime_status_complete;
+               req->test_mode = udc->test_mode;
+               udc->test_mode = 0;
+       } else
+               req->req.complete = NULL;
+       req->dtd_count = 0;
+       /*
+        * NOTE(review): mapping is done with dma_map_single() here, but
+        * the error path unmaps via usb_gadget_unmap_request() -- verify
+        * the two stay consistent for this request.
+        */
+       if (req->req.dma == DMA_ADDR_INVALID) {
+               req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+                               req->req.buf, req->req.length,
+                               ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               req->mapped = 1;
+       }
+       /* prime the data phase */
+       if (!req_to_dtd(req)) {
+               retval = queue_dtd(ep, req);
+               if (retval) {
+                       dev_err(&udc->dev->dev,
+                               "Failed to queue dtd when prime status\n");
+                       goto out;
+               }
+       } else{ /* no mem */
+               retval = -ENOMEM;
+               dev_err(&udc->dev->dev,
+                       "Failed to dma_pool_alloc when prime status\n");
+               goto out;
+       }
+       list_add_tail(&req->queue, &ep->queue);
+       return 0;
+ out:
+       usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
+       return retval;
+ }
+ /*
+  * Arm a USB test mode (index <= TEST_FORCE_EN); actually entered by
+  * prime_status_complete() once the ep0 status stage finishes.
+  */
+ static void mv_udc_testmode(struct mv_udc *udc, u16 index)
+ {
+       if (index <= TEST_FORCE_EN) {
+               udc->test_mode = index;
+               if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+                       ep0_stall(udc);
+       } else
+               dev_err(&udc->dev->dev,
+                       "This test mode(%d) is not supported\n", index);
+ }
+ /*
+  * USB_REQ_SET_ADDRESS: latch the new address; it is only written to
+  * the DEVICEADDR register after the status stage completes (see
+  * ep0_req_complete()).
+  */
+ static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+ {
+       udc->dev_addr = (u8)setup->wValue;
+       /* update usb state */
+       udc->usb_state = USB_STATE_ADDRESS;
+       if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+               ep0_stall(udc);
+ }
+ /*
+  * USB_REQ_GET_STATUS for device / interface / endpoint recipients,
+  * answered with a two-byte IN data stage.
+  * NOTE(review): the local ep_num in the endpoint branch shadows the
+  * otherwise-unused @ep_num parameter.
+  */
+ static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
+       struct usb_ctrlrequest *setup)
+ {
+       u16 status = 0;
+       int retval;
+       if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
+               != (USB_DIR_IN | USB_TYPE_STANDARD))
+               return;
+       if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
+               status = 1 << USB_DEVICE_SELF_POWERED;
+               status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+       } else if ((setup->bRequestType & USB_RECIP_MASK)
+                       == USB_RECIP_INTERFACE) {
+               /* get interface status */
+               status = 0;
+       } else if ((setup->bRequestType & USB_RECIP_MASK)
+                       == USB_RECIP_ENDPOINT) {
+               u8 ep_num, direction;
+               ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+               direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+                               ? EP_DIR_IN : EP_DIR_OUT;
+               status = ep_is_stall(udc, ep_num, direction)
+                               << USB_ENDPOINT_HALT;
+       }
+       retval = udc_prime_status(udc, EP_DIR_IN, status, false);
+       if (retval)
+               ep0_stall(udc);
+       else
+               udc->ep0_state = DATA_STATE_XMIT;
+ }
+ /*
+  * ch9clearfeature - standard ClearFeature request on ep0.
+  * Supports DEVICE_REMOTE_WAKEUP and ENDPOINT_HALT; anything else is
+  * ignored without priming a status stage.
+  *
+  * Fix: eps[] holds max_eps * 2 entries indexed by ep_num * 2 + dir, so
+  * valid numbers are 0 .. max_eps - 1; the old '>' test let
+  * ep_num == max_eps index one element past the end of the array.
+  */
+ static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+ {
+       u8 ep_num;
+       u8 direction;
+       struct mv_ep *ep;
+       if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+               == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+               switch (setup->wValue) {
+               case USB_DEVICE_REMOTE_WAKEUP:
+                       udc->remote_wakeup = 0;
+                       break;
+               default:
+                       goto out;
+               }
+       } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+               == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+               switch (setup->wValue) {
+               case USB_ENDPOINT_HALT:
+                       ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+                       direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+                               ? EP_DIR_IN : EP_DIR_OUT;
+                       if (setup->wValue != 0 || setup->wLength != 0
+                               || ep_num >= udc->max_eps)
+                               goto out;
+                       ep = &udc->eps[ep_num * 2 + direction];
+                       /* wedged: refuse to un-halt but still ACK */
+                       if (ep->wedge == 1)
+                               break;
+                       spin_unlock(&udc->lock);
+                       ep_set_stall(udc, ep_num, direction, 0);
+                       spin_lock(&udc->lock);
+                       break;
+               default:
+                       goto out;
+               }
+       } else
+               goto out;
+       if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+               ep0_stall(udc);
+ out:
+       return;
+ }
+ /*
+  * ch9setfeature - standard SetFeature request on ep0.
+  * Handles DEVICE_REMOTE_WAKEUP, DEVICE_TEST_MODE and ENDPOINT_HALT.
+  *
+  * Fix: valid endpoint numbers are 0 .. max_eps - 1 (the register and
+  * eps[] indexing elsewhere assume this), so reject ep_num == max_eps
+  * too; the old check used '>'.
+  */
+ static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
+ {
+       u8 ep_num;
+       u8 direction;
+       if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+               == ((USB_TYPE_STANDARD | USB_RECIP_DEVICE))) {
+               switch (setup->wValue) {
+               case USB_DEVICE_REMOTE_WAKEUP:
+                       udc->remote_wakeup = 1;
+                       break;
+               case USB_DEVICE_TEST_MODE:
+                       /*
+                        * NOTE(review): on an invalid request ep0 is
+                        * stalled but we still fall through into
+                        * mv_udc_testmode() -- confirm this is intended.
+                        */
+                       if (setup->wIndex & 0xFF
+                               ||  udc->gadget.speed != USB_SPEED_HIGH)
+                               ep0_stall(udc);
+                       if (udc->usb_state != USB_STATE_CONFIGURED
+                               && udc->usb_state != USB_STATE_ADDRESS
+                               && udc->usb_state != USB_STATE_DEFAULT)
+                               ep0_stall(udc);
+                       mv_udc_testmode(udc, (setup->wIndex >> 8));
+                       goto out;
+               default:
+                       goto out;
+               }
+       } else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
+               == ((USB_TYPE_STANDARD | USB_RECIP_ENDPOINT))) {
+               switch (setup->wValue) {
+               case USB_ENDPOINT_HALT:
+                       ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
+                       direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
+                               ? EP_DIR_IN : EP_DIR_OUT;
+                       if (setup->wValue != 0 || setup->wLength != 0
+                               || ep_num >= udc->max_eps)
+                               goto out;
+                       spin_unlock(&udc->lock);
+                       ep_set_stall(udc, ep_num, direction, 1);
+                       spin_lock(&udc->lock);
+                       break;
+               default:
+                       goto out;
+               }
+       } else
+               goto out;
+       if (udc_prime_status(udc, EP_DIR_IN, 0, true))
+               ep0_stall(udc);
+ out:
+       return;
+ }
+ /*
+  * handle_setup_packet - dispatch a SETUP packet on ep0.
+  * A handful of ch9 standard requests are handled in the driver; all
+  * others are delegated to the gadget driver's setup() callback with the
+  * udc lock dropped (hence the sparse __releases/__acquires below).
+  */
+ static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
+       struct usb_ctrlrequest *setup)
+       __releases(&ep->udc->lock)
+       __acquires(&ep->udc->lock)
+ {
+       bool delegate = false;
+       /* a new SETUP cancels anything pending on the OUT side of ep0 */
+       nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);
+       dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+                       setup->bRequestType, setup->bRequest,
+                       setup->wValue, setup->wIndex, setup->wLength);
++      /* We process some standard setup requests here */
+       if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
+               switch (setup->bRequest) {
+               case USB_REQ_GET_STATUS:
+                       ch9getstatus(udc, ep_num, setup);
+                       break;
+               case USB_REQ_SET_ADDRESS:
+                       ch9setaddress(udc, setup);
+                       break;
+               case USB_REQ_CLEAR_FEATURE:
+                       ch9clearfeature(udc, setup);
+                       break;
+               case USB_REQ_SET_FEATURE:
+                       ch9setfeature(udc, setup);
+                       break;
+               default:
+                       delegate = true;
+               }
+       } else
+               delegate = true;
+       /* delegate USB standard requests to the gadget driver */
+       if (delegate == true) {
+               /* USB requests handled by gadget */
+               if (setup->wLength) {
+                       /* DATA phase from gadget, STATUS phase from udc */
+                       udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
+                                       ?  EP_DIR_IN : EP_DIR_OUT;
+                       spin_unlock(&udc->lock);
+                       if (udc->driver->setup(&udc->gadget,
+                               &udc->local_setup_buff) < 0)
+                               ep0_stall(udc);
+                       spin_lock(&udc->lock);
+                       udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
+                                       ?  DATA_STATE_XMIT : DATA_STATE_RECV;
+               } else {
+                       /* no DATA phase, IN STATUS phase from gadget */
+                       udc->ep0_dir = EP_DIR_IN;
+                       spin_unlock(&udc->lock);
+                       if (udc->driver->setup(&udc->gadget,
+                               &udc->local_setup_buff) < 0)
+                               ep0_stall(udc);
+                       spin_lock(&udc->lock);
+                       udc->ep0_state = WAIT_FOR_OUT_STATUS;
+               }
+       }
+ }
+ /* complete DATA or STATUS phase of ep0 prime status phase if needed */
+ static void ep0_req_complete(struct mv_udc *udc,
+       struct mv_ep *ep0, struct mv_req *req)
+ {
+       u32 new_addr;
+       /*
+        * SET_ADDRESS takes effect only after its status stage completes;
+        * latch udc->dev_addr (presumably recorded by ch9setaddress) into
+        * the controller now.
+        */
+       if (udc->usb_state == USB_STATE_ADDRESS) {
+               /* set the new address */
+               new_addr = (u32)udc->dev_addr;
+               writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
+                       &udc->op_regs->deviceaddr);
+       }
+       done(ep0, req, 0);
+       /* advance the ep0 state machine to the next control-transfer stage */
+       switch (udc->ep0_state) {
+       case DATA_STATE_XMIT:
+               /* receive status phase */
+               if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
+                       ep0_stall(udc);
+               break;
+       case DATA_STATE_RECV:
+               /* send status phase */
+               if (udc_prime_status(udc, EP_DIR_IN, 0 , true))
+                       ep0_stall(udc);
+               break;
+       case WAIT_FOR_OUT_STATUS:
+               udc->ep0_state = WAIT_FOR_SETUP;
+               break;
+       case WAIT_FOR_SETUP:
+               dev_err(&udc->dev->dev, "unexpect ep0 packets\n");
+               break;
+       default:
+               ep0_stall(udc);
+               break;
+       }
+ }
+ /*
+  * Copy the 8-byte setup packet for @ep_num from its OUT queue head into
+  * @buffer_ptr, using the hardware "setup tripwire" to detect a new setup
+  * packet overwriting the buffer mid-copy.
+  */
+ static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
+ {
+       u32 temp;
+       struct mv_dqh *dqh;
+       dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];
+       /* Clear bit in ENDPTSETUPSTAT */
+       writel((1 << ep_num), &udc->op_regs->epsetupstat);
+       /* re-read while a hazard exists: a new setup packet may arrive */
+       do {
+               /* Set Setup Tripwire */
+               temp = readl(&udc->op_regs->usbcmd);
+               writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+               /* Copy the setup packet to local buffer */
+               memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
+       } while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));
+       /* Clear Setup Tripwire */
+       temp = readl(&udc->op_regs->usbcmd);
+       writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
+ }
+ /* transfer-complete interrupt: drain setup packets first, then completed
+  * requests on every endpoint/direction flagged in ENDPTCOMPLETE.
+  */
+ static void irq_process_tr_complete(struct mv_udc *udc)
+ {
+       u32 tmp, bit_pos;
+       int i, ep_num = 0, direction = 0;
+       struct mv_ep    *curr_ep;
+       struct mv_req *curr_req, *temp_req;
+       int status;
+       /*
+        * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
+        * because the setup packets are to be read ASAP
+        */
+       /* Process all Setup packet received interrupts */
+       tmp = readl(&udc->op_regs->epsetupstat);
+       if (tmp) {
+               for (i = 0; i < udc->max_eps; i++) {
+                       if (tmp & (1 << i)) {
+                               get_setup_data(udc, i,
+                                       (u8 *)(&udc->local_setup_buff));
+                               handle_setup_packet(udc, i,
+                                       &udc->local_setup_buff);
+                       }
+               }
+       }
+       /* Don't clear the endpoint setup status register here.
+        * It is cleared as a setup packet is read out of the buffer
+        */
+       /* Process non-setup transaction complete interrupts */
+       tmp = readl(&udc->op_regs->epcomplete);
+       if (!tmp)
+               return;
+       writel(tmp, &udc->op_regs->epcomplete);
+       for (i = 0; i < udc->max_eps * 2; i++) {
+               ep_num = i >> 1;
+               direction = i % 2;
+               bit_pos = 1 << (ep_num + 16 * direction);
+               if (!(bit_pos & tmp))
+                       continue;
+               /* ep0 IN (i == 1) shares the single mv_ep at eps[0] */
+               if (i == 1)
+                       curr_ep = &udc->eps[0];
+               else
+                       curr_ep = &udc->eps[i];
+               /* process the req queue until an uncomplete request */
+               list_for_each_entry_safe(curr_req, temp_req,
+                       &curr_ep->queue, queue) {
+                       status = process_ep_req(udc, i, curr_req);
+                       if (status)
+                               break;
+                       /* write back status to req */
+                       curr_req->req.status = status;
+                       /* ep0 request completion */
+                       if (ep_num == 0) {
+                               ep0_req_complete(udc, curr_ep, curr_req);
+                               break;
+                       } else {
+                               done(curr_ep, curr_req, status);
+                       }
+               }
+       }
+ }
+ /* USB bus reset interrupt: clear address/status state, flush endpoints,
+  * and either just stop activity (reset still in progress) or fully
+  * re-initialize the controller (reset already finished).
+  */
+ static void irq_process_reset(struct mv_udc *udc)
+ {
+       u32 tmp;
+       unsigned int loops;
+       udc->ep0_dir = EP_DIR_OUT;
+       udc->ep0_state = WAIT_FOR_SETUP;
+       udc->remote_wakeup = 0;         /* default to 0 on reset */
+       /* The address bits are past bit 25-31. Set the address */
+       tmp = readl(&udc->op_regs->deviceaddr);
+       tmp &= ~(USB_DEVICE_ADDRESS_MASK);
+       writel(tmp, &udc->op_regs->deviceaddr);
+       /* Clear all the setup token semaphores */
+       tmp = readl(&udc->op_regs->epsetupstat);
+       writel(tmp, &udc->op_regs->epsetupstat);
+       /* Clear all the endpoint complete status bits */
+       tmp = readl(&udc->op_regs->epcomplete);
+       writel(tmp, &udc->op_regs->epcomplete);
+       /* wait until all endptprime bits cleared */
+       loops = LOOPS(PRIME_TIMEOUT);
+       while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
+               if (loops == 0) {
+                       dev_err(&udc->dev->dev,
+                               "Timeout for ENDPTPRIME = 0x%x\n",
+                               readl(&udc->op_regs->epprime));
+                       break;
+               }
+               loops--;
+               udelay(LOOPS_USEC);
+       }
+       /* Write 1s to the Flush register */
+       writel((u32)~0, &udc->op_regs->epflush);
+       if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
+               dev_info(&udc->dev->dev, "usb bus reset\n");
+               udc->usb_state = USB_STATE_DEFAULT;
+               /* reset all the queues, stop all USB activities */
+               stop_activity(udc, udc->driver);
+       } else {
+               /* NOTE(review): &portsc here vs &portsc[0] above -- same
+                * address, but the inconsistency is worth unifying.
+                */
+               dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
+                       readl(&udc->op_regs->portsc));
+               /*
+                * re-initialize
+                * controller reset
+                */
+               udc_reset(udc);
+               /* reset all the queues, stop all USB activities */
+               stop_activity(udc, udc->driver);
+               /* reset ep0 dQH and endptctrl */
+               ep0_reset(udc);
+               /* enable interrupt and set controller to run state */
+               udc_start(udc);
+               udc->usb_state = USB_STATE_ATTACHED;
+       }
+ }
+ /* leave SUSPENDED: restore the pre-suspend state and notify the gadget */
+ static void handle_bus_resume(struct mv_udc *udc)
+ {
+       udc->usb_state = udc->resume_state;
+       udc->resume_state = 0;
+       /* report resume to the driver */
+       if (udc->driver) {
+               if (udc->driver->resume) {
+                       /* drop the lock around the gadget driver callback */
+                       spin_unlock(&udc->lock);
+                       udc->driver->resume(&udc->gadget);
+                       spin_lock(&udc->lock);
+               }
+       }
+ }
+ /* bus suspend interrupt: remember current state, notify the gadget */
+ static void irq_process_suspend(struct mv_udc *udc)
+ {
+       udc->resume_state = udc->usb_state;
+       udc->usb_state = USB_STATE_SUSPENDED;
+       /* NOTE(review): udc->driver is dereferenced without a NULL check,
+        * unlike handle_bus_resume() -- confirm a driver is always bound
+        * when this interrupt fires.
+        */
+       if (udc->driver->suspend) {
+               /* drop the lock around the gadget driver callback */
+               spin_unlock(&udc->lock);
+               udc->driver->suspend(&udc->gadget);
+               spin_lock(&udc->lock);
+       }
+ }
+ /* port status change interrupt: track negotiated speed and handle
+  * suspend/resume transitions reflected in PORTSC.
+  */
+ static void irq_process_port_change(struct mv_udc *udc)
+ {
+       u32 portsc;
+       portsc = readl(&udc->op_regs->portsc[0]);
+       /* speed bits are only sampled when the port is not in reset */
+       if (!(portsc & PORTSCX_PORT_RESET)) {
+               /* Get the speed */
+               u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
+               switch (speed) {
+               case PORTSCX_PORT_SPEED_HIGH:
+                       udc->gadget.speed = USB_SPEED_HIGH;
+                       break;
+               case PORTSCX_PORT_SPEED_FULL:
+                       udc->gadget.speed = USB_SPEED_FULL;
+                       break;
+               case PORTSCX_PORT_SPEED_LOW:
+                       udc->gadget.speed = USB_SPEED_LOW;
+                       break;
+               default:
+                       udc->gadget.speed = USB_SPEED_UNKNOWN;
+                       break;
+               }
+       }
+       /* port suspended: same handling as irq_process_suspend() */
+       if (portsc & PORTSCX_PORT_SUSPEND) {
+               udc->resume_state = udc->usb_state;
+               udc->usb_state = USB_STATE_SUSPENDED;
+               if (udc->driver->suspend) {
+                       spin_unlock(&udc->lock);
+                       udc->driver->suspend(&udc->gadget);
+                       spin_lock(&udc->lock);
+               }
+       }
+       /* port no longer suspended while we thought it was: bus resume */
+       if (!(portsc & PORTSCX_PORT_SUSPEND)
+               && udc->usb_state == USB_STATE_SUSPENDED) {
+               handle_bus_resume(udc);
+       }
+       if (!udc->resume_state)
+               udc->usb_state = USB_STATE_DEFAULT;
+ }
+ /* system error interrupt: nothing to recover, just count occurrences */
+ static void irq_process_error(struct mv_udc *udc)
+ {
+       /* Increment the error count */
+       udc->errors++;
+ }
+ /* top-level ISR; the line is requested IRQF_SHARED, so return IRQ_NONE
+  * whenever none of our enabled status bits are set.
+  */
+ static irqreturn_t mv_udc_irq(int irq, void *dev)
+ {
+       struct mv_udc *udc = (struct mv_udc *)dev;
+       u32 status, intr;
+       /* Disable ISR when stopped bit is set */
+       if (udc->stopped)
+               return IRQ_NONE;
+       spin_lock(&udc->lock);
+       /* only act on interrupts that are both raised and enabled */
+       status = readl(&udc->op_regs->usbsts);
+       intr = readl(&udc->op_regs->usbintr);
+       status &= intr;
+       if (status == 0) {
+               spin_unlock(&udc->lock);
+               return IRQ_NONE;
+       }
+       /* Clear all the interrupts occurred */
+       writel(status, &udc->op_regs->usbsts);
+       if (status & USBSTS_ERR)
+               irq_process_error(udc);
+       if (status & USBSTS_RESET)
+               irq_process_reset(udc);
+       if (status & USBSTS_PORT_CHANGE)
+               irq_process_port_change(udc);
+       if (status & USBSTS_INT)
+               irq_process_tr_complete(udc);
+       if (status & USBSTS_SUSPEND)
+               irq_process_suspend(udc);
+       spin_unlock(&udc->lock);
+       return IRQ_HANDLED;
+ }
+ /* VBUS change interrupt: defer the slow poll/phy work to the workqueue */
+ static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
+ {
+       struct mv_udc *udc = (struct mv_udc *)dev;
+       /* polling VBUS and init phy may cause too much time*/
+       if (udc->qwork)
+               queue_work(udc->qwork, &udc->vbus_work);
+       return IRQ_HANDLED;
+ }
+ /* workqueue body: poll VBUS level and start/stop the gadget session */
+ static void mv_udc_vbus_work(struct work_struct *work)
+ {
+       struct mv_udc *udc;
+       unsigned int vbus;
+       udc = container_of(work, struct mv_udc, vbus_work);
+       if (!udc->pdata->vbus)
+               return;
+       vbus = udc->pdata->vbus->poll();
+       dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
+       /* anything that is neither HIGH nor LOW is ignored */
+       if (vbus == VBUS_HIGH)
+               mv_udc_vbus_session(&udc->gadget, 1);
+       else if (vbus == VBUS_LOW)
+               mv_udc_vbus_session(&udc->gadget, 0);
+ }
+ /* release device structure */
+ static void gadget_release(struct device *_dev)
+ {
+       struct mv_udc *udc;
+       udc = dev_get_drvdata(_dev);
+       /* unblocks mv_udc_remove(), which waits on udc->done */
+       complete(udc->done);
+ }
+ /* undo probe; resources not freed here are devm-managed */
+ static int mv_udc_remove(struct platform_device *pdev)
+ {
+       struct mv_udc *udc;
+       udc = platform_get_drvdata(pdev);
+       usb_del_gadget_udc(&udc->gadget);
+       if (udc->qwork) {
+               flush_workqueue(udc->qwork);
+               destroy_workqueue(udc->qwork);
+       }
+       /* free memory allocated in probe */
+       /* NOTE(review): dma_pool_destroy(NULL) is a no-op, so the NULL
+        * check before it is redundant.
+        */
+       if (udc->dtd_pool)
+               dma_pool_destroy(udc->dtd_pool);
+       if (udc->ep_dqh)
+               dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
+                       udc->ep_dqh, udc->ep_dqh_dma);
+       mv_udc_disable(udc);
+       /* free dev, wait for the release() finished */
+       wait_for_completion(udc->done);
+       return 0;
+ }
+ /* probe: map controller/phy registers, allocate dQH memory, the dTD DMA
+  * pool and per-endpoint state, set up VBUS detection, then register the
+  * gadget with the UDC core.
+  */
+ static int mv_udc_probe(struct platform_device *pdev)
+ {
+       struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       struct mv_udc *udc;
+       int retval = 0;
+       struct resource *r;
+       size_t size;
+       if (pdata == NULL) {
+               dev_err(&pdev->dev, "missing platform_data\n");
+               return -ENODEV;
+       }
+       udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
+       if (udc == NULL) {
+               dev_err(&pdev->dev, "failed to allocate memory for udc\n");
+               return -ENOMEM;
+       }
+       udc->done = &release_done;
+       udc->pdata = dev_get_platdata(&pdev->dev);
+       spin_lock_init(&udc->lock);
+       udc->dev = pdev;
+       if (pdata->mode == MV_USB_MODE_OTG) {
+               udc->transceiver = devm_usb_get_phy(&pdev->dev,
+                                       USB_PHY_TYPE_USB2);
+               if (IS_ERR(udc->transceiver)) {
+                       retval = PTR_ERR(udc->transceiver);
+                       if (retval == -ENXIO)
+                               return retval;
+                       /* clear the ERR_PTR and retry the probe later */
+                       udc->transceiver = NULL;
+                       return -EPROBE_DEFER;
+               }
+       }
+       /* the udc has only one sysclk. */
+       udc->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(udc->clk))
+               return PTR_ERR(udc->clk);
+       r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
+       if (r == NULL) {
+               dev_err(&pdev->dev, "no I/O memory resource defined\n");
+               return -ENODEV;
+       }
+       udc->cap_regs = (struct mv_cap_regs __iomem *)
+               devm_ioremap(&pdev->dev, r->start, resource_size(r));
+       if (udc->cap_regs == NULL) {
+               dev_err(&pdev->dev, "failed to map I/O memory\n");
+               return -EBUSY;
+       }
+       r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
+       if (r == NULL) {
+               dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+               return -ENODEV;
+       }
+       /* NOTE(review): plain ioremap (not devm_) -- this mapping is never
+        * released on the error paths below or in mv_udc_remove().
+        */
+       udc->phy_regs = ioremap(r->start, resource_size(r));
+       if (udc->phy_regs == NULL) {
+               dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+               return -EBUSY;
+       }
+       /* we will access controller registers, so enable the clk */
+       retval = mv_udc_enable_internal(udc);
+       if (retval)
+               return retval;
+       /* operational registers sit CAPLENGTH bytes past the cap regs */
+       udc->op_regs =
+               (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
+               + (readl(&udc->cap_regs->caplength_hciversion)
+                       & CAPLENGTH_MASK));
+       udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
+       /*
+        * some platform will use usb to download image, it may not disconnect
+        * usb gadget before loading kernel. So first stop udc here.
+        */
+       udc_stop(udc);
+       writel(0xFFFFFFFF, &udc->op_regs->usbsts);
+       /* one queue head per endpoint per direction, DQH_ALIGNMENT aligned */
+       size = udc->max_eps * sizeof(struct mv_dqh) *2;
+       size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
+       udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
+                                       &udc->ep_dqh_dma, GFP_KERNEL);
+       if (udc->ep_dqh == NULL) {
+               dev_err(&pdev->dev, "allocate dQH memory failed\n");
+               retval = -ENOMEM;
+               goto err_disable_clock;
+       }
+       udc->ep_dqh_size = size;
+       /* create dTD dma_pool resource */
+       udc->dtd_pool = dma_pool_create("mv_dtd",
+                       &pdev->dev,
+                       sizeof(struct mv_dtd),
+                       DTD_ALIGNMENT,
+                       DMA_BOUNDARY);
+       if (!udc->dtd_pool) {
+               retval = -ENOMEM;
+               goto err_free_dma;
+       }
+       size = udc->max_eps * sizeof(struct mv_ep) *2;
+       udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+       if (udc->eps == NULL) {
+               dev_err(&pdev->dev, "allocate ep memory failed\n");
+               retval = -ENOMEM;
+               goto err_destroy_dma;
+       }
+       /* initialize ep0 status request structure */
+       udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
+                                       GFP_KERNEL);
+       if (!udc->status_req) {
+               dev_err(&pdev->dev, "allocate status_req memory failed\n");
+               retval = -ENOMEM;
+               goto err_destroy_dma;
+       }
+       INIT_LIST_HEAD(&udc->status_req->queue);
+       /* allocate a small amount of memory to get valid address */
+       /* NOTE(review): this kzalloc return value is not checked */
+       udc->status_req->req.buf = kzalloc(8, GFP_KERNEL);
+       udc->status_req->req.dma = DMA_ADDR_INVALID;
+       udc->resume_state = USB_STATE_NOTATTACHED;
+       udc->usb_state = USB_STATE_POWERED;
+       udc->ep0_dir = EP_DIR_OUT;
+       udc->remote_wakeup = 0;
+       r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
+       if (r == NULL) {
+               dev_err(&pdev->dev, "no IRQ resource defined\n");
+               retval = -ENODEV;
+               goto err_destroy_dma;
+       }
+       udc->irq = r->start;
+       if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
+               IRQF_SHARED, driver_name, udc)) {
+               dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
+                       udc->irq);
+               retval = -ENODEV;
+               goto err_destroy_dma;
+       }
+       /* initialize gadget structure */
+       udc->gadget.ops = &mv_ops;      /* usb_gadget_ops */
+       udc->gadget.ep0 = &udc->eps[0].ep;      /* gadget ep0 */
+       INIT_LIST_HEAD(&udc->gadget.ep_list);   /* ep_list */
+       udc->gadget.speed = USB_SPEED_UNKNOWN;  /* speed */
+       udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
+       /* the "gadget" abstracts/virtualizes the controller */
+       udc->gadget.name = driver_name;         /* gadget name */
+       eps_init(udc);
+       /* VBUS detect: we can disable/enable clock on demand.*/
+       if (udc->transceiver)
+               udc->clock_gating = 1;
+       else if (pdata->vbus) {
+               udc->clock_gating = 1;
+               retval = devm_request_threaded_irq(&pdev->dev,
+                               pdata->vbus->irq, NULL,
+                               mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
+               if (retval) {
+                       dev_info(&pdev->dev,
+                               "Can not request irq for VBUS, "
+                               "disable clock gating\n");
+                       udc->clock_gating = 0;
+               }
+               udc->qwork = create_singlethread_workqueue("mv_udc_queue");
+               if (!udc->qwork) {
+                       dev_err(&pdev->dev, "cannot create workqueue\n");
+                       retval = -ENOMEM;
+                       goto err_destroy_dma;
+               }
+               INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
+       }
+       /*
+        * When clock gating is supported, we can disable clk and phy.
+        * If not, it means that VBUS detection is not supported, we
+        * have to enable vbus active all the time to let controller work.
+        */
+       if (udc->clock_gating)
+               mv_udc_disable_internal(udc);
+       else
+               udc->vbus_active = 1;
+       retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+                       gadget_release);
+       if (retval)
+               goto err_create_workqueue;
+       platform_set_drvdata(pdev, udc);
+       dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
+               udc->clock_gating ? "with" : "without");
+       return 0;
+       /* error unwinding, in reverse order of acquisition */
+ err_create_workqueue:
+       /* NOTE(review): qwork may be NULL here (transceiver path never
+        * creates it); destroy_workqueue(NULL) would oops -- confirm.
+        */
+       destroy_workqueue(udc->qwork);
+ err_destroy_dma:
+       dma_pool_destroy(udc->dtd_pool);
+ err_free_dma:
+       dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
+                       udc->ep_dqh, udc->ep_dqh_dma);
+ err_disable_clock:
+       mv_udc_disable_internal(udc);
+       return retval;
+ }
+ #ifdef CONFIG_PM
+ /* system suspend: refuse while the USB cable is attached, then quiesce */
+ static int mv_udc_suspend(struct device *dev)
+ {
+       struct mv_udc *udc;
+       udc = dev_get_drvdata(dev);
+       /* if OTG is enabled, the following will be done in OTG driver*/
+       if (udc->transceiver)
+               return 0;
+       /* VBUS high means the cable is connected: abort the suspend */
+       if (udc->pdata->vbus && udc->pdata->vbus->poll)
+               if (udc->pdata->vbus->poll() == VBUS_HIGH) {
+                       dev_info(&udc->dev->dev, "USB cable is connected!\n");
+                       return -EAGAIN;
+               }
+       /*
+        * only cable is unplugged, udc can suspend.
+        * So do not care about clock_gating == 1.
+        */
+       if (!udc->clock_gating) {
+               udc_stop(udc);
+               spin_lock_irq(&udc->lock);
+               /* stop all usb activities */
+               stop_activity(udc, udc->driver);
+               spin_unlock_irq(&udc->lock);
+               mv_udc_disable_internal(udc);
+       }
+       return 0;
+ }
+ /* system resume: re-enable clock/phy and restart the controller */
+ static int mv_udc_resume(struct device *dev)
+ {
+       struct mv_udc *udc;
+       int retval;
+       udc = dev_get_drvdata(dev);
+       /* if OTG is enabled, the following will be done in OTG driver*/
+       if (udc->transceiver)
+               return 0;
+       if (!udc->clock_gating) {
+               retval = mv_udc_enable_internal(udc);
+               if (retval)
+                       return retval;
+               /* restart only when a gadget driver is bound and connected */
+               if (udc->driver && udc->softconnect) {
+                       udc_reset(udc);
+                       ep0_reset(udc);
+                       udc_start(udc);
+               }
+       }
+       return 0;
+ }
+ /* system sleep PM hooks; compiled only under CONFIG_PM */
+ static const struct dev_pm_ops mv_udc_pm_ops = {
+       .suspend        = mv_udc_suspend,
+       .resume         = mv_udc_resume,
+ };
+ #endif
+ /* on shutdown, put the controller mode back to idle so the next boot
+  * does not inherit an active device mode.
+  */
+ static void mv_udc_shutdown(struct platform_device *pdev)
+ {
+       struct mv_udc *udc;
+       u32 mode;
+       udc = platform_get_drvdata(pdev);
+       /* reset controller mode to IDLE (clear the low two mode bits) */
+       mv_udc_enable(udc);
+       mode = readl(&udc->op_regs->usbmode);
+       mode &= ~3;
+       writel(mode, &udc->op_regs->usbmode);
+       mv_udc_disable(udc);
+ }
+ /* platform driver glue; PM ops only when CONFIG_PM is enabled */
+ static struct platform_driver udc_driver = {
+       .probe          = mv_udc_probe,
+       .remove         = mv_udc_remove,
+       .shutdown       = mv_udc_shutdown,
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = "mv-udc",
+ #ifdef CONFIG_PM
+               .pm     = &mv_udc_pm_ops,
+ #endif
+       },
+ };
+ module_platform_driver(udc_driver);
+ MODULE_ALIAS("platform:mv-udc");
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+ MODULE_VERSION(DRIVER_VERSION);
+ MODULE_LICENSE("GPL");
index 5341bb223b7cbab3b4465fed5ad83e7a4825b360,adfffe8848911e5c6a5013ba3a7589f8ef99c4e1..47ae6455d0733c2d3ef4093aa4408e98969b3644
@@@ -312,15 -271,13 +271,13 @@@ static void cppi41_dma_callback(void *p
                                goto out;
                        }
                }
-               if (is_isoc(hw_ep, 0)) {
-                       schedule_work(&cppi41_channel->dma_completion);
-                       goto out;
-               }
                list_add_tail(&cppi41_channel->tx_check,
                                &controller->early_tx_list);
 -              if (!hrtimer_active(&controller->early_tx)) {
 +              if (!hrtimer_is_queued(&controller->early_tx)) {
+                       unsigned long usecs = cppi41_channel->total_len / 10;
                        hrtimer_start_range_ns(&controller->early_tx,
-                               ktime_set(0, 140 * NSEC_PER_USEC),
+                               ktime_set(0, usecs * NSEC_PER_USEC),
                                40 * NSEC_PER_USEC,
                                HRTIMER_MODE_REL);
                }
Simple merge
Simple merge
Simple merge
Simple merge
index 24b68c59dcf85363b3bad8853392ae8a51112025,b66fae77c08cf5059e31c1fdfea20ecff5051608..0154b2859fd7dd868a65dcb30bc6455594e23b65
@@@ -33,14 -32,37 +32,45 @@@ struct usb_endpoint_descriptor_no_audi
        __u8  bInterval;
  } __attribute__((packed));
  
 +/* Legacy format, deprecated as of 3.14. */
 +struct usb_functionfs_descs_head {
 +      __le32 magic;
 +      __le32 length;
 +      __le32 fs_count;
 +      __le32 hs_count;
 +} __attribute__((packed, deprecated));
 +
+ /* MS OS Descriptor header */
+ struct usb_os_desc_header {
+       __u8    interface;
+       __le32  dwLength;
+       __le16  bcdVersion;
+       __le16  wIndex;
+       union {
+               struct {
+                       __u8    bCount;
+                       __u8    Reserved;
+               };
+               __le16  wCount;
+       };
+ } __attribute__((packed));
+ struct usb_ext_compat_desc {
+       __u8    bFirstInterfaceNumber;
+       __u8    Reserved1;
+       __u8    CompatibleID[8];
+       __u8    SubCompatibleID[8];
+       __u8    Reserved2[6];
+ };
+ struct usb_ext_prop_desc {
+       __le32  dwSize;
+       __le32  dwPropertyDataType;
+       __le16  wPropertyNameLength;
+ } __attribute__((packed));
+ #ifndef __KERNEL__
  /*
   * Descriptors format:
   *