--- /dev/null
+/***************************************************************************
+ * Copyright (C) 2010-2012 by Bruno Prémont <bonbons@linux-vserver.org> *
+ * *
+ * Based on Logitech G13 driver (v0.4) *
+ * Copyright (C) 2009 by Rick L. Vinyard, Jr. <rvinyard@cs.nmsu.edu> *
+ * *
+ * This program is free software: you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation, version 2 of the License. *
+ * *
+ * This driver is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License for more details. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this software. If not see <http://www.gnu.org/licenses/>. *
+ ***************************************************************************/
+
+#include <linux/hid.h>
+#include <linux/vmalloc.h>
+#include "usbhid/usbhid.h"
+#include <linux/usb.h>
+
+#include <linux/fb.h>
+#include <linux/module.h>
+
+#include "hid-picolcd.h"
+
+/* Framebuffer
+ *
+ * The PicoLCD uses a Topway LCD module of 256x64 pixels.
+ * This display area is tiled over 4 controllers with 8 tiles
+ * each. Each tile has 8x64 pixels, each data byte representing
+ * a 1-bit wide vertical line of the tile.
+ *
+ * The display can be updated at a tile granularity.
+ *
+ * Chip 1 Chip 2 Chip 3 Chip 4
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 1 | Tile 1 | Tile 1 | Tile 1 |
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 2 | Tile 2 | Tile 2 | Tile 2 |
+ * +----------------+----------------+----------------+----------------+
+ * ...
+ * +----------------+----------------+----------------+----------------+
+ * | Tile 8 | Tile 8 | Tile 8 | Tile 8 |
+ * +----------------+----------------+----------------+----------------+
+ */
+#define PICOLCDFB_NAME "picolcdfb"
+#define PICOLCDFB_WIDTH (256)
+#define PICOLCDFB_HEIGHT (64)
+#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8)
+
+#define PICOLCDFB_UPDATE_RATE_LIMIT 10
+#define PICOLCDFB_UPDATE_RATE_DEFAULT 2
+
+/* Framebuffer visual structures */
+static const struct fb_fix_screeninfo picolcdfb_fix = {
+ .id = PICOLCDFB_NAME,
+ .type = FB_TYPE_PACKED_PIXELS,
+ .visual = FB_VISUAL_MONO01,
+ .xpanstep = 0,
+ .ypanstep = 0,
+ .ywrapstep = 0,
+ .line_length = PICOLCDFB_WIDTH / 8,
+ .accel = FB_ACCEL_NONE,
+};
+
+static const struct fb_var_screeninfo picolcdfb_var = {
+ .xres = PICOLCDFB_WIDTH,
+ .yres = PICOLCDFB_HEIGHT,
+ .xres_virtual = PICOLCDFB_WIDTH,
+ .yres_virtual = PICOLCDFB_HEIGHT,
+ .width = 103,
+ .height = 26,
+ .bits_per_pixel = 1,
+ .grayscale = 1,
+ .red = {
+ .offset = 0,
+ .length = 1,
+ .msb_right = 0,
+ },
+ .green = {
+ .offset = 0,
+ .length = 1,
+ .msb_right = 0,
+ },
+ .blue = {
+ .offset = 0,
+ .length = 1,
+ .msb_right = 0,
+ },
+ .transp = {
+ .offset = 0,
+ .length = 0,
+ .msb_right = 0,
+ },
+};
+
+/* Send a given tile to PicoLCD */
+static int picolcd_fb_send_tile(struct picolcd_data *data, u8 *vbitmap,
+ int chip, int tile)
+{
+ struct hid_report *report1, *report2;
+ unsigned long flags;
+ u8 *tdata;
+ int i;
+
+ report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, data->hdev);
+ if (!report1 || report1->maxfield != 1)
+ return -ENODEV;
+ report2 = picolcd_out_report(REPORT_LCD_DATA, data->hdev);
+ if (!report2 || report2->maxfield != 1)
+ return -ENODEV;
+
+ spin_lock_irqsave(&data->lock, flags);
+ if ((data->status & PICOLCD_FAILED)) {
+ spin_unlock_irqrestore(&data->lock, flags);
+ return -ENODEV;
+ }
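+ /* A 64-byte tile is sent split across the two reports: report1
+ * carries the command header (chip select in the high bits,
+ * 0xb8 | tile addressing the tile, a 32-byte payload length)
+ * followed by the first half of the data, report2 carries the
+ * remaining 32 bytes for the same chip. */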
+ hid_set_field(report1->field[0], 0, chip << 2);
+ hid_set_field(report1->field[0], 1, 0x02);
+ hid_set_field(report1->field[0], 2, 0x00);
+ hid_set_field(report1->field[0], 3, 0x00);
+ hid_set_field(report1->field[0], 4, 0xb8 | tile);
+ hid_set_field(report1->field[0], 5, 0x00);
+ hid_set_field(report1->field[0], 6, 0x00);
+ hid_set_field(report1->field[0], 7, 0x40);
+ hid_set_field(report1->field[0], 8, 0x00);
+ hid_set_field(report1->field[0], 9, 0x00);
+ hid_set_field(report1->field[0], 10, 32);
+
+ hid_set_field(report2->field[0], 0, (chip << 2) | 0x01);
+ hid_set_field(report2->field[0], 1, 0x00);
+ hid_set_field(report2->field[0], 2, 0x00);
+ hid_set_field(report2->field[0], 3, 32);
+
+ tdata = vbitmap + (tile * 4 + chip) * 64;
+ for (i = 0; i < 64; i++)
+ if (i < 32)
+ hid_set_field(report1->field[0], 11 + i, tdata[i]);
+ else
+ hid_set_field(report2->field[0], 4 + i - 32, tdata[i]);
+
+ usbhid_submit_report(data->hdev, report1, USB_DIR_OUT);
+ usbhid_submit_report(data->hdev, report2, USB_DIR_OUT);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return 0;
+}
+
+/* Translate a single tile into device format and note whether it changed */
+static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp,
+ int chip, int tile)
+{
+ int i, b, changed = 0;
+ u8 tdata[64];
+ u8 *vdata = vbitmap + (tile * 4 + chip) * 64;
+
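+ /* Build the device-format tile: one byte per pixel column, with
+ * bit n of each byte holding the pixel of tile row n (top row in
+ * the LSB). */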
+ if (bpp == 1) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i/8] >> (i % 8)) & 0x01;
+ }
+ }
+ } else if (bpp == 8) {
+ for (b = 7; b >= 0; b--) {
+ const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8;
+ for (i = 0; i < 64; i++) {
+ tdata[i] <<= 1;
+ tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00;
+ }
+ }
+ } else {
+ /* Oops, we should never get here! */
+ WARN_ON(1);
+ return 0;
+ }
+
+ for (i = 0; i < 64; i++)
+ if (tdata[i] != vdata[i]) {
+ changed = 1;
+ vdata[i] = tdata[i];
+ }
+ return changed;
+}
+
+void picolcd_fb_refresh(struct picolcd_data *data)
+{
+ if (data->fb_info)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+}
+
+/* Reconfigure LCD display */
+int picolcd_fb_reset(struct picolcd_data *data, int clear)
+{
+ struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev);
+ struct picolcd_fb_data *fbdata = data->fb_info->par;
+ int i, j;
+ unsigned long flags;
+ static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 };
+
+ if (!report || report->maxfield != 1)
+ return -ENODEV;
+
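+ /* push the mapping/init command sequence to each of the four
+ * display controllers; usage 0 selects the chip just as in
+ * picolcd_fb_send_tile() */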
+ spin_lock_irqsave(&data->lock, flags);
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < report->field[0]->maxusage; j++)
+ if (j == 0)
+ hid_set_field(report->field[0], j, i << 2);
+ else if (j < sizeof(mapcmd))
+ hid_set_field(report->field[0], j, mapcmd[j]);
+ else
+ hid_set_field(report->field[0], j, 0);
+ usbhid_submit_report(data->hdev, report, USB_DIR_OUT);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ if (clear) {
+ memset(fbdata->vbitmap, 0, PICOLCDFB_SIZE);
+ memset(fbdata->bitmap, 0, PICOLCDFB_SIZE*fbdata->bpp);
+ }
+ fbdata->force = 1;
+
+ /* schedule first output of framebuffer */
+ if (fbdata->ready)
+ schedule_delayed_work(&data->fb_info->deferred_work, 0);
+ else
+ fbdata->ready = 1;
+
+ return 0;
+}
+
+/* Update fb_vbitmap from the screen_base and send changed tiles to device */
+static void picolcd_fb_update(struct fb_info *info)
+{
+ int chip, tile, n;
+ unsigned long flags;
+ struct picolcd_fb_data *fbdata = info->par;
+ struct picolcd_data *data;
+
+ mutex_lock(&info->lock);
+
+ spin_lock_irqsave(&fbdata->lock, flags);
+ if (!fbdata->ready && fbdata->picolcd)
+ picolcd_fb_reset(fbdata->picolcd, 0);
+ spin_unlock_irqrestore(&fbdata->lock, flags);
+
+ /*
+ * Translate the framebuffer into the format needed by the PicoLCD.
+ * See display layout above.
+ * Do this one tile after the other and push those tiles that changed.
+ *
+ * Wait for our IO to complete as otherwise we might flood the queue!
+ */
+ n = 0;
+ for (chip = 0; chip < 4; chip++)
+ for (tile = 0; tile < 8; tile++) {
+ if (!fbdata->force && !picolcd_fb_update_tile(
+ fbdata->vbitmap, fbdata->bitmap,
+ fbdata->bpp, chip, tile))
+ continue;
+ n += 2;
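+ /* each tile costs two output reports; once half of the
+ * HID output FIFO is queued, drop the locks and wait for
+ * the device to drain it */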
+ if (n >= HID_OUTPUT_FIFO_SIZE / 2) {
+ spin_lock_irqsave(&fbdata->lock, flags);
+ data = fbdata->picolcd;
+ spin_unlock_irqrestore(&fbdata->lock, flags);
+ mutex_unlock(&info->lock);
+ if (!data)
+ return;
+ usbhid_wait_io(data->hdev);
+ mutex_lock(&info->lock);
+ n = 0;
+ }
+ spin_lock_irqsave(&fbdata->lock, flags);
+ data = fbdata->picolcd;
+ spin_unlock_irqrestore(&fbdata->lock, flags);
+ if (!data || picolcd_fb_send_tile(data,
+ fbdata->vbitmap, chip, tile))
+ goto out;
+ }
+ fbdata->force = false;
+ if (n) {
+ spin_lock_irqsave(&fbdata->lock, flags);
+ data = fbdata->picolcd;
+ spin_unlock_irqrestore(&fbdata->lock, flags);
+ mutex_unlock(&info->lock);
+ if (data)
+ usbhid_wait_io(data->hdev);
+ return;
+ }
+out:
+ mutex_unlock(&info->lock);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ if (!info->par)
+ return;
+ sys_fillrect(info, rect);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ if (!info->par)
+ return;
+ sys_copyarea(info, area);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/* Stub to call the system default and update the image on the picoLCD */
+static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+ if (!info->par)
+ return;
+ sys_imageblit(info, image);
+
+ schedule_delayed_work(&info->deferred_work, 0);
+}
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it's inefficient to do anything less than a full screen draw
+ */
+static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ if (!info->par)
+ return -ENODEV;
+ ret = fb_sys_write(info, buf, count, ppos);
+ if (ret >= 0)
+ schedule_delayed_work(&info->deferred_work, 0);
+ return ret;
+}
+
+static int picolcd_fb_blank(int blank, struct fb_info *info)
+{
+ /* We let fb notification do this for us via lcd/backlight device */
+ return 0;
+}
+
+static void picolcd_fb_destroy(struct fb_info *info)
+{
+ struct picolcd_fb_data *fbdata = info->par;
+
+ /* make sure no work is deferred */
+ fb_deferred_io_cleanup(info);
+
+ /* No third party should ever unregister our framebuffer! */
+ WARN_ON(fbdata->picolcd != NULL);
+
+ vfree((u8 *)info->fix.smem_start);
+ framebuffer_release(info);
+}
+
+static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ __u32 bpp = var->bits_per_pixel;
+ __u32 activate = var->activate;
+
+ /* only allow 1/8 bit depth (8-bit is grayscale) */
+ *var = picolcdfb_var;
+ var->activate = activate;
+ if (bpp >= 8) {
+ var->bits_per_pixel = 8;
+ var->red.length = 8;
+ var->green.length = 8;
+ var->blue.length = 8;
+ } else {
+ var->bits_per_pixel = 1;
+ var->red.length = 1;
+ var->green.length = 1;
+ var->blue.length = 1;
+ }
+ return 0;
+}
+
+static int picolcd_set_par(struct fb_info *info)
+{
+ struct picolcd_fb_data *fbdata = info->par;
+ u8 *tmp_fb, *o_fb;
+ if (info->var.bits_per_pixel == fbdata->bpp)
+ return 0;
+ /* switch between 1/8 bit depths */
+ if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8)
+ return -EINVAL;
+
+ o_fb = fbdata->bitmap;
+ tmp_fb = kmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel, GFP_KERNEL);
+ if (!tmp_fb)
+ return -ENOMEM;
+
+ /* translate FB content to new bits-per-pixel */
+ if (info->var.bits_per_pixel == 1) {
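+		/* 8bpp -> 1bpp: pack each group of 8 grayscale bytes into
+		 * one byte, leftmost pixel in the MSB */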
+ int i, b;
+ for (i = 0; i < PICOLCDFB_SIZE; i++) {
+ u8 p = 0;
+ for (b = 0; b < 8; b++) {
+ p <<= 1;
+ p |= o_fb[i*8+b] ? 0x01 : 0x00;
+ }
+ tmp_fb[i] = p;
+ }
+ memcpy(o_fb, tmp_fb, PICOLCDFB_SIZE);
+ info->fix.visual = FB_VISUAL_MONO01;
+ info->fix.line_length = PICOLCDFB_WIDTH / 8;
+ } else {
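+		/* 1bpp -> 8bpp: expand each bit, MSB first, into a
+		 * 0x00/0xff byte */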
+ int i;
+ memcpy(tmp_fb, o_fb, PICOLCDFB_SIZE);
+ for (i = 0; i < PICOLCDFB_SIZE * 8; i++)
+ o_fb[i] = tmp_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00;
+ info->fix.visual = FB_VISUAL_DIRECTCOLOR;
+ info->fix.line_length = PICOLCDFB_WIDTH;
+ }
+
+ kfree(tmp_fb);
+ fbdata->bpp = info->var.bits_per_pixel;
+ return 0;
+}
+
+/* Note this can't be const because of struct fb_info definition */
+static struct fb_ops picolcdfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_destroy = picolcd_fb_destroy,
+ .fb_read = fb_sys_read,
+ .fb_write = picolcd_fb_write,
+ .fb_blank = picolcd_fb_blank,
+ .fb_fillrect = picolcd_fb_fillrect,
+ .fb_copyarea = picolcd_fb_copyarea,
+ .fb_imageblit = picolcd_fb_imageblit,
+ .fb_check_var = picolcd_fb_check_var,
+ .fb_set_par = picolcd_set_par,
+};
+
+
+/* Callback from deferred IO workqueue */
+static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist)
+{
+ picolcd_fb_update(info);
+}
+
+static const struct fb_deferred_io picolcd_fb_defio = {
+ .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT,
+ .deferred_io = picolcd_fb_deferred_io,
+};
+
+
+/*
+ * The "fb_update_rate" sysfs attribute
+ */
+static ssize_t picolcd_fb_update_rate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ struct picolcd_fb_data *fbdata = data->fb_info->par;
+ unsigned i, fb_update_rate = fbdata->update_rate;
+ size_t ret = 0;
+
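+ /* list the supported rates 1..PICOLCDFB_UPDATE_RATE_LIMIT,
+ * marking the currently active rate with brackets */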
+ for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++)
+ if (ret >= PAGE_SIZE)
+ break;
+ else if (i == fb_update_rate)
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+ else
+ ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+ if (ret > 0)
+ buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
+ return ret;
+}
+
+static ssize_t picolcd_fb_update_rate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct picolcd_data *data = dev_get_drvdata(dev);
+ struct picolcd_fb_data *fbdata = data->fb_info->par;
+ int i;
+ unsigned u;
+
+ if (count < 1 || count > 10)
+ return -EINVAL;
+
+ i = sscanf(buf, "%u", &u);
+ if (i != 1)
+ return -EINVAL;
+
+ if (u > PICOLCDFB_UPDATE_RATE_LIMIT)
+ return -ERANGE;
+ else if (u == 0)
+ u = PICOLCDFB_UPDATE_RATE_DEFAULT;
+
+ fbdata->update_rate = u;
+ data->fb_info->fbdefio->delay = HZ / fbdata->update_rate;
+ return count;
+}
+
+static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show,
+ picolcd_fb_update_rate_store);
+
+/* initialize Framebuffer device */
+int picolcd_init_framebuffer(struct picolcd_data *data)
+{
+ struct device *dev = &data->hdev->dev;
+ struct fb_info *info = NULL;
+ struct picolcd_fb_data *fbdata = NULL;
+ int i, error = -ENOMEM;
+ u32 *palette;
+
+ /* The extra memory is:
+ * - struct fb_deferred_io
+ * - 256*u32 for pseudo_palette
+ * - struct picolcd_fb_data
+ * - PICOLCDFB_SIZE bytes for the device-format shadow bitmap
+ */
+ info = framebuffer_alloc(256 * sizeof(u32) +
+ sizeof(struct fb_deferred_io) +
+ sizeof(struct picolcd_fb_data) +
+ PICOLCDFB_SIZE, dev);
+ if (info == NULL) {
+ dev_err(dev, "failed to allocate a framebuffer\n");
+ goto err_nomem;
+ }
+
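+ /* carve the extra allocation up by walking info->par */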
+ info->fbdefio = info->par;
+ *info->fbdefio = picolcd_fb_defio;
+ info->par += sizeof(struct fb_deferred_io);
+ palette = info->par;
+ info->par += 256 * sizeof(u32);
+ for (i = 0; i < 256; i++)
+ palette[i] = i > 0 && i < 16 ? 0xff : 0;
+ info->pseudo_palette = palette;
+ info->fbops = &picolcdfb_ops;
+ info->var = picolcdfb_var;
+ info->fix = picolcdfb_fix;
+ info->fix.smem_len = PICOLCDFB_SIZE*8;
+ info->flags = FBINFO_FLAG_DEFAULT;
+
+ fbdata = info->par;
+ spin_lock_init(&fbdata->lock);
+ fbdata->picolcd = data;
+ fbdata->update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT;
+ fbdata->bpp = picolcdfb_var.bits_per_pixel;
+ fbdata->force = 1;
+ fbdata->vbitmap = info->par + sizeof(struct picolcd_fb_data);
+ fbdata->bitmap = vmalloc(PICOLCDFB_SIZE*8);
+ if (fbdata->bitmap == NULL) {
+ dev_err(dev, "can't get a free page for framebuffer\n");
+ goto err_nomem;
+ }
+ info->screen_base = (char __force __iomem *)fbdata->bitmap;
+ info->fix.smem_start = (unsigned long)fbdata->bitmap;
+ memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE);
+ data->fb_info = info;
+
+ error = picolcd_fb_reset(data, 1);
+ if (error) {
+ dev_err(dev, "failed to configure display\n");
+ goto err_cleanup;
+ }
+
+ error = device_create_file(dev, &dev_attr_fb_update_rate);
+ if (error) {
+ dev_err(dev, "failed to create sysfs attributes\n");
+ goto err_cleanup;
+ }
+
+ fb_deferred_io_init(info);
+ error = register_framebuffer(info);
+ if (error) {
+ dev_err(dev, "failed to register framebuffer\n");
+ goto err_sysfs;
+ }
+ return 0;
+
+err_sysfs:
+ device_remove_file(dev, &dev_attr_fb_update_rate);
+ fb_deferred_io_cleanup(info);
+err_cleanup:
+ data->fb_info = NULL;
+
+err_nomem:
+ if (fbdata)
+ vfree(fbdata->bitmap);
+ framebuffer_release(info);
+ return error;
+}
+
+void picolcd_exit_framebuffer(struct picolcd_data *data)
+{
+ struct fb_info *info = data->fb_info;
+ struct picolcd_fb_data *fbdata = info->par;
+ unsigned long flags;
+
+ device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+
+ /* disconnect framebuffer from HID dev */
+ spin_lock_irqsave(&fbdata->lock, flags);
+ fbdata->picolcd = NULL;
+ spin_unlock_irqrestore(&fbdata->lock, flags);
+
+ /* make sure there is no running update - this guarantees that a
+ * fbdata->picolcd pointer obtained under the lock cannot be freed
+ * out from under the deferred work */
+ flush_delayed_work(&info->deferred_work);
+
+ data->fb_info = NULL;
+ unregister_framebuffer(info);
+}
--- /dev/null
+/*
+ * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS. That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used. Each end of the link uses one address. The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of this network link. (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ */
+
+#define UETH__VERSION "29-May-2008"
+
+struct eth_dev {
+ /* lock is held while accessing port_usb
+ * or updating its backlink port_usb->ioport
+ */
+ spinlock_t lock;
+ struct gether *port_usb;
+
+ struct net_device *net;
+ struct usb_gadget *gadget;
+
+ spinlock_t req_lock; /* guard {rx,tx}_reqs */
+ struct list_head tx_reqs, rx_reqs;
+ atomic_t tx_qlen;
+
+ struct sk_buff_head rx_frames;
+
+ unsigned header_len;
+ struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
+ int (*unwrap)(struct gether *,
+ struct sk_buff *skb,
+ struct sk_buff_head *list);
+
+ struct work_struct work;
+
+ unsigned long todo;
+#define WORK_RX_MEMORY 0
+
+ bool zlp;
+ u8 host_mac[ETH_ALEN];
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define RX_EXTRA 20 /* bytes guarding against rx overflows */
+
+#define DEFAULT_QLEN 2 /* double buffering by default */
+
+static unsigned qmult = 5;
+module_param(qmult, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
+
+/* for dual-speed hardware, use deeper queues at high/super speed */
+static inline int qlen(struct usb_gadget *gadget)
+{
+ if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+ gadget->speed == USB_SPEED_SUPER))
+ return qmult * DEFAULT_QLEN;
+ else
+ return DEFAULT_QLEN;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* REVISIT there must be a better way than having two sets
+ * of debug calls ...
+ */
+
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+ printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+ xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG DBG
+#else
+#define VDBG(dev, fmt, args...) \
+ do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+ xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+ xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+static int ueth_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct eth_dev *dev = netdev_priv(net);
+ unsigned long flags;
+ int status = 0;
+
+ /* don't change MTU on "live" link (peer won't know) */
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ status = -EBUSY;
+ else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+ status = -ERANGE;
+ else
+ net->mtu = new_mtu;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return status;
+}
+
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+ struct eth_dev *dev = netdev_priv(net);
+
+ strlcpy(p->driver, "g_ether", sizeof p->driver);
+ strlcpy(p->version, UETH__VERSION, sizeof p->version);
+ strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+ strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+}
+
+/* REVISIT can also support:
+ * - WOL (by tracking suspends and issuing remote wakeup)
+ * - msglevel (implies updated messaging)
+ * - ... probably more ethtool ops
+ */
+
+static const struct ethtool_ops ops = {
+ .get_drvinfo = eth_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
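+ /* nothing to do if this event is already pending */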
+ if (test_and_set_bit(flag, &dev->todo))
+ return;
+ if (!schedule_work(&dev->work))
+ ERROR(dev, "kevent %d may have been dropped\n", flag);
+ else
+ DBG(dev, "kevent %d scheduled\n", flag);
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+ struct sk_buff *skb;
+ int retval = -ENOMEM;
+ size_t size = 0;
+ struct usb_ep *out;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ out = dev->port_usb->out_ep;
+ else
+ out = NULL;
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!out)
+ return -ENOTCONN;
+
+
+ /* Padding up to RX_EXTRA handles minor disagreements with host.
+ * Normally we use the USB "terminate on short read" convention;
+ * so allow up to (N*maxpacket), since that memory is normally
+ * already allocated. Some hardware doesn't deal well with short
+ * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
+ * byte off the end (to force hardware errors on overflow).
+ *
+ * RNDIS uses internal framing, and explicitly allows senders to
+ * pad to end-of-packet. That's potentially nice for speed, but
+ * means receivers can't recover lost synch on their own (because
+ * new packets don't only start after a short RX).
+ */
+ size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
+ size += dev->port_usb->header_len;
+ size += out->maxpacket - 1;
+ size -= size % out->maxpacket;
+
+ if (dev->port_usb->is_fixed)
+ size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+
+ skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+ if (skb == NULL) {
+ DBG(dev, "no rx skb\n");
+ goto enomem;
+ }
+
+ /* Some platforms perform better when IP packets are aligned,
+ * but on at least one, checksumming fails otherwise. Note:
+ * RNDIS headers involve variable numbers of LE32 values.
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ req->buf = skb->data;
+ req->length = size;
+ req->complete = rx_complete;
+ req->context = skb;
+
+ retval = usb_ep_queue(out, req, gfp_flags);
+ if (retval == -ENOMEM)
+enomem:
+ defer_kevent(dev, WORK_RX_MEMORY);
+ if (retval) {
+ DBG(dev, "rx submit --> %d\n", retval);
+ if (skb)
+ dev_kfree_skb_any(skb);
+ spin_lock_irqsave(&dev->req_lock, flags);
+ list_add(&req->list, &dev->rx_reqs);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+ }
+ return retval;
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct sk_buff *skb = req->context, *skb2;
+ struct eth_dev *dev = ep->driver_data;
+ int status = req->status;
+
+ switch (status) {
+
+ /* normal completion */
+ case 0:
+ skb_put(skb, req->actual);
+
+ if (dev->unwrap) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ status = dev->unwrap(dev->port_usb,
+ skb,
+ &dev->rx_frames);
+ } else {
+ dev_kfree_skb_any(skb);
+ status = -ENOTCONN;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ } else {
+ skb_queue_tail(&dev->rx_frames, skb);
+ }
+ skb = NULL;
+
+ skb2 = skb_dequeue(&dev->rx_frames);
+ while (skb2) {
+ if (status < 0
+ || ETH_HLEN > skb2->len
+ || skb2->len > ETH_FRAME_LEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ DBG(dev, "rx length %d\n", skb2->len);
+ dev_kfree_skb_any(skb2);
+ goto next_frame;
+ }
+ skb2->protocol = eth_type_trans(skb2, dev->net);
+ dev->net->stats.rx_packets++;
+ dev->net->stats.rx_bytes += skb2->len;
+
+ /* no buffer copies needed, unless hardware can't
+ * use skb buffers.
+ */
+ status = netif_rx(skb2);
+next_frame:
+ skb2 = skb_dequeue(&dev->rx_frames);
+ }
+ break;
+
+ /* software-driven interface shutdown */
+ case -ECONNRESET: /* unlink */
+ case -ESHUTDOWN: /* disconnect etc */
+ VDBG(dev, "rx shutdown, code %d\n", status);
+ goto quiesce;
+
+ /* for hardware automagic (such as pxa) */
+ case -ECONNABORTED: /* endpoint reset */
+ DBG(dev, "rx %s reset\n", ep->name);
+ defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+ dev_kfree_skb_any(skb);
+ goto clean;
+
+ /* data overrun */
+ case -EOVERFLOW:
+ dev->net->stats.rx_over_errors++;
+ /* FALLTHROUGH */
+
+ default:
+ dev->net->stats.rx_errors++;
+ DBG(dev, "rx status %d\n", status);
+ break;
+ }
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+ if (!netif_running(dev->net)) {
+clean:
+ spin_lock(&dev->req_lock);
+ list_add(&req->list, &dev->rx_reqs);
+ spin_unlock(&dev->req_lock);
+ req = NULL;
+ }
+ if (req)
+ rx_submit(dev, req, GFP_ATOMIC);
+}
+
+static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
+{
+ unsigned i;
+ struct usb_request *req;
+
+ if (!n)
+ return -ENOMEM;
+
+ /* queue/recycle up to N requests */
+ i = n;
+ list_for_each_entry(req, list, list) {
+ if (i-- == 0)
+ goto extra;
+ }
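+ /* the list was i requests short; allocate the shortfall */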
+ while (i--) {
+ req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ if (!req)
+ return list_empty(list) ? -ENOMEM : 0;
+ list_add(&req->list, list);
+ }
+ return 0;
+
+extra:
+ /* free extras */
+ for (;;) {
+ struct list_head *next;
+
+ next = req->list.next;
+ list_del(&req->list);
+ usb_ep_free_request(ep, req);
+
+ if (next == list)
+ break;
+
+ req = container_of(next, struct usb_request, list);
+ }
+ return 0;
+}
+
+static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
+{
+ int status;
+
+ spin_lock(&dev->req_lock);
+ status = prealloc(&dev->tx_reqs, link->in_ep, n);
+ if (status < 0)
+ goto fail;
+ status = prealloc(&dev->rx_reqs, link->out_ep, n);
+ if (status < 0)
+ goto fail;
+ goto done;
+fail:
+ DBG(dev, "can't alloc requests\n");
+done:
+ spin_unlock(&dev->req_lock);
+ return status;
+}
+
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+ struct usb_request *req;
+ unsigned long flags;
+
+ /* fill unused rxq slots with some skb */
+ spin_lock_irqsave(&dev->req_lock, flags);
+ while (!list_empty(&dev->rx_reqs)) {
+ req = container_of(dev->rx_reqs.next,
+ struct usb_request, list);
+ list_del_init(&req->list);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+
+ if (rx_submit(dev, req, gfp_flags) < 0) {
+ defer_kevent(dev, WORK_RX_MEMORY);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->req_lock, flags);
+ }
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+}
+
+static void eth_work(struct work_struct *work)
+{
+ struct eth_dev *dev = container_of(work, struct eth_dev, work);
+
+ if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+ if (netif_running(dev->net))
+ rx_fill(dev, GFP_KERNEL);
+ }
+
+ if (dev->todo)
+ DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
+static void tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+ struct sk_buff *skb = req->context;
+ struct eth_dev *dev = ep->driver_data;
+
+ switch (req->status) {
+ default:
+ dev->net->stats.tx_errors++;
+ VDBG(dev, "tx err %d\n", req->status);
+ /* FALLTHROUGH */
+ case -ECONNRESET: /* unlink */
+ case -ESHUTDOWN: /* disconnect etc */
+ break;
+ case 0:
+ dev->net->stats.tx_bytes += skb->len;
+ }
+ dev->net->stats.tx_packets++;
+
+ spin_lock(&dev->req_lock);
+ list_add(&req->list, &dev->tx_reqs);
+ spin_unlock(&dev->req_lock);
+ dev_kfree_skb_any(skb);
+
+ atomic_dec(&dev->tx_qlen);
+ if (netif_carrier_ok(dev->net))
+ netif_wake_queue(dev->net);
+}
+
+static inline int is_promisc(u16 cdc_filter)
+{
+ return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ struct eth_dev *dev = netdev_priv(net);
+ int length = skb->len;
+ int retval;
+ struct usb_request *req = NULL;
+ unsigned long flags;
+ struct usb_ep *in;
+ u16 cdc_filter;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ in = dev->port_usb->in_ep;
+ cdc_filter = dev->port_usb->cdc_filter;
+ } else {
+ in = NULL;
+ cdc_filter = 0;
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ if (!in) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* apply outgoing CDC or RNDIS filters */
+ if (!is_promisc(cdc_filter)) {
+ u8 *dest = skb->data;
+
+ if (is_multicast_ether_addr(dest)) {
+ u16 type;
+
+ /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+ * SET_ETHERNET_MULTICAST_FILTERS requests
+ */
+ if (is_broadcast_ether_addr(dest))
+ type = USB_CDC_PACKET_TYPE_BROADCAST;
+ else
+ type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+ if (!(cdc_filter & type)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+ /* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+ }
+
+ spin_lock_irqsave(&dev->req_lock, flags);
+ /*
+ * this freelist can be empty if an interrupt triggered disconnect()
+ * and reconfigured the gadget (shutting down this queue) after the
+ * network stack decided to xmit but before we got the spinlock.
+ */
+ if (list_empty(&dev->tx_reqs)) {
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+ return NETDEV_TX_BUSY;
+ }
+
+ req = container_of(dev->tx_reqs.next, struct usb_request, list);
+ list_del(&req->list);
+
+ /* temporarily stop TX queue when the freelist empties */
+ if (list_empty(&dev->tx_reqs))
+ netif_stop_queue(net);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+
+ /* no buffer copies needed, unless the network stack did it
+ * or the hardware can't use skb buffers.
+ * or there's not enough space for extra headers we need
+ */
+ if (dev->wrap) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb)
+ skb = dev->wrap(dev->port_usb, skb);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ if (!skb)
+ goto drop;
+
+ length = skb->len;
+ }
+ req->buf = skb->data;
+ req->context = skb;
+ req->complete = tx_complete;
+
+ /* NCM requires no zlp if transfer is dwNtbInMaxSize */
+ if (dev->port_usb->is_fixed &&
+ length == dev->port_usb->fixed_in_len &&
+ (length % in->maxpacket) == 0)
+ req->zero = 0;
+ else
+ req->zero = 1;
+
+ /* use zlp framing on tx for strict CDC-Ether conformance,
+ * though any robust network rx path ignores extra padding,
+ * and some hardware doesn't like to write zlps.
+ */
+ if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+ length++;
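+ /* the pad byte makes the transfer a non-multiple of maxpacket,
+ * so it short-terminates without an explicit zlp */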
+
+ req->length = length;
+
+ /* throttle high/super speed IRQ rate back slightly */
+ if (gadget_is_dualspeed(dev->gadget))
+ req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+ dev->gadget->speed == USB_SPEED_SUPER)
+ ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
+ : 0;
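+ /* i.e. while at high/super speed only every qmult-th request
+ * completes with an interrupt */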
+
+ retval = usb_ep_queue(in, req, GFP_ATOMIC);
+ switch (retval) {
+ default:
+ DBG(dev, "tx queue err %d\n", retval);
+ break;
+ case 0:
+ net->trans_start = jiffies;
+ atomic_inc(&dev->tx_qlen);
+ }
+
+ if (retval) {
+ dev_kfree_skb_any(skb);
+drop:
+ dev->net->stats.tx_dropped++;
+ spin_lock_irqsave(&dev->req_lock, flags);
+ if (list_empty(&dev->tx_reqs))
+ netif_start_queue(net);
+ list_add(&req->list, &dev->tx_reqs);
+ spin_unlock_irqrestore(&dev->req_lock, flags);
+ }
+ return NETDEV_TX_OK;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+ DBG(dev, "%s\n", __func__);
+
+ /* fill the rx queue */
+ rx_fill(dev, gfp_flags);
+
+ /* and open the tx floodgates */
+ atomic_set(&dev->tx_qlen, 0);
+ netif_wake_queue(dev->net);
+}
+
+static int eth_open(struct net_device *net)
+{
+ struct eth_dev *dev = netdev_priv(net);
+ struct gether *link;
+
+ DBG(dev, "%s\n", __func__);
+ if (netif_carrier_ok(dev->net))
+ eth_start(dev, GFP_KERNEL);
+
+ spin_lock_irq(&dev->lock);
+ link = dev->port_usb;
+ if (link && link->open)
+ link->open(link);
+ spin_unlock_irq(&dev->lock);
+
+ return 0;
+}
+
+static int eth_stop(struct net_device *net)
+{
+ struct eth_dev *dev = netdev_priv(net);
+ unsigned long flags;
+
+ VDBG(dev, "%s\n", __func__);
+ netif_stop_queue(net);
+
+ DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+ dev->net->stats.rx_packets, dev->net->stats.tx_packets,
+ dev->net->stats.rx_errors, dev->net->stats.tx_errors
+ );
+
+ /* ensure there are no more active requests */
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->port_usb) {
+ struct gether *link = dev->port_usb;
+
+ if (link->close)
+ link->close(link);
+
+ /* NOTE: we have no abort-queue primitive we could use
+ * to cancel all pending I/O. Instead, we disable then
+ * reenable the endpoints ... this idiom may leave toggle
+ * wrong, but that's a self-correcting error.
+ *
+ * REVISIT: we *COULD* just let the transfers complete at
+ * their own pace; the network stack can handle old packets.
+ * For the moment we leave this here, since it works.
+ */
+ usb_ep_disable(link->in_ep);
+ usb_ep_disable(link->out_ep);
+ if (netif_carrier_ok(net)) {
+ DBG(dev, "host still using in/out endpoints\n");
+ usb_ep_enable(link->in_ep);
+ usb_ep_enable(link->out_ep);
+ }
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
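+/* parse a "xx:xx:xx:xx:xx:xx" (or dot-separated) address string;
+ * fall back to a random address, returning nonzero, when the string
+ * is absent or not a valid ethernet address */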
+static int get_ether_addr(const char *str, u8 *dev_addr)
+{
+ if (str) {
+ unsigned i;
+
+ for (i = 0; i < 6; i++) {
+ unsigned char num;
+
+ if ((*str == '.') || (*str == ':'))
+ str++;
+ num = hex_to_bin(*str++) << 4;
+ num |= hex_to_bin(*str++);
+ dev_addr[i] = num;
+ }
+ if (is_valid_ether_addr(dev_addr))
+ return 0;
+ }
+ eth_random_addr(dev_addr);
+ return 1;
+}
+
+static struct eth_dev *the_dev;
+
+static const struct net_device_ops eth_netdev_ops = {
+ .ndo_open = eth_open,
+ .ndo_stop = eth_stop,
+ .ndo_start_xmit = eth_start_xmit,
+ .ndo_change_mtu = ueth_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static struct device_type gadget_type = {
+ .name = "gadget",
+};
+
+/**
+ * gether_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associate with this link
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ * host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework. The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ const char *netname)
+{
+ struct eth_dev *dev;
+ struct net_device *net;
+ int status;
+
+ if (the_dev)
+ return -EBUSY;
+
+ net = alloc_etherdev(sizeof *dev);
+ if (!net)
+ return -ENOMEM;
+
+ dev = netdev_priv(net);
+ spin_lock_init(&dev->lock);
+ spin_lock_init(&dev->req_lock);
+ INIT_WORK(&dev->work, eth_work);
+ INIT_LIST_HEAD(&dev->tx_reqs);
+ INIT_LIST_HEAD(&dev->rx_reqs);
+
+ skb_queue_head_init(&dev->rx_frames);
+
+ /* network device setup */
+ dev->net = net;
+ snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+ if (get_ether_addr(dev_addr, net->dev_addr))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "self");
+ if (get_ether_addr(host_addr, dev->host_mac))
+ dev_warn(&g->dev,
+ "using random %s ethernet address\n", "host");
+
+ if (ethaddr)
+ memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+ net->netdev_ops = &eth_netdev_ops;
+
+ SET_ETHTOOL_OPS(net, &ops);
+
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
+
+ status = register_netdev(net);
+ if (status < 0) {
+ dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+ free_netdev(net);
+ } else {
+ INFO(dev, "MAC %pM\n", net->dev_addr);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ the_dev = dev;
+
+ /* two kinds of host-initiated state changes:
+ * - iff DATA transfer is active, carrier is "on"
+ * - tx queueing enabled if open *and* carrier is "on"
+ */
+ netif_carrier_off(net);
+ }
+
+ return status;
+}
+
+/**
+ * gether_cleanup - remove Ethernet-over-USB device
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by gether_setup().
+ */
+void gether_cleanup(void)
+{
+ if (!the_dev)
+ return;
+
+ unregister_netdev(the_dev->net);
+ flush_work(&the_dev->work);
+ free_netdev(the_dev->net);
+
+ the_dev = NULL;
+}
+
+
+/**
+ * gether_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ * current device speed, and any framing wrapper(s) set up.
+ * Context: irqs blocked
+ *
+ * This is called to activate endpoints and let the network layer know
+ * the connection is active ("carrier detect"). It may cause the I/O
+ * queues to open and start letting network packets flow, but will in
+ * any case activate the endpoints so that they respond properly to the
+ * USB host.
+ *
+ * Verify net_device pointer returned using IS_ERR(). If it doesn't
+ * indicate some error code (negative errno), ep->driver_data values
+ * have been overwritten.
+ */
+struct net_device *gether_connect(struct gether *link)
+{
+ struct eth_dev *dev = the_dev;
+ int result = 0;
+
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ link->in_ep->driver_data = dev;
+ result = usb_ep_enable(link->in_ep);
+ if (result != 0) {
+ DBG(dev, "enable %s --> %d\n",
+ link->in_ep->name, result);
+ goto fail0;
+ }
+
+ link->out_ep->driver_data = dev;
+ result = usb_ep_enable(link->out_ep);
+ if (result != 0) {
+ DBG(dev, "enable %s --> %d\n",
+ link->out_ep->name, result);
+ goto fail1;
+ }
+
+ if (result == 0)
+ result = alloc_requests(dev, link, qlen(dev->gadget));
+
+ if (result == 0) {
+ dev->zlp = link->is_zlp_ok;
+ DBG(dev, "qlen %d\n", qlen(dev->gadget));
+
+ dev->header_len = link->header_len;
+ dev->unwrap = link->unwrap;
+ dev->wrap = link->wrap;
+
+ spin_lock(&dev->lock);
+ dev->port_usb = link;
+ link->ioport = dev;
+ if (netif_running(dev->net)) {
+ if (link->open)
+ link->open(link);
+ } else {
+ if (link->close)
+ link->close(link);
+ }
+ spin_unlock(&dev->lock);
+
+ netif_carrier_on(dev->net);
+ if (netif_running(dev->net))
+ eth_start(dev, GFP_ATOMIC);
+
+ /* on error, disable any endpoints */
+ } else {
+ (void) usb_ep_disable(link->out_ep);
+fail1:
+ (void) usb_ep_disable(link->in_ep);
+ }
+fail0:
+ /* caller is responsible for cleanup on error */
+ if (result < 0)
+ return ERR_PTR(result);
+ return dev->net;
+}
+
+/**
+ * gether_disconnect - notify network layer that USB link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * Context: irqs blocked
+ *
+ * This is called to deactivate endpoints and let the network layer know
+ * the connection went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ * The endpoints are inactive, and accordingly without active USB I/O.
+ * Pointers to endpoint descriptors and endpoint private data are nulled.
+ */
+void gether_disconnect(struct gether *link)
+{
+ struct eth_dev *dev = link->ioport;
+ struct usb_request *req;
+
+ WARN_ON(!dev);
+ if (!dev)
+ return;
+
+ DBG(dev, "%s\n", __func__);
+
+ netif_stop_queue(dev->net);
+ netif_carrier_off(dev->net);
+
+ /* disable endpoints, forcing (synchronous) completion
+ * of all pending i/o. then free the request objects
+ * and forget about the endpoints.
+ */
+ usb_ep_disable(link->in_ep);
+ spin_lock(&dev->req_lock);
+ while (!list_empty(&dev->tx_reqs)) {
+ req = container_of(dev->tx_reqs.next,
+ struct usb_request, list);
+ list_del(&req->list);
+
+ spin_unlock(&dev->req_lock);
+ usb_ep_free_request(link->in_ep, req);
+ spin_lock(&dev->req_lock);
+ }
+ spin_unlock(&dev->req_lock);
+ link->in_ep->driver_data = NULL;
+ link->in_ep->desc = NULL;
+
+ usb_ep_disable(link->out_ep);
+ spin_lock(&dev->req_lock);
+ while (!list_empty(&dev->rx_reqs)) {
+ req = container_of(dev->rx_reqs.next,
+ struct usb_request, list);
+ list_del(&req->list);
+
+ spin_unlock(&dev->req_lock);
+ usb_ep_free_request(link->out_ep, req);
+ spin_lock(&dev->req_lock);
+ }
+ spin_unlock(&dev->req_lock);
+ link->out_ep->driver_data = NULL;
+ link->out_ep->desc = NULL;
+
+ /* finish forgetting about this USB link episode */
+ dev->header_len = 0;
+ dev->unwrap = NULL;
+ dev->wrap = NULL;
+
+ spin_lock(&dev->lock);
+ dev->port_usb = NULL;
+ link->ioport = NULL;
+ spin_unlock(&dev->lock);
+}