USB: g_mass_storage: testing code from f_mass_storage.c removed
drivers/usb/gadget/f_mass_storage.c
1/*
2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
3 *
4 * Copyright (C) 2003-2008 Alan Stern
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39/*
40 * The File-backed Storage Gadget acts as a USB Mass Storage device,
41 * appearing to the host as a disk drive or as a CD-ROM drive. In addition
42 * to providing an example of a genuinely useful gadget driver for a USB
43 * device, it also illustrates a technique of double-buffering for increased
44 * throughput. Last but not least, it gives an easy way to probe the
45 * behavior of the Mass Storage drivers in a USB host.
46 *
47 * Backing storage is provided by a regular file or a block device, specified
48 * by the "file" module parameter. Access can be limited to read-only by
49 * setting the optional "ro" module parameter. (For CD-ROM emulation,
50 * access is always read-only.) The gadget will indicate that it has
51 * removable media if the optional "removable" module parameter is set.
52 *
53 * There is support for multiple logical units (LUNs), each of which has
54 * its own backing file. The number of LUNs can be set using the optional
55 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
56 * files are specified using comma-separated lists for "file" and "ro".
57 * The default number of LUNs is taken from the number of "file" elements;
58 * it is 1 if "file" is not given. If "removable" is not set then a backing
59 * file must be specified for each LUN. If it is set, then an unspecified
60 * or empty backing filename means the LUN's medium is not loaded. Ideally
61 * each LUN would be settable independently as a disk drive or a CD-ROM
62 * drive, but currently all LUNs have to be the same type. The CD-ROM
63 * emulation includes a single data track and no audio tracks; hence there
64 * need be only one backing file per LUN. Note also that the CD-ROM block
65 * length is set to 512 rather than the more common value 2048.
66 *
67 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
68 * needed (an interrupt-out endpoint is also needed for CBI). The memory
69 * requirement amounts to two 16K buffers, size configurable by a parameter.
70 * Support is included for both full-speed and high-speed operation.
71 *
72 * Note that the driver is slightly non-portable in that it assumes a
73 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
74 * interrupt-in endpoints. With most device controllers this isn't an
75 * issue, but there may be some with hardware restrictions that prevent
76 * a buffer from being used by more than one endpoint.
77 *
78 * Module options:
79 *
80 * file=filename[,filename...]
81 * Required if "removable" is not set, names of
82 * the files or block devices used for
83 * backing storage
84 * ro=b[,b...] Default false, booleans for read-only access
85 * removable Default false, boolean for removable media
86 * luns=N Default N = number of filenames, number of
87 * LUNs to support
88 * stall Default determined according to the type of
89 * USB device controller (usually true),
90 * boolean to permit the driver to halt
91 * bulk endpoints
92 * cdrom Default false, boolean for whether to emulate
93 * a CD-ROM drive
94 *
95 * The pathnames of the backing files and the ro settings are available in
96 * the attribute files "file" and "ro" in the lun<n> subdirectory of the
97 * gadget's sysfs directory. If the "removable" option is set, writing to
98 * these files will simulate ejecting/loading the medium (writing an empty
99 * line means eject) and adjusting a write-enable tab. Changes to the ro
100 * setting are not allowed when the medium is loaded or if CD-ROM emulation
101 * is being used.
102 *
103 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
104 * The driver's SCSI command interface was based on the "Information
105 * technology - Small Computer System Interface - 2" document from
106 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
107 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
108 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
109 * "Universal Serial Bus Mass Storage Class UFI Command Specification"
110 * document, Revision 1.0, December 14, 1998, available at
111 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
112 */
113
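/*
 * Illustrative usage (not taken from this file; the image paths and
 * sizes are made up, only the module name and parameter names come from
 * the list above): two read-only LUNs backed by plain files might be
 * set up as
 *
 *	dd if=/dev/zero of=/tmp/lun0.img bs=1M count=64
 *	dd if=/dev/zero of=/tmp/lun1.img bs=1M count=64
 *	modprobe g_file_storage file=/tmp/lun0.img,/tmp/lun1.img \
 *			ro=1,1 luns=2 removable=1 stall=0
 */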
114
115/*
116 * Driver Design
117 *
118 * The FSG driver is fairly straightforward. There is a main kernel
119 * thread that handles most of the work. Interrupt routines field
120 * callbacks from the controller driver: bulk- and interrupt-request
121 * completion notifications, endpoint-0 events, and disconnect events.
122 * Completion events are passed to the main thread by wakeup calls. Many
123 * ep0 requests are handled at interrupt time, but SetInterface,
124 * SetConfiguration, and device reset requests are forwarded to the
125 * thread in the form of "exceptions" using SIGUSR1 signals (since they
126 * should interrupt any ongoing file I/O operations).
127 *
128 * The thread's main routine implements the standard command/data/status
129 * parts of a SCSI interaction. It and its subroutines are full of tests
130 * for pending signals/exceptions -- all this polling is necessary since
131 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
132 * indication that the driver really wants to be running in userspace.)
133 * An important point is that so long as the thread is alive it keeps an
134 * open reference to the backing file. This will prevent unmounting
135 * the backing file's underlying filesystem and could cause problems
136 * during system shutdown, for example. To prevent such problems, the
137 * thread catches INT, TERM, and KILL signals and converts them into
138 * an EXIT exception.
139 *
140 * In normal operation the main thread is started during the gadget's
141 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
142 * exit when it receives a signal, and there's no point leaving the
143 * gadget running when the thread is dead. So just before the thread
144 * exits, it deregisters the gadget driver. This makes things a little
145 * tricky: The driver is deregistered at two places, and the exiting
146 * thread can indirectly call fsg_unbind() which in turn can tell the
147 * thread to exit. The first problem is resolved through the use of the
148 * REGISTERED atomic bitflag; the driver will only be deregistered once.
149 * The second problem is resolved by having fsg_unbind() check
150 * fsg->state; it won't try to stop the thread if the state is already
151 * FSG_STATE_TERMINATED.
152 *
153 * To provide maximum throughput, the driver uses a circular pipeline of
154 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
155 * arbitrarily long; in practice the benefits don't justify having more
156 * than 2 stages (i.e., double buffering). But it helps to think of the
157 * pipeline as being a long one. Each buffer head contains a bulk-in and
158 * a bulk-out request pointer (since the buffer can be used for both
 159 * output and input -- directions are always given from the host's
160 * point of view) as well as a pointer to the buffer and various state
161 * variables.
162 *
163 * Use of the pipeline follows a simple protocol. There is a variable
164 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
165 * At any time that buffer head may still be in use from an earlier
166 * request, so each buffer head has a state variable indicating whether
167 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
168 * buffer head to be EMPTY, filling the buffer either by file I/O or by
169 * USB I/O (during which the buffer head is BUSY), and marking the buffer
170 * head FULL when the I/O is complete. Then the buffer will be emptied
171 * (again possibly by USB I/O, during which it is marked BUSY) and
172 * finally marked EMPTY again (possibly by a completion routine).
173 *
174 * A module parameter tells the driver to avoid stalling the bulk
175 * endpoints wherever the transport specification allows. This is
176 * necessary for some UDCs like the SuperH, which cannot reliably clear a
177 * halt on a bulk endpoint. However, under certain circumstances the
178 * Bulk-only specification requires a stall. In such cases the driver
179 * will halt the endpoint and set a flag indicating that it should clear
180 * the halt in software during the next device reset. Hopefully this
181 * will permit everything to work correctly. Furthermore, although the
182 * specification allows the bulk-out endpoint to halt when the host sends
183 * too much data, implementing this would cause an unavoidable race.
184 * The driver will always use the "no-stall" approach for OUT transfers.
185 *
186 * One subtle point concerns sending status-stage responses for ep0
187 * requests. Some of these requests, such as device reset, can involve
188 * interrupting an ongoing file I/O operation, which might take an
189 * arbitrarily long time. During that delay the host might give up on
190 * the original ep0 request and issue a new one. When that happens the
191 * driver should not notify the host about completion of the original
192 * request, as the host will no longer be waiting for it. So the driver
193 * assigns to each ep0 request a unique tag, and it keeps track of the
194 * tag value of the request associated with a long-running exception
195 * (device-reset, interface-change, or configuration-change). When the
196 * exception handler is finished, the status-stage response is submitted
197 * only if the current ep0 request tag is equal to the exception request
198 * tag. Thus only the most recently received ep0 request will get a
199 * status-stage response.
200 *
201 * Warning: This driver source file is too long. It ought to be split up
202 * into a header file plus about 3 separate .c files, to handle the details
203 * of the Gadget, USB Mass Storage, and SCSI protocols.
204 */
205
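/*
 * A minimal sketch of the buffer-head protocol described above,
 * mirroring the pattern used by do_read() later in this file (it is
 * illustrative only, not additional driver code):
 *
 *	bh = fsg->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY) {	// wait for a free buffer
 *		rc = sleep_thread(fsg);
 *		if (rc)
 *			return rc;
 *	}
 *	// ... fill bh->buf by file I/O; the buffer is BUSY during USB I/O ...
 *	bh->inreq->length = nread;
 *	bh->state = BUF_STATE_FULL;
 *	start_transfer(fsg, fsg->bulk_in, bh->inreq,
 *			&bh->inreq_busy, &bh->state);
 *	fsg->next_buffhd_to_fill = bh->next;
 */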
206
207/* #define VERBOSE_DEBUG */
208/* #define DUMP_MSGS */
209
210
211#include <linux/blkdev.h>
212#include <linux/completion.h>
213#include <linux/dcache.h>
214#include <linux/delay.h>
215#include <linux/device.h>
216#include <linux/fcntl.h>
217#include <linux/file.h>
218#include <linux/fs.h>
219#include <linux/kref.h>
220#include <linux/kthread.h>
221#include <linux/limits.h>
222#include <linux/rwsem.h>
223#include <linux/slab.h>
224#include <linux/spinlock.h>
225#include <linux/string.h>
226#include <linux/freezer.h>
227#include <linux/utsname.h>
228
229#include <linux/usb/ch9.h>
230#include <linux/usb/gadget.h>
231
232#include "gadget_chips.h"
233
234
235
236/*
237 * Kbuild is not very cooperative with respect to linking separately
238 * compiled library objects into one module. So for now we won't use
239 * separate compilation ... ensuring init/exit sections work to shrink
240 * the runtime footprint, and giving us at least some parts of what
241 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
242 */
243#include "usbstring.c"
244#include "config.c"
245#include "epautoconf.c"
246
247/*-------------------------------------------------------------------------*/
248
249#define DRIVER_DESC "File-backed Storage Gadget"
250#define DRIVER_NAME "g_file_storage"
251#define DRIVER_VERSION "20 November 2008"
252
253static char fsg_string_manufacturer[64];
254static const char fsg_string_product[] = DRIVER_DESC;
255static char fsg_string_serial[13];
256static const char fsg_string_config[] = "Self-powered";
257static const char fsg_string_interface[] = "Mass Storage";
258
259
260#define FSG_NO_INTR_EP 1
261
262#include "storage_common.c"
263
264
265MODULE_DESCRIPTION(DRIVER_DESC);
266MODULE_AUTHOR("Alan Stern");
267MODULE_LICENSE("Dual BSD/GPL");
268
269/*
270 * This driver assumes self-powered hardware and has no way for users to
271 * trigger remote wakeup. It uses autoconfiguration to select endpoints
272 * and endpoint addresses.
273 */
274
275
276/*-------------------------------------------------------------------------*/
277
278
279/* Encapsulate the module parameter settings */
280
281static struct {
282 char *file[FSG_MAX_LUNS];
283 int ro[FSG_MAX_LUNS];
284 unsigned int num_filenames;
285 unsigned int num_ros;
286 unsigned int nluns;
287
288 int removable;
289 int can_stall;
290 int cdrom;
291
 292	unsigned short	release;
 293} mod_data = {					// Default values
294 .removable = 0,
295 .can_stall = 1,
296 .cdrom = 0,
297 };
298
299
300module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
301 S_IRUGO);
302MODULE_PARM_DESC(file, "names of backing files or devices");
303
304module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
305MODULE_PARM_DESC(ro, "true to force read-only");
306
307module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
308MODULE_PARM_DESC(luns, "number of LUNs");
309
310module_param_named(removable, mod_data.removable, bool, S_IRUGO);
311MODULE_PARM_DESC(removable, "true to simulate removable media");
312
313module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
314MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
315
316module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
317MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
318
319
320/*-------------------------------------------------------------------------*/
321
322
323struct fsg_dev {
324 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
325 spinlock_t lock;
326 struct usb_gadget *gadget;
327
328 /* filesem protects: backing files in use */
329 struct rw_semaphore filesem;
330
331 /* reference counting: wait until all LUNs are released */
332 struct kref ref;
333
334 struct usb_ep *ep0; // Handy copy of gadget->ep0
335 struct usb_request *ep0req; // For control responses
336 unsigned int ep0_req_tag;
337 const char *ep0req_name;
338
339 unsigned int bulk_out_maxpacket;
340 enum fsg_state state; // For exception handling
341 unsigned int exception_req_tag;
342
343 u8 config, new_config;
344
345 unsigned int running : 1;
346 unsigned int bulk_in_enabled : 1;
347 unsigned int bulk_out_enabled : 1;
348 unsigned int phase_error : 1;
349 unsigned int short_packet_received : 1;
350 unsigned int bad_lun_okay : 1;
351
352 unsigned long atomic_bitflags;
353#define REGISTERED 0
354#define IGNORE_BULK_OUT 1
355
356 struct usb_ep *bulk_in;
357 struct usb_ep *bulk_out;
358
359 struct fsg_buffhd *next_buffhd_to_fill;
360 struct fsg_buffhd *next_buffhd_to_drain;
361 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
362
363 int thread_wakeup_needed;
364 struct completion thread_notifier;
365 struct task_struct *thread_task;
366
367 int cmnd_size;
368 u8 cmnd[MAX_COMMAND_SIZE];
369 enum data_direction data_dir;
370 u32 data_size;
371 u32 data_size_from_cmnd;
372 u32 tag;
373 unsigned int lun;
374 u32 residue;
375 u32 usb_amount_left;
376
377 unsigned int nluns;
378 struct fsg_lun *luns;
379 struct fsg_lun *curlun;
380};
381
382typedef void (*fsg_routine_t)(struct fsg_dev *);
383
384static int exception_in_progress(struct fsg_dev *fsg)
385{
386 return (fsg->state > FSG_STATE_IDLE);
387}
388
389/* Make bulk-out requests be divisible by the maxpacket size */
390static void set_bulk_out_req_length(struct fsg_dev *fsg,
391 struct fsg_buffhd *bh, unsigned int length)
392{
393 unsigned int rem;
394
395 bh->bulk_out_intended_length = length;
396 rem = length % fsg->bulk_out_maxpacket;
397 if (rem > 0)
398 length += fsg->bulk_out_maxpacket - rem;
399 bh->outreq->length = length;
400}
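/*
 * Example (illustrative): with bulk_out_maxpacket == 512, a requested
 * length of 1000 bytes is rounded up to 1024 so the request ends on a
 * packet boundary, while bulk_out_intended_length still records 1000.
 */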
401
402static struct fsg_dev *the_fsg;
403static struct usb_gadget_driver fsg_driver;
404
405
406/*-------------------------------------------------------------------------*/
407
408static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
409{
410 const char *name;
411
412 if (ep == fsg->bulk_in)
413 name = "bulk-in";
414 else if (ep == fsg->bulk_out)
415 name = "bulk-out";
416 else
417 name = ep->name;
418 DBG(fsg, "%s set halt\n", name);
419 return usb_ep_set_halt(ep);
420}
421
422
423/*-------------------------------------------------------------------------*/
424
425/*
426 * DESCRIPTORS ... most are static, but strings and (full) configuration
427 * descriptors are built on demand. Also the (static) config and interface
428 * descriptors are adjusted during fsg_bind().
429 */
430
431/* There is only one configuration. */
432#define CONFIG_VALUE 1
433
434static struct usb_device_descriptor
435device_desc = {
436 .bLength = sizeof device_desc,
437 .bDescriptorType = USB_DT_DEVICE,
438
439 .bcdUSB = cpu_to_le16(0x0200),
440 .bDeviceClass = USB_CLASS_PER_INTERFACE,
441
442 /* The next three values can be overridden by module parameters */
443 .idVendor = cpu_to_le16(FSG_VENDOR_ID),
444 .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
445 .bcdDevice = cpu_to_le16(0xffff),
446
447 .iManufacturer = FSG_STRING_MANUFACTURER,
448 .iProduct = FSG_STRING_PRODUCT,
449 .iSerialNumber = FSG_STRING_SERIAL,
450 .bNumConfigurations = 1,
451};
452
453static struct usb_config_descriptor
454config_desc = {
455 .bLength = sizeof config_desc,
456 .bDescriptorType = USB_DT_CONFIG,
457
458 /* wTotalLength computed by usb_gadget_config_buf() */
459 .bNumInterfaces = 1,
460 .bConfigurationValue = CONFIG_VALUE,
461 .iConfiguration = FSG_STRING_CONFIG,
462 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
463 .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
464};
465
466
467static struct usb_qualifier_descriptor
468dev_qualifier = {
469 .bLength = sizeof dev_qualifier,
470 .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
471
472 .bcdUSB = cpu_to_le16(0x0200),
473 .bDeviceClass = USB_CLASS_PER_INTERFACE,
474
475 .bNumConfigurations = 1,
476};
477
478
479
480/*
481 * Config descriptors must agree with the code that sets configurations
482 * and with code managing interfaces and their altsettings. They must
483 * also handle different speeds and other-speed requests.
484 */
485static int populate_config_buf(struct usb_gadget *gadget,
486 u8 *buf, u8 type, unsigned index)
487{
488 enum usb_device_speed speed = gadget->speed;
489 int len;
490 const struct usb_descriptor_header **function;
491
492 if (index > 0)
493 return -EINVAL;
494
495 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
496 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
497 if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH)
498 function = fsg_hs_function;
499 else
500 function = fsg_fs_function;
501
502 /* for now, don't advertise srp-only devices */
503 if (!gadget_is_otg(gadget))
504 function++;
505
506 len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
507 ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
508 return len;
509}
510
511
512/*-------------------------------------------------------------------------*/
513
514/* These routines may be called in process context or in_irq */
515
516/* Caller must hold fsg->lock */
517static void wakeup_thread(struct fsg_dev *fsg)
518{
519 /* Tell the main thread that something has happened */
520 fsg->thread_wakeup_needed = 1;
521 if (fsg->thread_task)
522 wake_up_process(fsg->thread_task);
523}
524
525
526static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
527{
528 unsigned long flags;
529
530 /* Do nothing if a higher-priority exception is already in progress.
531 * If a lower-or-equal priority exception is in progress, preempt it
532 * and notify the main thread by sending it a signal. */
533 spin_lock_irqsave(&fsg->lock, flags);
534 if (fsg->state <= new_state) {
535 fsg->exception_req_tag = fsg->ep0_req_tag;
536 fsg->state = new_state;
537 if (fsg->thread_task)
538 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
539 fsg->thread_task);
540 }
541 spin_unlock_irqrestore(&fsg->lock, flags);
542}
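/*
 * Example (priorities follow the ordering of enum fsg_state in
 * storage_common.c): a bulk-only reset (FSG_STATE_RESET) raised while a
 * config change (FSG_STATE_CONFIG_CHANGE) is still pending is ignored
 * by the test above, whereas a disconnect (FSG_STATE_DISCONNECT)
 * preempts either of them.
 */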
543
544
545/*-------------------------------------------------------------------------*/
546
547/* The disconnect callback and ep0 routines. These always run in_irq,
548 * except that ep0_queue() is called in the main thread to acknowledge
549 * completion of various requests: set config, set interface, and
550 * Bulk-only device reset. */
551
552static void fsg_disconnect(struct usb_gadget *gadget)
553{
554 struct fsg_dev *fsg = get_gadget_data(gadget);
555
556 DBG(fsg, "disconnect or port reset\n");
557 raise_exception(fsg, FSG_STATE_DISCONNECT);
558}
559
560
561static int ep0_queue(struct fsg_dev *fsg)
562{
563 int rc;
564
565 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
566 if (rc != 0 && rc != -ESHUTDOWN) {
567
568 /* We can't do much more than wait for a reset */
569 WARNING(fsg, "error in submission: %s --> %d\n",
570 fsg->ep0->name, rc);
571 }
572 return rc;
573}
574
575static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
576{
577 struct fsg_dev *fsg = ep->driver_data;
578
579 if (req->actual > 0)
580 dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
581 if (req->status || req->actual != req->length)
582 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
583 req->status, req->actual, req->length);
584 if (req->status == -ECONNRESET) // Request was cancelled
585 usb_ep_fifo_flush(ep);
586
587 if (req->status == 0 && req->context)
588 ((fsg_routine_t) (req->context))(fsg);
589}
590
591
592/*-------------------------------------------------------------------------*/
593
594/* Bulk and interrupt endpoint completion handlers.
595 * These always run in_irq. */
596
597static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
598{
599 struct fsg_dev *fsg = ep->driver_data;
600 struct fsg_buffhd *bh = req->context;
601
602 if (req->status || req->actual != req->length)
603 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
604 req->status, req->actual, req->length);
605 if (req->status == -ECONNRESET) // Request was cancelled
606 usb_ep_fifo_flush(ep);
607
608 /* Hold the lock while we update the request and buffer states */
609 smp_wmb();
610 spin_lock(&fsg->lock);
611 bh->inreq_busy = 0;
612 bh->state = BUF_STATE_EMPTY;
613 wakeup_thread(fsg);
614 spin_unlock(&fsg->lock);
615}
616
617static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
618{
619 struct fsg_dev *fsg = ep->driver_data;
620 struct fsg_buffhd *bh = req->context;
621
622 dump_msg(fsg, "bulk-out", req->buf, req->actual);
623 if (req->status || req->actual != bh->bulk_out_intended_length)
624 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
625 req->status, req->actual,
626 bh->bulk_out_intended_length);
627 if (req->status == -ECONNRESET) // Request was cancelled
628 usb_ep_fifo_flush(ep);
629
630 /* Hold the lock while we update the request and buffer states */
631 smp_wmb();
632 spin_lock(&fsg->lock);
633 bh->outreq_busy = 0;
634 bh->state = BUF_STATE_FULL;
635 wakeup_thread(fsg);
636 spin_unlock(&fsg->lock);
637}
638
639
640/*-------------------------------------------------------------------------*/
641
642/* Ep0 class-specific handlers. These always run in_irq. */
643
644static int class_setup_req(struct fsg_dev *fsg,
645 const struct usb_ctrlrequest *ctrl)
646{
647 struct usb_request *req = fsg->ep0req;
 648	u16			w_index = le16_to_cpu(ctrl->wIndex);
 649	u16			w_value = le16_to_cpu(ctrl->wValue);
650 u16 w_length = le16_to_cpu(ctrl->wLength);
651
652 if (!fsg->config)
 653		return -EOPNOTSUPP;
 654
 655	switch (ctrl->bRequest) {
 656
657 case USB_BULK_RESET_REQUEST:
658 if (ctrl->bRequestType !=
659 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 660			break;
661 if (w_index != 0 || w_value != 0)
662 return -EDOM;
 663
664 /* Raise an exception to stop the current operation
665 * and reinitialize our state. */
666 DBG(fsg, "bulk reset request\n");
667 raise_exception(fsg, FSG_STATE_RESET);
668 return DELAYED_STATUS;
 669
670 case USB_BULK_GET_MAX_LUN_REQUEST:
671 if (ctrl->bRequestType !=
672 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 673			break;
674 if (w_index != 0 || w_value != 0)
675 return -EDOM;
676 VDBG(fsg, "get max LUN\n");
677 *(u8 *) req->buf = fsg->nluns - 1;
678 return 1;
679 }
680
681 VDBG(fsg,
682 "unknown class-specific control req "
683 "%02x.%02x v%04x i%04x l%u\n",
684 ctrl->bRequestType, ctrl->bRequest,
685 le16_to_cpu(ctrl->wValue), w_index, w_length);
686 return -EOPNOTSUPP;
687}
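/*
 * Host-side view (illustrative, per the USB Mass Storage Bulk-Only
 * Transport spec): Get Max LUN arrives as bmRequestType 0xA1,
 * bRequest 0xFE, wValue 0, wIndex 0, wLength 1, and a gadget loaded
 * with luns=4 answers with the single byte 0x03; Bulk-Only Mass Storage
 * Reset uses bmRequestType 0x21, bRequest 0xFF and carries no data.
 */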
688
689
690/*-------------------------------------------------------------------------*/
691
692/* Ep0 standard request handlers. These always run in_irq. */
693
694static int standard_setup_req(struct fsg_dev *fsg,
695 const struct usb_ctrlrequest *ctrl)
696{
697 struct usb_request *req = fsg->ep0req;
698 int value = -EOPNOTSUPP;
699 u16 w_index = le16_to_cpu(ctrl->wIndex);
700 u16 w_value = le16_to_cpu(ctrl->wValue);
701
702 /* Usually this just stores reply data in the pre-allocated ep0 buffer,
703 * but config change events will also reconfigure hardware. */
704 switch (ctrl->bRequest) {
705
706 case USB_REQ_GET_DESCRIPTOR:
707 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
708 USB_RECIP_DEVICE))
709 break;
710 switch (w_value >> 8) {
711
712 case USB_DT_DEVICE:
713 VDBG(fsg, "get device descriptor\n");
714 value = sizeof device_desc;
715 memcpy(req->buf, &device_desc, value);
716 break;
717 case USB_DT_DEVICE_QUALIFIER:
718 VDBG(fsg, "get device qualifier\n");
719 if (!gadget_is_dualspeed(fsg->gadget))
720 break;
721 value = sizeof dev_qualifier;
722 memcpy(req->buf, &dev_qualifier, value);
723 break;
724
725 case USB_DT_OTHER_SPEED_CONFIG:
726 VDBG(fsg, "get other-speed config descriptor\n");
727 if (!gadget_is_dualspeed(fsg->gadget))
728 break;
729 goto get_config;
730 case USB_DT_CONFIG:
731 VDBG(fsg, "get configuration descriptor\n");
732get_config:
733 value = populate_config_buf(fsg->gadget,
734 req->buf,
735 w_value >> 8,
736 w_value & 0xff);
737 break;
738
739 case USB_DT_STRING:
740 VDBG(fsg, "get string descriptor\n");
741
742 /* wIndex == language code */
743 value = usb_gadget_get_string(&fsg_stringtab,
744 w_value & 0xff, req->buf);
745 break;
746 }
747 break;
748
749 /* One config, two speeds */
750 case USB_REQ_SET_CONFIGURATION:
751 if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
752 USB_RECIP_DEVICE))
753 break;
754 VDBG(fsg, "set configuration\n");
755 if (w_value == CONFIG_VALUE || w_value == 0) {
756 fsg->new_config = w_value;
757
758 /* Raise an exception to wipe out previous transaction
759 * state (queued bufs, etc) and set the new config. */
760 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
761 value = DELAYED_STATUS;
762 }
763 break;
764 case USB_REQ_GET_CONFIGURATION:
765 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
766 USB_RECIP_DEVICE))
767 break;
768 VDBG(fsg, "get configuration\n");
769 *(u8 *) req->buf = fsg->config;
770 value = 1;
771 break;
772
773 case USB_REQ_SET_INTERFACE:
774 if (ctrl->bRequestType != (USB_DIR_OUT| USB_TYPE_STANDARD |
775 USB_RECIP_INTERFACE))
776 break;
777 if (fsg->config && w_index == 0) {
778
779 /* Raise an exception to wipe out previous transaction
780 * state (queued bufs, etc) and install the new
781 * interface altsetting. */
782 raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
783 value = DELAYED_STATUS;
784 }
785 break;
786 case USB_REQ_GET_INTERFACE:
787 if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
788 USB_RECIP_INTERFACE))
789 break;
790 if (!fsg->config)
791 break;
792 if (w_index != 0) {
793 value = -EDOM;
794 break;
795 }
796 VDBG(fsg, "get interface\n");
797 *(u8 *) req->buf = 0;
798 value = 1;
799 break;
800
801 default:
802 VDBG(fsg,
803 "unknown control req %02x.%02x v%04x i%04x l%u\n",
804 ctrl->bRequestType, ctrl->bRequest,
805 w_value, w_index, le16_to_cpu(ctrl->wLength));
806 }
807
808 return value;
809}
810
811
812static int fsg_setup(struct usb_gadget *gadget,
813 const struct usb_ctrlrequest *ctrl)
814{
815 struct fsg_dev *fsg = get_gadget_data(gadget);
816 int rc;
817 int w_length = le16_to_cpu(ctrl->wLength);
818
819 ++fsg->ep0_req_tag; // Record arrival of a new request
820 fsg->ep0req->context = NULL;
821 fsg->ep0req->length = 0;
822 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
823
824 if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
825 rc = class_setup_req(fsg, ctrl);
826 else
827 rc = standard_setup_req(fsg, ctrl);
828
829 /* Respond with data/status or defer until later? */
830 if (rc >= 0 && rc != DELAYED_STATUS) {
831 rc = min(rc, w_length);
832 fsg->ep0req->length = rc;
833 fsg->ep0req->zero = rc < w_length;
834 fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
835 "ep0-in" : "ep0-out");
836 rc = ep0_queue(fsg);
837 }
838
839 /* Device either stalls (rc < 0) or reports success */
840 return rc;
841}
842
843
844/*-------------------------------------------------------------------------*/
845
846/* All the following routines run in process context */
847
848
849/* Use this for bulk or interrupt transfers, not ep0 */
850static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
851 struct usb_request *req, int *pbusy,
852 enum fsg_buffer_state *state)
853{
854 int rc;
855
856 if (ep == fsg->bulk_in)
857 dump_msg(fsg, "bulk-in", req->buf, req->length);
858
859 spin_lock_irq(&fsg->lock);
860 *pbusy = 1;
861 *state = BUF_STATE_BUSY;
862 spin_unlock_irq(&fsg->lock);
863 rc = usb_ep_queue(ep, req, GFP_KERNEL);
864 if (rc != 0) {
865 *pbusy = 0;
866 *state = BUF_STATE_EMPTY;
867
868 /* We can't do much more than wait for a reset */
869
870 /* Note: currently the net2280 driver fails zero-length
871 * submissions if DMA is enabled. */
872 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
873 req->length == 0))
874 WARNING(fsg, "error in submission: %s --> %d\n",
875 ep->name, rc);
876 }
877}
878
879
880static int sleep_thread(struct fsg_dev *fsg)
881{
882 int rc = 0;
883
884 /* Wait until a signal arrives or we are woken up */
885 for (;;) {
886 try_to_freeze();
887 set_current_state(TASK_INTERRUPTIBLE);
888 if (signal_pending(current)) {
889 rc = -EINTR;
890 break;
891 }
892 if (fsg->thread_wakeup_needed)
893 break;
894 schedule();
895 }
896 __set_current_state(TASK_RUNNING);
897 fsg->thread_wakeup_needed = 0;
898 return rc;
899}
900
901
902/*-------------------------------------------------------------------------*/
903
904static int do_read(struct fsg_dev *fsg)
905{
906 struct fsg_lun *curlun = fsg->curlun;
907 u32 lba;
908 struct fsg_buffhd *bh;
909 int rc;
910 u32 amount_left;
911 loff_t file_offset, file_offset_tmp;
912 unsigned int amount;
913 unsigned int partial_page;
914 ssize_t nread;
915
916 /* Get the starting Logical Block Address and check that it's
917 * not too big */
918 if (fsg->cmnd[0] == SC_READ_6)
919 lba = get_unaligned_be24(&fsg->cmnd[1]);
920 else {
921 lba = get_unaligned_be32(&fsg->cmnd[2]);
922
923 /* We allow DPO (Disable Page Out = don't save data in the
924 * cache) and FUA (Force Unit Access = don't read from the
925 * cache), but we don't implement them. */
926 if ((fsg->cmnd[1] & ~0x18) != 0) {
927 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
928 return -EINVAL;
929 }
930 }
931 if (lba >= curlun->num_sectors) {
932 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
933 return -EINVAL;
934 }
935 file_offset = ((loff_t) lba) << 9;
936
937 /* Carry out the file reads */
938 amount_left = fsg->data_size_from_cmnd;
939 if (unlikely(amount_left == 0))
940 return -EIO; // No default reply
941
942 for (;;) {
943
944 /* Figure out how much we need to read:
945 * Try to read the remaining amount.
946 * But don't read more than the buffer size.
947 * And don't try to read past the end of the file.
948 * Finally, if we're not at a page boundary, don't read past
949 * the next page.
950 * If this means reading 0 then we were asked to read past
951 * the end of file. */
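		/* Worked example (assumed values): with the default 16384-byte
		 * FSG_BUFLEN, 100 KiB still to read, only 5 KiB left before
		 * end-of-file, and file_offset 3584 bytes into a 4096-byte
		 * page, the three min() steps below give 16384 -> 5120 -> 512,
		 * so this iteration reads 512 bytes. */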
 952		amount = min(amount_left, FSG_BUFLEN);
953 amount = min((loff_t) amount,
954 curlun->file_length - file_offset);
955 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
956 if (partial_page > 0)
957 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
958 partial_page);
959
960 /* Wait for the next buffer to become available */
961 bh = fsg->next_buffhd_to_fill;
962 while (bh->state != BUF_STATE_EMPTY) {
963 rc = sleep_thread(fsg);
964 if (rc)
965 return rc;
966 }
967
968 /* If we were asked to read past the end of file,
969 * end with an empty buffer. */
970 if (amount == 0) {
971 curlun->sense_data =
972 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
973 curlun->sense_data_info = file_offset >> 9;
974 curlun->info_valid = 1;
975 bh->inreq->length = 0;
976 bh->state = BUF_STATE_FULL;
977 break;
978 }
979
980 /* Perform the read */
981 file_offset_tmp = file_offset;
982 nread = vfs_read(curlun->filp,
983 (char __user *) bh->buf,
984 amount, &file_offset_tmp);
985 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
986 (unsigned long long) file_offset,
987 (int) nread);
988 if (signal_pending(current))
989 return -EINTR;
990
991 if (nread < 0) {
992 LDBG(curlun, "error in file read: %d\n",
993 (int) nread);
994 nread = 0;
995 } else if (nread < amount) {
996 LDBG(curlun, "partial file read: %d/%u\n",
997 (int) nread, amount);
998 nread -= (nread & 511); // Round down to a block
999 }
1000 file_offset += nread;
1001 amount_left -= nread;
1002 fsg->residue -= nread;
1003 bh->inreq->length = nread;
1004 bh->state = BUF_STATE_FULL;
1005
1006 /* If an error occurred, report it and its position */
1007 if (nread < amount) {
1008 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1009 curlun->sense_data_info = file_offset >> 9;
1010 curlun->info_valid = 1;
1011 break;
1012 }
1013
1014 if (amount_left == 0)
1015 break; // No more left to read
1016
1017 /* Send this buffer and go read some more */
1018 bh->inreq->zero = 0;
1019 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1020 &bh->inreq_busy, &bh->state);
1021 fsg->next_buffhd_to_fill = bh->next;
1022 }
1023
1024 return -EIO; // No default reply
1025}
1026
1027
1028/*-------------------------------------------------------------------------*/
1029
1030static int do_write(struct fsg_dev *fsg)
1031{
1032 struct fsg_lun *curlun = fsg->curlun;
1033 u32 lba;
1034 struct fsg_buffhd *bh;
1035 int get_some_more;
1036 u32 amount_left_to_req, amount_left_to_write;
1037 loff_t usb_offset, file_offset, file_offset_tmp;
1038 unsigned int amount;
1039 unsigned int partial_page;
1040 ssize_t nwritten;
1041 int rc;
1042
1043 if (curlun->ro) {
1044 curlun->sense_data = SS_WRITE_PROTECTED;
1045 return -EINVAL;
1046 }
1047 spin_lock(&curlun->filp->f_lock);
1048 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
1049 spin_unlock(&curlun->filp->f_lock);
1050
1051 /* Get the starting Logical Block Address and check that it's
1052 * not too big */
1053 if (fsg->cmnd[0] == SC_WRITE_6)
1054 lba = get_unaligned_be24(&fsg->cmnd[1]);
1055 else {
1056 lba = get_unaligned_be32(&fsg->cmnd[2]);
1057
1058 /* We allow DPO (Disable Page Out = don't save data in the
1059 * cache) and FUA (Force Unit Access = write directly to the
1060 * medium). We don't implement DPO; we implement FUA by
1061 * performing synchronous output. */
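		/* Example (illustrative): a WRITE(10) CDB of
		 * 2a 08 00 00 08 00 00 00 08 00 addresses LBA 0x800 with the
		 * FUA bit set, so the code below turns on O_SYNC for the
		 * backing file. */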
1062 if ((fsg->cmnd[1] & ~0x18) != 0) {
1063 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1064 return -EINVAL;
1065 }
1066 if (fsg->cmnd[1] & 0x08) { // FUA
1067 spin_lock(&curlun->filp->f_lock);
1068 curlun->filp->f_flags |= O_SYNC;
1069 spin_unlock(&curlun->filp->f_lock);
1070 }
1071 }
1072 if (lba >= curlun->num_sectors) {
1073 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1074 return -EINVAL;
1075 }
1076
1077 /* Carry out the file writes */
1078 get_some_more = 1;
1079 file_offset = usb_offset = ((loff_t) lba) << 9;
1080 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
1081
1082 while (amount_left_to_write > 0) {
1083
1084 /* Queue a request for more data from the host */
1085 bh = fsg->next_buffhd_to_fill;
1086 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
1087
1088 /* Figure out how much we want to get:
1089 * Try to get the remaining amount.
1090 * But don't get more than the buffer size.
1091 * And don't try to go past the end of the file.
1092 * If we're not at a page boundary,
1093 * don't go past the next page.
1094 * If this means getting 0, then we were asked
1095 * to write past the end of file.
1096 * Finally, round down to a block boundary. */
 1097			amount = min(amount_left_to_req, FSG_BUFLEN);
1098 amount = min((loff_t) amount, curlun->file_length -
1099 usb_offset);
1100 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
1101 if (partial_page > 0)
1102 amount = min(amount,
1103 (unsigned int) PAGE_CACHE_SIZE - partial_page);
1104
1105 if (amount == 0) {
1106 get_some_more = 0;
1107 curlun->sense_data =
1108 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1109 curlun->sense_data_info = usb_offset >> 9;
1110 curlun->info_valid = 1;
1111 continue;
1112 }
1113 amount -= (amount & 511);
1114 if (amount == 0) {
1115
 1116			/* Why were we asked to transfer a
1117 * partial block? */
1118 get_some_more = 0;
1119 continue;
1120 }
1121
1122 /* Get the next buffer */
1123 usb_offset += amount;
1124 fsg->usb_amount_left -= amount;
1125 amount_left_to_req -= amount;
1126 if (amount_left_to_req == 0)
1127 get_some_more = 0;
1128
1129 /* amount is always divisible by 512, hence by
1130 * the bulk-out maxpacket size */
1131 bh->outreq->length = bh->bulk_out_intended_length =
1132 amount;
1133 bh->outreq->short_not_ok = 1;
1134 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1135 &bh->outreq_busy, &bh->state);
1136 fsg->next_buffhd_to_fill = bh->next;
1137 continue;
1138 }
1139
1140 /* Write the received data to the backing file */
1141 bh = fsg->next_buffhd_to_drain;
1142 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
1143 break; // We stopped early
1144 if (bh->state == BUF_STATE_FULL) {
1145 smp_rmb();
1146 fsg->next_buffhd_to_drain = bh->next;
1147 bh->state = BUF_STATE_EMPTY;
1148
1149 /* Did something go wrong with the transfer? */
1150 if (bh->outreq->status != 0) {
1151 curlun->sense_data = SS_COMMUNICATION_FAILURE;
1152 curlun->sense_data_info = file_offset >> 9;
1153 curlun->info_valid = 1;
1154 break;
1155 }
1156
1157 amount = bh->outreq->actual;
1158 if (curlun->file_length - file_offset < amount) {
1159 LERROR(curlun,
1160 "write %u @ %llu beyond end %llu\n",
1161 amount, (unsigned long long) file_offset,
1162 (unsigned long long) curlun->file_length);
1163 amount = curlun->file_length - file_offset;
1164 }
1165
1166 /* Perform the write */
1167 file_offset_tmp = file_offset;
1168 nwritten = vfs_write(curlun->filp,
1169 (char __user *) bh->buf,
1170 amount, &file_offset_tmp);
1171 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1172 (unsigned long long) file_offset,
1173 (int) nwritten);
1174 if (signal_pending(current))
1175 return -EINTR; // Interrupted!
1176
1177 if (nwritten < 0) {
1178 LDBG(curlun, "error in file write: %d\n",
1179 (int) nwritten);
1180 nwritten = 0;
1181 } else if (nwritten < amount) {
1182 LDBG(curlun, "partial file write: %d/%u\n",
1183 (int) nwritten, amount);
1184 nwritten -= (nwritten & 511);
1185 // Round down to a block
1186 }
1187 file_offset += nwritten;
1188 amount_left_to_write -= nwritten;
1189 fsg->residue -= nwritten;
1190
1191 /* If an error occurred, report it and its position */
1192 if (nwritten < amount) {
1193 curlun->sense_data = SS_WRITE_ERROR;
1194 curlun->sense_data_info = file_offset >> 9;
1195 curlun->info_valid = 1;
1196 break;
1197 }
1198
1199 /* Did the host decide to stop early? */
1200 if (bh->outreq->actual != bh->outreq->length) {
1201 fsg->short_packet_received = 1;
1202 break;
1203 }
1204 continue;
1205 }
1206
1207 /* Wait for something to happen */
1208 rc = sleep_thread(fsg);
1209 if (rc)
1210 return rc;
1211 }
1212
1213 return -EIO; // No default reply
1214}
1215
1216
1217/*-------------------------------------------------------------------------*/
1218
1219static int do_synchronize_cache(struct fsg_dev *fsg)
1220{
1221 struct fsg_lun *curlun = fsg->curlun;
1222 int rc;
1223
 1224	/* We ignore the requested LBA and write out all the file's
1225 * dirty data buffers. */
1226 rc = fsg_lun_fsync_sub(curlun);
1227 if (rc)
1228 curlun->sense_data = SS_WRITE_ERROR;
1229 return 0;
1230}
1231
1232
1233/*-------------------------------------------------------------------------*/
1234
1235static void invalidate_sub(struct fsg_lun *curlun)
1236{
1237 struct file *filp = curlun->filp;
1238 struct inode *inode = filp->f_path.dentry->d_inode;
1239 unsigned long rc;
1240
1241 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1242 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
1243}
1244
1245static int do_verify(struct fsg_dev *fsg)
1246{
1247 struct fsg_lun *curlun = fsg->curlun;
1248 u32 lba;
1249 u32 verification_length;
1250 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1251 loff_t file_offset, file_offset_tmp;
1252 u32 amount_left;
1253 unsigned int amount;
1254 ssize_t nread;
1255
1256 /* Get the starting Logical Block Address and check that it's
1257 * not too big */
1258 lba = get_unaligned_be32(&fsg->cmnd[2]);
1259 if (lba >= curlun->num_sectors) {
1260 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1261 return -EINVAL;
1262 }
1263
1264 /* We allow DPO (Disable Page Out = don't save data in the
1265 * cache) but we don't implement it. */
1266 if ((fsg->cmnd[1] & ~0x10) != 0) {
1267 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1268 return -EINVAL;
1269 }
1270
1271 verification_length = get_unaligned_be16(&fsg->cmnd[7]);
1272 if (unlikely(verification_length == 0))
1273 return -EIO; // No default reply
1274
1275 /* Prepare to carry out the file verify */
1276 amount_left = verification_length << 9;
1277 file_offset = ((loff_t) lba) << 9;
1278
1279 /* Write out all the dirty buffers before invalidating them */
1280 fsg_lun_fsync_sub(curlun);
1281 if (signal_pending(current))
1282 return -EINTR;
1283
1284 invalidate_sub(curlun);
1285 if (signal_pending(current))
1286 return -EINTR;
1287
1288 /* Just try to read the requested blocks */
1289 while (amount_left > 0) {
1290
1291 /* Figure out how much we need to read:
1292 * Try to read the remaining amount, but not more than
1293 * the buffer size.
1294 * And don't try to read past the end of the file.
1295 * If this means reading 0 then we were asked to read
1296 * past the end of file. */
 1297		amount = min(amount_left, FSG_BUFLEN);
1298 amount = min((loff_t) amount,
1299 curlun->file_length - file_offset);
1300 if (amount == 0) {
1301 curlun->sense_data =
1302 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1303 curlun->sense_data_info = file_offset >> 9;
1304 curlun->info_valid = 1;
1305 break;
1306 }
1307
1308 /* Perform the read */
1309 file_offset_tmp = file_offset;
1310 nread = vfs_read(curlun->filp,
1311 (char __user *) bh->buf,
1312 amount, &file_offset_tmp);
1313 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1314 (unsigned long long) file_offset,
1315 (int) nread);
1316 if (signal_pending(current))
1317 return -EINTR;
1318
1319 if (nread < 0) {
1320 LDBG(curlun, "error in file verify: %d\n",
1321 (int) nread);
1322 nread = 0;
1323 } else if (nread < amount) {
1324 LDBG(curlun, "partial file verify: %d/%u\n",
1325 (int) nread, amount);
1326 nread -= (nread & 511); // Round down to a sector
1327 }
1328 if (nread == 0) {
1329 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1330 curlun->sense_data_info = file_offset >> 9;
1331 curlun->info_valid = 1;
1332 break;
1333 }
1334 file_offset += nread;
1335 amount_left -= nread;
1336 }
1337 return 0;
1338}
1339
1340
1341/*-------------------------------------------------------------------------*/
1342
1343static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1344{
1345 u8 *buf = (u8 *) bh->buf;
1346
1347 static char vendor_id[] = "Linux ";
1348 static char product_disk_id[] = "File-Stor Gadget";
1349 static char product_cdrom_id[] = "File-CD Gadget ";
1350
1351 if (!fsg->curlun) { // Unsupported LUNs are okay
1352 fsg->bad_lun_okay = 1;
1353 memset(buf, 0, 36);
1354 buf[0] = 0x7f; // Unsupported, no device-type
1355 buf[4] = 31; // Additional length
1356 return 36;
1357 }
1358
1359 memset(buf, 0, 8);
1360 buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK);
1361 if (mod_data.removable)
1362 buf[1] = 0x80;
1363 buf[2] = 2; // ANSI SCSI level 2
1364 buf[3] = 2; // SCSI-2 INQUIRY data format
1365 buf[4] = 31; // Additional length
1366 // No special options
1367 sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
1368 (mod_data.cdrom ? product_cdrom_id :
1369 product_disk_id),
1370 mod_data.release);
1371 return 36;
1372}
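/*
 * Example (illustrative): with mod_data.release == 0x0312 the sprintf()
 * above places "Linux   File-Stor Gadget0312" into bytes 8..35 of the
 * standard 36-byte INQUIRY response.
 */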
1373
1374
1375static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1376{
1377 struct fsg_lun *curlun = fsg->curlun;
1378 u8 *buf = (u8 *) bh->buf;
1379 u32 sd, sdinfo;
1380 int valid;
1381
1382 /*
1383 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1384 *
1385 * If a REQUEST SENSE command is received from an initiator
1386 * with a pending unit attention condition (before the target
1387 * generates the contingent allegiance condition), then the
1388 * target shall either:
1389 * a) report any pending sense data and preserve the unit
1390 * attention condition on the logical unit, or,
1391 * b) report the unit attention condition, may discard any
1392 * pending sense data, and clear the unit attention
1393 * condition on the logical unit for that initiator.
1394 *
1395 * FSG normally uses option a); enable this code to use option b).
1396 */
1397#if 0
1398 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1399 curlun->sense_data = curlun->unit_attention_data;
1400 curlun->unit_attention_data = SS_NO_SENSE;
1401 }
1402#endif
1403
1404 if (!curlun) { // Unsupported LUNs are okay
1405 fsg->bad_lun_okay = 1;
1406 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1407 sdinfo = 0;
1408 valid = 0;
1409 } else {
1410 sd = curlun->sense_data;
1411 sdinfo = curlun->sense_data_info;
1412 valid = curlun->info_valid << 7;
1413 curlun->sense_data = SS_NO_SENSE;
1414 curlun->sense_data_info = 0;
1415 curlun->info_valid = 0;
1416 }
1417
1418 memset(buf, 0, 18);
1419 buf[0] = valid | 0x70; // Valid, current error
1420 buf[2] = SK(sd);
1421 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1422 buf[7] = 18 - 8; // Additional sense length
1423 buf[12] = ASC(sd);
1424 buf[13] = ASCQ(sd);
1425 return 18;
1426}
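/*
 * Illustrative response (assuming the SS_* encoding in storage_common.c,
 * where SS_MEDIUM_NOT_PRESENT packs SK/ASC/ASCQ as 0x02/0x3a/0x00): the
 * 18-byte buffer built above would then hold buf[0] = 0x70,
 * buf[2] = 0x02, buf[7] = 10, buf[12] = 0x3a, and buf[13] = 0x00.
 */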
1427
1428
1429static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1430{
1431 struct fsg_lun *curlun = fsg->curlun;
1432 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
1433 int pmi = fsg->cmnd[8];
1434 u8 *buf = (u8 *) bh->buf;
1435
1436 /* Check the PMI and LBA fields */
1437 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1438 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1439 return -EINVAL;
1440 }
1441
1442 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1443 /* Max logical block */
1444 put_unaligned_be32(512, &buf[4]); /* Block length */
1445 return 8;
1446}
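/*
 * Example (illustrative): a 1 GiB backing file gives num_sectors ==
 * 2097152, so the response above reports a last LBA of 2097151 and a
 * block length of 512 bytes.
 */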
1447
1448
1449static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1450{
1451 struct fsg_lun *curlun = fsg->curlun;
1452 int msf = fsg->cmnd[1] & 0x02;
1453 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
1454 u8 *buf = (u8 *) bh->buf;
1455
1456 if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
1457 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1458 return -EINVAL;
1459 }
1460 if (lba >= curlun->num_sectors) {
1461 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1462 return -EINVAL;
1463 }
1464
1465 memset(buf, 0, 8);
1466 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1467 store_cdrom_address(&buf[4], msf, lba);
1468 return 8;
1469}
1470
1471
1472static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1473{
1474 struct fsg_lun *curlun = fsg->curlun;
1475 int msf = fsg->cmnd[1] & 0x02;
1476 int start_track = fsg->cmnd[6];
1477 u8 *buf = (u8 *) bh->buf;
1478
1479 if ((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1480 start_track > 1) {
1481 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1482 return -EINVAL;
1483 }
1484
1485 memset(buf, 0, 20);
1486 buf[1] = (20-2); /* TOC data length */
1487 buf[2] = 1; /* First track number */
1488 buf[3] = 1; /* Last track number */
1489 buf[5] = 0x16; /* Data track, copying allowed */
1490 buf[6] = 0x01; /* Only track is number 1 */
1491 store_cdrom_address(&buf[8], msf, 0);
1492
1493 buf[13] = 0x16; /* Lead-out track is data */
1494 buf[14] = 0xAA; /* Lead-out track number */
1495 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1496 return 20;
1497}
1498
1499
1500static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1501{
1502 struct fsg_lun *curlun = fsg->curlun;
1503 int mscmnd = fsg->cmnd[0];
1504 u8 *buf = (u8 *) bh->buf;
1505 u8 *buf0 = buf;
1506 int pc, page_code;
1507 int changeable_values, all_pages;
1508 int valid_page = 0;
1509 int len, limit;
1510
1511 if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
1512 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1513 return -EINVAL;
1514 }
1515 pc = fsg->cmnd[2] >> 6;
1516 page_code = fsg->cmnd[2] & 0x3f;
1517 if (pc == 3) {
1518 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1519 return -EINVAL;
1520 }
1521 changeable_values = (pc == 1);
1522 all_pages = (page_code == 0x3f);
1523
1524 /* Write the mode parameter header. Fixed values are: default
1525 * medium type, no cache control (DPOFUA), and no block descriptors.
1526 * The only variable value is the WriteProtect bit. We will fill in
1527 * the mode data length later. */
1528 memset(buf, 0, 8);
1529 if (mscmnd == SC_MODE_SENSE_6) {
1530 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1531 buf += 4;
1532 limit = 255;
1533 } else { // SC_MODE_SENSE_10
1534 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1535 buf += 8;
 1536		limit = 65535;		// Should really be FSG_BUFLEN
1537 }
1538
1539 /* No block descriptors */
1540
1541 /* The mode pages, in numerical order. The only page we support
1542 * is the Caching page. */
1543 if (page_code == 0x08 || all_pages) {
1544 valid_page = 1;
1545 buf[0] = 0x08; // Page code
1546 buf[1] = 10; // Page length
1547 memset(buf+2, 0, 10); // None of the fields are changeable
1548
1549 if (!changeable_values) {
1550 buf[2] = 0x04; // Write cache enable,
1551 // Read cache not disabled
1552 // No cache retention priorities
1553 put_unaligned_be16(0xffff, &buf[4]);
1554 /* Don't disable prefetch */
1555 /* Minimum prefetch = 0 */
1556 put_unaligned_be16(0xffff, &buf[8]);
1557 /* Maximum prefetch */
1558 put_unaligned_be16(0xffff, &buf[10]);
1559 /* Maximum prefetch ceiling */
1560 }
1561 buf += 12;
1562 }
1563
1564 /* Check that a valid page was requested and the mode data length
1565 * isn't too long. */
1566 len = buf - buf0;
1567 if (!valid_page || len > limit) {
1568 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1569 return -EINVAL;
1570 }
1571
1572 /* Store the mode data length */
1573 if (mscmnd == SC_MODE_SENSE_6)
1574 buf0[0] = len - 1;
1575 else
1576 put_unaligned_be16(len - 2, buf0);
1577 return len;
1578}
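/*
 * Example (derived from the code above): a MODE SENSE(6) request for the
 * Caching page (page_code 0x08) produces a 4-byte header plus a 12-byte
 * page, so len == 16 and the mode data length byte is set to 15; the
 * MODE SENSE(10) variant uses an 8-byte header and reports 18.
 */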
1579
1580
1581static int do_start_stop(struct fsg_dev *fsg)
1582{
 1583	if (!mod_data.removable) {
 1584		fsg->curlun->sense_data = SS_INVALID_COMMAND;
1585 return -EINVAL;
1586 }
1587 return 0;
1588}
1589
1590
1591static int do_prevent_allow(struct fsg_dev *fsg)
1592{
1593 struct fsg_lun *curlun = fsg->curlun;
1594 int prevent;
1595
1596 if (!mod_data.removable) {
1597 curlun->sense_data = SS_INVALID_COMMAND;
1598 return -EINVAL;
1599 }
1600
1601 prevent = fsg->cmnd[4] & 0x01;
1602 if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
1603 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1604 return -EINVAL;
1605 }
1606
1607 if (curlun->prevent_medium_removal && !prevent)
1608 fsg_lun_fsync_sub(curlun);
1609 curlun->prevent_medium_removal = prevent;
1610 return 0;
1611}
1612
1613
1614static int do_read_format_capacities(struct fsg_dev *fsg,
1615 struct fsg_buffhd *bh)
1616{
1617 struct fsg_lun *curlun = fsg->curlun;
1618 u8 *buf = (u8 *) bh->buf;
1619
1620 buf[0] = buf[1] = buf[2] = 0;
1621 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
1622 buf += 4;
1623
1624 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1625 /* Number of blocks */
1626 put_unaligned_be32(512, &buf[4]); /* Block length */
1627 buf[4] = 0x02; /* Current capacity */
1628 return 12;
1629}
1630
1631
1632static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1633{
1634 struct fsg_lun *curlun = fsg->curlun;
1635
1636 /* We don't support MODE SELECT */
1637 curlun->sense_data = SS_INVALID_COMMAND;
1638 return -EINVAL;
1639}
1640
1641
1642/*-------------------------------------------------------------------------*/
1643
1644static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1645{
1646 int rc;
1647
1648 rc = fsg_set_halt(fsg, fsg->bulk_in);
1649 if (rc == -EAGAIN)
1650 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1651 while (rc != 0) {
1652 if (rc != -EAGAIN) {
1653 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1654 rc = 0;
1655 break;
1656 }
1657
1658 /* Wait for a short time and then try again */
1659 if (msleep_interruptible(100) != 0)
1660 return -EINTR;
1661 rc = usb_ep_set_halt(fsg->bulk_in);
1662 }
1663 return rc;
1664}
1665
1666static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1667{
1668 int rc;
1669
1670 DBG(fsg, "bulk-in set wedge\n");
1671 rc = usb_ep_set_wedge(fsg->bulk_in);
1672 if (rc == -EAGAIN)
1673 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1674 while (rc != 0) {
1675 if (rc != -EAGAIN) {
1676 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1677 rc = 0;
1678 break;
1679 }
1680
1681 /* Wait for a short time and then try again */
1682 if (msleep_interruptible(100) != 0)
1683 return -EINTR;
1684 rc = usb_ep_set_wedge(fsg->bulk_in);
1685 }
1686 return rc;
1687}
1688
1689static int pad_with_zeros(struct fsg_dev *fsg)
1690{
1691 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1692 u32 nkeep = bh->inreq->length;
1693 u32 nsend;
1694 int rc;
1695
1696 bh->state = BUF_STATE_EMPTY; // For the first iteration
1697 fsg->usb_amount_left = nkeep + fsg->residue;
1698 while (fsg->usb_amount_left > 0) {
1699
1700 /* Wait for the next buffer to be free */
1701 while (bh->state != BUF_STATE_EMPTY) {
1702 rc = sleep_thread(fsg);
1703 if (rc)
1704 return rc;
1705 }
1706
 1707		nsend = min(fsg->usb_amount_left, FSG_BUFLEN);
1708 memset(bh->buf + nkeep, 0, nsend - nkeep);
1709 bh->inreq->length = nsend;
1710 bh->inreq->zero = 0;
1711 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1712 &bh->inreq_busy, &bh->state);
1713 bh = fsg->next_buffhd_to_fill = bh->next;
1714 fsg->usb_amount_left -= nsend;
1715 nkeep = 0;
1716 }
1717 return 0;
1718}
1719
1720static int throw_away_data(struct fsg_dev *fsg)
1721{
1722 struct fsg_buffhd *bh;
1723 u32 amount;
1724 int rc;
1725
1726 while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
1727 fsg->usb_amount_left > 0) {
1728
1729 /* Throw away the data in a filled buffer */
1730 if (bh->state == BUF_STATE_FULL) {
1731 smp_rmb();
1732 bh->state = BUF_STATE_EMPTY;
1733 fsg->next_buffhd_to_drain = bh->next;
1734
1735 /* A short packet or an error ends everything */
1736 if (bh->outreq->actual != bh->outreq->length ||
1737 bh->outreq->status != 0) {
1738 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1739 return -EINTR;
1740 }
1741 continue;
1742 }
1743
1744 /* Try to submit another request if we need one */
1745 bh = fsg->next_buffhd_to_fill;
1746 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1747 			amount = min(fsg->usb_amount_left, FSG_BUFLEN);
1748
1749 /* amount is always divisible by 512, hence by
1750 * the bulk-out maxpacket size */
1751 bh->outreq->length = bh->bulk_out_intended_length =
1752 amount;
1753 bh->outreq->short_not_ok = 1;
1754 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1755 &bh->outreq_busy, &bh->state);
1756 fsg->next_buffhd_to_fill = bh->next;
1757 fsg->usb_amount_left -= amount;
1758 continue;
1759 }
1760
1761 /* Otherwise wait for something to happen */
1762 rc = sleep_thread(fsg);
1763 if (rc)
1764 return rc;
1765 }
1766 return 0;
1767}
1768
1769
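/*
 * Finish the data phase of the current command.  What has to be done
 * depends on the transfer direction and on whether any residue is
 * left over after the command handler ran.
 */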
1770static int finish_reply(struct fsg_dev *fsg)
1771{
1772 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1773 int rc = 0;
1774
1775 switch (fsg->data_dir) {
1776 case DATA_DIR_NONE:
1777 break; // Nothing to send
1778
1779 /* If we don't know whether the host wants to read or write,
1780 * this must be CB or CBI with an unknown command. We mustn't
1781 * try to send or receive any data. So stall both bulk pipes
1782 * if we can and wait for a reset. */
1783 case DATA_DIR_UNKNOWN:
1784 if (mod_data.can_stall) {
1785 fsg_set_halt(fsg, fsg->bulk_out);
1786 rc = halt_bulk_in_endpoint(fsg);
1787 }
1788 break;
1789
1790 /* All but the last buffer of data must have already been sent */
1791 case DATA_DIR_TO_HOST:
1792 if (fsg->data_size == 0) {
1793 /* Nothing to send */
1794
1795 /* If there's no residue, simply send the last buffer */
1796 	} else if (fsg->residue == 0) {
1797 bh->inreq->zero = 0;
1798 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1799 &bh->inreq_busy, &bh->state);
1800 fsg->next_buffhd_to_fill = bh->next;
1801
1802 /* For Bulk-only, if we're allowed to stall then send the
1803 * short packet and halt the bulk-in endpoint. If we can't
1804 * stall, pad out the remaining data with 0's. */
1805 } else if (mod_data.can_stall) {
1806 bh->inreq->zero = 1;
1807 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1808 &bh->inreq_busy, &bh->state);
1809 fsg->next_buffhd_to_fill = bh->next;
1810 rc = halt_bulk_in_endpoint(fsg);
1811 } else {
1812 rc = pad_with_zeros(fsg);
1813 }
1814 break;
1815
1816 /* We have processed all we want from the data the host has sent.
1817 * There may still be outstanding bulk-out requests. */
1818 case DATA_DIR_FROM_HOST:
1819 if (fsg->residue == 0)
1820 ; // Nothing to receive
1821
1822 /* Did the host stop sending unexpectedly early? */
1823 else if (fsg->short_packet_received) {
1824 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1825 rc = -EINTR;
1826 }
1827
1828 /* We haven't processed all the incoming data. Even though
1829 * we may be allowed to stall, doing so would cause a race.
1830 * The controller may already have ACK'ed all the remaining
1831 * bulk-out packets, in which case the host wouldn't see a
1832 * STALL. Not realizing the endpoint was halted, it wouldn't
1833 * clear the halt -- leading to problems later on. */
1834#if 0
1835 else if (mod_data.can_stall) {
1836 fsg_set_halt(fsg, fsg->bulk_out);
1837 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1838 rc = -EINTR;
1839 }
1840#endif
1841
1842 /* We can't stall. Read in the excess data and throw it
1843 * all away. */
1844 else
1845 rc = throw_away_data(fsg);
1846 break;
1847 }
1848 return rc;
1849}
1850
1851
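/*
 * Send the Bulk-only Command Status Wrapper: a 13-byte structure
 * carrying the CSW signature, the tag echoed back from the CBW, the
 * residue (bytes expected but not transferred), and the status code.
 */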
1852static int send_status(struct fsg_dev *fsg)
1853{
1854 struct fsg_lun *curlun = fsg->curlun;
1855 struct fsg_buffhd *bh;
1856	struct bulk_cs_wrap *csw;
1857 int rc;
1858 u8 status = USB_STATUS_PASS;
1859 u32 sd, sdinfo = 0;
1860
1861 /* Wait for the next buffer to become available */
1862 bh = fsg->next_buffhd_to_fill;
1863 while (bh->state != BUF_STATE_EMPTY) {
1864 rc = sleep_thread(fsg);
1865 if (rc)
1866 return rc;
1867 }
1868
1869 if (curlun) {
1870 sd = curlun->sense_data;
1871 sdinfo = curlun->sense_data_info;
1872 } else if (fsg->bad_lun_okay)
1873 sd = SS_NO_SENSE;
1874 else
1875 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1876
1877 if (fsg->phase_error) {
1878 DBG(fsg, "sending phase-error status\n");
1879 status = USB_STATUS_PHASE_ERROR;
1880 sd = SS_INVALID_COMMAND;
1881 } else if (sd != SS_NO_SENSE) {
1882 DBG(fsg, "sending command-failure status\n");
1883 status = USB_STATUS_FAIL;
1884 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1885 " info x%x\n",
1886 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1887 }
1888
1889
1890 /* Store and send the Bulk-only CSW */
1891 csw = bh->buf;
1892
1893 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1894 csw->Tag = fsg->tag;
1895 csw->Residue = cpu_to_le32(fsg->residue);
1896 csw->Status = status;
1897
1898 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1899 bh->inreq->zero = 0;
1900 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1901 &bh->inreq_busy, &bh->state);
1902
1903 fsg->next_buffhd_to_fill = bh->next;
1904 return 0;
1905}
1906
1907
1908/*-------------------------------------------------------------------------*/
1909
1910/* Check whether the command is properly formed and whether its data size
1911 * and direction agree with the values we already have. */
1912static int check_command(struct fsg_dev *fsg, int cmnd_size,
1913 enum data_direction data_dir, unsigned int mask,
1914 int needs_medium, const char *name)
1915{
1916 int i;
1917 int lun = fsg->cmnd[1] >> 5;
1918 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1919 char hdlen[20];
1920 struct fsg_lun *curlun;
1921
1922 hdlen[0] = 0;
1923 if (fsg->data_dir != DATA_DIR_UNKNOWN)
1924 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
1925 fsg->data_size);
1926 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1927 name, cmnd_size, dirletter[(int) data_dir],
1928 fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
1929
1930 /* We can't reply at all until we know the correct data direction
1931 * and size. */
1932 if (fsg->data_size_from_cmnd == 0)
1933 data_dir = DATA_DIR_NONE;
1934 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
1935 fsg->data_dir = data_dir;
1936 fsg->data_size = fsg->data_size_from_cmnd;
1937
1938 } else { // Bulk-only
1939 if (fsg->data_size < fsg->data_size_from_cmnd) {
1940
1941 /* Host data size < Device data size is a phase error.
1942 * Carry out the command, but only transfer as much
1943 * as we are allowed. */
1944 fsg->data_size_from_cmnd = fsg->data_size;
1945 fsg->phase_error = 1;
1946 }
1947 }
1948 fsg->residue = fsg->usb_amount_left = fsg->data_size;
1949
1950 /* Conflicting data directions is a phase error */
1951 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
1952 fsg->phase_error = 1;
1953 return -EINVAL;
1954 }
1955
1956 /* Verify the length of the command itself */
1957 if (cmnd_size != fsg->cmnd_size) {
1958
1959 /* Special case workaround: There are plenty of buggy SCSI
1960 * implementations. Many have issues with cbw->Length
1961 * field passing a wrong command size. For those cases we
1962 * always try to work around the problem by using the length
1963 * sent by the host side provided it is at least as large
1964 * as the correct command length.
1965 * Examples of such cases would be MS-Windows, which issues
1966 * REQUEST SENSE with cbw->Length == 12 where it should
1967 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1968 * REQUEST SENSE with cbw->Length == 10 where it should
1969 * be 6 as well.
1970 */
1971 if (cmnd_size <= fsg->cmnd_size) {
1972 DBG(fsg, "%s is buggy! Expected length %d "
1973 "but we got %d\n", name,
1974 cmnd_size, fsg->cmnd_size);
1975 cmnd_size = fsg->cmnd_size;
1976 } else {
1977 fsg->phase_error = 1;
1978 return -EINVAL;
1979 }
1980 }
1981
1982 /* Check that the LUN values are consistent */
1983 if (fsg->lun != lun)
1984 DBG(fsg, "using LUN %d from CBW, not LUN %d from CDB\n",
1985 fsg->lun, lun);
1986
1987 /* Check the LUN */
1988 if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
1989 fsg->curlun = curlun = &fsg->luns[fsg->lun];
1990 if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
1991 curlun->sense_data = SS_NO_SENSE;
1992 curlun->sense_data_info = 0;
1993 curlun->info_valid = 0;
1994 }
1995 } else {
1996 fsg->curlun = curlun = NULL;
1997 fsg->bad_lun_okay = 0;
1998
1999 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2000 * to use unsupported LUNs; all others may not. */
2001 if (fsg->cmnd[0] != SC_INQUIRY &&
2002 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2003 DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2004 return -EINVAL;
2005 }
2006 }
2007
2008 /* If a unit attention condition exists, only INQUIRY and
2009 * REQUEST SENSE commands are allowed; anything else must fail. */
2010 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2011 fsg->cmnd[0] != SC_INQUIRY &&
2012 fsg->cmnd[0] != SC_REQUEST_SENSE) {
2013 curlun->sense_data = curlun->unit_attention_data;
2014 curlun->unit_attention_data = SS_NO_SENSE;
2015 return -EINVAL;
2016 }
2017
2018 /* Check that only command bytes listed in the mask are non-zero */
2019 fsg->cmnd[1] &= 0x1f; // Mask away the LUN
2020 for (i = 1; i < cmnd_size; ++i) {
2021 if (fsg->cmnd[i] && !(mask & (1 << i))) {
2022 if (curlun)
2023 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2024 return -EINVAL;
2025 }
2026 }
2027
2028 /* If the medium isn't mounted and the command needs to access
2029 * it, return an error. */
2030 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
2031 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2032 return -EINVAL;
2033 }
2034
2035 return 0;
2036}
2037
2038
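/*
 * Parse and execute one SCSI command.  For each opcode we derive the
 * expected data length from the CDB and give check_command() a bitmask
 * of the CDB bytes (beyond the opcode) that are allowed to be non-zero.
 */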
2039static int do_scsi_command(struct fsg_dev *fsg)
2040{
2041 struct fsg_buffhd *bh;
2042 int rc;
2043 int reply = -EINVAL;
2044 int i;
2045 static char unknown[16];
2046
2047 dump_cdb(fsg);
2048
2049 /* Wait for the next buffer to become available for data or status */
2050 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2051 while (bh->state != BUF_STATE_EMPTY) {
2052 rc = sleep_thread(fsg);
2053 if (rc)
2054 return rc;
2055 }
2056 fsg->phase_error = 0;
2057 fsg->short_packet_received = 0;
2058
2059 down_read(&fsg->filesem); // We're using the backing file
2060 switch (fsg->cmnd[0]) {
2061
2062 case SC_INQUIRY:
2063 fsg->data_size_from_cmnd = fsg->cmnd[4];
2064 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2065 (1<<4), 0,
2066 "INQUIRY")) == 0)
2067 reply = do_inquiry(fsg, bh);
2068 break;
2069
2070 case SC_MODE_SELECT_6:
2071 fsg->data_size_from_cmnd = fsg->cmnd[4];
2072 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2073 (1<<1) | (1<<4), 0,
2074 "MODE SELECT(6)")) == 0)
2075 reply = do_mode_select(fsg, bh);
2076 break;
2077
2078 case SC_MODE_SELECT_10:
2079 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2080 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2081 (1<<1) | (3<<7), 0,
2082 "MODE SELECT(10)")) == 0)
2083 reply = do_mode_select(fsg, bh);
2084 break;
2085
2086 case SC_MODE_SENSE_6:
2087 fsg->data_size_from_cmnd = fsg->cmnd[4];
2088 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2089 (1<<1) | (1<<2) | (1<<4), 0,
2090 "MODE SENSE(6)")) == 0)
2091 reply = do_mode_sense(fsg, bh);
2092 break;
2093
2094 case SC_MODE_SENSE_10:
2095 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2096 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2097 (1<<1) | (1<<2) | (3<<7), 0,
2098 "MODE SENSE(10)")) == 0)
2099 reply = do_mode_sense(fsg, bh);
2100 break;
2101
2102 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
2103 fsg->data_size_from_cmnd = 0;
2104 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2105 (1<<4), 0,
2106 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2107 reply = do_prevent_allow(fsg);
2108 break;
2109
2110 case SC_READ_6:
2111 i = fsg->cmnd[4];
2112 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2113 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2114 (7<<1) | (1<<4), 1,
2115 "READ(6)")) == 0)
2116 reply = do_read(fsg);
2117 break;
2118
2119 case SC_READ_10:
2120 fsg->data_size_from_cmnd =
2121 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2122 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2123 (1<<1) | (0xf<<2) | (3<<7), 1,
2124 "READ(10)")) == 0)
2125 reply = do_read(fsg);
2126 break;
2127
2128 case SC_READ_12:
2129 fsg->data_size_from_cmnd =
2130 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2131 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
2132 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2133 "READ(12)")) == 0)
2134 reply = do_read(fsg);
2135 break;
2136
2137 case SC_READ_CAPACITY:
2138 fsg->data_size_from_cmnd = 8;
2139 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2140 (0xf<<2) | (1<<8), 1,
2141 "READ CAPACITY")) == 0)
2142 reply = do_read_capacity(fsg, bh);
2143 break;
2144
2145 case SC_READ_HEADER:
2146 if (!mod_data.cdrom)
2147 goto unknown_cmnd;
2148 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2149 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2150 (3<<7) | (0x1f<<1), 1,
2151 "READ HEADER")) == 0)
2152 reply = do_read_header(fsg, bh);
2153 break;
2154
2155 case SC_READ_TOC:
2156 if (!mod_data.cdrom)
2157 goto unknown_cmnd;
2158 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2159 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2160 (7<<6) | (1<<1), 1,
2161 "READ TOC")) == 0)
2162 reply = do_read_toc(fsg, bh);
2163 break;
2164
2165 case SC_READ_FORMAT_CAPACITIES:
2166 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2167 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2168 (3<<7), 1,
2169 "READ FORMAT CAPACITIES")) == 0)
2170 reply = do_read_format_capacities(fsg, bh);
2171 break;
2172
2173 case SC_REQUEST_SENSE:
2174 fsg->data_size_from_cmnd = fsg->cmnd[4];
2175 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2176 (1<<4), 0,
2177 "REQUEST SENSE")) == 0)
2178 reply = do_request_sense(fsg, bh);
2179 break;
2180
2181 case SC_START_STOP_UNIT:
2182 fsg->data_size_from_cmnd = 0;
2183 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2184 (1<<1) | (1<<4), 0,
2185 "START-STOP UNIT")) == 0)
2186 reply = do_start_stop(fsg);
2187 break;
2188
2189 case SC_SYNCHRONIZE_CACHE:
2190 fsg->data_size_from_cmnd = 0;
2191 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2192 (0xf<<2) | (3<<7), 1,
2193 "SYNCHRONIZE CACHE")) == 0)
2194 reply = do_synchronize_cache(fsg);
2195 break;
2196
2197 case SC_TEST_UNIT_READY:
2198 fsg->data_size_from_cmnd = 0;
2199 reply = check_command(fsg, 6, DATA_DIR_NONE,
2200 0, 1,
2201 "TEST UNIT READY");
2202 break;
2203
2204 /* Although optional, this command is used by MS-Windows. We
2205 * support a minimal version: BytChk must be 0. */
2206 case SC_VERIFY:
2207 fsg->data_size_from_cmnd = 0;
2208 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2209 (1<<1) | (0xf<<2) | (3<<7), 1,
2210 "VERIFY")) == 0)
2211 reply = do_verify(fsg);
2212 break;
2213
2214 case SC_WRITE_6:
2215 i = fsg->cmnd[4];
2216 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2217 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2218 (7<<1) | (1<<4), 1,
2219 "WRITE(6)")) == 0)
2220 reply = do_write(fsg);
2221 break;
2222
2223 case SC_WRITE_10:
2224 fsg->data_size_from_cmnd =
2225 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2226 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2227 (1<<1) | (0xf<<2) | (3<<7), 1,
2228 "WRITE(10)")) == 0)
2229 reply = do_write(fsg);
2230 break;
2231
2232 case SC_WRITE_12:
2233 fsg->data_size_from_cmnd =
2234 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2235 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
2236 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2237 "WRITE(12)")) == 0)
2238 reply = do_write(fsg);
2239 break;
2240
2241 /* Some mandatory commands that we recognize but don't implement.
2242 * They don't mean much in this setting. It's left as an exercise
2243 * for anyone interested to implement RESERVE and RELEASE in terms
2244 * of Posix locks. */
2245 case SC_FORMAT_UNIT:
2246 case SC_RELEASE:
2247 case SC_RESERVE:
2248 case SC_SEND_DIAGNOSTIC:
2249 // Fall through
2250
2251 default:
2252 unknown_cmnd:
2253 fsg->data_size_from_cmnd = 0;
2254 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2255 if ((reply = check_command(fsg, fsg->cmnd_size,
2256 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
2257 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2258 reply = -EINVAL;
2259 }
2260 break;
2261 }
2262 up_read(&fsg->filesem);
2263
2264 if (reply == -EINTR || signal_pending(current))
2265 return -EINTR;
2266
2267 /* Set up the single reply buffer for finish_reply() */
2268 if (reply == -EINVAL)
2269 reply = 0; // Error reply length
2270 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2271 reply = min((u32) reply, fsg->data_size_from_cmnd);
2272 bh->inreq->length = reply;
2273 bh->state = BUF_STATE_FULL;
2274 fsg->residue -= reply;
2275 } // Otherwise it's already set
2276
2277 return 0;
2278}
2279
2280
2281/*-------------------------------------------------------------------------*/
2282
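/*
 * Validate a Command Block Wrapper.  A CBW must be exactly 31 bytes
 * long and carry the right signature; after an invalid one we wedge
 * the bulk-in endpoint and ignore bulk-out data until the next reset.
 * A valid CBW must also be meaningful: an in-range LUN, no undefined
 * flag bits, and a CDB length within MAX_COMMAND_SIZE.
 */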
2283static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2284{
2285 struct usb_request *req = bh->outreq;
2286 struct fsg_bulk_cb_wrap *cbw = req->buf;
2287
2288 /* Was this a real packet? Should it be ignored? */
2289 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2290 return -EINVAL;
2291
2292 /* Is the CBW valid? */
2293 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2294 cbw->Signature != cpu_to_le32(
2295 USB_BULK_CB_SIG)) {
2296 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2297 req->actual,
2298 le32_to_cpu(cbw->Signature));
2299
2300 /* The Bulk-only spec says we MUST stall the IN endpoint
2301 * (6.6.1), so it's unavoidable. It also says we must
2302 * retain this state until the next reset, but there's
2303 * no way to tell the controller driver it should ignore
2304 * Clear-Feature(HALT) requests.
2305 *
2306 * We aren't required to halt the OUT endpoint; instead
2307 * we can simply accept and discard any data received
2308 * until the next reset. */
2309 wedge_bulk_in_endpoint(fsg);
2310 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2311 return -EINVAL;
2312 }
2313
2314 /* Is the CBW meaningful? */
2315 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2316 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2317 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2318 "cmdlen %u\n",
2319 cbw->Lun, cbw->Flags, cbw->Length);
2320
2321 /* We can do anything we want here, so let's stall the
2322 * bulk pipes if we are allowed to. */
2323 if (mod_data.can_stall) {
2324 fsg_set_halt(fsg, fsg->bulk_out);
2325 halt_bulk_in_endpoint(fsg);
2326 }
2327 return -EINVAL;
2328 }
2329
2330 /* Save the command for later */
2331 fsg->cmnd_size = cbw->Length;
2332 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2333 if (cbw->Flags & USB_BULK_IN_FLAG)
2334 fsg->data_dir = DATA_DIR_TO_HOST;
2335 else
2336 fsg->data_dir = DATA_DIR_FROM_HOST;
2337 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2338 if (fsg->data_size == 0)
2339 fsg->data_dir = DATA_DIR_NONE;
2340 fsg->lun = cbw->Lun;
2341 fsg->tag = cbw->Tag;
2342 return 0;
2343}
2344
2345
2346static int get_next_command(struct fsg_dev *fsg)
2347{
2348 struct fsg_buffhd *bh;
2349 int rc = 0;
2350
2351 /* Wait for the next buffer to become available */
2352 bh = fsg->next_buffhd_to_fill;
2353 while (bh->state != BUF_STATE_EMPTY) {
2354 rc = sleep_thread(fsg);
2355 if (rc)
2356 return rc;
2357 }
2358
2359 /* Queue a request to read a Bulk-only CBW */
2360 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2361 bh->outreq->short_not_ok = 1;
2362 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2363 &bh->outreq_busy, &bh->state);
2364
2365 /* We will drain the buffer in software, which means we
2366 * can reuse it for the next filling. No need to advance
2367 * next_buffhd_to_fill. */
2368
2369 /* Wait for the CBW to arrive */
2370 while (bh->state != BUF_STATE_FULL) {
2371 rc = sleep_thread(fsg);
2372 if (rc)
2373 return rc;
2374 	}
2375 smp_rmb();
2376 rc = received_cbw(fsg, bh);
2377 bh->state = BUF_STATE_EMPTY;
2378
2379 return rc;
2380}
2381
2382
2383/*-------------------------------------------------------------------------*/
2384
2385static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2386 const struct usb_endpoint_descriptor *d)
2387{
2388 int rc;
2389
2390 ep->driver_data = fsg;
2391 rc = usb_ep_enable(ep, d);
2392 if (rc)
2393 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2394 return rc;
2395}
2396
2397static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2398 struct usb_request **preq)
2399{
2400 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2401 if (*preq)
2402 return 0;
2403 ERROR(fsg, "can't allocate request for %s\n", ep->name);
2404 return -ENOMEM;
2405}
2406
2407/*
2408 * Reset interface setting and re-init endpoint state (toggle etc).
2409 * Call with altsetting < 0 to disable the interface. The only other
2410 * available altsetting is 0, which enables the interface.
2411 */
2412static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2413{
2414 int rc = 0;
2415 int i;
2416 const struct usb_endpoint_descriptor *d;
2417
2418 if (fsg->running)
2419 DBG(fsg, "reset interface\n");
2420
2421reset:
2422 /* Deallocate the requests */
2423 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2424 struct fsg_buffhd *bh = &fsg->buffhds[i];
2425
2426 if (bh->inreq) {
2427 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2428 bh->inreq = NULL;
2429 }
2430 if (bh->outreq) {
2431 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2432 bh->outreq = NULL;
2433 }
2434 }
2435
2436 /* Disable the endpoints */
2437 if (fsg->bulk_in_enabled) {
2438 usb_ep_disable(fsg->bulk_in);
2439 fsg->bulk_in_enabled = 0;
2440 }
2441 if (fsg->bulk_out_enabled) {
2442 usb_ep_disable(fsg->bulk_out);
2443 fsg->bulk_out_enabled = 0;
2444 }
2445
2446 fsg->running = 0;
2447 if (altsetting < 0 || rc != 0)
2448 return rc;
2449
2450 DBG(fsg, "set interface %d\n", altsetting);
2451
2452 /* Enable the endpoints */
2453 d = fsg_ep_desc(fsg->gadget,
2454 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2455 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2456 goto reset;
2457 fsg->bulk_in_enabled = 1;
2458
2459 d = fsg_ep_desc(fsg->gadget,
2460 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2461 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2462 goto reset;
2463 fsg->bulk_out_enabled = 1;
2464 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2465 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2466
2467 /* Allocate the requests */
2468 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2469 struct fsg_buffhd *bh = &fsg->buffhds[i];
2470
2471 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2472 goto reset;
2473 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
2474 goto reset;
2475 bh->inreq->buf = bh->outreq->buf = bh->buf;
2476 bh->inreq->context = bh->outreq->context = bh;
2477 bh->inreq->complete = bulk_in_complete;
2478 bh->outreq->complete = bulk_out_complete;
2479 }
2480
2481 fsg->running = 1;
2482 for (i = 0; i < fsg->nluns; ++i)
2483 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2484 return rc;
2485}
2486
2487
2488/*
2489 * Change our operational configuration. This code must agree with the code
2490 * that returns config descriptors, and with interface altsetting code.
2491 *
2492 * It's also responsible for power management interactions. Some
2493 * configurations might not work with our current power sources.
2494 * For now we just assume the gadget is always self-powered.
2495 */
2496static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2497{
2498 int rc = 0;
2499
2500 /* Disable the single interface */
2501 if (fsg->config != 0) {
2502 DBG(fsg, "reset config\n");
2503 fsg->config = 0;
2504 rc = do_set_interface(fsg, -1);
2505 }
2506
2507 /* Enable the interface */
2508 if (new_config != 0) {
2509 fsg->config = new_config;
2510 if ((rc = do_set_interface(fsg, 0)) != 0)
2511 fsg->config = 0; // Reset on errors
2512 else {
2513 char *speed;
2514
2515 switch (fsg->gadget->speed) {
2516 case USB_SPEED_LOW: speed = "low"; break;
2517 case USB_SPEED_FULL: speed = "full"; break;
2518 case USB_SPEED_HIGH: speed = "high"; break;
2519 default: speed = "?"; break;
2520 }
2521 INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
2522 }
2523 }
2524 return rc;
2525}
2526
2527
2528/*-------------------------------------------------------------------------*/
2529
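/*
 * Handle an exception (reset, bulk-out abort, configuration change,
 * signal, ...): cancel all outstanding transfers, wait until they have
 * completed, flush the endpoint FIFOs, reset the buffer and SCSI
 * state, and then carry out whatever the particular exception needs.
 */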
2530static void handle_exception(struct fsg_dev *fsg)
2531{
2532 siginfo_t info;
2533 int sig;
2534 int i;
2535 int num_active;
2536 struct fsg_buffhd *bh;
2537 enum fsg_state old_state;
2538 u8 new_config;
2539 struct fsg_lun *curlun;
2540 unsigned int exception_req_tag;
2541 int rc;
2542
2543 /* Clear the existing signals. Anything but SIGUSR1 is converted
2544 * into a high-priority EXIT exception. */
2545 for (;;) {
2546 sig = dequeue_signal_lock(current, &current->blocked, &info);
2547 if (!sig)
2548 break;
2549 if (sig != SIGUSR1) {
2550 if (fsg->state < FSG_STATE_EXIT)
2551 DBG(fsg, "Main thread exiting on signal\n");
2552 raise_exception(fsg, FSG_STATE_EXIT);
2553 }
2554 }
2555
2556 /* Cancel all the pending transfers */
2557 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2558 bh = &fsg->buffhds[i];
2559 if (bh->inreq_busy)
2560 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2561 if (bh->outreq_busy)
2562 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2563 }
2564
2565 /* Wait until everything is idle */
2566 for (;;) {
2567		num_active = 0;
2568 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2569 bh = &fsg->buffhds[i];
2570 num_active += bh->inreq_busy + bh->outreq_busy;
2571 }
2572 if (num_active == 0)
2573 break;
2574 if (sleep_thread(fsg))
2575 return;
2576 }
2577
2578 /* Clear out the controller's fifos */
2579 if (fsg->bulk_in_enabled)
2580 usb_ep_fifo_flush(fsg->bulk_in);
2581 if (fsg->bulk_out_enabled)
2582 usb_ep_fifo_flush(fsg->bulk_out);
2583
2584 /* Reset the I/O buffer states and pointers, the SCSI
2585 * state, and the exception. Then invoke the handler. */
2586 spin_lock_irq(&fsg->lock);
2587
2588 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2589 bh = &fsg->buffhds[i];
2590 bh->state = BUF_STATE_EMPTY;
2591 }
2592 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
2593 &fsg->buffhds[0];
2594
2595 exception_req_tag = fsg->exception_req_tag;
2596 new_config = fsg->new_config;
2597 old_state = fsg->state;
2598
2599 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2600 fsg->state = FSG_STATE_STATUS_PHASE;
2601 else {
2602 for (i = 0; i < fsg->nluns; ++i) {
2603 curlun = &fsg->luns[i];
2604 curlun->prevent_medium_removal = 0;
2605 curlun->sense_data = curlun->unit_attention_data =
2606 SS_NO_SENSE;
2607 curlun->sense_data_info = 0;
2608 curlun->info_valid = 0;
2609 }
2610 fsg->state = FSG_STATE_IDLE;
2611 }
2612 spin_unlock_irq(&fsg->lock);
2613
2614 /* Carry out any extra actions required for the exception */
2615 switch (old_state) {
2616 default:
2617 break;
2618
2619 case FSG_STATE_ABORT_BULK_OUT:
2620 send_status(fsg);
2621 spin_lock_irq(&fsg->lock);
2622 if (fsg->state == FSG_STATE_STATUS_PHASE)
2623 fsg->state = FSG_STATE_IDLE;
2624 spin_unlock_irq(&fsg->lock);
2625 break;
2626
2627 case FSG_STATE_RESET:
2628 /* In case we were forced against our will to halt a
2629 * bulk endpoint, clear the halt now. (The SuperH UDC
2630 * requires this.) */
2631 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2632 usb_ep_clear_halt(fsg->bulk_in);
2633
2634 if (fsg->ep0_req_tag == exception_req_tag)
2635 ep0_queue(fsg); // Complete the status stage
2636
2637 /* Technically this should go here, but it would only be
2638 * a waste of time. Ditto for the INTERFACE_CHANGE and
2639 * CONFIG_CHANGE cases. */
2640 // for (i = 0; i < fsg->nluns; ++i)
2641 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2642 break;
2643
2644 case FSG_STATE_INTERFACE_CHANGE:
2645 rc = do_set_interface(fsg, 0);
2646 if (fsg->ep0_req_tag != exception_req_tag)
2647 break;
2648 if (rc != 0) // STALL on errors
2649 fsg_set_halt(fsg, fsg->ep0);
2650 else // Complete the status stage
2651 ep0_queue(fsg);
2652 break;
2653
2654 case FSG_STATE_CONFIG_CHANGE:
2655 rc = do_set_config(fsg, new_config);
2656 if (fsg->ep0_req_tag != exception_req_tag)
2657 break;
2658 if (rc != 0) // STALL on errors
2659 fsg_set_halt(fsg, fsg->ep0);
2660 else // Complete the status stage
2661 ep0_queue(fsg);
2662 break;
2663
2664 case FSG_STATE_DISCONNECT:
2665 for (i = 0; i < fsg->nluns; ++i)
2666 fsg_lun_fsync_sub(fsg->luns + i);
2667 do_set_config(fsg, 0); // Unconfigured state
2668 break;
2669
2670 case FSG_STATE_EXIT:
2671 case FSG_STATE_TERMINATED:
2672 do_set_config(fsg, 0); // Free resources
2673 spin_lock_irq(&fsg->lock);
2674 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
2675 spin_unlock_irq(&fsg->lock);
2676 break;
2677 }
2678}
2679
2680
2681/*-------------------------------------------------------------------------*/
2682
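/*
 * The main kernel thread.  Each pass through the loop reads a CBW,
 * runs the SCSI command and its data phase, and sends back the CSW,
 * checking for pending exceptions between every phase.
 */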
2683static int fsg_main_thread(void *fsg_)
2684{
2685 struct fsg_dev *fsg = fsg_;
2686
2687 /* Allow the thread to be killed by a signal, but set the signal mask
2688 * to block everything but INT, TERM, KILL, and USR1. */
2689 allow_signal(SIGINT);
2690 allow_signal(SIGTERM);
2691 allow_signal(SIGKILL);
2692 allow_signal(SIGUSR1);
2693
2694 /* Allow the thread to be frozen */
2695 set_freezable();
2696
2697 /* Arrange for userspace references to be interpreted as kernel
2698 * pointers. That way we can pass a kernel pointer to a routine
2699 * that expects a __user pointer and it will work okay. */
2700 set_fs(get_ds());
2701
2702 /* The main loop */
2703 while (fsg->state != FSG_STATE_TERMINATED) {
2704 if (exception_in_progress(fsg) || signal_pending(current)) {
2705 handle_exception(fsg);
2706 continue;
2707 }
2708
2709 if (!fsg->running) {
2710 sleep_thread(fsg);
2711 continue;
2712 }
2713
2714 if (get_next_command(fsg))
2715 continue;
2716
2717 spin_lock_irq(&fsg->lock);
2718 if (!exception_in_progress(fsg))
2719 fsg->state = FSG_STATE_DATA_PHASE;
2720 spin_unlock_irq(&fsg->lock);
2721
2722 if (do_scsi_command(fsg) || finish_reply(fsg))
2723 continue;
2724
2725 spin_lock_irq(&fsg->lock);
2726 if (!exception_in_progress(fsg))
2727 fsg->state = FSG_STATE_STATUS_PHASE;
2728 spin_unlock_irq(&fsg->lock);
2729
2730 if (send_status(fsg))
2731 continue;
2732
2733 spin_lock_irq(&fsg->lock);
2734 if (!exception_in_progress(fsg))
2735 fsg->state = FSG_STATE_IDLE;
2736 spin_unlock_irq(&fsg->lock);
2737 }
2738
2739 spin_lock_irq(&fsg->lock);
2740 fsg->thread_task = NULL;
2741 spin_unlock_irq(&fsg->lock);
2742
2743 /* If we are exiting because of a signal, unregister the
2744 * gadget driver. */
2745 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
2746 usb_gadget_unregister_driver(&fsg_driver);
2747
2748 /* Let the unbind and cleanup routines know the thread has exited */
2749 complete_and_exit(&fsg->thread_notifier, 0);
2750}
2751
2752
2753/*-------------------------------------------------------------------------*/
2754
2755
2756/* The write permissions and store_xxx pointers are set in fsg_bind() */
2757static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
2758static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
2759
2760
2761/*-------------------------------------------------------------------------*/
2762
2763static void fsg_release(struct kref *ref)
2764{
2765 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
2766
2767 kfree(fsg->luns);
2768 kfree(fsg);
2769}
2770
2771static void lun_release(struct device *dev)
2772{
2773 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2774 struct fsg_dev *fsg =
2775 container_of(filesem, struct fsg_dev, filesem);
2776
2777 kref_put(&fsg->ref, fsg_release);
2778}
2779
2780static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
2781{
2782 struct fsg_dev *fsg = get_gadget_data(gadget);
2783 int i;
2784 struct fsg_lun *curlun;
2785 struct usb_request *req = fsg->ep0req;
2786
2787 DBG(fsg, "unbind\n");
2788 clear_bit(REGISTERED, &fsg->atomic_bitflags);
2789
2790 /* Unregister the sysfs attribute files and the LUNs */
2791 for (i = 0; i < fsg->nluns; ++i) {
2792 curlun = &fsg->luns[i];
2793 if (curlun->registered) {
2794 device_remove_file(&curlun->dev, &dev_attr_ro);
2795 device_remove_file(&curlun->dev, &dev_attr_file);
2796 fsg_lun_close(curlun);
2797 device_unregister(&curlun->dev);
2798 curlun->registered = 0;
2799 }
2800 }
2801
2802 /* If the thread isn't already dead, tell it to exit now */
2803 if (fsg->state != FSG_STATE_TERMINATED) {
2804 raise_exception(fsg, FSG_STATE_EXIT);
2805 wait_for_completion(&fsg->thread_notifier);
2806
2807 /* The cleanup routine waits for this completion also */
2808 complete(&fsg->thread_notifier);
2809 }
2810
2811 /* Free the data buffers */
2812 for (i = 0; i < FSG_NUM_BUFFERS; ++i)
2813 kfree(fsg->buffhds[i].buf);
2814
2815 /* Free the request and buffer for endpoint 0 */
2816 if (req) {
2817 kfree(req->buf);
2818 usb_ep_free_request(fsg->ep0, req);
2819 }
2820
2821 set_gadget_data(gadget, NULL);
2822}
2823
2824
2825static int __init check_parameters(struct fsg_dev *fsg)
2826{
2827 int gcnum;
2828
2829 /* Some peripheral controllers are known not to be able to
2830 * halt bulk endpoints correctly. If one of them is present,
2831 * disable stalls.
2832 */
2833 if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
2834 mod_data.can_stall = 0;
2835
2836 if (mod_data.release == 0xffff) { // Parameter wasn't set
2837 /* The sa1100 controller is not supported */
2838 if (gadget_is_sa1100(fsg->gadget))
2839 gcnum = -1;
2840 else
2841 gcnum = usb_gadget_controller_number(fsg->gadget);
2842 if (gcnum >= 0)
2843 mod_data.release = 0x0300 + gcnum;
2844 else {
2845 WARNING(fsg, "controller '%s' not recognized\n",
2846 fsg->gadget->name);
2847 mod_data.release = 0x0399;
2848 }
2849 }
2850
2851 return 0;
2852}
2853
2854
2855static int __init fsg_bind(struct usb_gadget *gadget)
2856{
2857 struct fsg_dev *fsg = the_fsg;
2858 int rc;
2859 int i;
2860 struct fsg_lun *curlun;
2861 struct usb_ep *ep;
2862 struct usb_request *req;
2863 char *pathbuf, *p;
2864
2865 fsg->gadget = gadget;
2866 set_gadget_data(gadget, fsg);
2867 fsg->ep0 = gadget->ep0;
2868 fsg->ep0->driver_data = fsg;
2869
2870 if ((rc = check_parameters(fsg)) != 0)
2871 goto out;
2872
2873 if (mod_data.removable) { // Enable the store_xxx attributes
2874 dev_attr_file.attr.mode = 0644;
2875 dev_attr_file.store = fsg_store_file;
2876 if (!mod_data.cdrom) {
2877 dev_attr_ro.attr.mode = 0644;
2878 dev_attr_ro.store = fsg_store_ro;
2879 }
2880 }
2881
2882 /* Find out how many LUNs there should be */
2883 i = mod_data.nluns;
2884 if (i == 0)
2885 i = max(mod_data.num_filenames, 1u);
2886 if (i > FSG_MAX_LUNS) {
2887 ERROR(fsg, "invalid number of LUNs: %d\n", i);
2888 rc = -EINVAL;
2889 goto out;
2890 }
2891
2892 /* Create the LUNs, open their backing files, and register the
2893 * LUN devices in sysfs. */
2894 fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
2895 if (!fsg->luns) {
2896 rc = -ENOMEM;
2897 goto out;
2898 }
2899 fsg->nluns = i;
2900
2901 for (i = 0; i < fsg->nluns; ++i) {
2902 curlun = &fsg->luns[i];
2903 curlun->cdrom = !!mod_data.cdrom;
2904 curlun->ro = mod_data.cdrom || mod_data.ro[i];
2905 curlun->initially_ro = curlun->ro;
2906 curlun->removable = mod_data.removable;
2907 curlun->dev.release = lun_release;
2908 curlun->dev.parent = &gadget->dev;
2909 curlun->dev.driver = &fsg_driver.driver;
2910 dev_set_drvdata(&curlun->dev, &fsg->filesem);
2911 dev_set_name(&curlun->dev,"%s-lun%d",
2912 dev_name(&gadget->dev), i);
2913
2914 if ((rc = device_register(&curlun->dev)) != 0) {
2915 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
2916 goto out;
2917 }
2918 if ((rc = device_create_file(&curlun->dev,
2919 &dev_attr_ro)) != 0 ||
2920 (rc = device_create_file(&curlun->dev,
2921 &dev_attr_file)) != 0) {
2922 device_unregister(&curlun->dev);
2923 goto out;
2924 }
2925 curlun->registered = 1;
2926 kref_get(&fsg->ref);
2927
2928 if (mod_data.file[i] && *mod_data.file[i]) {
2929 if ((rc = fsg_lun_open(curlun,
2930 mod_data.file[i])) != 0)
2931 goto out;
2932 } else if (!mod_data.removable) {
2933 ERROR(fsg, "no file given for LUN%d\n", i);
2934 rc = -EINVAL;
2935 goto out;
2936 }
2937 }
2938
2939 /* Find all the endpoints we will use */
2940 usb_ep_autoconfig_reset(gadget);
2941 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2942 if (!ep)
2943 goto autoconf_fail;
2944 ep->driver_data = fsg; // claim the endpoint
2945 fsg->bulk_in = ep;
2946
2947 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2948 if (!ep)
2949 goto autoconf_fail;
2950 ep->driver_data = fsg; // claim the endpoint
2951 fsg->bulk_out = ep;
2952
2953 /* Fix up the descriptors */
2954 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
2955 device_desc.bcdDevice = cpu_to_le16(mod_data.release);
2956
2957	if (gadget_is_dualspeed(gadget)) {
2958 /* Assume ep0 uses the same maxpacket value for both speeds */
2959 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
2960
2961 /* Assume endpoint addresses are the same for both speeds */
2962 fsg_hs_bulk_in_desc.bEndpointAddress =
2963 fsg_fs_bulk_in_desc.bEndpointAddress;
2964 fsg_hs_bulk_out_desc.bEndpointAddress =
2965 fsg_fs_bulk_out_desc.bEndpointAddress;
2966 }
2967
2968 if (gadget_is_otg(gadget))
2969 fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
2970
2971 rc = -ENOMEM;
2972
2973 /* Allocate the request and buffer for endpoint 0 */
2974 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
2975 if (!req)
2976 goto out;
2977 req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
2978 if (!req->buf)
2979 goto out;
2980 req->complete = ep0_complete;
2981
2982 /* Allocate the data buffers */
2983 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2984 struct fsg_buffhd *bh = &fsg->buffhds[i];
2985
2986 /* Allocate for the bulk-in endpoint. We assume that
2987 * the buffer will also work with the bulk-out (and
2988 * interrupt-in) endpoint. */
2989		bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2990 if (!bh->buf)
2991 goto out;
2992 bh->next = bh + 1;
2993 }
2994 fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
2995
2996 /* This should reflect the actual gadget power source */
2997 usb_gadget_set_selfpowered(gadget);
2998
2999 snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
3000 "%s %s with %s",
3001 init_utsname()->sysname, init_utsname()->release,
3002 gadget->name);
3003
3004 /* On a real device, serial[] would be loaded from permanent
3005 * storage. We just encode it from the driver version string. */
3006 for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
3007 unsigned char c = DRIVER_VERSION[i / 2];
3008
3009 if (!c)
3010 break;
3011 sprintf(&fsg_string_serial[i], "%02X", c);
3012 }
3013
3014 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
3015 "file-storage-gadget");
3016 if (IS_ERR(fsg->thread_task)) {
3017 rc = PTR_ERR(fsg->thread_task);
3018 goto out;
3019 }
3020
3021 INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
3022 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
3023
3024 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
3025 for (i = 0; i < fsg->nluns; ++i) {
3026 curlun = &fsg->luns[i];
3027 if (fsg_lun_is_open(curlun)) {
3028 p = NULL;
3029 if (pathbuf) {
3030 p = d_path(&curlun->filp->f_path,
3031 pathbuf, PATH_MAX);
3032 if (IS_ERR(p))
3033 p = NULL;
3034 }
3035 LINFO(curlun, "ro=%d, file: %s\n",
3036 curlun->ro, (p ? p : "(error)"));
3037 }
3038 }
3039 kfree(pathbuf);
3040
3041 DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
3042 mod_data.removable, mod_data.can_stall,
3043			mod_data.cdrom, FSG_BUFLEN);
3044 DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
3045
3046 set_bit(REGISTERED, &fsg->atomic_bitflags);
3047
3048 /* Tell the thread to start working */
3049 wake_up_process(fsg->thread_task);
3050 return 0;
3051
3052autoconf_fail:
3053 ERROR(fsg, "unable to autoconfigure all endpoints\n");
3054 rc = -ENOTSUPP;
3055
3056out:
3057 fsg->state = FSG_STATE_TERMINATED; // The thread is dead
3058 fsg_unbind(gadget);
3059 complete(&fsg->thread_notifier);
3060 return rc;
3061}
3062
3063
3064/*-------------------------------------------------------------------------*/
3065
3066static struct usb_gadget_driver fsg_driver = {
3067#ifdef CONFIG_USB_GADGET_DUALSPEED
3068 .speed = USB_SPEED_HIGH,
3069#else
3070 .speed = USB_SPEED_FULL,
3071#endif
3072 .function = (char *) fsg_string_product,
3073 .bind = fsg_bind,
3074 .unbind = fsg_unbind,
3075 .disconnect = fsg_disconnect,
3076 .setup = fsg_setup,
3077
3078 .driver = {
3079 .name = DRIVER_NAME,
3080 .owner = THIS_MODULE,
3081 // .release = ...
3082 // .suspend = ...
3083 // .resume = ...
3084 },
3085};
3086
3087
3088static int __init fsg_alloc(void)
3089{
3090 struct fsg_dev *fsg;
3091
3092 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
3093 if (!fsg)
3094 return -ENOMEM;
3095 spin_lock_init(&fsg->lock);
3096 init_rwsem(&fsg->filesem);
3097 kref_init(&fsg->ref);
3098 init_completion(&fsg->thread_notifier);
3099
3100 the_fsg = fsg;
3101 return 0;
3102}
3103
3104
3105static int __init fsg_init(void)
3106{
3107 int rc;
3108 struct fsg_dev *fsg;
3109
3110 if ((rc = fsg_alloc()) != 0)
3111 return rc;
3112 fsg = the_fsg;
3113 if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0)
3114 kref_put(&fsg->ref, fsg_release);
3115 return rc;
3116}
3117module_init(fsg_init);
3118
3119
3120static void __exit fsg_cleanup(void)
3121{
3122 struct fsg_dev *fsg = the_fsg;
3123
3124 /* Unregister the driver iff the thread hasn't already done so */
3125 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3126 usb_gadget_unregister_driver(&fsg_driver);
3127
3128 /* Wait for the thread to finish up */
3129 wait_for_completion(&fsg->thread_notifier);
3130
3131 kref_put(&fsg->ref, fsg_release);
3132}
3133module_exit(fsg_cleanup);