USB: g_mass_storage: fsg_config added & module params handling changed
drivers/usb/gadget/f_mass_storage.c
1/*
2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
3 *
4 * Copyright (C) 2003-2008 Alan Stern
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39/*
40 * The File-backed Storage Gadget acts as a USB Mass Storage device,
41 * appearing to the host as a disk drive or as a CD-ROM drive. In addition
42 * to providing an example of a genuinely useful gadget driver for a USB
43 * device, it also illustrates a technique of double-buffering for increased
44 * throughput. Last but not least, it gives an easy way to probe the
45 * behavior of the Mass Storage drivers in a USB host.
46 *
47 * Backing storage is provided by a regular file or a block device, specified
48 * by the "file" module parameter. Access can be limited to read-only by
49 * setting the optional "ro" module parameter. (For CD-ROM emulation,
50 * access is always read-only.) The gadget will indicate that it has
51 * removable media if the optional "removable" module parameter is set.
52 *
53 * There is support for multiple logical units (LUNs), each of which has
54 * its own backing file. The number of LUNs can be set using the optional
55 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
56 * files are specified using comma-separated lists for "file" and "ro".
57 * The default number of LUNs is taken from the number of "file" elements;
58 * it is 1 if "file" is not given. If "removable" is not set then a backing
59 * file must be specified for each LUN. If it is set, then an unspecified
60 * or empty backing filename means the LUN's medium is not loaded. Ideally
61 * each LUN would be settable independently as a disk drive or a CD-ROM
62 * drive, but currently all LUNs have to be the same type. The CD-ROM
63 * emulation includes a single data track and no audio tracks; hence there
64 * need be only one backing file per LUN. Note also that the CD-ROM block
65 * length is set to 512 rather than the more common value 2048.
66 *
67 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
68 * needed (an interrupt-out endpoint is also needed for CBI). The memory
69 * requirement amounts to two 16K buffers, size configurable by a parameter.
70 * Support is included for both full-speed and high-speed operation.
71 *
72 * Note that the driver is slightly non-portable in that it assumes a
73 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
74 * interrupt-in endpoints. With most device controllers this isn't an
75 * issue, but there may be some with hardware restrictions that prevent
76 * a buffer from being used by more than one endpoint.
77 *
78 * Module options:
79 *
80 * file=filename[,filename...]
81 * Required if "removable" is not set, names of
82 * the files or block devices used for
83 * backing storage
84 * ro=b[,b...] Default false, booleans for read-only access
85 * removable Default false, boolean for removable media
86 * luns=N Default N = number of filenames, number of
87 * LUNs to support
88 * stall Default determined according to the type of
89 * USB device controller (usually true),
90 * boolean to permit the driver to halt
91 * bulk endpoints
92 * cdrom Default false, boolean for whether to emulate
93 * a CD-ROM drive
94 *
95 * The pathnames of the backing files and the ro settings are available in
96 * the attribute files "file" and "ro" in the lun<n> subdirectory of the
97 * gadget's sysfs directory. If the "removable" option is set, writing to
98 * these files will simulate ejecting/loading the medium (writing an empty
99 * line means eject) and adjusting a write-enable tab. Changes to the ro
100 * setting are not allowed when the medium is loaded or if CD-ROM emulation
101 * is being used.
102 *
103 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
104 * The driver's SCSI command interface was based on the "Information
105 * technology - Small Computer System Interface - 2" document from
106 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
107 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
108 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
109 * "Universal Serial Bus Mass Storage Class UFI Command Specification"
110 * document, Revision 1.0, December 14, 1998, available at
111 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
112 */
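/*
 * Example only (illustrative, not taken from this file): with the module
 * parameters described above, a two-LUN read-only setup backed by two
 * image files could be loaded roughly like this; the paths and values
 * are hypothetical:
 *
 *	modprobe g_mass_storage file=/root/disk1.img,/root/disk2.img \
 *		ro=1,1 removable=1 stall=0
 *
 * In this file the same information is carried in struct fsg_config
 * (defined below) rather than being read directly from module parameters.
 */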
113
114
115/*
116 * Driver Design
117 *
118 * The FSG driver is fairly straightforward. There is a main kernel
119 * thread that handles most of the work. Interrupt routines field
120 * callbacks from the controller driver: bulk- and interrupt-request
121 * completion notifications, endpoint-0 events, and disconnect events.
122 * Completion events are passed to the main thread by wakeup calls. Many
123 * ep0 requests are handled at interrupt time, but SetInterface,
124 * SetConfiguration, and device reset requests are forwarded to the
125 * thread in the form of "exceptions" using SIGUSR1 signals (since they
126 * should interrupt any ongoing file I/O operations).
127 *
128 * The thread's main routine implements the standard command/data/status
129 * parts of a SCSI interaction. It and its subroutines are full of tests
130 * for pending signals/exceptions -- all this polling is necessary since
131 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
132 * indication that the driver really wants to be running in userspace.)
133 * An important point is that so long as the thread is alive it keeps an
134 * open reference to the backing file. This will prevent unmounting
135 * the backing file's underlying filesystem and could cause problems
136 * during system shutdown, for example. To prevent such problems, the
137 * thread catches INT, TERM, and KILL signals and converts them into
138 * an EXIT exception.
139 *
140 * In normal operation the main thread is started during the gadget's
141 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
142 * exit when it receives a signal, and there's no point leaving the
143 * gadget running when the thread is dead. So just before the thread
144 * exits, it deregisters the gadget driver. This makes things a little
145 * tricky: The driver is deregistered at two places, and the exiting
146 * thread can indirectly call fsg_unbind() which in turn can tell the
147 * thread to exit. The first problem is resolved through the use of the
148 * REGISTERED atomic bitflag; the driver will only be deregistered once.
149 * The second problem is resolved by having fsg_unbind() check
150 * fsg->state; it won't try to stop the thread if the state is already
151 * FSG_STATE_TERMINATED.
152 *
153 * To provide maximum throughput, the driver uses a circular pipeline of
154 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
155 * arbitrarily long; in practice the benefits don't justify having more
156 * than 2 stages (i.e., double buffering). But it helps to think of the
157 * pipeline as being a long one. Each buffer head contains a bulk-in and
158 * a bulk-out request pointer (since the buffer can be used for both
159 * output and input -- directions always are given from the host's
160 * point of view) as well as a pointer to the buffer and various state
161 * variables.
162 *
163 * Use of the pipeline follows a simple protocol. There is a variable
164 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
165 * At any time that buffer head may still be in use from an earlier
166 * request, so each buffer head has a state variable indicating whether
167 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
168 * buffer head to be EMPTY, filling the buffer either by file I/O or by
169 * USB I/O (during which the buffer head is BUSY), and marking the buffer
170 * head FULL when the I/O is complete. Then the buffer will be emptied
171 * (again possibly by USB I/O, during which it is marked BUSY) and
172 * finally marked EMPTY again (possibly by a completion routine).
173 *
174 * A module parameter tells the driver to avoid stalling the bulk
175 * endpoints wherever the transport specification allows. This is
176 * necessary for some UDCs like the SuperH, which cannot reliably clear a
177 * halt on a bulk endpoint. However, under certain circumstances the
178 * Bulk-only specification requires a stall. In such cases the driver
179 * will halt the endpoint and set a flag indicating that it should clear
180 * the halt in software during the next device reset. Hopefully this
181 * will permit everything to work correctly. Furthermore, although the
182 * specification allows the bulk-out endpoint to halt when the host sends
183 * too much data, implementing this would cause an unavoidable race.
184 * The driver will always use the "no-stall" approach for OUT transfers.
185 *
186 * One subtle point concerns sending status-stage responses for ep0
187 * requests. Some of these requests, such as device reset, can involve
188 * interrupting an ongoing file I/O operation, which might take an
189 * arbitrarily long time. During that delay the host might give up on
190 * the original ep0 request and issue a new one. When that happens the
191 * driver should not notify the host about completion of the original
192 * request, as the host will no longer be waiting for it. So the driver
193 * assigns to each ep0 request a unique tag, and it keeps track of the
194 * tag value of the request associated with a long-running exception
195 * (device-reset, interface-change, or configuration-change). When the
196 * exception handler is finished, the status-stage response is submitted
197 * only if the current ep0 request tag is equal to the exception request
198 * tag. Thus only the most recently received ep0 request will get a
199 * status-stage response.
200 *
201 * Warning: This driver source file is too long. It ought to be split up
202 * into a header file plus about 3 separate .c files, to handle the details
203 * of the Gadget, USB Mass Storage, and SCSI protocols.
204 */
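/*
 * For reference, the buffer-head cycle described above can be sketched as
 * follows (informal; the states are the BUF_STATE_* values used below):
 *
 *	EMPTY --(start file or USB I/O)--> BUSY --(I/O completes)--> FULL
 *	FULL  --(start draining I/O)-----> BUSY --(I/O completes)--> EMPTY
 */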
205
206
207/* #define VERBOSE_DEBUG */
208/* #define DUMP_MSGS */
209
210
211#include <linux/blkdev.h>
212#include <linux/completion.h>
213#include <linux/dcache.h>
214#include <linux/delay.h>
215#include <linux/device.h>
216#include <linux/fcntl.h>
217#include <linux/file.h>
218#include <linux/fs.h>
219#include <linux/kref.h>
220#include <linux/kthread.h>
221#include <linux/limits.h>
222#include <linux/rwsem.h>
223#include <linux/slab.h>
224#include <linux/spinlock.h>
225#include <linux/string.h>
226#include <linux/freezer.h>
227#include <linux/utsname.h>
228
229#include <linux/usb/ch9.h>
230#include <linux/usb/gadget.h>
231
232#include "gadget_chips.h"
233
234
235
236/*-------------------------------------------------------------------------*/
237
238#define FSG_DRIVER_DESC "Mass Storage Function"
239#define FSG_DRIVER_VERSION "20 November 2008"
240
241static const char fsg_string_interface[] = "Mass Storage";
242
243
244#define FSG_NO_INTR_EP 1
245#define FSG_BUFFHD_STATIC_BUFFER 1
246#define FSG_NO_DEVICE_STRINGS 1
247#define FSG_NO_OTG 1
248
249
250#include "storage_common.c"
251
252
253/*-------------------------------------------------------------------------*/
254
255
256/* Data shared by all the FSG instances. */
257struct fsg_common {
258 struct usb_gadget *gadget;
259
260 /* filesem protects: backing files in use */
261 struct rw_semaphore filesem;
262
263 struct fsg_buffhd *next_buffhd_to_fill;
264 struct fsg_buffhd *next_buffhd_to_drain;
265 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
266
267 int cmnd_size;
268 u8 cmnd[MAX_COMMAND_SIZE];
269
270 unsigned int nluns;
271 unsigned int lun;
272 struct fsg_lun *luns;
273 struct fsg_lun *curlun;
274
275 unsigned int can_stall:1;
276 unsigned int free_storage_on_release:1;
277
278 /* Vendor (8 chars), product (16 chars), release (4
279 * hexadecimal digits) and NUL byte */
280 char inquiry_string[8 + 16 + 4 + 1];
281
282 struct kref ref;
283};
284
285
286struct fsg_config {
287 unsigned nluns;
288 struct fsg_lun_config {
289 const char *filename;
290 char ro;
291 char removable;
292 char cdrom;
293 } luns[FSG_MAX_LUNS];
294
295 const char *vendor_name; /* 8 characters or less */
296 const char *product_name; /* 16 characters or less */
297 u16 release;
298
299 char can_stall;
300};
301
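/*
 * Example only: a gadget wanting a single writable, removable disk LUN
 * might fill in a configuration like the following (the backing path is
 * hypothetical; real callers typically build this from module parameters):
 *
 *	static struct fsg_config example_cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename  = "/data/backing.img",
 *			.ro        = 0,
 *			.removable = 1,
 *			.cdrom     = 0,
 *		},
 *		.vendor_name  = "Linux",
 *		.product_name = "File-Stor Gadget",
 *		.release      = 0x0399,
 *		.can_stall    = 1,
 *	};
 */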
302
303struct fsg_dev {
304 struct usb_function function;
305 struct usb_composite_dev *cdev;
306 struct usb_gadget *gadget; /* Copy of cdev->gadget */
307 struct fsg_common *common;
308
309 u16 interface_number;
310
311 /* lock protects: state, all the req_busy's */
312 spinlock_t lock;
313
314 struct usb_ep *ep0; /* Copy of gadget->ep0 */
315 struct usb_request *ep0req; /* Copy of cdev->req */
316 unsigned int ep0_req_tag;
317 const char *ep0req_name;
318
319 unsigned int bulk_out_maxpacket;
320 enum fsg_state state; // For exception handling
321 unsigned int exception_req_tag;
322
323 u8 config, new_config;
324
325 unsigned int running : 1;
326 unsigned int bulk_in_enabled : 1;
327 unsigned int bulk_out_enabled : 1;
328 unsigned int phase_error : 1;
329 unsigned int short_packet_received : 1;
330 unsigned int bad_lun_okay : 1;
331 unsigned int can_stall : 1;
332
333 unsigned long atomic_bitflags;
334#define REGISTERED 0
335#define IGNORE_BULK_OUT 1
336
337 struct usb_ep *bulk_in;
338 struct usb_ep *bulk_out;
339
340 int thread_wakeup_needed;
341 struct completion thread_notifier;
342 struct task_struct *thread_task;
343
344 enum data_direction data_dir;
345 u32 data_size;
346 u32 data_size_from_cmnd;
347 u32 tag;
348 u32 residue;
349 u32 usb_amount_left;
350};
351
352
353static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
354{
355 return container_of(f, struct fsg_dev, function);
356}
357
358
359typedef void (*fsg_routine_t)(struct fsg_dev *);
360
361static int exception_in_progress(struct fsg_dev *fsg)
362{
363 return (fsg->state > FSG_STATE_IDLE);
364}
365
366/* Make bulk-out requests be divisible by the maxpacket size */
367static void set_bulk_out_req_length(struct fsg_dev *fsg,
368 struct fsg_buffhd *bh, unsigned int length)
369{
370 unsigned int rem;
371
372 bh->bulk_out_intended_length = length;
373 rem = length % fsg->bulk_out_maxpacket;
374 if (rem > 0)
375 length += fsg->bulk_out_maxpacket - rem;
376 bh->outreq->length = length;
377}
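/*
 * Worked example for the helper above: with bulk_out_maxpacket == 512, a
 * request for 600 bytes keeps bulk_out_intended_length == 600 but is
 * queued with outreq->length rounded up to 1024 (600 + 512 - 88).
 */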
378
379/*-------------------------------------------------------------------------*/
380
381static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
382{
383 const char *name;
384
385 if (ep == fsg->bulk_in)
386 name = "bulk-in";
387 else if (ep == fsg->bulk_out)
388 name = "bulk-out";
389 else
390 name = ep->name;
391 DBG(fsg, "%s set halt\n", name);
392 return usb_ep_set_halt(ep);
393}
394
395
396/*-------------------------------------------------------------------------*/
397
398/* These routines may be called in process context or in_irq */
399
400/* Caller must hold fsg->lock */
401static void wakeup_thread(struct fsg_dev *fsg)
402{
403 /* Tell the main thread that something has happened */
404 fsg->thread_wakeup_needed = 1;
405 if (fsg->thread_task)
406 wake_up_process(fsg->thread_task);
407}
408
409
410static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
411{
412 unsigned long flags;
413
414 /* Do nothing if a higher-priority exception is already in progress.
415 * If a lower-or-equal priority exception is in progress, preempt it
416 * and notify the main thread by sending it a signal. */
417 spin_lock_irqsave(&fsg->lock, flags);
418 if (fsg->state <= new_state) {
419 fsg->exception_req_tag = fsg->ep0_req_tag;
420 fsg->state = new_state;
421 if (fsg->thread_task)
422 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
423 fsg->thread_task);
424 }
425 spin_unlock_irqrestore(&fsg->lock, flags);
426}
427
428
429/*-------------------------------------------------------------------------*/
430
431static int ep0_queue(struct fsg_dev *fsg)
432{
433 int rc;
434
435 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
436 fsg->ep0->driver_data = fsg;
437 if (rc != 0 && rc != -ESHUTDOWN) {
438
439 /* We can't do much more than wait for a reset */
440 WARNING(fsg, "error in submission: %s --> %d\n",
441 fsg->ep0->name, rc);
442 }
443 return rc;
444}
445
446/*-------------------------------------------------------------------------*/
447
448/* Bulk and interrupt endpoint completion handlers.
449 * These always run in_irq. */
450
451static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
452{
453 struct fsg_dev *fsg = ep->driver_data;
454 struct fsg_buffhd *bh = req->context;
455
456 if (req->status || req->actual != req->length)
457 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
458 req->status, req->actual, req->length);
459 if (req->status == -ECONNRESET) // Request was cancelled
460 usb_ep_fifo_flush(ep);
461
462 /* Hold the lock while we update the request and buffer states */
463 smp_wmb();
464 spin_lock(&fsg->lock);
465 bh->inreq_busy = 0;
466 bh->state = BUF_STATE_EMPTY;
467 wakeup_thread(fsg);
468 spin_unlock(&fsg->lock);
469}
470
471static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
472{
473 struct fsg_dev *fsg = ep->driver_data;
474 struct fsg_buffhd *bh = req->context;
475
476 dump_msg(fsg, "bulk-out", req->buf, req->actual);
477 if (req->status || req->actual != bh->bulk_out_intended_length)
478 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
479 req->status, req->actual,
480 bh->bulk_out_intended_length);
481 if (req->status == -ECONNRESET) // Request was cancelled
482 usb_ep_fifo_flush(ep);
483
484 /* Hold the lock while we update the request and buffer states */
485 smp_wmb();
486 spin_lock(&fsg->lock);
487 bh->outreq_busy = 0;
488 bh->state = BUF_STATE_FULL;
489 wakeup_thread(fsg);
490 spin_unlock(&fsg->lock);
491}
492
493
494/*-------------------------------------------------------------------------*/
495
496/* Ep0 class-specific handlers. These always run in_irq. */
497
498static int fsg_setup(struct usb_function *f,
499 const struct usb_ctrlrequest *ctrl)
500{
501 struct fsg_dev *fsg = fsg_from_func(f);
502 struct usb_request *req = fsg->ep0req;
503 u16 w_index = le16_to_cpu(ctrl->wIndex);
504 u16 w_value = le16_to_cpu(ctrl->wValue);
505 u16 w_length = le16_to_cpu(ctrl->wLength);
506
507 if (!fsg->config)
508 return -EOPNOTSUPP;
509
510 switch (ctrl->bRequest) {
511
512 case USB_BULK_RESET_REQUEST:
513 if (ctrl->bRequestType !=
514 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
515 break;
516 if (w_index != fsg->interface_number || w_value != 0)
517 return -EDOM;
518
519 /* Raise an exception to stop the current operation
520 * and reinitialize our state. */
521 DBG(fsg, "bulk reset request\n");
522 raise_exception(fsg, FSG_STATE_RESET);
523 return DELAYED_STATUS;
524
525 case USB_BULK_GET_MAX_LUN_REQUEST:
526 if (ctrl->bRequestType !=
527 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
528 break;
529 if (w_index != fsg->interface_number || w_value != 0)
530 return -EDOM;
531 VDBG(fsg, "get max LUN\n");
532 *(u8 *) req->buf = fsg->common->nluns - 1;
533 return 1;
534 }
535
536 VDBG(fsg,
537 "unknown class-specific control req "
538 "%02x.%02x v%04x i%04x l%u\n",
539 ctrl->bRequestType, ctrl->bRequest,
540 le16_to_cpu(ctrl->wValue), w_index, w_length);
541 return -EOPNOTSUPP;
542}
543
544
545/*-------------------------------------------------------------------------*/
546
547/* All the following routines run in process context */
548
549
550/* Use this for bulk or interrupt transfers, not ep0 */
551static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
552 struct usb_request *req, int *pbusy,
553 enum fsg_buffer_state *state)
554{
555 int rc;
556
557 if (ep == fsg->bulk_in)
558 dump_msg(fsg, "bulk-in", req->buf, req->length);
559
560 spin_lock_irq(&fsg->lock);
561 *pbusy = 1;
562 *state = BUF_STATE_BUSY;
563 spin_unlock_irq(&fsg->lock);
564 rc = usb_ep_queue(ep, req, GFP_KERNEL);
565 if (rc != 0) {
566 *pbusy = 0;
567 *state = BUF_STATE_EMPTY;
568
569 /* We can't do much more than wait for a reset */
570
571 /* Note: currently the net2280 driver fails zero-length
572 * submissions if DMA is enabled. */
573 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
574 req->length == 0))
575 WARNING(fsg, "error in submission: %s --> %d\n",
576 ep->name, rc);
577 }
578}
579
580
581static int sleep_thread(struct fsg_dev *fsg)
582{
583 int rc = 0;
584
585 /* Wait until a signal arrives or we are woken up */
586 for (;;) {
587 try_to_freeze();
588 set_current_state(TASK_INTERRUPTIBLE);
589 if (signal_pending(current)) {
590 rc = -EINTR;
591 break;
592 }
593 if (fsg->thread_wakeup_needed)
594 break;
595 schedule();
596 }
597 __set_current_state(TASK_RUNNING);
598 fsg->thread_wakeup_needed = 0;
599 return rc;
600}
601
602
603/*-------------------------------------------------------------------------*/
604
605static int do_read(struct fsg_dev *fsg)
606{
607 struct fsg_lun *curlun = fsg->common->curlun;
608 u32 lba;
609 struct fsg_buffhd *bh;
610 int rc;
611 u32 amount_left;
612 loff_t file_offset, file_offset_tmp;
613 unsigned int amount;
614 unsigned int partial_page;
615 ssize_t nread;
616
617 /* Get the starting Logical Block Address and check that it's
618 * not too big */
619 if (fsg->common->cmnd[0] == SC_READ_6)
620 lba = get_unaligned_be24(&fsg->common->cmnd[1]);
621 else {
622 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
623
624 /* We allow DPO (Disable Page Out = don't save data in the
625 * cache) and FUA (Force Unit Access = don't read from the
626 * cache), but we don't implement them. */
627 if ((fsg->common->cmnd[1] & ~0x18) != 0) {
628 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
629 return -EINVAL;
630 }
631 }
632 if (lba >= curlun->num_sectors) {
633 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
634 return -EINVAL;
635 }
636 file_offset = ((loff_t) lba) << 9;
637
638 /* Carry out the file reads */
639 amount_left = fsg->data_size_from_cmnd;
640 if (unlikely(amount_left == 0))
641 return -EIO; // No default reply
642
643 for (;;) {
644
645 /* Figure out how much we need to read:
646 * Try to read the remaining amount.
647 * But don't read more than the buffer size.
648 * And don't try to read past the end of the file.
649 * Finally, if we're not at a page boundary, don't read past
650 * the next page.
651 * If this means reading 0 then we were asked to read past
652 * the end of file. */
653 amount = min(amount_left, FSG_BUFLEN);
654 amount = min((loff_t) amount,
655 curlun->file_length - file_offset);
656 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
657 if (partial_page > 0)
658 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
659 partial_page);
660
661 /* Wait for the next buffer to become available */
662 bh = fsg->common->next_buffhd_to_fill;
663 while (bh->state != BUF_STATE_EMPTY) {
664 rc = sleep_thread(fsg);
665 if (rc)
666 return rc;
667 }
668
669 /* If we were asked to read past the end of file,
670 * end with an empty buffer. */
671 if (amount == 0) {
672 curlun->sense_data =
673 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
674 curlun->sense_data_info = file_offset >> 9;
675 curlun->info_valid = 1;
676 bh->inreq->length = 0;
677 bh->state = BUF_STATE_FULL;
678 break;
679 }
680
681 /* Perform the read */
682 file_offset_tmp = file_offset;
683 nread = vfs_read(curlun->filp,
684 (char __user *) bh->buf,
685 amount, &file_offset_tmp);
686 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
687 (unsigned long long) file_offset,
688 (int) nread);
689 if (signal_pending(current))
690 return -EINTR;
691
692 if (nread < 0) {
693 LDBG(curlun, "error in file read: %d\n",
694 (int) nread);
695 nread = 0;
696 } else if (nread < amount) {
697 LDBG(curlun, "partial file read: %d/%u\n",
698 (int) nread, amount);
699 nread -= (nread & 511); // Round down to a block
700 }
701 file_offset += nread;
702 amount_left -= nread;
703 fsg->residue -= nread;
704 bh->inreq->length = nread;
705 bh->state = BUF_STATE_FULL;
706
707 /* If an error occurred, report it and its position */
708 if (nread < amount) {
709 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
710 curlun->sense_data_info = file_offset >> 9;
711 curlun->info_valid = 1;
712 break;
713 }
714
715 if (amount_left == 0)
716 break; // No more left to read
717
718 /* Send this buffer and go read some more */
719 bh->inreq->zero = 0;
720 start_transfer(fsg, fsg->bulk_in, bh->inreq,
721 &bh->inreq_busy, &bh->state);
722 fsg->common->next_buffhd_to_fill = bh->next;
723 }
724
725 return -EIO; // No default reply
726}
727
728
729/*-------------------------------------------------------------------------*/
730
731static int do_write(struct fsg_dev *fsg)
732{
733 struct fsg_lun *curlun = fsg->common->curlun;
734 u32 lba;
735 struct fsg_buffhd *bh;
736 int get_some_more;
737 u32 amount_left_to_req, amount_left_to_write;
738 loff_t usb_offset, file_offset, file_offset_tmp;
739 unsigned int amount;
740 unsigned int partial_page;
741 ssize_t nwritten;
742 int rc;
743
744 if (curlun->ro) {
745 curlun->sense_data = SS_WRITE_PROTECTED;
746 return -EINVAL;
747 }
748 spin_lock(&curlun->filp->f_lock);
749 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
750 spin_unlock(&curlun->filp->f_lock);
751
752 /* Get the starting Logical Block Address and check that it's
753 * not too big */
754 if (fsg->common->cmnd[0] == SC_WRITE_6)
755 lba = get_unaligned_be24(&fsg->common->cmnd[1]);
756 else {
757 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
758
759 /* We allow DPO (Disable Page Out = don't save data in the
760 * cache) and FUA (Force Unit Access = write directly to the
761 * medium). We don't implement DPO; we implement FUA by
762 * performing synchronous output. */
763 if ((fsg->common->cmnd[1] & ~0x18) != 0) {
764 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
765 return -EINVAL;
766 }
767 if (fsg->common->cmnd[1] & 0x08) { // FUA
768 spin_lock(&curlun->filp->f_lock);
769 curlun->filp->f_flags |= O_SYNC;
770 spin_unlock(&curlun->filp->f_lock);
771 }
772 }
773 if (lba >= curlun->num_sectors) {
774 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
775 return -EINVAL;
776 }
777
778 /* Carry out the file writes */
779 get_some_more = 1;
780 file_offset = usb_offset = ((loff_t) lba) << 9;
781 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
782
783 while (amount_left_to_write > 0) {
784
785 /* Queue a request for more data from the host */
786 bh = fsg->common->next_buffhd_to_fill;
787 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
788
789 /* Figure out how much we want to get:
790 * Try to get the remaining amount.
791 * But don't get more than the buffer size.
792 * And don't try to go past the end of the file.
793 * If we're not at a page boundary,
794 * don't go past the next page.
795 * If this means getting 0, then we were asked
796 * to write past the end of file.
797 * Finally, round down to a block boundary. */
798 amount = min(amount_left_to_req, FSG_BUFLEN);
799 amount = min((loff_t) amount, curlun->file_length -
800 usb_offset);
801 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
802 if (partial_page > 0)
803 amount = min(amount,
804 (unsigned int) PAGE_CACHE_SIZE - partial_page);
805
806 if (amount == 0) {
807 get_some_more = 0;
808 curlun->sense_data =
809 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
810 curlun->sense_data_info = usb_offset >> 9;
811 curlun->info_valid = 1;
812 continue;
813 }
814 amount -= (amount & 511);
815 if (amount == 0) {
816
817 /* Why were we asked to transfer a
818 * partial block? */
819 get_some_more = 0;
820 continue;
821 }
822
823 /* Get the next buffer */
824 usb_offset += amount;
825 fsg->usb_amount_left -= amount;
826 amount_left_to_req -= amount;
827 if (amount_left_to_req == 0)
828 get_some_more = 0;
829
830 /* amount is always divisible by 512, hence by
831 * the bulk-out maxpacket size */
832 bh->outreq->length = bh->bulk_out_intended_length =
833 amount;
834 bh->outreq->short_not_ok = 1;
835 start_transfer(fsg, fsg->bulk_out, bh->outreq,
836 &bh->outreq_busy, &bh->state);
837 fsg->common->next_buffhd_to_fill = bh->next;
838 continue;
839 }
840
841 /* Write the received data to the backing file */
842 bh = fsg->common->next_buffhd_to_drain;
843 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
844 break; // We stopped early
845 if (bh->state == BUF_STATE_FULL) {
846 smp_rmb();
847 fsg->common->next_buffhd_to_drain = bh->next;
848 bh->state = BUF_STATE_EMPTY;
849
850 /* Did something go wrong with the transfer? */
851 if (bh->outreq->status != 0) {
852 curlun->sense_data = SS_COMMUNICATION_FAILURE;
853 curlun->sense_data_info = file_offset >> 9;
854 curlun->info_valid = 1;
855 break;
856 }
857
858 amount = bh->outreq->actual;
859 if (curlun->file_length - file_offset < amount) {
860 LERROR(curlun,
861 "write %u @ %llu beyond end %llu\n",
862 amount, (unsigned long long) file_offset,
863 (unsigned long long) curlun->file_length);
864 amount = curlun->file_length - file_offset;
865 }
866
867 /* Perform the write */
868 file_offset_tmp = file_offset;
869 nwritten = vfs_write(curlun->filp,
870 (char __user *) bh->buf,
871 amount, &file_offset_tmp);
872 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
873 (unsigned long long) file_offset,
874 (int) nwritten);
875 if (signal_pending(current))
876 return -EINTR; // Interrupted!
877
878 if (nwritten < 0) {
879 LDBG(curlun, "error in file write: %d\n",
880 (int) nwritten);
881 nwritten = 0;
882 } else if (nwritten < amount) {
883 LDBG(curlun, "partial file write: %d/%u\n",
884 (int) nwritten, amount);
885 nwritten -= (nwritten & 511);
886 // Round down to a block
887 }
888 file_offset += nwritten;
889 amount_left_to_write -= nwritten;
890 fsg->residue -= nwritten;
891
892 /* If an error occurred, report it and its position */
893 if (nwritten < amount) {
894 curlun->sense_data = SS_WRITE_ERROR;
895 curlun->sense_data_info = file_offset >> 9;
896 curlun->info_valid = 1;
897 break;
898 }
899
900 /* Did the host decide to stop early? */
901 if (bh->outreq->actual != bh->outreq->length) {
902 fsg->short_packet_received = 1;
903 break;
904 }
905 continue;
906 }
907
908 /* Wait for something to happen */
909 rc = sleep_thread(fsg);
910 if (rc)
911 return rc;
912 }
913
914 return -EIO; // No default reply
915}
916
917
918/*-------------------------------------------------------------------------*/
919
920static int do_synchronize_cache(struct fsg_dev *fsg)
921{
922 struct fsg_lun *curlun = fsg->common->curlun;
923 int rc;
924
925 /* We ignore the requested LBA and write out all file's
926 * dirty data buffers. */
927 rc = fsg_lun_fsync_sub(curlun);
928 if (rc)
929 curlun->sense_data = SS_WRITE_ERROR;
930 return 0;
931}
932
933
934/*-------------------------------------------------------------------------*/
935
936static void invalidate_sub(struct fsg_lun *curlun)
937{
938 struct file *filp = curlun->filp;
939 struct inode *inode = filp->f_path.dentry->d_inode;
940 unsigned long rc;
941
942 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
943 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
944}
945
946static int do_verify(struct fsg_dev *fsg)
947{
948 struct fsg_lun *curlun = fsg->common->curlun;
949 u32 lba;
950 u32 verification_length;
951 struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
952 loff_t file_offset, file_offset_tmp;
953 u32 amount_left;
954 unsigned int amount;
955 ssize_t nread;
956
957 /* Get the starting Logical Block Address and check that it's
958 * not too big */
959 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
960 if (lba >= curlun->num_sectors) {
961 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
962 return -EINVAL;
963 }
964
965 /* We allow DPO (Disable Page Out = don't save data in the
966 * cache) but we don't implement it. */
967 if ((fsg->common->cmnd[1] & ~0x10) != 0) {
968 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
969 return -EINVAL;
970 }
971
972 verification_length = get_unaligned_be16(&fsg->common->cmnd[7]);
973 if (unlikely(verification_length == 0))
974 return -EIO; // No default reply
975
976 /* Prepare to carry out the file verify */
977 amount_left = verification_length << 9;
978 file_offset = ((loff_t) lba) << 9;
979
980 /* Write out all the dirty buffers before invalidating them */
981 fsg_lun_fsync_sub(curlun);
982 if (signal_pending(current))
983 return -EINTR;
984
985 invalidate_sub(curlun);
986 if (signal_pending(current))
987 return -EINTR;
988
989 /* Just try to read the requested blocks */
990 while (amount_left > 0) {
991
992 /* Figure out how much we need to read:
993 * Try to read the remaining amount, but not more than
994 * the buffer size.
995 * And don't try to read past the end of the file.
996 * If this means reading 0 then we were asked to read
997 * past the end of file. */
998 amount = min(amount_left, FSG_BUFLEN);
999 amount = min((loff_t) amount,
1000 curlun->file_length - file_offset);
1001 if (amount == 0) {
1002 curlun->sense_data =
1003 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1004 curlun->sense_data_info = file_offset >> 9;
1005 curlun->info_valid = 1;
1006 break;
1007 }
1008
1009 /* Perform the read */
1010 file_offset_tmp = file_offset;
1011 nread = vfs_read(curlun->filp,
1012 (char __user *) bh->buf,
1013 amount, &file_offset_tmp);
1014 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1015 (unsigned long long) file_offset,
1016 (int) nread);
1017 if (signal_pending(current))
1018 return -EINTR;
1019
1020 if (nread < 0) {
1021 LDBG(curlun, "error in file verify: %d\n",
1022 (int) nread);
1023 nread = 0;
1024 } else if (nread < amount) {
1025 LDBG(curlun, "partial file verify: %d/%u\n",
1026 (int) nread, amount);
1027 nread -= (nread & 511); // Round down to a sector
1028 }
1029 if (nread == 0) {
1030 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1031 curlun->sense_data_info = file_offset >> 9;
1032 curlun->info_valid = 1;
1033 break;
1034 }
1035 file_offset += nread;
1036 amount_left -= nread;
1037 }
1038 return 0;
1039}
1040
1041
1042/*-------------------------------------------------------------------------*/
1043
1044static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1045{
1046 struct fsg_lun *curlun = fsg->common->curlun;
1047 u8 *buf = (u8 *) bh->buf;
1048
1049 if (!curlun) { /* Unsupported LUNs are okay */
1050 fsg->bad_lun_okay = 1;
1051 memset(buf, 0, 36);
1052 buf[0] = 0x7f; // Unsupported, no device-type
1053 buf[4] = 31; // Additional length
1054 return 36;
1055 }
1056
1057 buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
1058 buf[1] = curlun->removable ? 0x80 : 0;
1059 buf[2] = 2; // ANSI SCSI level 2
1060 buf[3] = 2; // SCSI-2 INQUIRY data format
1061 buf[4] = 31; // Additional length
1062 buf[5] = 0; // No special options
1063 buf[6] = 0;
1064 buf[7] = 0;
1065 memcpy(buf + 8, fsg->common->inquiry_string,
1066 sizeof fsg->common->inquiry_string);
1067 return 36;
1068}
1069
1070
1071static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1072{
1073 struct fsg_lun *curlun = fsg->common->curlun;
1074 u8 *buf = (u8 *) bh->buf;
1075 u32 sd, sdinfo;
1076 int valid;
1077
1078 /*
1079 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1080 *
1081 * If a REQUEST SENSE command is received from an initiator
1082 * with a pending unit attention condition (before the target
1083 * generates the contingent allegiance condition), then the
1084 * target shall either:
1085 * a) report any pending sense data and preserve the unit
1086 * attention condition on the logical unit, or,
1087 * b) report the unit attention condition, may discard any
1088 * pending sense data, and clear the unit attention
1089 * condition on the logical unit for that initiator.
1090 *
1091 * FSG normally uses option a); enable this code to use option b).
1092 */
1093#if 0
1094 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1095 curlun->sense_data = curlun->unit_attention_data;
1096 curlun->unit_attention_data = SS_NO_SENSE;
1097 }
1098#endif
1099
1100 if (!curlun) { // Unsupported LUNs are okay
1101 fsg->bad_lun_okay = 1;
1102 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1103 sdinfo = 0;
1104 valid = 0;
1105 } else {
1106 sd = curlun->sense_data;
1107 sdinfo = curlun->sense_data_info;
1108 valid = curlun->info_valid << 7;
1109 curlun->sense_data = SS_NO_SENSE;
1110 curlun->sense_data_info = 0;
1111 curlun->info_valid = 0;
1112 }
1113
1114 memset(buf, 0, 18);
1115 buf[0] = valid | 0x70; // Valid, current error
1116 buf[2] = SK(sd);
1117 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1118 buf[7] = 18 - 8; // Additional sense length
1119 buf[12] = ASC(sd);
1120 buf[13] = ASCQ(sd);
1121 return 18;
1122}
1123
1124
1125static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1126{
1127 struct fsg_lun *curlun = fsg->common->curlun;
1128 u32 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
1129 int pmi = fsg->common->cmnd[8];
1130 u8 *buf = (u8 *) bh->buf;
1131
1132 /* Check the PMI and LBA fields */
1133 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1134 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1135 return -EINVAL;
1136 }
1137
1138 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1139 /* Max logical block */
1140 put_unaligned_be32(512, &buf[4]); /* Block length */
1141 return 8;
1142}
1143
1144
1145static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1146{
1147 struct fsg_lun *curlun = fsg->common->curlun;
1148 int msf = fsg->common->cmnd[1] & 0x02;
1149 u32 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
1150 u8 *buf = (u8 *) bh->buf;
1151
1152 if ((fsg->common->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
1153 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1154 return -EINVAL;
1155 }
1156 if (lba >= curlun->num_sectors) {
1157 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1158 return -EINVAL;
1159 }
1160
1161 memset(buf, 0, 8);
1162 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1163 store_cdrom_address(&buf[4], msf, lba);
1164 return 8;
1165}
1166
1167
1168static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1169{
1170 struct fsg_lun *curlun = fsg->common->curlun;
1171 int msf = fsg->common->cmnd[1] & 0x02;
1172 int start_track = fsg->common->cmnd[6];
1173 u8 *buf = (u8 *) bh->buf;
1174
1175 if ((fsg->common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1176 start_track > 1) {
1177 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1178 return -EINVAL;
1179 }
1180
1181 memset(buf, 0, 20);
1182 buf[1] = (20-2); /* TOC data length */
1183 buf[2] = 1; /* First track number */
1184 buf[3] = 1; /* Last track number */
1185 buf[5] = 0x16; /* Data track, copying allowed */
1186 buf[6] = 0x01; /* Only track is number 1 */
1187 store_cdrom_address(&buf[8], msf, 0);
1188
1189 buf[13] = 0x16; /* Lead-out track is data */
1190 buf[14] = 0xAA; /* Lead-out track number */
1191 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1192 return 20;
1193}
1194
1195
1196static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1197{
1198 struct fsg_lun *curlun = fsg->common->curlun;
1199 int mscmnd = fsg->common->cmnd[0];
1200 u8 *buf = (u8 *) bh->buf;
1201 u8 *buf0 = buf;
1202 int pc, page_code;
1203 int changeable_values, all_pages;
1204 int valid_page = 0;
1205 int len, limit;
1206
1207 if ((fsg->common->cmnd[1] & ~0x08) != 0) { // Mask away DBD
1208 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1209 return -EINVAL;
1210 }
1211 pc = fsg->common->cmnd[2] >> 6;
1212 page_code = fsg->common->cmnd[2] & 0x3f;
1213 if (pc == 3) {
1214 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1215 return -EINVAL;
1216 }
1217 changeable_values = (pc == 1);
1218 all_pages = (page_code == 0x3f);
1219
1220 /* Write the mode parameter header. Fixed values are: default
1221 * medium type, no cache control (DPOFUA), and no block descriptors.
1222 * The only variable value is the WriteProtect bit. We will fill in
1223 * the mode data length later. */
1224 memset(buf, 0, 8);
1225 if (mscmnd == SC_MODE_SENSE_6) {
1226 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1227 buf += 4;
1228 limit = 255;
1229 } else { // SC_MODE_SENSE_10
1230 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1231 buf += 8;
1232 limit = 65535; // Should really be FSG_BUFLEN
1233 }
1234
1235 /* No block descriptors */
1236
1237 /* The mode pages, in numerical order. The only page we support
1238 * is the Caching page. */
1239 if (page_code == 0x08 || all_pages) {
1240 valid_page = 1;
1241 buf[0] = 0x08; // Page code
1242 buf[1] = 10; // Page length
1243 memset(buf+2, 0, 10); // None of the fields are changeable
1244
1245 if (!changeable_values) {
1246 buf[2] = 0x04; // Write cache enable,
1247 // Read cache not disabled
1248 // No cache retention priorities
1249 put_unaligned_be16(0xffff, &buf[4]);
1250 /* Don't disable prefetch */
1251 /* Minimum prefetch = 0 */
1252 put_unaligned_be16(0xffff, &buf[8]);
1253 /* Maximum prefetch */
1254 put_unaligned_be16(0xffff, &buf[10]);
1255 /* Maximum prefetch ceiling */
1256 }
1257 buf += 12;
1258 }
1259
1260 /* Check that a valid page was requested and the mode data length
1261 * isn't too long. */
1262 len = buf - buf0;
1263 if (!valid_page || len > limit) {
1264 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1265 return -EINVAL;
1266 }
1267
1268 /* Store the mode data length */
1269 if (mscmnd == SC_MODE_SENSE_6)
1270 buf0[0] = len - 1;
1271 else
1272 put_unaligned_be16(len - 2, buf0);
1273 return len;
1274}
1275
1276
1277static int do_start_stop(struct fsg_dev *fsg)
1278{
1279 if (!fsg->common->curlun) {
1280 return -EINVAL;
1281 } else if (!fsg->common->curlun->removable) {
1282 fsg->common->curlun->sense_data = SS_INVALID_COMMAND;
1283 return -EINVAL;
1284 }
1285 return 0;
1286}
1287
1288
1289static int do_prevent_allow(struct fsg_dev *fsg)
1290{
1291 struct fsg_lun *curlun = fsg->common->curlun;
1292 int prevent;
1293
1294 if (!fsg->common->curlun) {
1295 return -EINVAL;
1296 } else if (!fsg->common->curlun->removable) {
1297 fsg->common->curlun->sense_data = SS_INVALID_COMMAND;
1298 return -EINVAL;
1299 }
1300
1301 prevent = fsg->common->cmnd[4] & 0x01;
1302 if ((fsg->common->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
1303 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1304 return -EINVAL;
1305 }
1306
1307 if (curlun->prevent_medium_removal && !prevent)
1308 fsg_lun_fsync_sub(curlun);
1309 curlun->prevent_medium_removal = prevent;
1310 return 0;
1311}
1312
1313
1314static int do_read_format_capacities(struct fsg_dev *fsg,
1315 struct fsg_buffhd *bh)
1316{
1317 struct fsg_lun *curlun = fsg->common->curlun;
1318 u8 *buf = (u8 *) bh->buf;
1319
1320 buf[0] = buf[1] = buf[2] = 0;
1321 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
1322 buf += 4;
1323
1324 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1325 /* Number of blocks */
1326 put_unaligned_be32(512, &buf[4]); /* Block length */
1327 buf[4] = 0x02; /* Current capacity */
1328 return 12;
1329}
1330
1331
1332static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1333{
1334 struct fsg_lun *curlun = fsg->common->curlun;
1335
1336 /* We don't support MODE SELECT */
1337 curlun->sense_data = SS_INVALID_COMMAND;
1338 return -EINVAL;
1339}
1340
1341
1342/*-------------------------------------------------------------------------*/
1343
1344static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1345{
1346 int rc;
1347
1348 rc = fsg_set_halt(fsg, fsg->bulk_in);
1349 if (rc == -EAGAIN)
1350 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1351 while (rc != 0) {
1352 if (rc != -EAGAIN) {
1353 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1354 rc = 0;
1355 break;
1356 }
1357
1358 /* Wait for a short time and then try again */
1359 if (msleep_interruptible(100) != 0)
1360 return -EINTR;
1361 rc = usb_ep_set_halt(fsg->bulk_in);
1362 }
1363 return rc;
1364}
1365
1366static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1367{
1368 int rc;
1369
1370 DBG(fsg, "bulk-in set wedge\n");
1371 rc = usb_ep_set_wedge(fsg->bulk_in);
1372 if (rc == -EAGAIN)
1373 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1374 while (rc != 0) {
1375 if (rc != -EAGAIN) {
1376 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1377 rc = 0;
1378 break;
1379 }
1380
1381 /* Wait for a short time and then try again */
1382 if (msleep_interruptible(100) != 0)
1383 return -EINTR;
1384 rc = usb_ep_set_wedge(fsg->bulk_in);
1385 }
1386 return rc;
1387}
1388
1389static int pad_with_zeros(struct fsg_dev *fsg)
1390{
1391 struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
1392 u32 nkeep = bh->inreq->length;
1393 u32 nsend;
1394 int rc;
1395
1396 bh->state = BUF_STATE_EMPTY; // For the first iteration
1397 fsg->usb_amount_left = nkeep + fsg->residue;
1398 while (fsg->usb_amount_left > 0) {
1399
1400 /* Wait for the next buffer to be free */
1401 while (bh->state != BUF_STATE_EMPTY) {
1402 rc = sleep_thread(fsg);
1403 if (rc)
1404 return rc;
1405 }
1406
1407 nsend = min(fsg->usb_amount_left, FSG_BUFLEN);
1408 memset(bh->buf + nkeep, 0, nsend - nkeep);
1409 bh->inreq->length = nsend;
1410 bh->inreq->zero = 0;
1411 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1412 &bh->inreq_busy, &bh->state);
1413 bh = fsg->common->next_buffhd_to_fill = bh->next;
1414 fsg->usb_amount_left -= nsend;
1415 nkeep = 0;
1416 }
1417 return 0;
1418}
1419
1420static int throw_away_data(struct fsg_dev *fsg)
1421{
1422 struct fsg_buffhd *bh;
1423 u32 amount;
1424 int rc;
1425
1426 for (bh = fsg->common->next_buffhd_to_drain;
1427 bh->state != BUF_STATE_EMPTY || fsg->usb_amount_left > 0;
1428 bh = fsg->common->next_buffhd_to_drain) {
1429
1430 /* Throw away the data in a filled buffer */
1431 if (bh->state == BUF_STATE_FULL) {
1432 smp_rmb();
1433 bh->state = BUF_STATE_EMPTY;
1434 fsg->common->next_buffhd_to_drain = bh->next;
1435
1436 /* A short packet or an error ends everything */
1437 if (bh->outreq->actual != bh->outreq->length ||
1438 bh->outreq->status != 0) {
1439 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1440 return -EINTR;
1441 }
1442 continue;
1443 }
1444
1445 /* Try to submit another request if we need one */
1446 bh = fsg->common->next_buffhd_to_fill;
1447 if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1448 amount = min(fsg->usb_amount_left, FSG_BUFLEN);
1449
1450 /* amount is always divisible by 512, hence by
1451 * the bulk-out maxpacket size */
1452 bh->outreq->length = bh->bulk_out_intended_length =
1453 amount;
1454 bh->outreq->short_not_ok = 1;
1455 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1456 &bh->outreq_busy, &bh->state);
1457 fsg->common->next_buffhd_to_fill = bh->next;
1458 fsg->usb_amount_left -= amount;
1459 continue;
1460 }
1461
1462 /* Otherwise wait for something to happen */
1463 rc = sleep_thread(fsg);
1464 if (rc)
1465 return rc;
1466 }
1467 return 0;
1468}
1469
1470
1471static int finish_reply(struct fsg_dev *fsg)
1472{
1473 struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
1474 int rc = 0;
1475
1476 switch (fsg->data_dir) {
1477 case DATA_DIR_NONE:
1478 break; // Nothing to send
1479
1480 /* If we don't know whether the host wants to read or write,
1481 * this must be CB or CBI with an unknown command. We mustn't
1482 * try to send or receive any data. So stall both bulk pipes
1483 * if we can and wait for a reset. */
1484 case DATA_DIR_UNKNOWN:
1485 if (fsg->can_stall) {
1486 fsg_set_halt(fsg, fsg->bulk_out);
1487 rc = halt_bulk_in_endpoint(fsg);
1488 }
1489 break;
1490
1491 /* All but the last buffer of data must have already been sent */
1492 case DATA_DIR_TO_HOST:
1493 if (fsg->data_size == 0) {
1494 /* Nothing to send */
1495
1496 /* If there's no residue, simply send the last buffer */
1497 } else if (fsg->residue == 0) {
1498 bh->inreq->zero = 0;
1499 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1500 &bh->inreq_busy, &bh->state);
1501 fsg->common->next_buffhd_to_fill = bh->next;
1502
1503 /* For Bulk-only, if we're allowed to stall then send the
1504 * short packet and halt the bulk-in endpoint. If we can't
1505 * stall, pad out the remaining data with 0's. */
1506 } else if (fsg->can_stall) {
1507 bh->inreq->zero = 1;
1508 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1509 &bh->inreq_busy, &bh->state);
1510 fsg->common->next_buffhd_to_fill = bh->next;
1511 rc = halt_bulk_in_endpoint(fsg);
1512 } else {
1513 rc = pad_with_zeros(fsg);
1514 }
1515 break;
1516
1517 /* We have processed all we want from the data the host has sent.
1518 * There may still be outstanding bulk-out requests. */
1519 case DATA_DIR_FROM_HOST:
1520 if (fsg->residue == 0)
1521 ; // Nothing to receive
1522
1523 /* Did the host stop sending unexpectedly early? */
1524 else if (fsg->short_packet_received) {
1525 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1526 rc = -EINTR;
1527 }
1528
1529 /* We haven't processed all the incoming data. Even though
1530 * we may be allowed to stall, doing so would cause a race.
1531 * The controller may already have ACK'ed all the remaining
1532 * bulk-out packets, in which case the host wouldn't see a
1533 * STALL. Not realizing the endpoint was halted, it wouldn't
1534 * clear the halt -- leading to problems later on. */
1535#if 0
1536 else if (fsg->can_stall) {
1537 fsg_set_halt(fsg, fsg->bulk_out);
1538 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1539 rc = -EINTR;
1540 }
1541#endif
1542
1543 /* We can't stall. Read in the excess data and throw it
1544 * all away. */
1545 else
1546 rc = throw_away_data(fsg);
1547 break;
1548 }
1549 return rc;
1550}
1551
1552
1553static int send_status(struct fsg_dev *fsg)
1554{
1555 struct fsg_lun *curlun = fsg->common->curlun;
1556 struct fsg_buffhd *bh;
1557 struct bulk_cs_wrap *csw;
1558 int rc;
1559 u8 status = USB_STATUS_PASS;
1560 u32 sd, sdinfo = 0;
1561
1562 /* Wait for the next buffer to become available */
1563 bh = fsg->common->next_buffhd_to_fill;
1564 while (bh->state != BUF_STATE_EMPTY) {
1565 rc = sleep_thread(fsg);
1566 if (rc)
1567 return rc;
1568 }
1569
1570 if (curlun) {
1571 sd = curlun->sense_data;
1572 sdinfo = curlun->sense_data_info;
1573 } else if (fsg->bad_lun_okay)
1574 sd = SS_NO_SENSE;
1575 else
1576 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1577
1578 if (fsg->phase_error) {
1579 DBG(fsg, "sending phase-error status\n");
1580 status = USB_STATUS_PHASE_ERROR;
1581 sd = SS_INVALID_COMMAND;
1582 } else if (sd != SS_NO_SENSE) {
1583 DBG(fsg, "sending command-failure status\n");
1584 status = USB_STATUS_FAIL;
1585 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1586 " info x%x\n",
1587 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1588 }
1589
1590 /* Store and send the Bulk-only CSW */
1591 csw = (void*)bh->buf;
1592
1593 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1594 csw->Tag = fsg->tag;
1595 csw->Residue = cpu_to_le32(fsg->residue);
1596 csw->Status = status;
1597
1598 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1599 bh->inreq->zero = 0;
1600 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1601 &bh->inreq_busy, &bh->state);
1602
1603 fsg->common->next_buffhd_to_fill = bh->next;
1604 return 0;
1605}
1606
1607
1608/*-------------------------------------------------------------------------*/
1609
1610/* Check whether the command is properly formed and whether its data size
1611 * and direction agree with the values we already have. */
1612static int check_command(struct fsg_dev *fsg, int cmnd_size,
1613 enum data_direction data_dir, unsigned int mask,
1614 int needs_medium, const char *name)
1615{
1616 int i;
1617 int lun = fsg->common->cmnd[1] >> 5;
1618 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1619 char hdlen[20];
1620 struct fsg_lun *curlun;
1621
1622 hdlen[0] = 0;
1623 if (fsg->data_dir != DATA_DIR_UNKNOWN)
1624 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
1625 fsg->data_size);
1626 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1627 name, cmnd_size, dirletter[(int) data_dir],
1628 fsg->data_size_from_cmnd, fsg->common->cmnd_size, hdlen);
1629
1630 /* We can't reply at all until we know the correct data direction
1631 * and size. */
1632 if (fsg->data_size_from_cmnd == 0)
1633 data_dir = DATA_DIR_NONE;
1634 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
1635 fsg->data_dir = data_dir;
1636 fsg->data_size = fsg->data_size_from_cmnd;
1637
1638 } else { // Bulk-only
1639 if (fsg->data_size < fsg->data_size_from_cmnd) {
1640
1641 /* Host data size < Device data size is a phase error.
1642 * Carry out the command, but only transfer as much
1643 * as we are allowed. */
1644 fsg->data_size_from_cmnd = fsg->data_size;
1645 fsg->phase_error = 1;
1646 }
1647 }
1648 fsg->residue = fsg->usb_amount_left = fsg->data_size;
1649
1650 /* Conflicting data directions is a phase error */
1651 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
1652 fsg->phase_error = 1;
1653 return -EINVAL;
1654 }
1655
1656 /* Verify the length of the command itself */
1657	if (cmnd_size != fsg->common->cmnd_size) {
1658
1659 /* Special case workaround: There are plenty of buggy SCSI
1660 * implementations. Many have issues with cbw->Length
1661 * field passing a wrong command size. For those cases we
1662 * always try to work around the problem by using the length
1663 * sent by the host side provided it is at least as large
1664 * as the correct command length.
1665 * Examples of such cases would be MS-Windows, which issues
1666 * REQUEST SENSE with cbw->Length == 12 where it should
1667 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1668 * REQUEST SENSE with cbw->Length == 10 where it should
1669 * be 6 as well.
1670 */
1671		if (cmnd_size <= fsg->common->cmnd_size) {
1672			DBG(fsg, "%s is buggy! Expected length %d "
1673 "but we got %d\n", name,
1674 cmnd_size, fsg->common->cmnd_size);
1675 cmnd_size = fsg->common->cmnd_size;
1676 } else {
1677 fsg->phase_error = 1;
1678 return -EINVAL;
1679 }
1680 }
1681
1682 /* Check that the LUN values are consistent */
1683	if (fsg->common->lun != lun)
1684		DBG(fsg, "using LUN %d from CBW, not LUN %d from CDB\n",
1685		    fsg->common->lun, lun);
1686
1687 /* Check the LUN */
1688 if (fsg->common->lun >= 0 && fsg->common->lun < fsg->common->nluns) {
1689 fsg->common->curlun = curlun = &fsg->common->luns[fsg->common->lun];
1690 if (fsg->common->cmnd[0] != SC_REQUEST_SENSE) {
1691 curlun->sense_data = SS_NO_SENSE;
1692 curlun->sense_data_info = 0;
1693 curlun->info_valid = 0;
1694 }
1695 } else {
1696		fsg->common->curlun = curlun = NULL;
1697 fsg->bad_lun_okay = 0;
1698
1699 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1700 * to use unsupported LUNs; all others may not. */
1701 if (fsg->common->cmnd[0] != SC_INQUIRY &&
1702 fsg->common->cmnd[0] != SC_REQUEST_SENSE) {
1703 DBG(fsg, "unsupported LUN %d\n", fsg->common->lun);
1704 return -EINVAL;
1705 }
1706 }
1707
1708 /* If a unit attention condition exists, only INQUIRY and
1709 * REQUEST SENSE commands are allowed; anything else must fail. */
1710 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1711 fsg->common->cmnd[0] != SC_INQUIRY &&
1712 fsg->common->cmnd[0] != SC_REQUEST_SENSE) {
1713 curlun->sense_data = curlun->unit_attention_data;
1714 curlun->unit_attention_data = SS_NO_SENSE;
1715 return -EINVAL;
1716 }
1717
1718 /* Check that only command bytes listed in the mask are non-zero */
1719	fsg->common->cmnd[1] &= 0x1f;			// Mask away the LUN
1720	for (i = 1; i < cmnd_size; ++i) {
1721		if (fsg->common->cmnd[i] && !(mask & (1 << i))) {
1722 if (curlun)
1723 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1724 return -EINVAL;
1725 }
1726 }
1727
1728 /* If the medium isn't mounted and the command needs to access
1729 * it, return an error. */
1730 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1731 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1732 return -EINVAL;
1733 }
1734
1735 return 0;
1736}
1737
1738
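/* Decode the CDB saved by received_cbw(): compute the expected data
 * size, validate the command with check_command() and dispatch to the
 * matching do_*() handler.  Unimplemented or unknown opcodes fall
 * through to the default case and report INVALID COMMAND sense data. */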
1739static int do_scsi_command(struct fsg_dev *fsg)
1740{
1741 struct fsg_buffhd *bh;
1742 int rc;
1743 int reply = -EINVAL;
1744 int i;
1745 static char unknown[16];
1746
1747	dump_cdb(fsg->common);
1748
1749 /* Wait for the next buffer to become available for data or status */
1750	bh = fsg->common->next_buffhd_to_drain = fsg->common->next_buffhd_to_fill;
1751 while (bh->state != BUF_STATE_EMPTY) {
1752 rc = sleep_thread(fsg);
1753 if (rc)
1754 return rc;
1755 }
1756 fsg->phase_error = 0;
1757 fsg->short_packet_received = 0;
1758
1759 down_read(&fsg->common->filesem); // We're using the backing file
1760 switch (fsg->common->cmnd[0]) {
1761
1762 case SC_INQUIRY:
1763		fsg->data_size_from_cmnd = fsg->common->cmnd[4];
1764 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1765 (1<<4), 0,
1766 "INQUIRY")) == 0)
1767 reply = do_inquiry(fsg, bh);
1768 break;
1769
1770 case SC_MODE_SELECT_6:
1771		fsg->data_size_from_cmnd = fsg->common->cmnd[4];
1772 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1773 (1<<1) | (1<<4), 0,
1774 "MODE SELECT(6)")) == 0)
1775 reply = do_mode_select(fsg, bh);
1776 break;
1777
1778 case SC_MODE_SELECT_10:
1779		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
1780 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1781 (1<<1) | (3<<7), 0,
1782 "MODE SELECT(10)")) == 0)
1783 reply = do_mode_select(fsg, bh);
1784 break;
1785
1786 case SC_MODE_SENSE_6:
1787		fsg->data_size_from_cmnd = fsg->common->cmnd[4];
1788 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1789 (1<<1) | (1<<2) | (1<<4), 0,
1790 "MODE SENSE(6)")) == 0)
1791 reply = do_mode_sense(fsg, bh);
1792 break;
1793
1794 case SC_MODE_SENSE_10:
1795		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
1796 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1797 (1<<1) | (1<<2) | (3<<7), 0,
1798 "MODE SENSE(10)")) == 0)
1799 reply = do_mode_sense(fsg, bh);
1800 break;
1801
1802 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1803 fsg->data_size_from_cmnd = 0;
1804 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1805 (1<<4), 0,
1806 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
1807 reply = do_prevent_allow(fsg);
1808 break;
1809
1810 case SC_READ_6:
1811		i = fsg->common->cmnd[4];
1812 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1813 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1814 (7<<1) | (1<<4), 1,
1815 "READ(6)")) == 0)
1816 reply = do_read(fsg);
1817 break;
1818
1819 case SC_READ_10:
1820 fsg->data_size_from_cmnd =
1821			get_unaligned_be16(&fsg->common->cmnd[7]) << 9;
1822 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1823 (1<<1) | (0xf<<2) | (3<<7), 1,
1824 "READ(10)")) == 0)
1825 reply = do_read(fsg);
1826 break;
1827
1828 case SC_READ_12:
1829 fsg->data_size_from_cmnd =
1830			get_unaligned_be32(&fsg->common->cmnd[6]) << 9;
1831 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
1832 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1833 "READ(12)")) == 0)
1834 reply = do_read(fsg);
1835 break;
1836
1837 case SC_READ_CAPACITY:
1838 fsg->data_size_from_cmnd = 8;
1839 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1840 (0xf<<2) | (1<<8), 1,
1841 "READ CAPACITY")) == 0)
1842 reply = do_read_capacity(fsg, bh);
1843 break;
1844
1845 case SC_READ_HEADER:
1846		if (!fsg->common->curlun || !fsg->common->curlun->cdrom)
1847			goto unknown_cmnd;
1848		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
1849 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1850 (3<<7) | (0x1f<<1), 1,
1851 "READ HEADER")) == 0)
1852 reply = do_read_header(fsg, bh);
1853 break;
1854
1855 case SC_READ_TOC:
1856		if (!fsg->common->curlun || !fsg->common->curlun->cdrom)
1857			goto unknown_cmnd;
1858		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
1859 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1860 (7<<6) | (1<<1), 1,
1861 "READ TOC")) == 0)
1862 reply = do_read_toc(fsg, bh);
1863 break;
1864
1865 case SC_READ_FORMAT_CAPACITIES:
1866		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
1867 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1868 (3<<7), 1,
1869 "READ FORMAT CAPACITIES")) == 0)
1870 reply = do_read_format_capacities(fsg, bh);
1871 break;
1872
1873 case SC_REQUEST_SENSE:
1874		fsg->data_size_from_cmnd = fsg->common->cmnd[4];
1875 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1876 (1<<4), 0,
1877 "REQUEST SENSE")) == 0)
1878 reply = do_request_sense(fsg, bh);
1879 break;
1880
1881 case SC_START_STOP_UNIT:
1882 fsg->data_size_from_cmnd = 0;
1883 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1884 (1<<1) | (1<<4), 0,
1885 "START-STOP UNIT")) == 0)
1886 reply = do_start_stop(fsg);
1887 break;
1888
1889 case SC_SYNCHRONIZE_CACHE:
1890 fsg->data_size_from_cmnd = 0;
1891 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1892 (0xf<<2) | (3<<7), 1,
1893 "SYNCHRONIZE CACHE")) == 0)
1894 reply = do_synchronize_cache(fsg);
1895 break;
1896
1897 case SC_TEST_UNIT_READY:
1898 fsg->data_size_from_cmnd = 0;
1899 reply = check_command(fsg, 6, DATA_DIR_NONE,
1900 0, 1,
1901 "TEST UNIT READY");
1902 break;
1903
1904 /* Although optional, this command is used by MS-Windows. We
1905 * support a minimal version: BytChk must be 0. */
1906 case SC_VERIFY:
1907 fsg->data_size_from_cmnd = 0;
1908 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1909 (1<<1) | (0xf<<2) | (3<<7), 1,
1910 "VERIFY")) == 0)
1911 reply = do_verify(fsg);
1912 break;
1913
1914 case SC_WRITE_6:
1915		i = fsg->common->cmnd[4];
1916 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1917 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1918 (7<<1) | (1<<4), 1,
1919 "WRITE(6)")) == 0)
1920 reply = do_write(fsg);
1921 break;
1922
1923 case SC_WRITE_10:
1924 fsg->data_size_from_cmnd =
1925			get_unaligned_be16(&fsg->common->cmnd[7]) << 9;
1926 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1927 (1<<1) | (0xf<<2) | (3<<7), 1,
1928 "WRITE(10)")) == 0)
1929 reply = do_write(fsg);
1930 break;
1931
1932 case SC_WRITE_12:
1933 fsg->data_size_from_cmnd =
1934			get_unaligned_be32(&fsg->common->cmnd[6]) << 9;
1935 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
1936 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1937 "WRITE(12)")) == 0)
1938 reply = do_write(fsg);
1939 break;
1940
1941 /* Some mandatory commands that we recognize but don't implement.
1942 * They don't mean much in this setting. It's left as an exercise
1943 * for anyone interested to implement RESERVE and RELEASE in terms
1944 * of Posix locks. */
1945 case SC_FORMAT_UNIT:
1946 case SC_RELEASE:
1947 case SC_RESERVE:
1948 case SC_SEND_DIAGNOSTIC:
1949 // Fall through
1950
1951 default:
1952 unknown_cmnd:
1953 fsg->data_size_from_cmnd = 0;
1954 sprintf(unknown, "Unknown x%02x", fsg->common->cmnd[0]);
1955 if ((reply = check_command(fsg, fsg->common->cmnd_size,
1956				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
1957			fsg->common->curlun->sense_data = SS_INVALID_COMMAND;
1958 reply = -EINVAL;
1959 }
1960 break;
1961 }
1962	up_read(&fsg->common->filesem);
1963
1964 if (reply == -EINTR || signal_pending(current))
1965 return -EINTR;
1966
1967 /* Set up the single reply buffer for finish_reply() */
1968 if (reply == -EINVAL)
1969 reply = 0; // Error reply length
1970 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
1971 reply = min((u32) reply, fsg->data_size_from_cmnd);
1972 bh->inreq->length = reply;
1973 bh->state = BUF_STATE_FULL;
1974 fsg->residue -= reply;
1975 } // Otherwise it's already set
1976
1977 return 0;
1978}
1979
1980
1981/*-------------------------------------------------------------------------*/
1982
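/* Validate and decode a Bulk-only Command Block Wrapper: check the
 * 31-byte length and the signature, sanity-check the LUN, flags and CDB
 * length, then save the CDB, transfer direction, transfer length, LUN
 * and tag for do_scsi_command() and send_status(). */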
1983static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1984{
1985 struct usb_request *req = bh->outreq;
1986 struct fsg_bulk_cb_wrap *cbw = req->buf;
1987
1988 /* Was this a real packet? Should it be ignored? */
1989 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
1990 return -EINVAL;
1991
1992 /* Is the CBW valid? */
1993 if (req->actual != USB_BULK_CB_WRAP_LEN ||
1994 cbw->Signature != cpu_to_le32(
1995 USB_BULK_CB_SIG)) {
1996 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
1997 req->actual,
1998 le32_to_cpu(cbw->Signature));
1999
2000 /* The Bulk-only spec says we MUST stall the IN endpoint
2001 * (6.6.1), so it's unavoidable. It also says we must
2002 * retain this state until the next reset, but there's
2003 * no way to tell the controller driver it should ignore
2004 * Clear-Feature(HALT) requests.
2005 *
2006 * We aren't required to halt the OUT endpoint; instead
2007 * we can simply accept and discard any data received
2008 * until the next reset. */
2009 wedge_bulk_in_endpoint(fsg);
2010 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2011 return -EINVAL;
2012 }
2013
2014 /* Is the CBW meaningful? */
2015 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2016 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2017 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2018 "cmdlen %u\n",
2019 cbw->Lun, cbw->Flags, cbw->Length);
2020
2021 /* We can do anything we want here, so let's stall the
2022 * bulk pipes if we are allowed to. */
2023		if (fsg->can_stall) {
2024 fsg_set_halt(fsg, fsg->bulk_out);
2025 halt_bulk_in_endpoint(fsg);
2026 }
2027 return -EINVAL;
2028 }
2029
2030 /* Save the command for later */
2031 fsg->common->cmnd_size = cbw->Length;
2032 memcpy(fsg->common->cmnd, cbw->CDB, fsg->common->cmnd_size);
2033 if (cbw->Flags & USB_BULK_IN_FLAG)
2034 fsg->data_dir = DATA_DIR_TO_HOST;
2035 else
2036 fsg->data_dir = DATA_DIR_FROM_HOST;
2037 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2038 if (fsg->data_size == 0)
2039 fsg->data_dir = DATA_DIR_NONE;
2040	fsg->common->lun = cbw->Lun;
2041 fsg->tag = cbw->Tag;
2042 return 0;
2043}
2044
2045
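/* Queue a bulk-OUT request sized for a single CBW and sleep until the
 * host sends one; the buffer is drained in place, so the buffhd ring
 * position is not advanced. */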
2046static int get_next_command(struct fsg_dev *fsg)
2047{
2048 struct fsg_buffhd *bh;
2049 int rc = 0;
2050
2051	/* Wait for the next buffer to become available */
2052	bh = fsg->common->next_buffhd_to_fill;
2053 while (bh->state != BUF_STATE_EMPTY) {
2054 rc = sleep_thread(fsg);
2055 if (rc)
2056 return rc;
2057 }
2058
2059 /* Queue a request to read a Bulk-only CBW */
2060 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2061 bh->outreq->short_not_ok = 1;
2062 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2063 &bh->outreq_busy, &bh->state);
2064
2065 /* We will drain the buffer in software, which means we
2066 * can reuse it for the next filling. No need to advance
2067 * next_buffhd_to_fill. */
2068
2069 /* Wait for the CBW to arrive */
2070 while (bh->state != BUF_STATE_FULL) {
2071 rc = sleep_thread(fsg);
2072 if (rc)
2073 return rc;
2074	}
2075 smp_rmb();
2076 rc = received_cbw(fsg, bh);
2077 bh->state = BUF_STATE_EMPTY;
2078
2079 return rc;
2080}
2081
2082
2083/*-------------------------------------------------------------------------*/
2084
2085static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2086 const struct usb_endpoint_descriptor *d)
2087{
2088 int rc;
2089
2090 ep->driver_data = fsg;
2091 rc = usb_ep_enable(ep, d);
2092 if (rc)
2093 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2094 return rc;
2095}
2096
2097static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2098 struct usb_request **preq)
2099{
2100 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2101 if (*preq)
2102 return 0;
2103 ERROR(fsg, "can't allocate request for %s\n", ep->name);
2104 return -ENOMEM;
2105}
2106
2107/*
2108 * Reset interface setting and re-init endpoint state (toggle etc).
2109 * Call with altsetting < 0 to disable the interface. The only other
2110 * available altsetting is 0, which enables the interface.
2111 */
2112static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2113{
2114 int rc = 0;
2115 int i;
2116 const struct usb_endpoint_descriptor *d;
2117
2118 if (fsg->running)
2119 DBG(fsg, "reset interface\n");
2120
2121reset:
2122 /* Deallocate the requests */
2123 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2124		struct fsg_buffhd *bh = &fsg->common->buffhds[i];
2125
2126 if (bh->inreq) {
2127 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2128 bh->inreq = NULL;
2129 }
2130 if (bh->outreq) {
2131 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2132 bh->outreq = NULL;
2133 }
2134 }
2135
2136 /* Disable the endpoints */
2137 if (fsg->bulk_in_enabled) {
2138 usb_ep_disable(fsg->bulk_in);
2139 fsg->bulk_in_enabled = 0;
2140 }
2141 if (fsg->bulk_out_enabled) {
2142 usb_ep_disable(fsg->bulk_out);
2143 fsg->bulk_out_enabled = 0;
2144 }
2145
2146 fsg->running = 0;
2147 if (altsetting < 0 || rc != 0)
2148 return rc;
2149
2150 DBG(fsg, "set interface %d\n", altsetting);
2151
2152 /* Enable the endpoints */
2153 d = fsg_ep_desc(fsg->gadget,
2154 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2155 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2156 goto reset;
2157 fsg->bulk_in_enabled = 1;
2158
2159 d = fsg_ep_desc(fsg->gadget,
2160 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2161 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2162 goto reset;
2163 fsg->bulk_out_enabled = 1;
2164 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2165 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2166
2167 /* Allocate the requests */
2168 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2169		struct fsg_buffhd *bh = &fsg->common->buffhds[i];
2170
2171 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2172 goto reset;
2173 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
2174 goto reset;
2175 bh->inreq->buf = bh->outreq->buf = bh->buf;
2176 bh->inreq->context = bh->outreq->context = bh;
2177 bh->inreq->complete = bulk_in_complete;
2178 bh->outreq->complete = bulk_out_complete;
2179 }
2180
2181 fsg->running = 1;
2182 for (i = 0; i < fsg->common->nluns; ++i)
2183 fsg->common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2184 return rc;
2185}
2186
2187
2188/*
2189 * Change our operational configuration. This code must agree with the code
2190 * that returns config descriptors, and with interface altsetting code.
2191 *
2192 * It's also responsible for power management interactions. Some
2193 * configurations might not work with our current power sources.
2194 * For now we just assume the gadget is always self-powered.
2195 */
2196static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2197{
2198 int rc = 0;
2199
2200 /* Disable the single interface */
2201 if (fsg->config != 0) {
2202 DBG(fsg, "reset config\n");
2203 fsg->config = 0;
2204 rc = do_set_interface(fsg, -1);
2205 }
2206
2207 /* Enable the interface */
2208 if (new_config != 0) {
2209 fsg->config = new_config;
2210 rc = do_set_interface(fsg, 0);
2211 if (rc != 0)
2212 fsg->config = 0; /* Reset on errors */
2213 }
2214 return rc;
2215}
2216
2217
2218/****************************** ALT CONFIGS ******************************/
2219
2220
2221static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2222{
2223 struct fsg_dev *fsg = fsg_from_func(f);
2224 fsg->new_config = 1;
2225 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
2226 return 0;
2227}
2228
2229static void fsg_disable(struct usb_function *f)
2230{
2231 struct fsg_dev *fsg = fsg_from_func(f);
2232 fsg->new_config = 0;
2233 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
2234}
2235
2236
2237/*-------------------------------------------------------------------------*/
2238
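/* Drain pending signals, cancel all outstanding transfers and wait for
 * them to finish, flush the endpoint FIFOs, reset the buffer ring and
 * per-LUN SCSI state, then perform whatever extra work the raised
 * exception (abort, reset, config change, exit, ...) requires. */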
2239static void handle_exception(struct fsg_dev *fsg)
2240{
2241 siginfo_t info;
2242 int sig;
2243 int i;
2244 struct fsg_buffhd *bh;
2245 enum fsg_state old_state;
2246 u8 new_config;
2247 struct fsg_lun *curlun;
2248 unsigned int exception_req_tag;
2249 int rc;
2250
2251 /* Clear the existing signals. Anything but SIGUSR1 is converted
2252 * into a high-priority EXIT exception. */
2253 for (;;) {
2254 sig = dequeue_signal_lock(current, &current->blocked, &info);
2255 if (!sig)
2256 break;
2257 if (sig != SIGUSR1) {
2258 if (fsg->state < FSG_STATE_EXIT)
2259 DBG(fsg, "Main thread exiting on signal\n");
2260 raise_exception(fsg, FSG_STATE_EXIT);
2261 }
2262 }
2263
2264 /* Cancel all the pending transfers */
2265	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2266		bh = &fsg->common->buffhds[i];
2267 if (bh->inreq_busy)
2268 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2269 if (bh->outreq_busy)
2270 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2271 }
2272
2273 /* Wait until everything is idle */
2274 for (;;) {
2275		int num_active = 0;
2276		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2277			bh = &fsg->common->buffhds[i];
2278 num_active += bh->inreq_busy + bh->outreq_busy;
2279 }
2280 if (num_active == 0)
2281 break;
2282 if (sleep_thread(fsg))
2283 return;
2284 }
2285
2286 /* Clear out the controller's fifos */
2287 if (fsg->bulk_in_enabled)
2288 usb_ep_fifo_flush(fsg->bulk_in);
2289 if (fsg->bulk_out_enabled)
2290 usb_ep_fifo_flush(fsg->bulk_out);
2291
2292 /* Reset the I/O buffer states and pointers, the SCSI
2293 * state, and the exception. Then invoke the handler. */
2294 spin_lock_irq(&fsg->lock);
2295
2296 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2297		bh = &fsg->common->buffhds[i];
2298 bh->state = BUF_STATE_EMPTY;
2299 }
2300 fsg->common->next_buffhd_to_fill = fsg->common->next_buffhd_to_drain =
2301 &fsg->common->buffhds[0];
2302
2303 exception_req_tag = fsg->exception_req_tag;
2304 new_config = fsg->new_config;
2305 old_state = fsg->state;
2306
2307 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2308 fsg->state = FSG_STATE_STATUS_PHASE;
2309 else {
2310 for (i = 0; i < fsg->common->nluns; ++i) {
2311 curlun = &fsg->common->luns[i];
2312 curlun->prevent_medium_removal = 0;
2313 curlun->sense_data = curlun->unit_attention_data =
2314 SS_NO_SENSE;
2315 curlun->sense_data_info = 0;
2316 curlun->info_valid = 0;
2317 }
2318 fsg->state = FSG_STATE_IDLE;
2319 }
2320 spin_unlock_irq(&fsg->lock);
2321
2322 /* Carry out any extra actions required for the exception */
2323 switch (old_state) {
2324 case FSG_STATE_ABORT_BULK_OUT:
2325 send_status(fsg);
2326 spin_lock_irq(&fsg->lock);
2327 if (fsg->state == FSG_STATE_STATUS_PHASE)
2328 fsg->state = FSG_STATE_IDLE;
2329 spin_unlock_irq(&fsg->lock);
2330 break;
2331
2332 case FSG_STATE_RESET:
2333 /* In case we were forced against our will to halt a
2334 * bulk endpoint, clear the halt now. (The SuperH UDC
2335 * requires this.) */
2336 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2337 usb_ep_clear_halt(fsg->bulk_in);
2338
2339 if (fsg->ep0_req_tag == exception_req_tag)
2340 ep0_queue(fsg); // Complete the status stage
2341
2342 /* Technically this should go here, but it would only be
2343 * a waste of time. Ditto for the INTERFACE_CHANGE and
2344 * CONFIG_CHANGE cases. */
2345 // for (i = 0; i < fsg->common->nluns; ++i)
2346 // fsg->common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2347 break;
2348
2349 case FSG_STATE_CONFIG_CHANGE:
2350 rc = do_set_config(fsg, new_config);
2351 if (fsg->ep0_req_tag != exception_req_tag)
2352 break;
2353 if (rc != 0) // STALL on errors
2354 fsg_set_halt(fsg, fsg->ep0);
2355 else // Complete the status stage
2356 ep0_queue(fsg);
2357 break;
2358
2359 case FSG_STATE_EXIT:
2360 case FSG_STATE_TERMINATED:
2361 do_set_config(fsg, 0); // Free resources
2362 spin_lock_irq(&fsg->lock);
2363 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
2364 spin_unlock_irq(&fsg->lock);
2365 break;
2366
2367 case FSG_STATE_INTERFACE_CHANGE:
2368 case FSG_STATE_DISCONNECT:
2369 case FSG_STATE_COMMAND_PHASE:
2370 case FSG_STATE_DATA_PHASE:
2371 case FSG_STATE_STATUS_PHASE:
2372 case FSG_STATE_IDLE:
2373 break;
2374 }
2375}
2376
2377
2378/*-------------------------------------------------------------------------*/
2379
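/* The gadget's worker thread.  Each loop iteration is one Bulk-only
 * transaction: fetch a CBW (command phase), execute the SCSI command
 * and move any data (data phase), then send the CSW (status phase),
 * with exceptions handled between phases. */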
2380static int fsg_main_thread(void *fsg_)
2381{
2382 struct fsg_dev *fsg = fsg_;
2383
2384 /* Allow the thread to be killed by a signal, but set the signal mask
2385 * to block everything but INT, TERM, KILL, and USR1. */
2386 allow_signal(SIGINT);
2387 allow_signal(SIGTERM);
2388 allow_signal(SIGKILL);
2389 allow_signal(SIGUSR1);
2390
2391 /* Allow the thread to be frozen */
2392 set_freezable();
2393
2394 /* Arrange for userspace references to be interpreted as kernel
2395 * pointers. That way we can pass a kernel pointer to a routine
2396 * that expects a __user pointer and it will work okay. */
2397 set_fs(get_ds());
2398
2399 /* The main loop */
2400 while (fsg->state != FSG_STATE_TERMINATED) {
2401 if (exception_in_progress(fsg) || signal_pending(current)) {
2402 handle_exception(fsg);
2403 continue;
2404 }
2405
2406 if (!fsg->running) {
2407 sleep_thread(fsg);
2408 continue;
2409 }
2410
2411 if (get_next_command(fsg))
2412 continue;
2413
2414 spin_lock_irq(&fsg->lock);
2415 if (!exception_in_progress(fsg))
2416 fsg->state = FSG_STATE_DATA_PHASE;
2417 spin_unlock_irq(&fsg->lock);
2418
2419 if (do_scsi_command(fsg) || finish_reply(fsg))
2420 continue;
2421
2422 spin_lock_irq(&fsg->lock);
2423 if (!exception_in_progress(fsg))
2424 fsg->state = FSG_STATE_STATUS_PHASE;
2425 spin_unlock_irq(&fsg->lock);
2426
2427 if (send_status(fsg))
2428 continue;
2429
2430 spin_lock_irq(&fsg->lock);
2431 if (!exception_in_progress(fsg))
2432 fsg->state = FSG_STATE_IDLE;
2433 spin_unlock_irq(&fsg->lock);
2434	}
2435
2436 spin_lock_irq(&fsg->lock);
2437 fsg->thread_task = NULL;
2438 spin_unlock_irq(&fsg->lock);
2439
2440	/* XXX */
2441 /* If we are exiting because of a signal, unregister the
2442 * gadget driver. */
2443 /* if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) */
2444 /* usb_gadget_unregister_driver(&fsg_driver); */
2445
2446 /* Let the unbind and cleanup routines know the thread has exited */
2447 complete_and_exit(&fsg->thread_notifier, 0);
2448}
2449
2450
2451/*************************** DEVICE ATTRIBUTES ***************************/
2452
2453/* Write permission is checked per LUN in store_*() functions. */
2454static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2455static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
2456
2457
2458/****************************** FSG COMMON ******************************/
2459
2460static void fsg_common_release(struct kref *ref);
2461
2462static void fsg_lun_release(struct device *dev)
2463{
2464	/* Nothing needs to be done */
2465}
2466
2467static inline void fsg_common_get(struct fsg_common *common)
2468{
2469	kref_get(&common->ref);
2470}
2471
2472static inline void fsg_common_put(struct fsg_common *common)
2473{
2474 kref_put(&common->ref, fsg_common_release);
2475}
2476
2477
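/* Initialise (or allocate, if common is NULL) an fsg_common from an
 * fsg_config: create and register the LUNs and open their backing
 * files, link the data buffers into a cyclic list, build the INQUIRY
 * string and decide whether bulk stalls may be used.  Returns the
 * common structure or an ERR_PTR() value on failure. */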
2478static struct fsg_common *fsg_common_init(struct fsg_common *common,
2479 struct usb_composite_dev *cdev,
2480 struct fsg_config *cfg)
2481{
2482	struct usb_gadget *gadget = cdev->gadget;
2483 struct fsg_buffhd *bh;
2484 struct fsg_lun *curlun;
2485	struct fsg_lun_config *lcfg;
2486	int nluns, i, rc;
2487	char *pathbuf;
2488
2489 /* Find out how many LUNs there should be */
2490	nluns = cfg->nluns;
2491 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2492 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2493 return ERR_PTR(-EINVAL);
2494 }
2495
2496 /* Allocate? */
2497 if (!common) {
2498 common = kzalloc(sizeof *common, GFP_KERNEL);
2499 if (!common)
2500 return ERR_PTR(-ENOMEM);
2501 common->free_storage_on_release = 1;
2502 } else {
2503		memset(common, 0, sizeof *common);
2504 common->free_storage_on_release = 0;
2505 }
2506 common->gadget = gadget;
2507
2508 /* Create the LUNs, open their backing files, and register the
2509 * LUN devices in sysfs. */
2510 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2511 if (!curlun) {
2512 kfree(common);
2513 return ERR_PTR(-ENOMEM);
2514 }
2515 common->luns = curlun;
2516
2517 init_rwsem(&common->filesem);
2518
2519 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
2520 curlun->cdrom = !!lcfg->cdrom;
2521 curlun->ro = lcfg->cdrom || lcfg->ro;
2522 curlun->removable = lcfg->removable;
2523 curlun->dev.release = fsg_lun_release;
2524 curlun->dev.parent = &gadget->dev;
2525		/* curlun->dev.driver = &fsg_driver.driver; XXX */
2526 dev_set_drvdata(&curlun->dev, &common->filesem);
2527		dev_set_name(&curlun->dev, "%s-lun%d",
2528 dev_name(&gadget->dev), i);
2529
2530 rc = device_register(&curlun->dev);
2531 if (rc) {
2532 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2533 common->nluns = i;
2534 goto error_release;
2535 }
2536
2537 rc = device_create_file(&curlun->dev, &dev_attr_ro);
2538 if (rc)
2539 goto error_luns;
2540 rc = device_create_file(&curlun->dev, &dev_attr_file);
2541 if (rc)
2542 goto error_luns;
2543
2544 if (lcfg->filename) {
2545 rc = fsg_lun_open(curlun, lcfg->filename);
2546 if (rc)
2547 goto error_luns;
2548		} else if (!curlun->removable) {
2549 ERROR(common, "no file given for LUN%d\n", i);
2550 rc = -EINVAL;
2551 goto error_luns;
2552 }
2553 }
2554 common->nluns = nluns;
2555
2556
2557 /* Data buffers cyclic list */
2558 /* Buffers in buffhds are static -- no need for additional
2559 * allocation. */
2560 bh = common->buffhds;
2561 i = FSG_NUM_BUFFERS - 1;
2562 do {
2563 bh->next = bh + 1;
2564 } while (++bh, --i);
2565 bh->next = common->buffhds;
2566
2567
2568 /* Prepare inquiryString */
2569 if (cfg->release != 0xffff) {
2570 i = cfg->release;
2571 } else {
2572		/* The sa1100 controller is not supported */
2573 i = gadget_is_sa1100(gadget)
2574 ? -1
2575 : usb_gadget_controller_number(gadget);
2576 if (i >= 0) {
2577 i = 0x0300 + i;
2578 } else {
2579 WARNING(common, "controller '%s' not recognized\n",
2580 gadget->name);
2581			i = 0x0399;
2582 }
2583 }
2584#define OR(x, y) ((x) ? (x) : (y))
2585 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2586 "%-8s%-16s%04x",
2587 OR(cfg->vendor_name, "Linux "),
2588 /* Assume product name dependent on the first LUN */
2589		 OR(cfg->product_name, common->luns->cdrom
2590				? "File-CD Gadget "
2591				: "File-Stor Gadget"),
2592 i);
2593#undef OR
2594
2595
2596 /* Some peripheral controllers are known not to be able to
2597 * halt bulk endpoints correctly. If one of them is present,
2598 * disable stalls.
2599 */
2600 common->can_stall = cfg->can_stall &&
2601		!(gadget_is_sh(gadget) || gadget_is_at91(gadget));
2602
2603
2604 kref_init(&common->ref);
2605
2606 /* Information */
2607 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2608 INFO(common, "Number of LUNs=%d\n", common->nluns);
2609
2610 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2611 for (i = 0, nluns = common->nluns, curlun = common->luns;
2612 i < nluns;
2613 ++curlun, ++i) {
2614 char *p = "(no medium)";
2615 if (fsg_lun_is_open(curlun)) {
2616 p = "(error)";
2617 if (pathbuf) {
2618 p = d_path(&curlun->filp->f_path,
2619 pathbuf, PATH_MAX);
2620 if (IS_ERR(p))
2621 p = "(error)";
2622 }
2623 }
2624 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2625 curlun->removable ? "removable " : "",
2626 curlun->ro ? "read only " : "",
2627 curlun->cdrom ? "CD-ROM " : "",
2628 p);
2629 }
2630 kfree(pathbuf);
2631
2632 return common;
2633
2634
2635error_luns:
2636 common->nluns = i + 1;
2637error_release:
2638 /* Call fsg_common_release() directly, ref is not initialised */
2639 fsg_common_release(&common->ref);
2640 return ERR_PTR(rc);
2641}
2642
2643
2644static void fsg_common_release(struct kref *ref)
2645{
2646 struct fsg_common *common =
2647 container_of(ref, struct fsg_common, ref);
2648 unsigned i = common->nluns;
2649 struct fsg_lun *lun = common->luns;
2650
2651	/* Beware of the tempting for -> do-while optimization: during error
2652	 * recovery nluns may be zero. */
2653
2654 for (; i; --i, ++lun) {
2655 device_remove_file(&lun->dev, &dev_attr_ro);
2656 device_remove_file(&lun->dev, &dev_attr_file);
2657 fsg_lun_close(lun);
2658 device_unregister(&lun->dev);
2659 }
2660
2661 kfree(common->luns);
2662 if (common->free_storage_on_release)
2663 kfree(common);
2664}
2665
2666
2667/*-------------------------------------------------------------------------*/
2668
2669
2670static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2671{
2672	struct fsg_dev *fsg = fsg_from_func(f);
2673
2674 DBG(fsg, "unbind\n");
2675 clear_bit(REGISTERED, &fsg->atomic_bitflags);
2676
2677 /* If the thread isn't already dead, tell it to exit now */
2678 if (fsg->state != FSG_STATE_TERMINATED) {
2679 raise_exception(fsg, FSG_STATE_EXIT);
2680 wait_for_completion(&fsg->thread_notifier);
2681
2682 /* The cleanup routine waits for this completion also */
2683 complete(&fsg->thread_notifier);
2684 }
2685
2686 fsg_common_put(fsg->common);
2687 kfree(fsg);
2688}
2689
2690
2691static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2692{
2693 struct fsg_dev *fsg = fsg_from_func(f);
2694 struct usb_gadget *gadget = c->cdev->gadget;
2695 int rc;
2696 int i;
2697	struct usb_ep		*ep;
2698
2699 fsg->gadget = gadget;
2700	fsg->ep0 = gadget->ep0;
2701	fsg->ep0req = c->cdev->req;
2702
2703 /* New interface */
2704 i = usb_interface_id(c, f);
2705 if (i < 0)
2706 return i;
2707 fsg_intf_desc.bInterfaceNumber = i;
2708 fsg->interface_number = i;
2709
2710	/* Find all the endpoints we will use */
2711 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2712 if (!ep)
2713 goto autoconf_fail;
2714 ep->driver_data = fsg; // claim the endpoint
2715 fsg->bulk_in = ep;
2716
2717 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2718 if (!ep)
2719 goto autoconf_fail;
2720 ep->driver_data = fsg; // claim the endpoint
2721 fsg->bulk_out = ep;
2722
2723	if (gadget_is_dualspeed(gadget)) {
2724 /* Assume endpoint addresses are the same for both speeds */
2725 fsg_hs_bulk_in_desc.bEndpointAddress =
2726 fsg_fs_bulk_in_desc.bEndpointAddress;
2727 fsg_hs_bulk_out_desc.bEndpointAddress =
2728 fsg_fs_bulk_out_desc.bEndpointAddress;
2729		f->hs_descriptors = fsg_hs_function;
2730 }
2731
2732
2733 /* maybe allocate device-global string IDs, and patch descriptors */
2734 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2735 i = usb_string_id(c->cdev);
2736 if (i < 0)
2737 return i;
2738 fsg_strings[FSG_STRING_INTERFACE].id = i;
2739 fsg_intf_desc.iInterface = i;
2740 }
2741
2742
2743 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
2744 "file-storage-gadget");
2745 if (IS_ERR(fsg->thread_task)) {
2746 rc = PTR_ERR(fsg->thread_task);
2747 goto out;
2748 }
2749
2750 DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
2751
2752 set_bit(REGISTERED, &fsg->atomic_bitflags);
2753
2754 /* Tell the thread to start working */
2755 wake_up_process(fsg->thread_task);
2756 return 0;
2757
2758autoconf_fail:
2759 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2760 rc = -ENOTSUPP;
2761
2762out:
2763 fsg->state = FSG_STATE_TERMINATED; // The thread is dead
2764	fsg_unbind(c, f);
2765 complete(&fsg->thread_notifier);
2766 return rc;
2767}
2768
2769
2770/****************************** ADD FUNCTION ******************************/
2771
2772static struct usb_gadget_strings *fsg_strings_array[] = {
2773 &fsg_stringtab,
2774 NULL,
2775};
2776
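/* Allocate an fsg_dev, wire up its usb_function callbacks and add it to
 * configuration c.  A reference on the shared fsg_common is taken only
 * after usb_add_function() succeeds; on failure the fsg_dev is freed. */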
2777static int fsg_add(struct usb_composite_dev *cdev,
2778 struct usb_configuration *c,
2779 struct fsg_common *common)
2780{
2781 struct fsg_dev *fsg;
2782 int rc;
2783
2784 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2785 if (unlikely(!fsg))
2786 return -ENOMEM;
2787
2788 spin_lock_init(&fsg->lock);
2789 init_completion(&fsg->thread_notifier);
2790
2791 fsg->cdev = cdev;
2792 fsg->function.name = FSG_DRIVER_DESC;
2793 fsg->function.strings = fsg_strings_array;
2794 fsg->function.descriptors = fsg_fs_function;
2795 fsg->function.bind = fsg_bind;
2796 fsg->function.unbind = fsg_unbind;
2797 fsg->function.setup = fsg_setup;
2798 fsg->function.set_alt = fsg_set_alt;
2799 fsg->function.disable = fsg_disable;
2800
2801 fsg->common = common;
2802	/* Our caller holds a reference to the common structure, so we
2803	 * don't have to worry about it being freed until we return
2804	 * from this function.  Instead of incrementing the counter now
2805	 * and decrementing it in error recovery, we increment it only
2806	 * when the call to usb_add_function() succeeds. */
2807	fsg->can_stall = common->can_stall;
2808
2809 rc = usb_add_function(c, &fsg->function);
2810
2811 if (likely(rc == 0))
2812 fsg_common_get(fsg->common);
2813 else
2814 kfree(fsg);
2815
2816 return rc;
2817}
2818
2819
2820
2821/************************* Module parameters *************************/
2822
2823
2824struct fsg_module_parameters {
2825 char *file[FSG_MAX_LUNS];
2826 int ro[FSG_MAX_LUNS];
2827 int removable[FSG_MAX_LUNS];
2828 int cdrom[FSG_MAX_LUNS];
2829
2830 unsigned int file_count, ro_count, removable_count, cdrom_count;
2831 unsigned int luns; /* nluns */
2832 int stall; /* can_stall */
2833};
2834
2835
2836#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
2837 module_param_array_named(prefix ## name, params.name, type, \
2838 &prefix ## params.name ## _count, \
2839 S_IRUGO); \
2840 MODULE_PARM_DESC(prefix ## name, desc)
2841
2842#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
2843 module_param_named(prefix ## name, params.name, type, \
2844 S_IRUGO); \
2845 MODULE_PARM_DESC(prefix ## name, desc)
2846
2847#define FSG_MODULE_PARAMETERS(prefix, params) \
2848 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
2849 "names of backing files or devices"); \
2850 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
2851 "true to force read-only"); \
2852 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
2853 "true to simulate removable media"); \
2854 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
2855 "true to simulate CD-ROM instead of disk"); \
2856 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
2857 "number of LUNs"); \
2858 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
2859 "false to prevent bulk stalls")
2860
2861
2862static void
2863fsg_config_from_params(struct fsg_config *cfg,
2864 const struct fsg_module_parameters *params)
2865{
2866 struct fsg_lun_config *lun;
2867 unsigned i, nluns;
2868
2869 /* Configure LUNs */
2870 nluns = cfg->nluns = !params->luns
2871 ? params->file_count ? params->file_count : 1
2872 : params->luns;
2873 for (i = 0, lun = cfg->luns;
2874 i < FSG_MAX_LUNS && i < nluns;
2875 ++i, ++lun) {
2876 lun->ro = !!params->ro[i];
2877 lun->cdrom = !!params->cdrom[i];
2878 lun->removable =
2879 params->removable_count <= i || params->removable[i];
2880 lun->filename =
2881 params->file_count > i && params->file[i][0]
2882 ? params->file[i]
2883 : 0;
2884 }
2885
2886 /* Let FSG use defaults */
2887 cfg->vendor_name = 0;
2888 cfg->product_name = 0;
2889 cfg->release = 0xffff;
2890
2891 /* Finalise */
2892 cfg->can_stall = params->stall;
2893}
2894
2895static inline struct fsg_common *
2896fsg_common_from_params(struct fsg_common *common,
2897 struct usb_composite_dev *cdev,
2898 const struct fsg_module_parameters *params)
2899 __attribute__((unused));
2900static inline struct fsg_common *
2901fsg_common_from_params(struct fsg_common *common,
2902 struct usb_composite_dev *cdev,
2903 const struct fsg_module_parameters *params)
2904{
2905 struct fsg_config cfg;
2906 fsg_config_from_params(&cfg, params);
2907 return fsg_common_init(common, cdev, &cfg);
2908}
2909
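/*
 * A minimal usage sketch (hypothetical gadget code, not part of this
 * file): a gadget such as g_mass_storage would declare one instance of
 * fsg_module_parameters, instantiate the module parameters with an
 * empty prefix, and turn them into an fsg_common at configuration-bind
 * time.  The function and variable names below are illustrative only.
 *
 *	static struct fsg_module_parameters mod_data = { .stall = 1 };
 *	FSG_MODULE_PARAMETERS(, mod_data);
 *
 *	static int msg_do_config(struct usb_configuration *c)
 *	{
 *		struct fsg_common *common;
 *		int ret;
 *
 *		common = fsg_common_from_params(NULL, c->cdev, &mod_data);
 *		if (IS_ERR(common))
 *			return PTR_ERR(common);
 *		ret = fsg_add(c->cdev, c, common);
 *		fsg_common_put(common);   (fsg_add() took its own reference
 *					   on success)
 *		return ret;
 *	}
 */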