1/*
2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
3 *
4 * Copyright (C) 2003-2008 Alan Stern
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38
39/*
40 * The File-backed Storage Gadget acts as a USB Mass Storage device,
41 * appearing to the host as a disk drive or as a CD-ROM drive. In addition
42 * to providing an example of a genuinely useful gadget driver for a USB
43 * device, it also illustrates a technique of double-buffering for increased
44 * throughput. Last but not least, it gives an easy way to probe the
45 * behavior of the Mass Storage drivers in a USB host.
46 *
47 * Backing storage is provided by a regular file or a block device, specified
48 * by the "file" module parameter. Access can be limited to read-only by
49 * setting the optional "ro" module parameter. (For CD-ROM emulation,
50 * access is always read-only.) The gadget will indicate that it has
51 * removable media if the optional "removable" module parameter is set.
52 *
53 * There is support for multiple logical units (LUNs), each of which has
54 * its own backing file. The number of LUNs can be set using the optional
55 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
56 * files are specified using comma-separated lists for "file" and "ro".
57 * The default number of LUNs is taken from the number of "file" elements;
58 * it is 1 if "file" is not given. If "removable" is not set then a backing
59 * file must be specified for each LUN. If it is set, then an unspecified
60 * or empty backing filename means the LUN's medium is not loaded. Ideally
61 * each LUN would be settable independently as a disk drive or a CD-ROM
62 * drive, but currently all LUNs have to be the same type. The CD-ROM
63 * emulation includes a single data track and no audio tracks; hence there
64 * need be only one backing file per LUN. Note also that the CD-ROM block
65 * length is set to 512 rather than the more common value 2048.
66 *
67 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
68 * needed (an interrupt-out endpoint is also needed for CBI). The memory
69 * requirement amounts to two 16K buffers, size configurable by a parameter.
70 * Support is included for both full-speed and high-speed operation.
71 *
72 * Note that the driver is slightly non-portable in that it assumes a
73 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
74 * interrupt-in endpoints. With most device controllers this isn't an
75 * issue, but there may be some with hardware restrictions that prevent
76 * a buffer from being used by more than one endpoint.
77 *
 78 * Module options (an example invocation is sketched just after this comment):
79 *
80 * file=filename[,filename...]
81 * Required if "removable" is not set, names of
82 * the files or block devices used for
83 * backing storage
84 * ro=b[,b...] Default false, booleans for read-only access
85 * removable Default false, boolean for removable media
86 * luns=N Default N = number of filenames, number of
87 * LUNs to support
88 * stall Default determined according to the type of
89 * USB device controller (usually true),
90 * boolean to permit the driver to halt
91 * bulk endpoints
92 * cdrom Default false, boolean for whether to emulate
93 * a CD-ROM drive
94 *
95 * The pathnames of the backing files and the ro settings are available in
96 * the attribute files "file" and "ro" in the lun<n> subdirectory of the
97 * gadget's sysfs directory. If the "removable" option is set, writing to
98 * these files will simulate ejecting/loading the medium (writing an empty
99 * line means eject) and adjusting a write-enable tab. Changes to the ro
100 * setting are not allowed when the medium is loaded or if CD-ROM emulation
101 * is being used.
102 *
103 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
104 * The driver's SCSI command interface was based on the "Information
105 * technology - Small Computer System Interface - 2" document from
106 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
107 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
108 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
109 * "Universal Serial Bus Mass Storage Class UFI Command Specification"
110 * document, Revision 1.0, December 14, 1998, available at
111 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
112 */
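/*
 * Example invocation (a sketch only -- the module name, image paths, and
 * sysfs path below are assumptions, not taken from this file):
 *
 *   modprobe g_file_storage file=/tmp/disk0.img,/tmp/disk1.img ro=0,1 \
 *            luns=2 removable=1 stall=1 cdrom=0
 *
 * With "removable" set, writing an empty line to the lun<n> "file"
 * attribute under the gadget's sysfs directory simulates ejecting that
 * LUN's medium:
 *
 *   echo "" > /sys/devices/.../gadget/lun0/file
 */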
113
114
115/*
116 * Driver Design
117 *
118 * The FSG driver is fairly straightforward. There is a main kernel
119 * thread that handles most of the work. Interrupt routines field
120 * callbacks from the controller driver: bulk- and interrupt-request
121 * completion notifications, endpoint-0 events, and disconnect events.
122 * Completion events are passed to the main thread by wakeup calls. Many
123 * ep0 requests are handled at interrupt time, but SetInterface,
124 * SetConfiguration, and device reset requests are forwarded to the
125 * thread in the form of "exceptions" using SIGUSR1 signals (since they
126 * should interrupt any ongoing file I/O operations).
127 *
128 * The thread's main routine implements the standard command/data/status
129 * parts of a SCSI interaction. It and its subroutines are full of tests
130 * for pending signals/exceptions -- all this polling is necessary since
131 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
132 * indication that the driver really wants to be running in userspace.)
133 * An important point is that so long as the thread is alive it keeps an
134 * open reference to the backing file. This will prevent unmounting
135 * the backing file's underlying filesystem and could cause problems
136 * during system shutdown, for example. To prevent such problems, the
137 * thread catches INT, TERM, and KILL signals and converts them into
138 * an EXIT exception.
139 *
140 * In normal operation the main thread is started during the gadget's
141 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
142 * exit when it receives a signal, and there's no point leaving the
143 * gadget running when the thread is dead. So just before the thread
144 * exits, it deregisters the gadget driver. This makes things a little
145 * tricky: The driver is deregistered at two places, and the exiting
146 * thread can indirectly call fsg_unbind() which in turn can tell the
147 * thread to exit. The first problem is resolved through the use of the
148 * REGISTERED atomic bitflag; the driver will only be deregistered once.
149 * The second problem is resolved by having fsg_unbind() check
150 * fsg->state; it won't try to stop the thread if the state is already
151 * FSG_STATE_TERMINATED.
152 *
153 * To provide maximum throughput, the driver uses a circular pipeline of
154 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
155 * arbitrarily long; in practice the benefits don't justify having more
156 * than 2 stages (i.e., double buffering). But it helps to think of the
157 * pipeline as being a long one. Each buffer head contains a bulk-in and
158 * a bulk-out request pointer (since the buffer can be used for both
159 * output and input -- directions always are given from the host's
160 * point of view) as well as a pointer to the buffer and various state
161 * variables.
162 *
163 * Use of the pipeline follows a simple protocol. There is a variable
164 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
165 * At any time that buffer head may still be in use from an earlier
166 * request, so each buffer head has a state variable indicating whether
167 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
168 * buffer head to be EMPTY, filling the buffer either by file I/O or by
169 * USB I/O (during which the buffer head is BUSY), and marking the buffer
170 * head FULL when the I/O is complete. Then the buffer will be emptied
171 * (again possibly by USB I/O, during which it is marked BUSY) and
 172 * finally marked EMPTY again (possibly by a completion routine); a condensed sketch of this cycle follows this comment block.
173 *
174 * A module parameter tells the driver to avoid stalling the bulk
175 * endpoints wherever the transport specification allows. This is
176 * necessary for some UDCs like the SuperH, which cannot reliably clear a
177 * halt on a bulk endpoint. However, under certain circumstances the
178 * Bulk-only specification requires a stall. In such cases the driver
179 * will halt the endpoint and set a flag indicating that it should clear
180 * the halt in software during the next device reset. Hopefully this
181 * will permit everything to work correctly. Furthermore, although the
182 * specification allows the bulk-out endpoint to halt when the host sends
183 * too much data, implementing this would cause an unavoidable race.
184 * The driver will always use the "no-stall" approach for OUT transfers.
185 *
186 * One subtle point concerns sending status-stage responses for ep0
187 * requests. Some of these requests, such as device reset, can involve
188 * interrupting an ongoing file I/O operation, which might take an
189 * arbitrarily long time. During that delay the host might give up on
190 * the original ep0 request and issue a new one. When that happens the
191 * driver should not notify the host about completion of the original
192 * request, as the host will no longer be waiting for it. So the driver
193 * assigns to each ep0 request a unique tag, and it keeps track of the
194 * tag value of the request associated with a long-running exception
195 * (device-reset, interface-change, or configuration-change). When the
196 * exception handler is finished, the status-stage response is submitted
197 * only if the current ep0 request tag is equal to the exception request
198 * tag. Thus only the most recently received ep0 request will get a
199 * status-stage response.
200 *
201 * Warning: This driver source file is too long. It ought to be split up
202 * into a header file plus about 3 separate .c files, to handle the details
203 * of the Gadget, USB Mass Storage, and SCSI protocols.
204 */
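/*
 * A minimal sketch of one turn of the buffer-head pipeline described
 * above (illustrative only; the real loops in do_read()/do_write() below
 * add page-boundary, error, and signal handling):
 *
 *	bh = fsg->common->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY) {	// wait for a free buffer
 *		rc = sleep_thread(fsg);
 *		if (rc)
 *			return rc;
 *	}
 *	// fill bh->buf by file I/O, or queue USB I/O (buffer is then BUSY)
 *	bh->state = BUF_STATE_FULL;		// ready to be drained
 *	fsg->common->next_buffhd_to_fill = bh->next;	// advance the ring
 */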
205
206
207/* #define VERBOSE_DEBUG */
208/* #define DUMP_MSGS */
209
210
211#include <linux/blkdev.h>
212#include <linux/completion.h>
213#include <linux/dcache.h>
214#include <linux/delay.h>
215#include <linux/device.h>
216#include <linux/fcntl.h>
217#include <linux/file.h>
218#include <linux/fs.h>
219#include <linux/kref.h>
220#include <linux/kthread.h>
221#include <linux/limits.h>
222#include <linux/rwsem.h>
223#include <linux/slab.h>
224#include <linux/spinlock.h>
225#include <linux/string.h>
226#include <linux/freezer.h>
227#include <linux/utsname.h>
228
229#include <linux/usb/ch9.h>
230#include <linux/usb/gadget.h>
231
232#include "gadget_chips.h"
233
234
235
236/*-------------------------------------------------------------------------*/
237
238#define FSG_DRIVER_DESC "Mass Storage Function"
239#define FSG_DRIVER_VERSION "20 November 2008"
 240
241static const char fsg_string_interface[] = "Mass Storage";
242
243
 244#define FSG_NO_INTR_EP 1
 245#define FSG_BUFFHD_STATIC_BUFFER 1
246#define FSG_NO_DEVICE_STRINGS 1
247#define FSG_NO_OTG 1
248#define FSG_NO_INTR_EP 1
 249
250#include "storage_common.c"
251
252
253/*-------------------------------------------------------------------------*/
254
255
256/* Encapsulate the module parameter settings */
257
258static struct {
259 char *file[FSG_MAX_LUNS];
260 int ro[FSG_MAX_LUNS];
261 unsigned int num_filenames;
262 unsigned int num_ros;
263 unsigned int nluns;
264
265 int removable;
266 int can_stall;
267 int cdrom;
268
 269	unsigned short	release;
 270} mod_data = {					// Default values
271 .removable = 0,
272 .can_stall = 1,
273 .cdrom = 0,
 274	.release	= 0xffff,
275 };
276
277
278module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
279 S_IRUGO);
280MODULE_PARM_DESC(file, "names of backing files or devices");
281
282module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
283MODULE_PARM_DESC(ro, "true to force read-only");
284
285module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
286MODULE_PARM_DESC(luns, "number of LUNs");
287
288module_param_named(removable, mod_data.removable, bool, S_IRUGO);
289MODULE_PARM_DESC(removable, "true to simulate removable media");
290
291module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
292MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
293
294module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
295MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
296
297
298/*-------------------------------------------------------------------------*/
299
300
301/* Data shared by all the FSG instances. */
302struct fsg_common {
303 struct usb_gadget *gadget;
304
305 /* filesem protects: backing files in use */
306 struct rw_semaphore filesem;
307
308 struct fsg_buffhd *next_buffhd_to_fill;
309 struct fsg_buffhd *next_buffhd_to_drain;
310 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
311
312 int cmnd_size;
313 u8 cmnd[MAX_COMMAND_SIZE];
314
315 unsigned int nluns;
316 unsigned int lun;
317 struct fsg_lun *luns;
318 struct fsg_lun *curlun;
319
320 unsigned int free_storage_on_release:1;
321
322 struct kref ref;
323};
324
325
 326struct fsg_dev {
327 struct usb_function function;
 328	struct usb_composite_dev *cdev;
329 struct usb_gadget *gadget; /* Copy of cdev->gadget */
330 struct fsg_common *common;
331
332 u16 interface_number;
333
 334	/* lock protects: state, all the req_busy's */
 335	spinlock_t		lock;
 336
337 struct usb_ep *ep0; /* Copy of gadget->ep0 */
338 struct usb_request *ep0req; /* Copy of cdev->req */
339 unsigned int ep0_req_tag;
340 const char *ep0req_name;
341
342 unsigned int bulk_out_maxpacket;
343 enum fsg_state state; // For exception handling
344 unsigned int exception_req_tag;
345
346 u8 config, new_config;
347
348 unsigned int running : 1;
349 unsigned int bulk_in_enabled : 1;
350 unsigned int bulk_out_enabled : 1;
351 unsigned int phase_error : 1;
352 unsigned int short_packet_received : 1;
353 unsigned int bad_lun_okay : 1;
354
355 unsigned long atomic_bitflags;
356#define REGISTERED 0
357#define IGNORE_BULK_OUT 1
358
359 struct usb_ep *bulk_in;
360 struct usb_ep *bulk_out;
 361
362 int thread_wakeup_needed;
363 struct completion thread_notifier;
364 struct task_struct *thread_task;
365
366 enum data_direction data_dir;
367 u32 data_size;
368 u32 data_size_from_cmnd;
369 u32 tag;
370 u32 residue;
371 u32 usb_amount_left;
372};
373
374
375static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
376{
377 return container_of(f, struct fsg_dev, function);
378}
379
380
381typedef void (*fsg_routine_t)(struct fsg_dev *);
382
383static int exception_in_progress(struct fsg_dev *fsg)
384{
385 return (fsg->state > FSG_STATE_IDLE);
386}
387
388/* Make bulk-out requests be divisible by the maxpacket size */
389static void set_bulk_out_req_length(struct fsg_dev *fsg,
390 struct fsg_buffhd *bh, unsigned int length)
391{
392 unsigned int rem;
393
394 bh->bulk_out_intended_length = length;
395 rem = length % fsg->bulk_out_maxpacket;
396 if (rem > 0)
397 length += fsg->bulk_out_maxpacket - rem;
398 bh->outreq->length = length;
399}
400
401/*-------------------------------------------------------------------------*/
402
403static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
404{
405 const char *name;
406
407 if (ep == fsg->bulk_in)
408 name = "bulk-in";
409 else if (ep == fsg->bulk_out)
410 name = "bulk-out";
411 else
412 name = ep->name;
413 DBG(fsg, "%s set halt\n", name);
414 return usb_ep_set_halt(ep);
415}
416
417
418/*-------------------------------------------------------------------------*/
419
420/* These routines may be called in process context or in_irq */
421
422/* Caller must hold fsg->lock */
423static void wakeup_thread(struct fsg_dev *fsg)
424{
425 /* Tell the main thread that something has happened */
426 fsg->thread_wakeup_needed = 1;
427 if (fsg->thread_task)
428 wake_up_process(fsg->thread_task);
429}
430
431
432static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
433{
434 unsigned long flags;
435
436 /* Do nothing if a higher-priority exception is already in progress.
437 * If a lower-or-equal priority exception is in progress, preempt it
438 * and notify the main thread by sending it a signal. */
439 spin_lock_irqsave(&fsg->lock, flags);
440 if (fsg->state <= new_state) {
441 fsg->exception_req_tag = fsg->ep0_req_tag;
442 fsg->state = new_state;
443 if (fsg->thread_task)
444 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
445 fsg->thread_task);
446 }
447 spin_unlock_irqrestore(&fsg->lock, flags);
448}
449
450
451/*-------------------------------------------------------------------------*/
452
453static int ep0_queue(struct fsg_dev *fsg)
454{
455 int rc;
456
457 rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
 458	fsg->ep0->driver_data = fsg;
459 if (rc != 0 && rc != -ESHUTDOWN) {
460
461 /* We can't do much more than wait for a reset */
462 WARNING(fsg, "error in submission: %s --> %d\n",
463 fsg->ep0->name, rc);
464 }
465 return rc;
466}
467
468/*-------------------------------------------------------------------------*/
469
470/* Bulk and interrupt endpoint completion handlers.
471 * These always run in_irq. */
472
473static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
474{
475 struct fsg_dev *fsg = ep->driver_data;
476 struct fsg_buffhd *bh = req->context;
477
478 if (req->status || req->actual != req->length)
479 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
480 req->status, req->actual, req->length);
481 if (req->status == -ECONNRESET) // Request was cancelled
482 usb_ep_fifo_flush(ep);
483
484 /* Hold the lock while we update the request and buffer states */
485 smp_wmb();
486 spin_lock(&fsg->lock);
487 bh->inreq_busy = 0;
488 bh->state = BUF_STATE_EMPTY;
489 wakeup_thread(fsg);
490 spin_unlock(&fsg->lock);
491}
492
493static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
494{
495 struct fsg_dev *fsg = ep->driver_data;
496 struct fsg_buffhd *bh = req->context;
497
498 dump_msg(fsg, "bulk-out", req->buf, req->actual);
499 if (req->status || req->actual != bh->bulk_out_intended_length)
500 DBG(fsg, "%s --> %d, %u/%u\n", __func__,
501 req->status, req->actual,
502 bh->bulk_out_intended_length);
503 if (req->status == -ECONNRESET) // Request was cancelled
504 usb_ep_fifo_flush(ep);
505
506 /* Hold the lock while we update the request and buffer states */
507 smp_wmb();
508 spin_lock(&fsg->lock);
509 bh->outreq_busy = 0;
510 bh->state = BUF_STATE_FULL;
511 wakeup_thread(fsg);
512 spin_unlock(&fsg->lock);
513}
514
515
516/*-------------------------------------------------------------------------*/
517
518/* Ep0 class-specific handlers. These always run in_irq. */
519
 520static int fsg_setup(struct usb_function *f,
521 const struct usb_ctrlrequest *ctrl)
522{
 523	struct fsg_dev		*fsg = fsg_from_func(f);
 524	struct usb_request	*req = fsg->ep0req;
 525	u16			w_index = le16_to_cpu(ctrl->wIndex);
 526	u16			w_value = le16_to_cpu(ctrl->wValue);
527 u16 w_length = le16_to_cpu(ctrl->wLength);
528
529 if (!fsg->config)
 530		return -EOPNOTSUPP;
 531
 532	switch (ctrl->bRequest) {
 533
534 case USB_BULK_RESET_REQUEST:
535 if (ctrl->bRequestType !=
536 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 537			break;
 538		if (w_index != fsg->interface_number || w_value != 0)
 539			return -EDOM;
 540
541 /* Raise an exception to stop the current operation
542 * and reinitialize our state. */
543 DBG(fsg, "bulk reset request\n");
544 raise_exception(fsg, FSG_STATE_RESET);
545 return DELAYED_STATUS;
 546
547 case USB_BULK_GET_MAX_LUN_REQUEST:
548 if (ctrl->bRequestType !=
549 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
 550			break;
 551		if (w_index != fsg->interface_number || w_value != 0)
552 return -EDOM;
553 VDBG(fsg, "get max LUN\n");
 554		*(u8 *) req->buf = fsg->common->nluns - 1;
555 return 1;
556 }
557
558 VDBG(fsg,
559 "unknown class-specific control req "
560 "%02x.%02x v%04x i%04x l%u\n",
561 ctrl->bRequestType, ctrl->bRequest,
562 le16_to_cpu(ctrl->wValue), w_index, w_length);
563 return -EOPNOTSUPP;
564}
565
566
567/*-------------------------------------------------------------------------*/
568
569/* All the following routines run in process context */
570
571
572/* Use this for bulk or interrupt transfers, not ep0 */
573static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
574 struct usb_request *req, int *pbusy,
575 enum fsg_buffer_state *state)
576{
577 int rc;
578
579 if (ep == fsg->bulk_in)
580 dump_msg(fsg, "bulk-in", req->buf, req->length);
581
582 spin_lock_irq(&fsg->lock);
583 *pbusy = 1;
584 *state = BUF_STATE_BUSY;
585 spin_unlock_irq(&fsg->lock);
586 rc = usb_ep_queue(ep, req, GFP_KERNEL);
587 if (rc != 0) {
588 *pbusy = 0;
589 *state = BUF_STATE_EMPTY;
590
591 /* We can't do much more than wait for a reset */
592
593 /* Note: currently the net2280 driver fails zero-length
594 * submissions if DMA is enabled. */
595 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
596 req->length == 0))
597 WARNING(fsg, "error in submission: %s --> %d\n",
598 ep->name, rc);
599 }
600}
601
602
603static int sleep_thread(struct fsg_dev *fsg)
604{
605 int rc = 0;
606
607 /* Wait until a signal arrives or we are woken up */
608 for (;;) {
609 try_to_freeze();
610 set_current_state(TASK_INTERRUPTIBLE);
611 if (signal_pending(current)) {
612 rc = -EINTR;
613 break;
614 }
615 if (fsg->thread_wakeup_needed)
616 break;
617 schedule();
618 }
619 __set_current_state(TASK_RUNNING);
620 fsg->thread_wakeup_needed = 0;
621 return rc;
622}
623
624
625/*-------------------------------------------------------------------------*/
626
627static int do_read(struct fsg_dev *fsg)
628{
 629	struct fsg_lun		*curlun = fsg->common->curlun;
630 u32 lba;
631 struct fsg_buffhd *bh;
632 int rc;
633 u32 amount_left;
634 loff_t file_offset, file_offset_tmp;
635 unsigned int amount;
636 unsigned int partial_page;
637 ssize_t nread;
638
639 /* Get the starting Logical Block Address and check that it's
640 * not too big */
641 if (fsg->common->cmnd[0] == SC_READ_6)
642 lba = get_unaligned_be24(&fsg->common->cmnd[1]);
 643	else {
 644		lba = get_unaligned_be32(&fsg->common->cmnd[2]);
645
646 /* We allow DPO (Disable Page Out = don't save data in the
647 * cache) and FUA (Force Unit Access = don't read from the
648 * cache), but we don't implement them. */
 649		if ((fsg->common->cmnd[1] & ~0x18) != 0) {
650 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
651 return -EINVAL;
652 }
653 }
654 if (lba >= curlun->num_sectors) {
655 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
656 return -EINVAL;
657 }
658 file_offset = ((loff_t) lba) << 9;
659
660 /* Carry out the file reads */
661 amount_left = fsg->data_size_from_cmnd;
662 if (unlikely(amount_left == 0))
663 return -EIO; // No default reply
664
665 for (;;) {
666
667 /* Figure out how much we need to read:
668 * Try to read the remaining amount.
669 * But don't read more than the buffer size.
670 * And don't try to read past the end of the file.
671 * Finally, if we're not at a page boundary, don't read past
672 * the next page.
673 * If this means reading 0 then we were asked to read past
674 * the end of file. */
 675		amount = min(amount_left, FSG_BUFLEN);
676 amount = min((loff_t) amount,
677 curlun->file_length - file_offset);
678 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
679 if (partial_page > 0)
680 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
681 partial_page);
682
683 /* Wait for the next buffer to become available */
 684		bh = fsg->common->next_buffhd_to_fill;
685 while (bh->state != BUF_STATE_EMPTY) {
686 rc = sleep_thread(fsg);
687 if (rc)
688 return rc;
689 }
690
691 /* If we were asked to read past the end of file,
692 * end with an empty buffer. */
693 if (amount == 0) {
694 curlun->sense_data =
695 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
696 curlun->sense_data_info = file_offset >> 9;
697 curlun->info_valid = 1;
698 bh->inreq->length = 0;
699 bh->state = BUF_STATE_FULL;
700 break;
701 }
702
703 /* Perform the read */
704 file_offset_tmp = file_offset;
705 nread = vfs_read(curlun->filp,
706 (char __user *) bh->buf,
707 amount, &file_offset_tmp);
708 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
709 (unsigned long long) file_offset,
710 (int) nread);
711 if (signal_pending(current))
712 return -EINTR;
713
714 if (nread < 0) {
715 LDBG(curlun, "error in file read: %d\n",
716 (int) nread);
717 nread = 0;
718 } else if (nread < amount) {
719 LDBG(curlun, "partial file read: %d/%u\n",
720 (int) nread, amount);
721 nread -= (nread & 511); // Round down to a block
722 }
723 file_offset += nread;
724 amount_left -= nread;
725 fsg->residue -= nread;
726 bh->inreq->length = nread;
727 bh->state = BUF_STATE_FULL;
728
729 /* If an error occurred, report it and its position */
730 if (nread < amount) {
731 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
732 curlun->sense_data_info = file_offset >> 9;
733 curlun->info_valid = 1;
734 break;
735 }
736
737 if (amount_left == 0)
738 break; // No more left to read
739
740 /* Send this buffer and go read some more */
741 bh->inreq->zero = 0;
742 start_transfer(fsg, fsg->bulk_in, bh->inreq,
743 &bh->inreq_busy, &bh->state);
 744		fsg->common->next_buffhd_to_fill = bh->next;
745 }
746
747 return -EIO; // No default reply
748}
749
750
751/*-------------------------------------------------------------------------*/
752
753static int do_write(struct fsg_dev *fsg)
754{
 755	struct fsg_lun		*curlun = fsg->common->curlun;
756 u32 lba;
757 struct fsg_buffhd *bh;
758 int get_some_more;
759 u32 amount_left_to_req, amount_left_to_write;
760 loff_t usb_offset, file_offset, file_offset_tmp;
761 unsigned int amount;
762 unsigned int partial_page;
763 ssize_t nwritten;
764 int rc;
765
766 if (curlun->ro) {
767 curlun->sense_data = SS_WRITE_PROTECTED;
768 return -EINVAL;
769 }
770 spin_lock(&curlun->filp->f_lock);
771 curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
772 spin_unlock(&curlun->filp->f_lock);
773
774 /* Get the starting Logical Block Address and check that it's
775 * not too big */
776 if (fsg->common->cmnd[0] == SC_WRITE_6)
777 lba = get_unaligned_be24(&fsg->common->cmnd[1]);
 778	else {
 779		lba = get_unaligned_be32(&fsg->common->cmnd[2]);
780
781 /* We allow DPO (Disable Page Out = don't save data in the
782 * cache) and FUA (Force Unit Access = write directly to the
783 * medium). We don't implement DPO; we implement FUA by
784 * performing synchronous output. */
 785		if ((fsg->common->cmnd[1] & ~0x18) != 0) {
786 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
787 return -EINVAL;
788 }
 789		if (fsg->common->cmnd[1] & 0x08) {	// FUA
790 spin_lock(&curlun->filp->f_lock);
791 curlun->filp->f_flags |= O_SYNC;
792 spin_unlock(&curlun->filp->f_lock);
793 }
794 }
795 if (lba >= curlun->num_sectors) {
796 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
797 return -EINVAL;
798 }
799
800 /* Carry out the file writes */
801 get_some_more = 1;
802 file_offset = usb_offset = ((loff_t) lba) << 9;
803 amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
804
805 while (amount_left_to_write > 0) {
806
807 /* Queue a request for more data from the host */
 808		bh = fsg->common->next_buffhd_to_fill;
809 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
810
811 /* Figure out how much we want to get:
812 * Try to get the remaining amount.
813 * But don't get more than the buffer size.
814 * And don't try to go past the end of the file.
815 * If we're not at a page boundary,
816 * don't go past the next page.
817 * If this means getting 0, then we were asked
818 * to write past the end of file.
819 * Finally, round down to a block boundary. */
 820			amount = min(amount_left_to_req, FSG_BUFLEN);
821 amount = min((loff_t) amount, curlun->file_length -
822 usb_offset);
823 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
824 if (partial_page > 0)
825 amount = min(amount,
826 (unsigned int) PAGE_CACHE_SIZE - partial_page);
827
828 if (amount == 0) {
829 get_some_more = 0;
830 curlun->sense_data =
831 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
832 curlun->sense_data_info = usb_offset >> 9;
833 curlun->info_valid = 1;
834 continue;
835 }
836 amount -= (amount & 511);
837 if (amount == 0) {
838
 839				/* Why were we asked to transfer a
840 * partial block? */
841 get_some_more = 0;
842 continue;
843 }
844
845 /* Get the next buffer */
846 usb_offset += amount;
847 fsg->usb_amount_left -= amount;
848 amount_left_to_req -= amount;
849 if (amount_left_to_req == 0)
850 get_some_more = 0;
851
852 /* amount is always divisible by 512, hence by
853 * the bulk-out maxpacket size */
854 bh->outreq->length = bh->bulk_out_intended_length =
855 amount;
856 bh->outreq->short_not_ok = 1;
857 start_transfer(fsg, fsg->bulk_out, bh->outreq,
858 &bh->outreq_busy, &bh->state);
 859			fsg->common->next_buffhd_to_fill = bh->next;
860 continue;
861 }
862
863 /* Write the received data to the backing file */
 864		bh = fsg->common->next_buffhd_to_drain;
865 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
866 break; // We stopped early
867 if (bh->state == BUF_STATE_FULL) {
868 smp_rmb();
 869			fsg->common->next_buffhd_to_drain = bh->next;
870 bh->state = BUF_STATE_EMPTY;
871
872 /* Did something go wrong with the transfer? */
873 if (bh->outreq->status != 0) {
874 curlun->sense_data = SS_COMMUNICATION_FAILURE;
875 curlun->sense_data_info = file_offset >> 9;
876 curlun->info_valid = 1;
877 break;
878 }
879
880 amount = bh->outreq->actual;
881 if (curlun->file_length - file_offset < amount) {
882 LERROR(curlun,
883 "write %u @ %llu beyond end %llu\n",
884 amount, (unsigned long long) file_offset,
885 (unsigned long long) curlun->file_length);
886 amount = curlun->file_length - file_offset;
887 }
888
889 /* Perform the write */
890 file_offset_tmp = file_offset;
891 nwritten = vfs_write(curlun->filp,
892 (char __user *) bh->buf,
893 amount, &file_offset_tmp);
894 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
895 (unsigned long long) file_offset,
896 (int) nwritten);
897 if (signal_pending(current))
898 return -EINTR; // Interrupted!
899
900 if (nwritten < 0) {
901 LDBG(curlun, "error in file write: %d\n",
902 (int) nwritten);
903 nwritten = 0;
904 } else if (nwritten < amount) {
905 LDBG(curlun, "partial file write: %d/%u\n",
906 (int) nwritten, amount);
907 nwritten -= (nwritten & 511);
908 // Round down to a block
909 }
910 file_offset += nwritten;
911 amount_left_to_write -= nwritten;
912 fsg->residue -= nwritten;
913
914 /* If an error occurred, report it and its position */
915 if (nwritten < amount) {
916 curlun->sense_data = SS_WRITE_ERROR;
917 curlun->sense_data_info = file_offset >> 9;
918 curlun->info_valid = 1;
919 break;
920 }
921
922 /* Did the host decide to stop early? */
923 if (bh->outreq->actual != bh->outreq->length) {
924 fsg->short_packet_received = 1;
925 break;
926 }
927 continue;
928 }
929
930 /* Wait for something to happen */
931 rc = sleep_thread(fsg);
932 if (rc)
933 return rc;
934 }
935
936 return -EIO; // No default reply
937}
938
939
940/*-------------------------------------------------------------------------*/
941
942static int do_synchronize_cache(struct fsg_dev *fsg)
943{
 944	struct fsg_lun	*curlun = fsg->common->curlun;
945 int rc;
946
947 /* We ignore the requested LBA and write out all file's
948 * dirty data buffers. */
949 rc = fsg_lun_fsync_sub(curlun);
950 if (rc)
951 curlun->sense_data = SS_WRITE_ERROR;
952 return 0;
953}
954
955
956/*-------------------------------------------------------------------------*/
957
958static void invalidate_sub(struct fsg_lun *curlun)
959{
960 struct file *filp = curlun->filp;
961 struct inode *inode = filp->f_path.dentry->d_inode;
962 unsigned long rc;
963
964 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
965 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
966}
967
968static int do_verify(struct fsg_dev *fsg)
969{
 970	struct fsg_lun		*curlun = fsg->common->curlun;
971 u32 lba;
972 u32 verification_length;
 973	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
974 loff_t file_offset, file_offset_tmp;
975 u32 amount_left;
976 unsigned int amount;
977 ssize_t nread;
978
979 /* Get the starting Logical Block Address and check that it's
980 * not too big */
 981	lba = get_unaligned_be32(&fsg->common->cmnd[2]);
982 if (lba >= curlun->num_sectors) {
983 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
984 return -EINVAL;
985 }
986
987 /* We allow DPO (Disable Page Out = don't save data in the
988 * cache) but we don't implement it. */
 989	if ((fsg->common->cmnd[1] & ~0x10) != 0) {
990 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
991 return -EINVAL;
992 }
993
 994	verification_length = get_unaligned_be16(&fsg->common->cmnd[7]);
995 if (unlikely(verification_length == 0))
996 return -EIO; // No default reply
997
998 /* Prepare to carry out the file verify */
999 amount_left = verification_length << 9;
1000 file_offset = ((loff_t) lba) << 9;
1001
1002 /* Write out all the dirty buffers before invalidating them */
1003 fsg_lun_fsync_sub(curlun);
1004 if (signal_pending(current))
1005 return -EINTR;
1006
1007 invalidate_sub(curlun);
1008 if (signal_pending(current))
1009 return -EINTR;
1010
1011 /* Just try to read the requested blocks */
1012 while (amount_left > 0) {
1013
1014 /* Figure out how much we need to read:
1015 * Try to read the remaining amount, but not more than
1016 * the buffer size.
1017 * And don't try to read past the end of the file.
1018 * If this means reading 0 then we were asked to read
1019 * past the end of file. */
 1020		amount = min(amount_left, FSG_BUFLEN);
1021 amount = min((loff_t) amount,
1022 curlun->file_length - file_offset);
1023 if (amount == 0) {
1024 curlun->sense_data =
1025 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1026 curlun->sense_data_info = file_offset >> 9;
1027 curlun->info_valid = 1;
1028 break;
1029 }
1030
1031 /* Perform the read */
1032 file_offset_tmp = file_offset;
1033 nread = vfs_read(curlun->filp,
1034 (char __user *) bh->buf,
1035 amount, &file_offset_tmp);
1036 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1037 (unsigned long long) file_offset,
1038 (int) nread);
1039 if (signal_pending(current))
1040 return -EINTR;
1041
1042 if (nread < 0) {
1043 LDBG(curlun, "error in file verify: %d\n",
1044 (int) nread);
1045 nread = 0;
1046 } else if (nread < amount) {
1047 LDBG(curlun, "partial file verify: %d/%u\n",
1048 (int) nread, amount);
1049 nread -= (nread & 511); // Round down to a sector
1050 }
1051 if (nread == 0) {
1052 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1053 curlun->sense_data_info = file_offset >> 9;
1054 curlun->info_valid = 1;
1055 break;
1056 }
1057 file_offset += nread;
1058 amount_left -= nread;
1059 }
1060 return 0;
1061}
1062
1063
1064/*-------------------------------------------------------------------------*/
1065
1066static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1067{
1068 u8 *buf = (u8 *) bh->buf;
1069
1070 static char vendor_id[] = "Linux ";
1071 static char product_disk_id[] = "File-Stor Gadget";
1072 static char product_cdrom_id[] = "File-CD Gadget ";
1073
 1074	if (!fsg->common->curlun) {	// Unsupported LUNs are okay
1075 fsg->bad_lun_okay = 1;
1076 memset(buf, 0, 36);
1077 buf[0] = 0x7f; // Unsupported, no device-type
1078 buf[4] = 31; // Additional length
1079 return 36;
1080 }
1081
1082 memset(buf, 0, 8);
1083 buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK);
1084 if (mod_data.removable)
1085 buf[1] = 0x80;
1086 buf[2] = 2; // ANSI SCSI level 2
1087 buf[3] = 2; // SCSI-2 INQUIRY data format
1088 buf[4] = 31; // Additional length
1089 // No special options
1090 sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
1091 (mod_data.cdrom ? product_cdrom_id :
1092 product_disk_id),
1093 mod_data.release);
1094 return 36;
1095}
1096
1097
1098static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1099{
 1100	struct fsg_lun	*curlun = fsg->common->curlun;
1101 u8 *buf = (u8 *) bh->buf;
1102 u32 sd, sdinfo;
1103 int valid;
1104
1105 /*
1106 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1107 *
1108 * If a REQUEST SENSE command is received from an initiator
1109 * with a pending unit attention condition (before the target
1110 * generates the contingent allegiance condition), then the
1111 * target shall either:
1112 * a) report any pending sense data and preserve the unit
1113 * attention condition on the logical unit, or,
1114 * b) report the unit attention condition, may discard any
1115 * pending sense data, and clear the unit attention
1116 * condition on the logical unit for that initiator.
1117 *
1118 * FSG normally uses option a); enable this code to use option b).
1119 */
1120#if 0
1121 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1122 curlun->sense_data = curlun->unit_attention_data;
1123 curlun->unit_attention_data = SS_NO_SENSE;
1124 }
1125#endif
1126
1127 if (!curlun) { // Unsupported LUNs are okay
1128 fsg->bad_lun_okay = 1;
1129 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1130 sdinfo = 0;
1131 valid = 0;
1132 } else {
1133 sd = curlun->sense_data;
1134 sdinfo = curlun->sense_data_info;
1135 valid = curlun->info_valid << 7;
1136 curlun->sense_data = SS_NO_SENSE;
1137 curlun->sense_data_info = 0;
1138 curlun->info_valid = 0;
1139 }
1140
1141 memset(buf, 0, 18);
1142 buf[0] = valid | 0x70; // Valid, current error
1143 buf[2] = SK(sd);
1144 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1145 buf[7] = 18 - 8; // Additional sense length
1146 buf[12] = ASC(sd);
1147 buf[13] = ASCQ(sd);
1148 return 18;
1149}
1150
1151
1152static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1153{
1154 struct fsg_lun *curlun = fsg->common->curlun;
1155 u32 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
1156 int pmi = fsg->common->cmnd[8];
1157 u8 *buf = (u8 *) bh->buf;
1158
1159 /* Check the PMI and LBA fields */
1160 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1161 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1162 return -EINVAL;
1163 }
1164
1165 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1166 /* Max logical block */
1167 put_unaligned_be32(512, &buf[4]); /* Block length */
1168 return 8;
1169}
1170
1171
1172static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1173{
1174 struct fsg_lun *curlun = fsg->common->curlun;
1175 int msf = fsg->common->cmnd[1] & 0x02;
1176 u32 lba = get_unaligned_be32(&fsg->common->cmnd[2]);
1177 u8 *buf = (u8 *) bh->buf;
1178
 1179	if ((fsg->common->cmnd[1] & ~0x02) != 0) {	/* Mask away MSF */
1180 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1181 return -EINVAL;
1182 }
1183 if (lba >= curlun->num_sectors) {
1184 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1185 return -EINVAL;
1186 }
1187
1188 memset(buf, 0, 8);
1189 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1190 store_cdrom_address(&buf[4], msf, lba);
1191 return 8;
1192}
1193
1194
1195static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1196{
1197 struct fsg_lun *curlun = fsg->common->curlun;
1198 int msf = fsg->common->cmnd[1] & 0x02;
1199 int start_track = fsg->common->cmnd[6];
1200 u8 *buf = (u8 *) bh->buf;
1201
 1202	if ((fsg->common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
1203 start_track > 1) {
1204 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1205 return -EINVAL;
1206 }
1207
1208 memset(buf, 0, 20);
1209 buf[1] = (20-2); /* TOC data length */
1210 buf[2] = 1; /* First track number */
1211 buf[3] = 1; /* Last track number */
1212 buf[5] = 0x16; /* Data track, copying allowed */
1213 buf[6] = 0x01; /* Only track is number 1 */
1214 store_cdrom_address(&buf[8], msf, 0);
1215
1216 buf[13] = 0x16; /* Lead-out track is data */
1217 buf[14] = 0xAA; /* Lead-out track number */
1218 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1219 return 20;
1220}
1221
1222
1223static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1224{
1225 struct fsg_lun *curlun = fsg->common->curlun;
1226 int mscmnd = fsg->common->cmnd[0];
1227 u8 *buf = (u8 *) bh->buf;
1228 u8 *buf0 = buf;
1229 int pc, page_code;
1230 int changeable_values, all_pages;
1231 int valid_page = 0;
1232 int len, limit;
1233
 1234	if ((fsg->common->cmnd[1] & ~0x08) != 0) {	// Mask away DBD
1235 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1236 return -EINVAL;
1237 }
1238 pc = fsg->common->cmnd[2] >> 6;
1239 page_code = fsg->common->cmnd[2] & 0x3f;
1240 if (pc == 3) {
1241 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1242 return -EINVAL;
1243 }
1244 changeable_values = (pc == 1);
1245 all_pages = (page_code == 0x3f);
1246
1247 /* Write the mode parameter header. Fixed values are: default
1248 * medium type, no cache control (DPOFUA), and no block descriptors.
1249 * The only variable value is the WriteProtect bit. We will fill in
1250 * the mode data length later. */
1251 memset(buf, 0, 8);
1252 if (mscmnd == SC_MODE_SENSE_6) {
1253 buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1254 buf += 4;
1255 limit = 255;
1256 } else { // SC_MODE_SENSE_10
1257 buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1258 buf += 8;
 1259		limit = 65535;		// Should really be FSG_BUFLEN
1260 }
1261
1262 /* No block descriptors */
1263
1264 /* The mode pages, in numerical order. The only page we support
1265 * is the Caching page. */
1266 if (page_code == 0x08 || all_pages) {
1267 valid_page = 1;
1268 buf[0] = 0x08; // Page code
1269 buf[1] = 10; // Page length
1270 memset(buf+2, 0, 10); // None of the fields are changeable
1271
1272 if (!changeable_values) {
1273 buf[2] = 0x04; // Write cache enable,
1274 // Read cache not disabled
1275 // No cache retention priorities
1276 put_unaligned_be16(0xffff, &buf[4]);
1277 /* Don't disable prefetch */
1278 /* Minimum prefetch = 0 */
1279 put_unaligned_be16(0xffff, &buf[8]);
1280 /* Maximum prefetch */
1281 put_unaligned_be16(0xffff, &buf[10]);
1282 /* Maximum prefetch ceiling */
1283 }
1284 buf += 12;
1285 }
1286
1287 /* Check that a valid page was requested and the mode data length
1288 * isn't too long. */
1289 len = buf - buf0;
1290 if (!valid_page || len > limit) {
1291 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1292 return -EINVAL;
1293 }
1294
1295 /* Store the mode data length */
1296 if (mscmnd == SC_MODE_SENSE_6)
1297 buf0[0] = len - 1;
1298 else
1299 put_unaligned_be16(len - 2, buf0);
1300 return len;
1301}
1302
1303
1304static int do_start_stop(struct fsg_dev *fsg)
1305{
 1306	if (!mod_data.removable) {
 1307		fsg->common->curlun->sense_data = SS_INVALID_COMMAND;
1308 return -EINVAL;
1309 }
1310 return 0;
1311}
1312
1313
1314static int do_prevent_allow(struct fsg_dev *fsg)
1315{
 1316	struct fsg_lun	*curlun = fsg->common->curlun;
1317 int prevent;
1318
1319 if (!mod_data.removable) {
1320 curlun->sense_data = SS_INVALID_COMMAND;
1321 return -EINVAL;
1322 }
1323
1324 prevent = fsg->common->cmnd[4] & 0x01;
1325 if ((fsg->common->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
1326 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1327 return -EINVAL;
1328 }
1329
1330 if (curlun->prevent_medium_removal && !prevent)
1331 fsg_lun_fsync_sub(curlun);
1332 curlun->prevent_medium_removal = prevent;
1333 return 0;
1334}
1335
1336
1337static int do_read_format_capacities(struct fsg_dev *fsg,
1338 struct fsg_buffhd *bh)
1339{
 1340	struct fsg_lun	*curlun = fsg->common->curlun;
1341 u8 *buf = (u8 *) bh->buf;
1342
1343 buf[0] = buf[1] = buf[2] = 0;
1344 buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
1345 buf += 4;
1346
1347 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1348 /* Number of blocks */
1349 put_unaligned_be32(512, &buf[4]); /* Block length */
1350 buf[4] = 0x02; /* Current capacity */
1351 return 12;
1352}
1353
1354
1355static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1356{
 1357	struct fsg_lun	*curlun = fsg->common->curlun;
1358
1359 /* We don't support MODE SELECT */
1360 curlun->sense_data = SS_INVALID_COMMAND;
1361 return -EINVAL;
1362}
1363
1364
1365/*-------------------------------------------------------------------------*/
1366
1367static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1368{
1369 int rc;
1370
1371 rc = fsg_set_halt(fsg, fsg->bulk_in);
1372 if (rc == -EAGAIN)
1373 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1374 while (rc != 0) {
1375 if (rc != -EAGAIN) {
1376 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1377 rc = 0;
1378 break;
1379 }
1380
1381 /* Wait for a short time and then try again */
1382 if (msleep_interruptible(100) != 0)
1383 return -EINTR;
1384 rc = usb_ep_set_halt(fsg->bulk_in);
1385 }
1386 return rc;
1387}
1388
1389static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1390{
1391 int rc;
1392
1393 DBG(fsg, "bulk-in set wedge\n");
1394 rc = usb_ep_set_wedge(fsg->bulk_in);
1395 if (rc == -EAGAIN)
1396 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1397 while (rc != 0) {
1398 if (rc != -EAGAIN) {
1399 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1400 rc = 0;
1401 break;
1402 }
1403
1404 /* Wait for a short time and then try again */
1405 if (msleep_interruptible(100) != 0)
1406 return -EINTR;
1407 rc = usb_ep_set_wedge(fsg->bulk_in);
1408 }
1409 return rc;
1410}
1411
1412static int pad_with_zeros(struct fsg_dev *fsg)
1413{
 1414	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
1415 u32 nkeep = bh->inreq->length;
1416 u32 nsend;
1417 int rc;
1418
1419 bh->state = BUF_STATE_EMPTY; // For the first iteration
1420 fsg->usb_amount_left = nkeep + fsg->residue;
1421 while (fsg->usb_amount_left > 0) {
1422
1423 /* Wait for the next buffer to be free */
1424 while (bh->state != BUF_STATE_EMPTY) {
1425 rc = sleep_thread(fsg);
1426 if (rc)
1427 return rc;
1428 }
1429
 1430		nsend = min(fsg->usb_amount_left, FSG_BUFLEN);
1431 memset(bh->buf + nkeep, 0, nsend - nkeep);
1432 bh->inreq->length = nsend;
1433 bh->inreq->zero = 0;
1434 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1435 &bh->inreq_busy, &bh->state);
 1436		bh = fsg->common->next_buffhd_to_fill = bh->next;
1437 fsg->usb_amount_left -= nsend;
1438 nkeep = 0;
1439 }
1440 return 0;
1441}
1442
1443static int throw_away_data(struct fsg_dev *fsg)
1444{
1445 struct fsg_buffhd *bh;
1446 u32 amount;
1447 int rc;
1448
1449 for (bh = fsg->common->next_buffhd_to_drain;
1450 bh->state != BUF_STATE_EMPTY || fsg->usb_amount_left > 0;
1451 bh = fsg->common->next_buffhd_to_drain) {
1452
1453 /* Throw away the data in a filled buffer */
1454 if (bh->state == BUF_STATE_FULL) {
1455 smp_rmb();
1456 bh->state = BUF_STATE_EMPTY;
 1457			fsg->common->next_buffhd_to_drain = bh->next;
1458
1459 /* A short packet or an error ends everything */
1460 if (bh->outreq->actual != bh->outreq->length ||
1461 bh->outreq->status != 0) {
1462 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1463 return -EINTR;
1464 }
1465 continue;
1466 }
1467
1468 /* Try to submit another request if we need one */
 1469		bh = fsg->common->next_buffhd_to_fill;
 1470		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
 1471			amount = min(fsg->usb_amount_left, FSG_BUFLEN);
1472
1473 /* amount is always divisible by 512, hence by
1474 * the bulk-out maxpacket size */
1475 bh->outreq->length = bh->bulk_out_intended_length =
1476 amount;
1477 bh->outreq->short_not_ok = 1;
1478 start_transfer(fsg, fsg->bulk_out, bh->outreq,
1479 &bh->outreq_busy, &bh->state);
 1480			fsg->common->next_buffhd_to_fill = bh->next;
1481 fsg->usb_amount_left -= amount;
1482 continue;
1483 }
1484
1485 /* Otherwise wait for something to happen */
1486 rc = sleep_thread(fsg);
1487 if (rc)
1488 return rc;
1489 }
1490 return 0;
1491}
1492
1493
1494static int finish_reply(struct fsg_dev *fsg)
1495{
 1496	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
1497 int rc = 0;
1498
1499 switch (fsg->data_dir) {
1500 case DATA_DIR_NONE:
1501 break; // Nothing to send
1502
1503 /* If we don't know whether the host wants to read or write,
1504 * this must be CB or CBI with an unknown command. We mustn't
1505 * try to send or receive any data. So stall both bulk pipes
1506 * if we can and wait for a reset. */
1507 case DATA_DIR_UNKNOWN:
1508 if (mod_data.can_stall) {
1509 fsg_set_halt(fsg, fsg->bulk_out);
1510 rc = halt_bulk_in_endpoint(fsg);
1511 }
1512 break;
1513
1514 /* All but the last buffer of data must have already been sent */
1515 case DATA_DIR_TO_HOST:
1516 if (fsg->data_size == 0) {
1517 /* Nothing to send */
1518
1519 /* If there's no residue, simply send the last buffer */
 1520		} else if (fsg->residue == 0) {
1521 bh->inreq->zero = 0;
1522 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1523 &bh->inreq_busy, &bh->state);
 1524			fsg->common->next_buffhd_to_fill = bh->next;
1525
1526 /* For Bulk-only, if we're allowed to stall then send the
1527 * short packet and halt the bulk-in endpoint. If we can't
1528 * stall, pad out the remaining data with 0's. */
1529 } else if (mod_data.can_stall) {
1530 bh->inreq->zero = 1;
1531 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1532 &bh->inreq_busy, &bh->state);
 1533			fsg->common->next_buffhd_to_fill = bh->next;
1534 rc = halt_bulk_in_endpoint(fsg);
1535 } else {
1536 rc = pad_with_zeros(fsg);
1537 }
1538 break;
1539
1540 /* We have processed all we want from the data the host has sent.
1541 * There may still be outstanding bulk-out requests. */
1542 case DATA_DIR_FROM_HOST:
1543 if (fsg->residue == 0)
1544 ; // Nothing to receive
1545
1546 /* Did the host stop sending unexpectedly early? */
1547 else if (fsg->short_packet_received) {
1548 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1549 rc = -EINTR;
1550 }
1551
1552 /* We haven't processed all the incoming data. Even though
1553 * we may be allowed to stall, doing so would cause a race.
1554 * The controller may already have ACK'ed all the remaining
1555 * bulk-out packets, in which case the host wouldn't see a
1556 * STALL. Not realizing the endpoint was halted, it wouldn't
1557 * clear the halt -- leading to problems later on. */
1558#if 0
1559 else if (mod_data.can_stall) {
1560 fsg_set_halt(fsg, fsg->bulk_out);
1561 raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1562 rc = -EINTR;
1563 }
1564#endif
1565
1566 /* We can't stall. Read in the excess data and throw it
1567 * all away. */
1568 else
1569 rc = throw_away_data(fsg);
1570 break;
1571 }
1572 return rc;
1573}
1574
1575
1576static int send_status(struct fsg_dev *fsg)
1577{
 1578	struct fsg_lun		*curlun = fsg->common->curlun;
 1579	struct fsg_buffhd	*bh;
 1580	struct bulk_cs_wrap	*csw;
1581 int rc;
1582 u8 status = USB_STATUS_PASS;
1583 u32 sd, sdinfo = 0;
1584
1585 /* Wait for the next buffer to become available */
 1586	bh = fsg->common->next_buffhd_to_fill;
1587 while (bh->state != BUF_STATE_EMPTY) {
1588 rc = sleep_thread(fsg);
1589 if (rc)
1590 return rc;
1591 }
1592
1593 if (curlun) {
1594 sd = curlun->sense_data;
1595 sdinfo = curlun->sense_data_info;
1596 } else if (fsg->bad_lun_okay)
1597 sd = SS_NO_SENSE;
1598 else
1599 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1600
1601 if (fsg->phase_error) {
1602 DBG(fsg, "sending phase-error status\n");
1603 status = USB_STATUS_PHASE_ERROR;
1604 sd = SS_INVALID_COMMAND;
1605 } else if (sd != SS_NO_SENSE) {
1606 DBG(fsg, "sending command-failure status\n");
1607 status = USB_STATUS_FAIL;
1608 VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1609 " info x%x\n",
1610 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1611 }
1612
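	/*
	 * Layout of the 13-byte Bulk-only CSW built below, per the USB
	 * Mass Storage Class Bulk-Only Transport specification:
	 *
	 *	offset  0, 4 bytes: dCSWSignature ('USBS', 0x53425355)
	 *	offset  4, 4 bytes: dCSWTag (echoes the CBW tag)
	 *	offset  8, 4 bytes: dCSWDataResidue
	 *	offset 12, 1 byte:  bCSWStatus (0 pass, 1 fail, 2 phase error)
	 */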
 1613	/* Store and send the Bulk-only CSW */
 1614	csw = (void *)bh->buf;
 1615
1616 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1617 csw->Tag = fsg->tag;
1618 csw->Residue = cpu_to_le32(fsg->residue);
1619 csw->Status = status;
 1620
1621 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1622 bh->inreq->zero = 0;
1623 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1624 &bh->inreq_busy, &bh->state);
 1625
 1626	fsg->common->next_buffhd_to_fill = bh->next;
1627 return 0;
1628}
1629
1630
1631/*-------------------------------------------------------------------------*/
1632
1633/* Check whether the command is properly formed and whether its data size
1634 * and direction agree with the values we already have. */
1635static int check_command(struct fsg_dev *fsg, int cmnd_size,
1636 enum data_direction data_dir, unsigned int mask,
1637 int needs_medium, const char *name)
1638{
1639 int i;
 1640	int			lun = fsg->common->cmnd[1] >> 5;
1641 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1642 char hdlen[20];
1643 struct fsg_lun *curlun;
1644
1645 hdlen[0] = 0;
1646 if (fsg->data_dir != DATA_DIR_UNKNOWN)
1647 sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
1648 fsg->data_size);
1649 VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1650 name, cmnd_size, dirletter[(int) data_dir],
 1651			fsg->data_size_from_cmnd, fsg->common->cmnd_size, hdlen);
1652
1653 /* We can't reply at all until we know the correct data direction
1654 * and size. */
1655 if (fsg->data_size_from_cmnd == 0)
1656 data_dir = DATA_DIR_NONE;
1657 if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
1658 fsg->data_dir = data_dir;
1659 fsg->data_size = fsg->data_size_from_cmnd;
1660
1661 } else { // Bulk-only
1662 if (fsg->data_size < fsg->data_size_from_cmnd) {
1663
1664 /* Host data size < Device data size is a phase error.
1665 * Carry out the command, but only transfer as much
1666 * as we are allowed. */
1667 fsg->data_size_from_cmnd = fsg->data_size;
1668 fsg->phase_error = 1;
1669 }
1670 }
1671 fsg->residue = fsg->usb_amount_left = fsg->data_size;
1672
1673 /* Conflicting data directions is a phase error */
1674 if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
1675 fsg->phase_error = 1;
1676 return -EINVAL;
1677 }
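/*
 * Example of the phase-error handling above (Bulk-only case): if the
 * CBW advertised a Data-In transfer but the CDB turns out to be
 * WRITE(10), fsg->data_dir (DATA_DIR_TO_HOST) disagrees with the
 * command's DATA_DIR_FROM_HOST, so we flag a phase error and give up
 * on the command. By contrast, a CBW DataTransferLength smaller than
 * what the CDB implies is still carried out, clamped to the host's
 * size, with the phase error reported afterwards.
 */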
1678
1679 /* Verify the length of the command itself */
a41ae418 1680 if (cmnd_size != fsg->common->cmnd_size) {
d5e2b67a
MN
1681
1682 /* Special case workaround: There are plenty of buggy SCSI
1683 * implementations. Many mishandle the cbw->Length field and
1684 * pass a wrong command size. For those cases we
1685 * always try to work around the problem by using the length
1686 * sent by the host side provided it is at least as large
1687 * as the correct command length.
1688 * Examples of such cases would be MS-Windows, which issues
1689 * REQUEST SENSE with cbw->Length == 12 where it should
1690 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1691 * REQUEST SENSE with cbw->Length == 10 where it should
1692 * be 6 as well.
1693 */
a41ae418 1694 if (cmnd_size <= fsg->common->cmnd_size) {
d5e2b67a 1695 DBG(fsg, "%s is buggy! Expected length %d "
a41ae418
MN
1696 "but we got %d\n", name,
1697 cmnd_size, fsg->common->cmnd_size);
1698 cmnd_size = fsg->common->cmnd_size;
d5e2b67a
MN
1699 } else {
1700 fsg->phase_error = 1;
1701 return -EINVAL;
1702 }
1703 }
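/*
 * A worked example of the workaround above, assuming the MS-Windows
 * behaviour described in the comment: REQUEST SENSE has a correct
 * length of 6, but the host reports cbw->Length == 12. Since
 * 6 <= 12 we adopt the host's value instead of raising a phase
 * error, and the trailing CDB bytes are still required to be zero
 * by the mask check below.
 */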
1704
1705 /* Check that the LUN values are consistent */
a41ae418 1706 if (fsg->common->lun != lun)
93bcf12e 1707 DBG(fsg, "using LUN %d from CBW, not LUN %d from CDB\n",
a41ae418 1708 fsg->common->lun, lun);
d5e2b67a
MN
1709
1710 /* Check the LUN */
a41ae418
MN
1711 if (fsg->common->lun >= 0 && fsg->common->lun < fsg->common->nluns) {
1712 fsg->common->curlun = curlun = &fsg->common->luns[fsg->common->lun];
1713 if (fsg->common->cmnd[0] != SC_REQUEST_SENSE) {
d5e2b67a
MN
1714 curlun->sense_data = SS_NO_SENSE;
1715 curlun->sense_data_info = 0;
1716 curlun->info_valid = 0;
1717 }
1718 } else {
a41ae418 1719 fsg->common->curlun = curlun = NULL;
d5e2b67a
MN
1720 fsg->bad_lun_okay = 0;
1721
1722 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1723 * to use unsupported LUNs; all others may not. */
a41ae418
MN
1724 if (fsg->common->cmnd[0] != SC_INQUIRY &&
1725 fsg->common->cmnd[0] != SC_REQUEST_SENSE) {
1726 DBG(fsg, "unsupported LUN %d\n", fsg->common->lun);
d5e2b67a
MN
1727 return -EINVAL;
1728 }
1729 }
1730
1731 /* If a unit attention condition exists, only INQUIRY and
1732 * REQUEST SENSE commands are allowed; anything else must fail. */
1733 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
a41ae418
MN
1734 fsg->common->cmnd[0] != SC_INQUIRY &&
1735 fsg->common->cmnd[0] != SC_REQUEST_SENSE) {
d5e2b67a
MN
1736 curlun->sense_data = curlun->unit_attention_data;
1737 curlun->unit_attention_data = SS_NO_SENSE;
1738 return -EINVAL;
1739 }
1740
1741 /* Check that only command bytes listed in the mask are non-zero */
a41ae418 1742 fsg->common->cmnd[1] &= 0x1f; // Mask away the LUN
d5e2b67a 1743 for (i = 1; i < cmnd_size; ++i) {
a41ae418 1744 if (fsg->common->cmnd[i] && !(mask & (1 << i))) {
d5e2b67a
MN
1745 if (curlun)
1746 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1747 return -EINVAL;
1748 }
1749 }
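/*
 * Example of how the mask is interpreted (using the values passed in
 * from do_scsi_command() below): for READ(10) the mask is
 * (1<<1) | (0xf<<2) | (3<<7), so bytes 1 (flags), 2-5 (LBA) and
 * 7-8 (transfer length) may be non-zero; any other non-zero byte,
 * e.g. the control byte 9, ends up as INVALID FIELD IN CDB. Byte 0
 * (the opcode) is never checked, and the LUN bits in byte 1 were
 * masked away just above.
 */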
1750
1751 /* If the medium isn't mounted and the command needs to access
1752 * it, return an error. */
1753 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1754 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1755 return -EINVAL;
1756 }
1757
1758 return 0;
1759}
1760
1761
1762static int do_scsi_command(struct fsg_dev *fsg)
1763{
1764 struct fsg_buffhd *bh;
1765 int rc;
1766 int reply = -EINVAL;
1767 int i;
1768 static char unknown[16];
1769
a41ae418 1770 dump_cdb(fsg->common);
d5e2b67a
MN
1771
1772 /* Wait for the next buffer to become available for data or status */
a41ae418 1773 bh = fsg->common->next_buffhd_to_drain = fsg->common->next_buffhd_to_fill;
d5e2b67a
MN
1774 while (bh->state != BUF_STATE_EMPTY) {
1775 rc = sleep_thread(fsg);
1776 if (rc)
1777 return rc;
1778 }
1779 fsg->phase_error = 0;
1780 fsg->short_packet_received = 0;
1781
a41ae418
MN
1782 down_read(&fsg->common->filesem); // We're using the backing file
1783 switch (fsg->common->cmnd[0]) {
d5e2b67a
MN
1784
1785 case SC_INQUIRY:
a41ae418 1786 fsg->data_size_from_cmnd = fsg->common->cmnd[4];
d5e2b67a
MN
1787 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1788 (1<<4), 0,
1789 "INQUIRY")) == 0)
1790 reply = do_inquiry(fsg, bh);
1791 break;
1792
1793 case SC_MODE_SELECT_6:
a41ae418 1794 fsg->data_size_from_cmnd = fsg->common->cmnd[4];
d5e2b67a
MN
1795 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1796 (1<<1) | (1<<4), 0,
1797 "MODE SELECT(6)")) == 0)
1798 reply = do_mode_select(fsg, bh);
1799 break;
1800
1801 case SC_MODE_SELECT_10:
a41ae418 1802 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
d5e2b67a
MN
1803 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1804 (1<<1) | (3<<7), 0,
1805 "MODE SELECT(10)")) == 0)
1806 reply = do_mode_select(fsg, bh);
1807 break;
1808
1809 case SC_MODE_SENSE_6:
a41ae418 1810 fsg->data_size_from_cmnd = fsg->common->cmnd[4];
d5e2b67a
MN
1811 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1812 (1<<1) | (1<<2) | (1<<4), 0,
1813 "MODE SENSE(6)")) == 0)
1814 reply = do_mode_sense(fsg, bh);
1815 break;
1816
1817 case SC_MODE_SENSE_10:
a41ae418 1818 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
d5e2b67a
MN
1819 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1820 (1<<1) | (1<<2) | (3<<7), 0,
1821 "MODE SENSE(10)")) == 0)
1822 reply = do_mode_sense(fsg, bh);
1823 break;
1824
1825 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1826 fsg->data_size_from_cmnd = 0;
1827 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1828 (1<<4), 0,
1829 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
1830 reply = do_prevent_allow(fsg);
1831 break;
1832
1833 case SC_READ_6:
a41ae418 1834 i = fsg->common->cmnd[4];
d5e2b67a
MN
1835 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1836 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1837 (7<<1) | (1<<4), 1,
1838 "READ(6)")) == 0)
1839 reply = do_read(fsg);
1840 break;
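/*
 * Sketch of the size computation above: in READ(6) the transfer
 * length byte cmnd[4] of 0 means 256 blocks, and "<< 9" multiplies
 * by the 512-byte block size assumed here, so cmnd[4] == 0 yields
 * data_size_from_cmnd == 256 << 9 == 131072 bytes.
 */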
1841
1842 case SC_READ_10:
1843 fsg->data_size_from_cmnd =
a41ae418 1844 get_unaligned_be16(&fsg->common->cmnd[7]) << 9;
d5e2b67a
MN
1845 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1846 (1<<1) | (0xf<<2) | (3<<7), 1,
1847 "READ(10)")) == 0)
1848 reply = do_read(fsg);
1849 break;
1850
1851 case SC_READ_12:
1852 fsg->data_size_from_cmnd =
a41ae418 1853 get_unaligned_be32(&fsg->common->cmnd[6]) << 9;
d5e2b67a
MN
1854 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
1855 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1856 "READ(12)")) == 0)
1857 reply = do_read(fsg);
1858 break;
1859
1860 case SC_READ_CAPACITY:
1861 fsg->data_size_from_cmnd = 8;
1862 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1863 (0xf<<2) | (1<<8), 1,
1864 "READ CAPACITY")) == 0)
1865 reply = do_read_capacity(fsg, bh);
1866 break;
1867
1868 case SC_READ_HEADER:
1869 if (!mod_data.cdrom)
1870 goto unknown_cmnd;
a41ae418 1871 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
d5e2b67a
MN
1872 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1873 (3<<7) | (0x1f<<1), 1,
1874 "READ HEADER")) == 0)
1875 reply = do_read_header(fsg, bh);
1876 break;
1877
1878 case SC_READ_TOC:
1879 if (!mod_data.cdrom)
1880 goto unknown_cmnd;
a41ae418 1881 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
d5e2b67a
MN
1882 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1883 (7<<6) | (1<<1), 1,
1884 "READ TOC")) == 0)
1885 reply = do_read_toc(fsg, bh);
1886 break;
1887
1888 case SC_READ_FORMAT_CAPACITIES:
a41ae418 1889 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->common->cmnd[7]);
d5e2b67a
MN
1890 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
1891 (3<<7), 1,
1892 "READ FORMAT CAPACITIES")) == 0)
1893 reply = do_read_format_capacities(fsg, bh);
1894 break;
1895
1896 case SC_REQUEST_SENSE:
a41ae418 1897 fsg->data_size_from_cmnd = fsg->common->cmnd[4];
d5e2b67a
MN
1898 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
1899 (1<<4), 0,
1900 "REQUEST SENSE")) == 0)
1901 reply = do_request_sense(fsg, bh);
1902 break;
1903
1904 case SC_START_STOP_UNIT:
1905 fsg->data_size_from_cmnd = 0;
1906 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
1907 (1<<1) | (1<<4), 0,
1908 "START-STOP UNIT")) == 0)
1909 reply = do_start_stop(fsg);
1910 break;
1911
1912 case SC_SYNCHRONIZE_CACHE:
1913 fsg->data_size_from_cmnd = 0;
1914 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1915 (0xf<<2) | (3<<7), 1,
1916 "SYNCHRONIZE CACHE")) == 0)
1917 reply = do_synchronize_cache(fsg);
1918 break;
1919
1920 case SC_TEST_UNIT_READY:
1921 fsg->data_size_from_cmnd = 0;
1922 reply = check_command(fsg, 6, DATA_DIR_NONE,
1923 0, 1,
1924 "TEST UNIT READY");
1925 break;
1926
1927 /* Although optional, this command is used by MS-Windows. We
1928 * support a minimal version: BytChk must be 0. */
1929 case SC_VERIFY:
1930 fsg->data_size_from_cmnd = 0;
1931 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
1932 (1<<1) | (0xf<<2) | (3<<7), 1,
1933 "VERIFY")) == 0)
1934 reply = do_verify(fsg);
1935 break;
1936
1937 case SC_WRITE_6:
a41ae418 1938 i = fsg->common->cmnd[4];
d5e2b67a
MN
1939 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1940 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
1941 (7<<1) | (1<<4), 1,
1942 "WRITE(6)")) == 0)
1943 reply = do_write(fsg);
1944 break;
1945
1946 case SC_WRITE_10:
1947 fsg->data_size_from_cmnd =
a41ae418 1948 get_unaligned_be16(&fsg->common->cmnd[7]) << 9;
d5e2b67a
MN
1949 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
1950 (1<<1) | (0xf<<2) | (3<<7), 1,
1951 "WRITE(10)")) == 0)
1952 reply = do_write(fsg);
1953 break;
1954
1955 case SC_WRITE_12:
1956 fsg->data_size_from_cmnd =
a41ae418 1957 get_unaligned_be32(&fsg->common->cmnd[6]) << 9;
d5e2b67a
MN
1958 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
1959 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1960 "WRITE(12)")) == 0)
1961 reply = do_write(fsg);
1962 break;
1963
1964 /* Some mandatory commands that we recognize but don't implement.
1965 * They don't mean much in this setting. It's left as an exercise
1966 * for anyone interested to implement RESERVE and RELEASE in terms
1967 * of Posix locks. */
1968 case SC_FORMAT_UNIT:
1969 case SC_RELEASE:
1970 case SC_RESERVE:
1971 case SC_SEND_DIAGNOSTIC:
1972 // Fall through
1973
1974 default:
1975 unknown_cmnd:
1976 fsg->data_size_from_cmnd = 0;
a41ae418
MN
1977 sprintf(unknown, "Unknown x%02x", fsg->common->cmnd[0]);
1978 if ((reply = check_command(fsg, fsg->common->cmnd_size,
d5e2b67a 1979 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
a41ae418 1980 fsg->common->curlun->sense_data = SS_INVALID_COMMAND;
d5e2b67a
MN
1981 reply = -EINVAL;
1982 }
1983 break;
1984 }
a41ae418 1985 up_read(&fsg->common->filesem);
d5e2b67a
MN
1986
1987 if (reply == -EINTR || signal_pending(current))
1988 return -EINTR;
1989
1990 /* Set up the single reply buffer for finish_reply() */
1991 if (reply == -EINVAL)
1992 reply = 0; // Error reply length
1993 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
1994 reply = min((u32) reply, fsg->data_size_from_cmnd);
1995 bh->inreq->length = reply;
1996 bh->state = BUF_STATE_FULL;
1997 fsg->residue -= reply;
1998 } // Otherwise it's already set
1999
2000 return 0;
2001}
2002
2003
2004/*-------------------------------------------------------------------------*/
2005
2006static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2007{
2008 struct usb_request *req = bh->outreq;
2009 struct fsg_bulk_cb_wrap *cbw = req->buf;
2010
2011 /* Was this a real packet? Should it be ignored? */
2012 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2013 return -EINVAL;
2014
2015 /* Is the CBW valid? */
2016 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2017 cbw->Signature != cpu_to_le32(
2018 USB_BULK_CB_SIG)) {
2019 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2020 req->actual,
2021 le32_to_cpu(cbw->Signature));
2022
2023 /* The Bulk-only spec says we MUST stall the IN endpoint
2024 * (6.6.1), so it's unavoidable. It also says we must
2025 * retain this state until the next reset, but there's
2026 * no way to tell the controller driver it should ignore
2027 * Clear-Feature(HALT) requests.
2028 *
2029 * We aren't required to halt the OUT endpoint; instead
2030 * we can simply accept and discard any data received
2031 * until the next reset. */
2032 wedge_bulk_in_endpoint(fsg);
2033 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2034 return -EINVAL;
2035 }
2036
2037 /* Is the CBW meaningful? */
2038 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2039 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2040 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2041 "cmdlen %u\n",
2042 cbw->Lun, cbw->Flags, cbw->Length);
2043
2044 /* We can do anything we want here, so let's stall the
2045 * bulk pipes if we are allowed to. */
2046 if (mod_data.can_stall) {
2047 fsg_set_halt(fsg, fsg->bulk_out);
2048 halt_bulk_in_endpoint(fsg);
2049 }
2050 return -EINVAL;
2051 }
2052
2053 /* Save the command for later */
a41ae418
MN
2054 fsg->common->cmnd_size = cbw->Length;
2055 memcpy(fsg->common->cmnd, cbw->CDB, fsg->common->cmnd_size);
d5e2b67a
MN
2056 if (cbw->Flags & USB_BULK_IN_FLAG)
2057 fsg->data_dir = DATA_DIR_TO_HOST;
2058 else
2059 fsg->data_dir = DATA_DIR_FROM_HOST;
2060 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2061 if (fsg->data_size == 0)
2062 fsg->data_dir = DATA_DIR_NONE;
a41ae418 2063 fsg->common->lun = cbw->Lun;
d5e2b67a
MN
2064 fsg->tag = cbw->Tag;
2065 return 0;
2066}
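/*
 * For reference, the 31-byte CBW that received_cbw() just parsed,
 * per the Bulk-Only Transport spec (spec field names):
 *
 *	offset  0, 4 bytes : dCBWSignature          = 0x43425355 ("USBC")
 *	offset  4, 4 bytes : dCBWTag                = echoed back in the CSW
 *	offset  8, 4 bytes : dCBWDataTransferLength = expected data length
 *	offset 12, 1 byte  : bmCBWFlags             = bit 7 set means Data-In
 *	offset 13, 1 byte  : bCBWLUN
 *	offset 14, 1 byte  : bCBWCBLength           = 1..16
 *	offset 15, 16 bytes: CBWCB                  = the SCSI CDB
 */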
2067
2068
2069static int get_next_command(struct fsg_dev *fsg)
2070{
2071 struct fsg_buffhd *bh;
2072 int rc = 0;
2073
93bcf12e 2074 /* Wait for the next buffer to become available */
a41ae418 2075 bh = fsg->common->next_buffhd_to_fill;
93bcf12e
MN
2076 while (bh->state != BUF_STATE_EMPTY) {
2077 rc = sleep_thread(fsg);
2078 if (rc)
2079 return rc;
2080 }
d5e2b67a 2081
93bcf12e
MN
2082 /* Queue a request to read a Bulk-only CBW */
2083 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
2084 bh->outreq->short_not_ok = 1;
2085 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2086 &bh->outreq_busy, &bh->state);
d5e2b67a 2087
93bcf12e
MN
2088 /* We will drain the buffer in software, which means we
2089 * can reuse it for the next filling. No need to advance
2090 * next_buffhd_to_fill. */
d5e2b67a 2091
93bcf12e
MN
2092 /* Wait for the CBW to arrive */
2093 while (bh->state != BUF_STATE_FULL) {
2094 rc = sleep_thread(fsg);
2095 if (rc)
2096 return rc;
d5e2b67a 2097 }
93bcf12e
MN
2098 smp_rmb();
2099 rc = received_cbw(fsg, bh);
2100 bh->state = BUF_STATE_EMPTY;
2101
d5e2b67a
MN
2102 return rc;
2103}
2104
2105
2106/*-------------------------------------------------------------------------*/
2107
2108static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2109 const struct usb_endpoint_descriptor *d)
2110{
2111 int rc;
2112
2113 ep->driver_data = fsg;
2114 rc = usb_ep_enable(ep, d);
2115 if (rc)
2116 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2117 return rc;
2118}
2119
2120static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2121 struct usb_request **preq)
2122{
2123 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2124 if (*preq)
2125 return 0;
2126 ERROR(fsg, "can't allocate request for %s\n", ep->name);
2127 return -ENOMEM;
2128}
2129
2130/*
2131 * Reset interface setting and re-init endpoint state (toggle etc).
2132 * Call with altsetting < 0 to disable the interface. The only other
2133 * available altsetting is 0, which enables the interface.
2134 */
2135static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2136{
2137 int rc = 0;
2138 int i;
2139 const struct usb_endpoint_descriptor *d;
2140
2141 if (fsg->running)
2142 DBG(fsg, "reset interface\n");
2143
2144reset:
2145 /* Deallocate the requests */
2146 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
a41ae418 2147 struct fsg_buffhd *bh = &fsg->common->buffhds[i];
d5e2b67a
MN
2148
2149 if (bh->inreq) {
2150 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2151 bh->inreq = NULL;
2152 }
2153 if (bh->outreq) {
2154 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2155 bh->outreq = NULL;
2156 }
2157 }
d5e2b67a
MN
2158
2159 /* Disable the endpoints */
2160 if (fsg->bulk_in_enabled) {
2161 usb_ep_disable(fsg->bulk_in);
2162 fsg->bulk_in_enabled = 0;
2163 }
2164 if (fsg->bulk_out_enabled) {
2165 usb_ep_disable(fsg->bulk_out);
2166 fsg->bulk_out_enabled = 0;
2167 }
d5e2b67a
MN
2168
2169 fsg->running = 0;
2170 if (altsetting < 0 || rc != 0)
2171 return rc;
2172
2173 DBG(fsg, "set interface %d\n", altsetting);
2174
2175 /* Enable the endpoints */
2176 d = fsg_ep_desc(fsg->gadget,
2177 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2178 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2179 goto reset;
2180 fsg->bulk_in_enabled = 1;
2181
2182 d = fsg_ep_desc(fsg->gadget,
2183 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2184 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2185 goto reset;
2186 fsg->bulk_out_enabled = 1;
2187 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2188 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2189
d5e2b67a
MN
2190 /* Allocate the requests */
2191 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
a41ae418 2192 struct fsg_buffhd *bh = &fsg->common->buffhds[i];
d5e2b67a
MN
2193
2194 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2195 goto reset;
2196 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
2197 goto reset;
2198 bh->inreq->buf = bh->outreq->buf = bh->buf;
2199 bh->inreq->context = bh->outreq->context = bh;
2200 bh->inreq->complete = bulk_in_complete;
2201 bh->outreq->complete = bulk_out_complete;
2202 }
d5e2b67a
MN
2203
2204 fsg->running = 1;
a41ae418
MN
2205 for (i = 0; i < fsg->common->nluns; ++i)
2206 fsg->common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
d5e2b67a
MN
2207 return rc;
2208}
2209
2210
2211/*
2212 * Change our operational configuration. This code must agree with the code
2213 * that returns config descriptors, and with interface altsetting code.
2214 *
2215 * It's also responsible for power management interactions. Some
2216 * configurations might not work with our current power sources.
2217 * For now we just assume the gadget is always self-powered.
2218 */
2219static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2220{
2221 int rc = 0;
2222
2223 /* Disable the single interface */
2224 if (fsg->config != 0) {
2225 DBG(fsg, "reset config\n");
2226 fsg->config = 0;
2227 rc = do_set_interface(fsg, -1);
2228 }
2229
2230 /* Enable the interface */
2231 if (new_config != 0) {
2232 fsg->config = new_config;
d23b0f08
MN
2233 rc = do_set_interface(fsg, 0);
2234 if (rc != 0)
2235 fsg->config = 0; /* Reset on errors */
d5e2b67a
MN
2236 }
2237 return rc;
2238}
2239
2240
d23b0f08
MN
2241/****************************** ALT CONFIGS ******************************/
2242
2243
2244static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2245{
2246 struct fsg_dev *fsg = fsg_from_func(f);
2247 fsg->new_config = 1;
2248 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
2249 return 0;
2250}
2251
2252static void fsg_disable(struct usb_function *f)
2253{
2254 struct fsg_dev *fsg = fsg_from_func(f);
2255 fsg->new_config = 0;
2256 raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
2257}
2258
2259
d5e2b67a
MN
2260/*-------------------------------------------------------------------------*/
2261
2262static void handle_exception(struct fsg_dev *fsg)
2263{
2264 siginfo_t info;
2265 int sig;
2266 int i;
d5e2b67a
MN
2267 struct fsg_buffhd *bh;
2268 enum fsg_state old_state;
2269 u8 new_config;
2270 struct fsg_lun *curlun;
2271 unsigned int exception_req_tag;
2272 int rc;
2273
2274 /* Clear the existing signals. Anything but SIGUSR1 is converted
2275 * into a high-priority EXIT exception. */
2276 for (;;) {
2277 sig = dequeue_signal_lock(current, &current->blocked, &info);
2278 if (!sig)
2279 break;
2280 if (sig != SIGUSR1) {
2281 if (fsg->state < FSG_STATE_EXIT)
2282 DBG(fsg, "Main thread exiting on signal\n");
2283 raise_exception(fsg, FSG_STATE_EXIT);
2284 }
2285 }
2286
2287 /* Cancel all the pending transfers */
d5e2b67a 2288 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
a41ae418 2289 bh = &fsg->common->buffhds[i];
d5e2b67a
MN
2290 if (bh->inreq_busy)
2291 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2292 if (bh->outreq_busy)
2293 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2294 }
2295
2296 /* Wait until everything is idle */
2297 for (;;) {
a41ae418 2298 int num_active = 0;
d5e2b67a 2299 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
a41ae418 2300 bh = &fsg->common->buffhds[i];
d5e2b67a
MN
2301 num_active += bh->inreq_busy + bh->outreq_busy;
2302 }
2303 if (num_active == 0)
2304 break;
2305 if (sleep_thread(fsg))
2306 return;
2307 }
2308
2309 /* Clear out the controller's fifos */
2310 if (fsg->bulk_in_enabled)
2311 usb_ep_fifo_flush(fsg->bulk_in);
2312 if (fsg->bulk_out_enabled)
2313 usb_ep_fifo_flush(fsg->bulk_out);
d5e2b67a
MN
2314
2315 /* Reset the I/O buffer states and pointers, the SCSI
2316 * state, and the exception. Then invoke the handler. */
2317 spin_lock_irq(&fsg->lock);
2318
2319 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
a41ae418 2320 bh = &fsg->common->buffhds[i];
d5e2b67a
MN
2321 bh->state = BUF_STATE_EMPTY;
2322 }
a41ae418
MN
2323 fsg->common->next_buffhd_to_fill = fsg->common->next_buffhd_to_drain =
2324 &fsg->common->buffhds[0];
d5e2b67a
MN
2325
2326 exception_req_tag = fsg->exception_req_tag;
2327 new_config = fsg->new_config;
2328 old_state = fsg->state;
2329
2330 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2331 fsg->state = FSG_STATE_STATUS_PHASE;
2332 else {
a41ae418
MN
2333 for (i = 0; i < fsg->common->nluns; ++i) {
2334 curlun = &fsg->common->luns[i];
d5e2b67a
MN
2335 curlun->prevent_medium_removal = 0;
2336 curlun->sense_data = curlun->unit_attention_data =
2337 SS_NO_SENSE;
2338 curlun->sense_data_info = 0;
2339 curlun->info_valid = 0;
2340 }
2341 fsg->state = FSG_STATE_IDLE;
2342 }
2343 spin_unlock_irq(&fsg->lock);
2344
2345 /* Carry out any extra actions required for the exception */
2346 switch (old_state) {
d5e2b67a
MN
2347 case FSG_STATE_ABORT_BULK_OUT:
2348 send_status(fsg);
2349 spin_lock_irq(&fsg->lock);
2350 if (fsg->state == FSG_STATE_STATUS_PHASE)
2351 fsg->state = FSG_STATE_IDLE;
2352 spin_unlock_irq(&fsg->lock);
2353 break;
2354
2355 case FSG_STATE_RESET:
2356 /* In case we were forced against our will to halt a
2357 * bulk endpoint, clear the halt now. (The SuperH UDC
2358 * requires this.) */
2359 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2360 usb_ep_clear_halt(fsg->bulk_in);
2361
93bcf12e
MN
2362 if (fsg->ep0_req_tag == exception_req_tag)
2363 ep0_queue(fsg); // Complete the status stage
d5e2b67a
MN
2364
2365 /* Technically this should go here, but it would only be
2366 * a waste of time. Ditto for the INTERFACE_CHANGE and
2367 * CONFIG_CHANGE cases. */
a41ae418
MN
2368 // for (i = 0; i < fsg->common->nluns; ++i)
2369 // fsg->common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
d5e2b67a
MN
2370 break;
2371
d5e2b67a
MN
2372 case FSG_STATE_CONFIG_CHANGE:
2373 rc = do_set_config(fsg, new_config);
2374 if (fsg->ep0_req_tag != exception_req_tag)
2375 break;
2376 if (rc != 0) // STALL on errors
2377 fsg_set_halt(fsg, fsg->ep0);
2378 else // Complete the status stage
2379 ep0_queue(fsg);
2380 break;
2381
d5e2b67a
MN
2382 case FSG_STATE_EXIT:
2383 case FSG_STATE_TERMINATED:
2384 do_set_config(fsg, 0); // Free resources
2385 spin_lock_irq(&fsg->lock);
2386 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
2387 spin_unlock_irq(&fsg->lock);
2388 break;
d23b0f08
MN
2389
2390 case FSG_STATE_INTERFACE_CHANGE:
2391 case FSG_STATE_DISCONNECT:
2392 case FSG_STATE_COMMAND_PHASE:
2393 case FSG_STATE_DATA_PHASE:
2394 case FSG_STATE_STATUS_PHASE:
2395 case FSG_STATE_IDLE:
2396 break;
d5e2b67a
MN
2397 }
2398}
2399
2400
2401/*-------------------------------------------------------------------------*/
2402
2403static int fsg_main_thread(void *fsg_)
2404{
2405 struct fsg_dev *fsg = fsg_;
2406
2407 /* Allow the thread to be killed by a signal, but set the signal mask
2408 * to block everything but INT, TERM, KILL, and USR1. */
2409 allow_signal(SIGINT);
2410 allow_signal(SIGTERM);
2411 allow_signal(SIGKILL);
2412 allow_signal(SIGUSR1);
2413
2414 /* Allow the thread to be frozen */
2415 set_freezable();
2416
2417 /* Arrange for userspace references to be interpreted as kernel
2418 * pointers. That way we can pass a kernel pointer to a routine
2419 * that expects a __user pointer and it will work okay. */
2420 set_fs(get_ds());
2421
2422 /* The main loop */
2423 while (fsg->state != FSG_STATE_TERMINATED) {
2424 if (exception_in_progress(fsg) || signal_pending(current)) {
2425 handle_exception(fsg);
2426 continue;
2427 }
2428
2429 if (!fsg->running) {
2430 sleep_thread(fsg);
2431 continue;
2432 }
2433
2434 if (get_next_command(fsg))
2435 continue;
2436
2437 spin_lock_irq(&fsg->lock);
2438 if (!exception_in_progress(fsg))
2439 fsg->state = FSG_STATE_DATA_PHASE;
2440 spin_unlock_irq(&fsg->lock);
2441
2442 if (do_scsi_command(fsg) || finish_reply(fsg))
2443 continue;
2444
2445 spin_lock_irq(&fsg->lock);
2446 if (!exception_in_progress(fsg))
2447 fsg->state = FSG_STATE_STATUS_PHASE;
2448 spin_unlock_irq(&fsg->lock);
2449
2450 if (send_status(fsg))
2451 continue;
2452
2453 spin_lock_irq(&fsg->lock);
2454 if (!exception_in_progress(fsg))
2455 fsg->state = FSG_STATE_IDLE;
2456 spin_unlock_irq(&fsg->lock);
d23b0f08 2457 }
d5e2b67a
MN
2458
2459 spin_lock_irq(&fsg->lock);
2460 fsg->thread_task = NULL;
2461 spin_unlock_irq(&fsg->lock);
2462
d23b0f08 2463 /* XXX */
d5e2b67a
MN
2464 /* If we are exiting because of a signal, unregister the
2465 * gadget driver. */
d23b0f08
MN
2466 /* if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) */
2467 /* usb_gadget_unregister_driver(&fsg_driver); */
d5e2b67a
MN
2468
2469 /* Let the unbind and cleanup routines know the thread has exited */
2470 complete_and_exit(&fsg->thread_notifier, 0);
2471}
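/*
 * Summary of the loop above: each pass performs one Bulk-Only command
 * cycle, with exceptions and signals handled out of band by
 * handle_exception():
 *
 *	get_next_command()                 -- command phase: read and
 *	                                      validate a CBW
 *	do_scsi_command() + finish_reply() -- data phase
 *	send_status()                      -- status phase: queue the CSW
 */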
2472
2473
9c610213 2474/*************************** DEVICE ATTRIBUTES ***************************/
d5e2b67a 2475
d23b0f08
MN
2476/* Write permission is checked per LUN in store_*() functions. */
2477static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2478static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
d5e2b67a
MN
2479
2480
9c610213
MN
2481/****************************** FSG COMMON ******************************/
2482
2483static void fsg_common_release(struct kref *ref);
d5e2b67a 2484
9c610213 2485static void fsg_lun_release(struct device *dev)
d5e2b67a 2486{
9c610213 2487 /* Nothing needs to be done */
d5e2b67a
MN
2488}
2489
9c610213 2490static inline void fsg_common_get(struct fsg_common *common)
d5e2b67a 2491{
9c610213 2492 kref_get(&common->ref);
d5e2b67a
MN
2493}
2494
9c610213
MN
2495static inline void fsg_common_put(struct fsg_common *common)
2496{
2497 kref_put(&common->ref, fsg_common_release);
2498}
2499
2500
2501static struct fsg_common *fsg_common_init(struct fsg_common *common,
d23b0f08 2502 struct usb_composite_dev *cdev)
9c610213 2503{
d23b0f08 2504 struct usb_gadget *gadget = cdev->gadget;
9c610213
MN
2505 struct fsg_buffhd *bh;
2506 struct fsg_lun *curlun;
2507 int nluns, i, rc;
d23b0f08 2508 char *pathbuf;
9c610213
MN
2509
2510 /* Find out how many LUNs there should be */
2511 nluns = mod_data.nluns;
2512 if (nluns == 0)
2513 nluns = max(mod_data.num_filenames, 1u);
2514 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2515 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2516 return ERR_PTR(-EINVAL);
2517 }
2518
2519 /* Allocate? */
2520 if (!common) {
2521 common = kzalloc(sizeof *common, GFP_KERNEL);
2522 if (!common)
2523 return ERR_PTR(-ENOMEM);
2524 common->free_storage_on_release = 1;
2525 } else {
2526 memset(common, 0, sizeof *common);
2527 common->free_storage_on_release = 0;
2528 }
2529 common->gadget = gadget;
2530
2531 /* Create the LUNs, open their backing files, and register the
2532 * LUN devices in sysfs. */
2533 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2534 if (!curlun) {
2535 kfree(common);
2536 return ERR_PTR(-ENOMEM);
2537 }
2538 common->luns = curlun;
2539
2540 init_rwsem(&common->filesem);
2541
2542 for (i = 0; i < nluns; ++i, ++curlun) {
2543 curlun->cdrom = !!mod_data.cdrom;
2544 curlun->ro = mod_data.cdrom || mod_data.ro[i];
2545 curlun->removable = mod_data.removable;
2546 curlun->dev.release = fsg_lun_release;
2547 curlun->dev.parent = &gadget->dev;
d23b0f08 2548 /* curlun->dev.driver = &fsg_driver.driver; XXX */
9c610213
MN
2549 dev_set_drvdata(&curlun->dev, &common->filesem);
2550 dev_set_name(&curlun->dev, "%s-lun%d",
2551 dev_name(&gadget->dev), i);
2552
2553 rc = device_register(&curlun->dev);
2554 if (rc) {
2555 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2556 common->nluns = i;
2557 goto error_release;
2558 }
2559
2560 rc = device_create_file(&curlun->dev, &dev_attr_ro);
2561 if (rc)
2562 goto error_luns;
2563 rc = device_create_file(&curlun->dev, &dev_attr_file);
2564 if (rc)
2565 goto error_luns;
2566
2567 if (mod_data.file[i] && *mod_data.file[i]) {
2568 rc = fsg_lun_open(curlun, mod_data.file[i]);
2569 if (rc)
2570 goto error_luns;
2571 } else if (!mod_data.removable) {
2572 ERROR(common, "no file given for LUN%d\n", i);
2573 rc = -EINVAL;
2574 goto error_luns;
2575 }
2576 }
2577 common->nluns = nluns;
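/*
 * The loop above creates, for each LUN, a child device of the gadget
 * named "<gadget>-lun<n>" carrying "ro" and "file" sysfs attributes;
 * assuming the usual fsg_store_file() behaviour, writing a path into
 * the "file" attribute swaps that LUN's backing storage at run time.
 */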
2578
2579
2580 /* Data buffers cyclic list */
2581 /* Buffers in buffhds are static -- no need for additional
2582 * allocation. */
2583 bh = common->buffhds;
2584 i = FSG_NUM_BUFFERS - 1;
2585 do {
2586 bh->next = bh + 1;
2587 } while (++bh, --i);
2588 bh->next = common->buffhds;
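/*
 * A small sketch of what the loop above builds: with, say,
 * FSG_NUM_BUFFERS == 2 it leaves
 *
 *	buffhds[0].next == &buffhds[1];
 *	buffhds[1].next == &buffhds[0];
 *
 * i.e. a circular singly-linked list that next_buffhd_to_fill and
 * next_buffhd_to_drain walk around forever.
 */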
2589
2590
2591 /* Release */
2592 if (mod_data.release == 0xffff) { // Parameter wasn't set
2593 int gcnum;
2594
2595 /* The sa1100 controller is not supported */
2596 if (gadget_is_sa1100(gadget))
2597 gcnum = -1;
2598 else
2599 gcnum = usb_gadget_controller_number(gadget);
2600 if (gcnum >= 0)
2601 mod_data.release = 0x0300 + gcnum;
2602 else {
2603 WARNING(common, "controller '%s' not recognized\n",
2604 gadget->name);
2607 mod_data.release = 0x0399;
2608 }
2609 }
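/*
 * Example of the fallback above: a controller number of, say, 3 would
 * give mod_data.release == 0x0303 (version 3.03 in BCD), while an
 * unrecognized controller ends up with 0x0399.
 */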
2610
2611
2612 /* Some peripheral controllers are known not to be able to
2613 * halt bulk endpoints correctly. If one of them is present,
2614 * disable stalls.
2615 */
2616 if (gadget_is_sh(gadget) || gadget_is_at91(gadget))
2617 mod_data.can_stall = 0;
2618
2619
2620 kref_init(&common->ref);
d23b0f08
MN
2621
2622 /* Information */
2623 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2624 INFO(common, "Number of LUNs=%d\n", common->nluns);
2625
2626 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2627 for (i = 0, nluns = common->nluns, curlun = common->luns;
2628 i < nluns;
2629 ++curlun, ++i) {
2630 char *p = "(no medium)";
2631 if (fsg_lun_is_open(curlun)) {
2632 p = "(error)";
2633 if (pathbuf) {
2634 p = d_path(&curlun->filp->f_path,
2635 pathbuf, PATH_MAX);
2636 if (IS_ERR(p))
2637 p = "(error)";
2638 }
2639 }
2640 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2641 curlun->removable ? "removable " : "",
2642 curlun->ro ? "read only " : "",
2643 curlun->cdrom ? "CD-ROM " : "",
2644 p);
2645 }
2646 kfree(pathbuf);
2647
9c610213
MN
2648 return common;
2649
2650
2651error_luns:
2652 common->nluns = i + 1;
2653error_release:
2654 /* Call fsg_common_release() directly, ref is not initialised */
2655 fsg_common_release(&common->ref);
2656 return ERR_PTR(rc);
2657}
2658
2659
2660static void fsg_common_release(struct kref *ref)
2661{
2662 struct fsg_common *common =
2663 container_of(ref, struct fsg_common, ref);
2664 unsigned i = common->nluns;
2665 struct fsg_lun *lun = common->luns;
2666
2667 /* Beware of the tempting for -> do-while optimization: during
2668 * error recovery nluns may be zero. */
2669
2670 for (; i; --i, ++lun) {
2671 device_remove_file(&lun->dev, &dev_attr_ro);
2672 device_remove_file(&lun->dev, &dev_attr_file);
2673 fsg_lun_close(lun);
2674 device_unregister(&lun->dev);
2675 }
2676
2677 kfree(common->luns);
2678 if (common->free_storage_on_release)
2679 kfree(common);
2680}
2681
2682
2683/*-------------------------------------------------------------------------*/
2684
2685
d23b0f08 2686static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
d5e2b67a 2687{
d23b0f08 2688 struct fsg_dev *fsg = fsg_from_func(f);
d5e2b67a
MN
2689
2690 DBG(fsg, "unbind\n");
2691 clear_bit(REGISTERED, &fsg->atomic_bitflags);
2692
d5e2b67a
MN
2693 /* If the thread isn't already dead, tell it to exit now */
2694 if (fsg->state != FSG_STATE_TERMINATED) {
2695 raise_exception(fsg, FSG_STATE_EXIT);
2696 wait_for_completion(&fsg->thread_notifier);
2697
2698 /* The cleanup routine waits for this completion also */
2699 complete(&fsg->thread_notifier);
2700 }
2701
9c610213
MN
2702 fsg_common_put(fsg->common);
2703 kfree(fsg);
d5e2b67a
MN
2704}
2705
2706
d23b0f08 2707static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
d5e2b67a 2708{
d23b0f08
MN
2709 struct fsg_dev *fsg = fsg_from_func(f);
2710 struct usb_gadget *gadget = c->cdev->gadget;
d5e2b67a
MN
2711 int rc;
2712 int i;
d5e2b67a 2713 struct usb_ep *ep;
d5e2b67a
MN
2714
2715 fsg->gadget = gadget;
d5e2b67a 2716 fsg->ep0 = gadget->ep0;
d23b0f08 2717 fsg->ep0req = c->cdev->req;
d5e2b67a 2718
d23b0f08
MN
2719 /* New interface */
2720 i = usb_interface_id(c, f);
2721 if (i < 0)
2722 return i;
2723 fsg_intf_desc.bInterfaceNumber = i;
2724 fsg->interface_number = i;
d5e2b67a 2725
d5e2b67a 2726 /* Find all the endpoints we will use */
d5e2b67a
MN
2727 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2728 if (!ep)
2729 goto autoconf_fail;
2730 ep->driver_data = fsg; // claim the endpoint
2731 fsg->bulk_in = ep;
2732
2733 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2734 if (!ep)
2735 goto autoconf_fail;
2736 ep->driver_data = fsg; // claim the endpoint
2737 fsg->bulk_out = ep;
2738
d5e2b67a 2739 if (gadget_is_dualspeed(gadget)) {
d5e2b67a
MN
2740 /* Assume endpoint addresses are the same for both speeds */
2741 fsg_hs_bulk_in_desc.bEndpointAddress =
2742 fsg_fs_bulk_in_desc.bEndpointAddress;
2743 fsg_hs_bulk_out_desc.bEndpointAddress =
2744 fsg_fs_bulk_out_desc.bEndpointAddress;
d23b0f08 2745 f->hs_descriptors = fsg_hs_function;
d5e2b67a
MN
2746 }
2747
d5e2b67a 2748
d23b0f08
MN
2749 /* maybe allocate device-global string IDs, and patch descriptors */
2750 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2751 i = usb_string_id(c->cdev);
2752 if (i < 0)
2753 return i;
2754 fsg_strings[FSG_STRING_INTERFACE].id = i;
2755 fsg_intf_desc.iInterface = i;
d5e2b67a
MN
2756 }
2757
d23b0f08 2758
d5e2b67a
MN
2759 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
2760 "file-storage-gadget");
2761 if (IS_ERR(fsg->thread_task)) {
2762 rc = PTR_ERR(fsg->thread_task);
2763 goto out;
2764 }
2765
d5e2b67a
MN
2766 DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
2767
2768 set_bit(REGISTERED, &fsg->atomic_bitflags);
2769
2770 /* Tell the thread to start working */
2771 wake_up_process(fsg->thread_task);
2772 return 0;
2773
2774autoconf_fail:
2775 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2776 rc = -ENOTSUPP;
2777
2778out:
2779 fsg->state = FSG_STATE_TERMINATED; // The thread is dead
d23b0f08 2780 fsg_unbind(c, f);
d5e2b67a
MN
2781 complete(&fsg->thread_notifier);
2782 return rc;
2783}
2784
2785
d23b0f08 2786/****************************** ADD FUNCTION ******************************/
d5e2b67a 2787
d23b0f08
MN
2788static struct usb_gadget_strings *fsg_strings_array[] = {
2789 &fsg_stringtab,
2790 NULL,
d5e2b67a
MN
2791};
2792
d23b0f08
MN
2793static int fsg_add(struct usb_composite_dev *cdev,
2794 struct usb_configuration *c,
2795 struct fsg_common *common)
d5e2b67a 2796{
d23b0f08
MN
2797 struct fsg_dev *fsg;
2798 int rc;
2799
2800 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2801 if (unlikely(!fsg))
2802 return -ENOMEM;
d5e2b67a 2803
d23b0f08
MN
2804 spin_lock_init(&fsg->lock);
2805 init_completion(&fsg->thread_notifier);
d5e2b67a 2806
d23b0f08
MN
2807 fsg->cdev = cdev;
2808 fsg->function.name = FSG_DRIVER_DESC;
2809 fsg->function.strings = fsg_strings_array;
2810 fsg->function.descriptors = fsg_fs_function;
2811 fsg->function.bind = fsg_bind;
2812 fsg->function.unbind = fsg_unbind;
2813 fsg->function.setup = fsg_setup;
2814 fsg->function.set_alt = fsg_set_alt;
2815 fsg->function.disable = fsg_disable;
2816
2817 fsg->common = common;
2818 /* Our caller holds a reference to the common structure, so we
2819 * don't have to worry about it being freed until we return from
2820 * this function. Instead of incrementing the counter now and
2821 * decrementing it in error recovery, we increment it only when
2822 * the call to usb_add_function() succeeds. */
2823
2824 rc = usb_add_function(c, &fsg->function);
2825
2826 if (likely(rc == 0))
2827 fsg_common_get(fsg->common);
2828 else
2829 kfree(fsg);
2830
2831 return rc;
d5e2b67a 2832}