/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 *                    Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * The function supports multiple logical units (LUNs).  Backing storage
 * for each LUN is provided by a regular file or a block device.
 * Access for each LUN can be limited to read-only.  Moreover, the
 * function can indicate that a LUN is removable and/or a CD-ROM.  (The
 * latter implies read-only access.)
 *
 * MSF is configured by specifying an fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS, which is 8).
 *	luns		An array of LUN configuration values.  This
 *				should be filled for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled, as well as when
 *				it was impossible to open "filename"
 *				in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *				being a CD-ROM.
 *
 *	lun_name_format	A printf-like format for names of the LUN
 *				devices.  This determines how the
 *				directory in sysfs will be named.
 *				Unless you are using several MSFs in
 *				a single gadget (as opposed to a single
 *				MSF in many configurations) you may
 *				leave it as NULL (in which case
 *				"lun%d" will be used).  In the format
 *				you can use "%d" to index LUNs for
 *				MSFs with more than one LUN.  (Beware
 *				that there is only one integer given
 *				as an argument for the format and
 *				specifying an invalid format may cause
 *				unspecified behaviour.)
 *	thread_name	Name of the kernel thread process used by the
 *				MSF.  You can safely set it to NULL
 *				(in which case the default "file-storage"
 *				will be used).
 *
 *	vendor_name
 *	product_name
 *	release		Information used as a reply to the INQUIRY
 *				request.  To use the defaults set them to
 *				NULL, NULL and 0xffff respectively.  The
 *				first field should be 8 and the second
 *				16 characters or less.
 *
 *	can_stall	Set to permit the function to halt bulk endpoints.
 *				Disabled on some USB devices known not
 *				to work correctly.  You should set it
 *				to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.  Note also that the CD-ROM block length is
 * set to 512 rather than the more common value 2048.
 *
 *
 * MSF includes support for module parameters.  If the gadget using it
 * decides to use them, the following module parameters will be
 * available:
 *
 *	file=filename[,filename...]
 *			Names of the files or block devices used for
 *				backing storage.
 *	ro=b[,b...]	Default false, boolean for read-only access.
 *	removable=b[,b...]
 *			Default true, boolean for removable media.
 *	cdrom=b[,b...]	Default false, boolean for whether to emulate
 *				a CD-ROM drive.
 *	luns=N		Default N = number of filenames, number of
 *				LUNs to support.
 *	stall		Default determined according to the type of
 *				USB device controller (usually true),
 *				boolean to permit the driver to halt
 *				bulk endpoints.
 *
 * The module parameters may be prefixed with some string.  You need
 * to consult the gadget's documentation or source to verify whether it
 * uses those module parameters and, if it does, what the prefixes are
 * (look for uses of the FSG_MODULE_PARAMETERS() macro; what's inside
 * it is the prefix).
 *
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed.  The memory requirement amounts to two 16K buffers, size
 * configurable by a parameter.  Support is included for both
 * full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
 * interrupt-in endpoints.  With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 *
 * The pathnames of the backing files and the ro settings are
 * available in the attribute files "file" and "ro" in the lun<n> (or,
 * to be more precise, in a directory whose name comes from the
 * "lun_name_format" option!) subdirectory of the gadget's sysfs
 * directory.  If the "removable" option is set, writing to these
 * files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab.  Changes to the
 * ro setting are not allowed when the medium is loaded or if CD-ROM
 * emulation is being used.
 *
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern, which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */


/*
 * Driver Design
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions are always given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
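
/*
 * A minimal sketch of the producer half of the buffer-head protocol
 * described above, mirroring what do_read() and do_write() below
 * actually do (illustrative only; locking and error handling omitted):
 *
 *	bh = common->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY)
 *		sleep_thread(common);			-- wait for EMPTY
 *	fill bh->buf by file I/O or queue USB I/O	-- buffer is BUSY
 *	bh->state = BUF_STATE_FULL;			-- ready to drain
 *	common->next_buffhd_to_fill = bh->next;		-- advance the ring
 *
 * (For USB I/O the completion handlers mark the buffer FULL or EMPTY.)
 */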


/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */


#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/utsname.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "gadget_chips.h"



/*------------------------------------------------------------------------*/

#define FSG_DRIVER_DESC		"Mass Storage Function"
#define FSG_DRIVER_VERSION	"2009/09/11"

static const char fsg_string_interface[] = "Mass Storage";


#define FSG_NO_INTR_EP			1
#define FSG_BUFFHD_STATIC_BUFFER	1
#define FSG_NO_DEVICE_STRINGS		1
#define FSG_NO_OTG			1

#include "storage_common.c"


/*-------------------------------------------------------------------------*/

struct fsg_dev;


/* Data shared by all the FSG instances. */
struct fsg_common {
        struct usb_gadget *gadget;
        struct fsg_dev *fsg;
        struct fsg_dev *prev_fsg;

        /* filesem protects: backing files in use */
        struct rw_semaphore filesem;

        /* lock protects: state, all the req_busy's */
        spinlock_t lock;

        struct usb_ep *ep0;		/* Copy of gadget->ep0 */
        struct usb_request *ep0req;	/* Copy of cdev->req */
        unsigned int ep0_req_tag;
        const char *ep0req_name;

        struct fsg_buffhd *next_buffhd_to_fill;
        struct fsg_buffhd *next_buffhd_to_drain;
        struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];

        int cmnd_size;
        u8 cmnd[MAX_COMMAND_SIZE];

        unsigned int nluns;
        unsigned int lun;
        struct fsg_lun *luns;
        struct fsg_lun *curlun;

        unsigned int bulk_out_maxpacket;
        enum fsg_state state;		/* For exception handling */
        unsigned int exception_req_tag;

        u8 config, new_config;
        enum data_direction data_dir;
        u32 data_size;
        u32 data_size_from_cmnd;
        u32 tag;
        u32 residue;
        u32 usb_amount_left;

        unsigned int can_stall:1;
        unsigned int free_storage_on_release:1;
        unsigned int phase_error:1;
        unsigned int short_packet_received:1;
        unsigned int bad_lun_okay:1;
        unsigned int running:1;

        int thread_wakeup_needed;
        struct completion thread_notifier;
        struct task_struct *thread_task;

        /* Vendor (8 chars), product (16 chars), release (4
         * hexadecimal digits) and NUL byte */
        char inquiry_string[8 + 16 + 4 + 1];

        struct kref ref;
};


struct fsg_config {
        unsigned nluns;
        struct fsg_lun_config {
                const char *filename;
                char ro;
                char removable;
                char cdrom;
        } luns[FSG_MAX_LUNS];

        const char *lun_name_format;
        const char *thread_name;

        const char *vendor_name;	/* 8 characters or less */
        const char *product_name;	/* 16 characters or less */
        u16 release;

        char can_stall;
};
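
/*
 * A minimal, illustrative sketch (not used anywhere in this file) of how
 * a gadget might fill in an fsg_config for a single removable, read/write
 * LUN backed by a file.  The backing path is hypothetical; the NULL and
 * 0xffff values request the defaults described at the top of this file.
 *
 *	static struct fsg_config example_cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename  = "/data/backing.img",
 *			.ro        = 0,
 *			.removable = 1,
 *			.cdrom     = 0,
 *		},
 *		.lun_name_format = NULL,
 *		.thread_name     = NULL,
 *		.vendor_name     = NULL,
 *		.product_name    = NULL,
 *		.release         = 0xffff,
 *		.can_stall       = 1,
 *	};
 */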


struct fsg_dev {
        struct usb_function function;
        struct usb_gadget *gadget;	/* Copy of cdev->gadget */
        struct fsg_common *common;

        u16 interface_number;

        unsigned int bulk_in_enabled:1;
        unsigned int bulk_out_enabled:1;

        unsigned long atomic_bitflags;
#define IGNORE_BULK_OUT		0

        struct usb_ep *bulk_in;
        struct usb_ep *bulk_out;
};


static inline int __fsg_is_set(struct fsg_common *common,
                               const char *func, unsigned line)
{
        if (common->fsg)
                return 1;
        ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
        return 0;
}

#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))


static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
{
        return container_of(f, struct fsg_dev, function);
}


typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_common *common)
{
        return common->state > FSG_STATE_IDLE;
}

/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_common *common,
                                    struct fsg_buffhd *bh, unsigned int length)
{
        unsigned int rem;

        bh->bulk_out_intended_length = length;
        rem = length % common->bulk_out_maxpacket;
        if (rem > 0)
                length += common->bulk_out_maxpacket - rem;
        bh->outreq->length = length;
}
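
/*
 * Worked example for set_bulk_out_req_length() above (values assumed):
 * with bulk_out_maxpacket == 512, a requested length of 600 leaves
 * rem == 88, so the queued request is padded to 600 + (512 - 88) == 1024,
 * the next multiple of the maxpacket size, while bulk_out_intended_length
 * still records the original 600 bytes.
 */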

/*-------------------------------------------------------------------------*/

static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
        const char *name;

        if (ep == fsg->bulk_in)
                name = "bulk-in";
        else if (ep == fsg->bulk_out)
                name = "bulk-out";
        else
                name = ep->name;
        DBG(fsg, "%s set halt\n", name);
        return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/

/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
        /* Tell the main thread that something has happened */
        common->thread_wakeup_needed = 1;
        if (common->thread_task)
                wake_up_process(common->thread_task);
}


static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
{
        unsigned long flags;

        /* Do nothing if a higher-priority exception is already in progress.
         * If a lower-or-equal priority exception is in progress, preempt it
         * and notify the main thread by sending it a signal. */
        spin_lock_irqsave(&common->lock, flags);
        if (common->state <= new_state) {
                common->exception_req_tag = common->ep0_req_tag;
                common->state = new_state;
                if (common->thread_task)
                        send_sig_info(SIGUSR1, SEND_SIG_FORCED,
                                      common->thread_task);
        }
        spin_unlock_irqrestore(&common->lock, flags);
}


/*-------------------------------------------------------------------------*/

static int ep0_queue(struct fsg_common *common)
{
        int rc;

        rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
        common->ep0->driver_data = common;
        if (rc != 0 && rc != -ESHUTDOWN) {
                /* We can't do much more than wait for a reset */
                WARNING(common, "error in submission: %s --> %d\n",
                        common->ep0->name, rc);
        }
        return rc;
}

/*-------------------------------------------------------------------------*/

/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct fsg_common *common = ep->driver_data;
        struct fsg_buffhd *bh = req->context;

        if (req->status || req->actual != req->length)
                DBG(common, "%s --> %d, %u/%u\n", __func__,
                    req->status, req->actual, req->length);
        if (req->status == -ECONNRESET)		/* Request was cancelled */
                usb_ep_fifo_flush(ep);

        /* Hold the lock while we update the request and buffer states */
        smp_wmb();
        spin_lock(&common->lock);
        bh->inreq_busy = 0;
        bh->state = BUF_STATE_EMPTY;
        wakeup_thread(common);
        spin_unlock(&common->lock);
}

static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct fsg_common *common = ep->driver_data;
        struct fsg_buffhd *bh = req->context;

        dump_msg(common, "bulk-out", req->buf, req->actual);
        if (req->status || req->actual != bh->bulk_out_intended_length)
                DBG(common, "%s --> %d, %u/%u\n", __func__,
                    req->status, req->actual,
                    bh->bulk_out_intended_length);
        if (req->status == -ECONNRESET)		/* Request was cancelled */
                usb_ep_fifo_flush(ep);

        /* Hold the lock while we update the request and buffer states */
        smp_wmb();
        spin_lock(&common->lock);
        bh->outreq_busy = 0;
        bh->state = BUF_STATE_FULL;
        wakeup_thread(common);
        spin_unlock(&common->lock);
}


/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers.  These always run in_irq. */

static int fsg_setup(struct usb_function *f,
                     const struct usb_ctrlrequest *ctrl)
{
        struct fsg_dev *fsg = fsg_from_func(f);
        struct usb_request *req = fsg->common->ep0req;
        u16 w_index = le16_to_cpu(ctrl->wIndex);
        u16 w_value = le16_to_cpu(ctrl->wValue);
        u16 w_length = le16_to_cpu(ctrl->wLength);

        if (!fsg->common->config)
                return -EOPNOTSUPP;

        switch (ctrl->bRequest) {

        case USB_BULK_RESET_REQUEST:
                if (ctrl->bRequestType !=
                    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
                        break;
                if (w_index != fsg->interface_number || w_value != 0)
                        return -EDOM;

                /* Raise an exception to stop the current operation
                 * and reinitialize our state. */
                DBG(fsg, "bulk reset request\n");
                raise_exception(fsg->common, FSG_STATE_RESET);
                return DELAYED_STATUS;

        case USB_BULK_GET_MAX_LUN_REQUEST:
                if (ctrl->bRequestType !=
                    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
                        break;
                if (w_index != fsg->interface_number || w_value != 0)
                        return -EDOM;
                VDBG(fsg, "get max LUN\n");
                *(u8 *) req->buf = fsg->common->nluns - 1;
                return 1;
        }

        VDBG(fsg,
             "unknown class-specific control req "
             "%02x.%02x v%04x i%04x l%u\n",
             ctrl->bRequestType, ctrl->bRequest,
             le16_to_cpu(ctrl->wValue), w_index, w_length);
        return -EOPNOTSUPP;
}
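
/*
 * Worked example for the Get Max LUN case above: a function configured
 * with common->nluns == 2 stores the single byte 0x01 (the highest LUN
 * index) in the ep0 buffer and returns 1, the length of the data stage
 * to be queued by the composite layer.
 */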


/*-------------------------------------------------------------------------*/

/* All the following routines run in process context */


/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
                           struct usb_request *req, int *pbusy,
                           enum fsg_buffer_state *state)
{
        int rc;

        if (ep == fsg->bulk_in)
                dump_msg(fsg, "bulk-in", req->buf, req->length);

        spin_lock_irq(&fsg->common->lock);
        *pbusy = 1;
        *state = BUF_STATE_BUSY;
        spin_unlock_irq(&fsg->common->lock);
        rc = usb_ep_queue(ep, req, GFP_KERNEL);
        if (rc != 0) {
                *pbusy = 0;
                *state = BUF_STATE_EMPTY;

                /* We can't do much more than wait for a reset */

                /* Note: currently the net2280 driver fails zero-length
                 * submissions if DMA is enabled. */
                if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
                                          req->length == 0))
                        WARNING(fsg, "error in submission: %s --> %d\n",
                                ep->name, rc);
        }
}
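
/*
 * Note that START_TRANSFER_OR() below deliberately ends in a dangling
 * "else": the statement written immediately after an invocation becomes
 * the error path taken when common->fsg is NULL (see its uses in
 * do_read() and do_write()).  START_TRANSFER() closes that else with a
 * harmless "(void)0" for callers that need no error path.
 */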

#define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
        if (fsg_is_set(common))						\
                start_transfer((common)->fsg, (common)->fsg->ep_name,	\
                               req, pbusy, state);			\
        else

#define START_TRANSFER(common, ep_name, req, pbusy, state)		\
        START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0



static int sleep_thread(struct fsg_common *common)
{
        int rc = 0;

        /* Wait until a signal arrives or we are woken up */
        for (;;) {
                try_to_freeze();
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current)) {
                        rc = -EINTR;
                        break;
                }
                if (common->thread_wakeup_needed)
                        break;
                schedule();
        }
        __set_current_state(TASK_RUNNING);
        common->thread_wakeup_needed = 0;
        return rc;
}
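
/*
 * The wait pattern used throughout this file is (sketch only):
 *
 *	while (bh->state != BUF_STATE_EMPTY) {
 *		rc = sleep_thread(common);
 *		if (rc)
 *			return rc;
 *	}
 *
 * sleep_thread() returns -EINTR when a signal is pending (e.g. the
 * SIGUSR1 sent by raise_exception()), so these loops cannot outlive a
 * pending exception.
 */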


/*-------------------------------------------------------------------------*/

static int do_read(struct fsg_common *common)
{
        struct fsg_lun *curlun = common->curlun;
        u32 lba;
        struct fsg_buffhd *bh;
        int rc;
        u32 amount_left;
        loff_t file_offset, file_offset_tmp;
        unsigned int amount;
        unsigned int partial_page;
        ssize_t nread;

        /* Get the starting Logical Block Address and check that it's
         * not too big */
        if (common->cmnd[0] == SC_READ_6)
                lba = get_unaligned_be24(&common->cmnd[1]);
        else {
                lba = get_unaligned_be32(&common->cmnd[2]);

                /* We allow DPO (Disable Page Out = don't save data in the
                 * cache) and FUA (Force Unit Access = don't read from the
                 * cache), but we don't implement them. */
                if ((common->cmnd[1] & ~0x18) != 0) {
                        curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                        return -EINVAL;
                }
        }
        if (lba >= curlun->num_sectors) {
                curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                return -EINVAL;
        }
        file_offset = ((loff_t) lba) << 9;

        /* Carry out the file reads */
        amount_left = common->data_size_from_cmnd;
        if (unlikely(amount_left == 0))
                return -EIO;		/* No default reply */

        for (;;) {

                /* Figure out how much we need to read:
                 * Try to read the remaining amount.
                 * But don't read more than the buffer size.
                 * And don't try to read past the end of the file.
                 * Finally, if we're not at a page boundary, don't read past
                 * the next page.
                 * If this means reading 0 then we were asked to read past
                 * the end of file. */
                amount = min(amount_left, FSG_BUFLEN);
                amount = min((loff_t) amount,
                             curlun->file_length - file_offset);
                partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
                if (partial_page > 0)
                        amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
                                     partial_page);

                /* Wait for the next buffer to become available */
                bh = common->next_buffhd_to_fill;
                while (bh->state != BUF_STATE_EMPTY) {
                        rc = sleep_thread(common);
                        if (rc)
                                return rc;
                }

                /* If we were asked to read past the end of file,
                 * end with an empty buffer. */
                if (amount == 0) {
                        curlun->sense_data =
                                SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                        curlun->sense_data_info = file_offset >> 9;
                        curlun->info_valid = 1;
                        bh->inreq->length = 0;
                        bh->state = BUF_STATE_FULL;
                        break;
                }

                /* Perform the read */
                file_offset_tmp = file_offset;
                nread = vfs_read(curlun->filp,
                                 (char __user *) bh->buf,
                                 amount, &file_offset_tmp);
                VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
                      (unsigned long long) file_offset,
                      (int) nread);
                if (signal_pending(current))
                        return -EINTR;

                if (nread < 0) {
                        LDBG(curlun, "error in file read: %d\n",
                             (int) nread);
                        nread = 0;
                } else if (nread < amount) {
                        LDBG(curlun, "partial file read: %d/%u\n",
                             (int) nread, amount);
                        nread -= (nread & 511);	/* Round down to a block */
                }
                file_offset += nread;
                amount_left -= nread;
                common->residue -= nread;
                bh->inreq->length = nread;
                bh->state = BUF_STATE_FULL;

                /* If an error occurred, report it and its position */
                if (nread < amount) {
                        curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
                        curlun->sense_data_info = file_offset >> 9;
                        curlun->info_valid = 1;
                        break;
                }

                if (amount_left == 0)
                        break;		/* No more left to read */

                /* Send this buffer and go read some more */
                bh->inreq->zero = 0;
                START_TRANSFER_OR(common, bulk_in, bh->inreq,
                                  &bh->inreq_busy, &bh->state)
                        /* Don't know what to do if
                         * common->fsg is NULL */
                        return -EIO;
                common->next_buffhd_to_fill = bh->next;
        }

        return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

static int do_write(struct fsg_common *common)
{
        struct fsg_lun *curlun = common->curlun;
        u32 lba;
        struct fsg_buffhd *bh;
        int get_some_more;
        u32 amount_left_to_req, amount_left_to_write;
        loff_t usb_offset, file_offset, file_offset_tmp;
        unsigned int amount;
        unsigned int partial_page;
        ssize_t nwritten;
        int rc;

        if (curlun->ro) {
                curlun->sense_data = SS_WRITE_PROTECTED;
                return -EINVAL;
        }
        spin_lock(&curlun->filp->f_lock);
        curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
        spin_unlock(&curlun->filp->f_lock);

        /* Get the starting Logical Block Address and check that it's
         * not too big */
        if (common->cmnd[0] == SC_WRITE_6)
                lba = get_unaligned_be24(&common->cmnd[1]);
        else {
                lba = get_unaligned_be32(&common->cmnd[2]);

                /* We allow DPO (Disable Page Out = don't save data in the
                 * cache) and FUA (Force Unit Access = write directly to the
                 * medium).  We don't implement DPO; we implement FUA by
                 * performing synchronous output. */
                if (common->cmnd[1] & ~0x18) {
                        curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                        return -EINVAL;
                }
                if (common->cmnd[1] & 0x08) {	/* FUA */
                        spin_lock(&curlun->filp->f_lock);
                        curlun->filp->f_flags |= O_SYNC;
                        spin_unlock(&curlun->filp->f_lock);
                }
        }
        if (lba >= curlun->num_sectors) {
                curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                return -EINVAL;
        }

        /* Carry out the file writes */
        get_some_more = 1;
        file_offset = usb_offset = ((loff_t) lba) << 9;
        amount_left_to_req = common->data_size_from_cmnd;
        amount_left_to_write = common->data_size_from_cmnd;

        while (amount_left_to_write > 0) {

                /* Queue a request for more data from the host */
                bh = common->next_buffhd_to_fill;
                if (bh->state == BUF_STATE_EMPTY && get_some_more) {

                        /* Figure out how much we want to get:
                         * Try to get the remaining amount.
                         * But don't get more than the buffer size.
                         * And don't try to go past the end of the file.
                         * If we're not at a page boundary,
                         * don't go past the next page.
                         * If this means getting 0, then we were asked
                         * to write past the end of file.
                         * Finally, round down to a block boundary. */
                        amount = min(amount_left_to_req, FSG_BUFLEN);
                        amount = min((loff_t) amount, curlun->file_length -
                                     usb_offset);
                        partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
                        if (partial_page > 0)
                                amount = min(amount,
                                             (unsigned int) PAGE_CACHE_SIZE - partial_page);

                        if (amount == 0) {
                                get_some_more = 0;
                                curlun->sense_data =
                                        SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                                curlun->sense_data_info = usb_offset >> 9;
                                curlun->info_valid = 1;
                                continue;
                        }
                        amount -= (amount & 511);
                        if (amount == 0) {

                                /* Why were we asked to transfer a
                                 * partial block? */
                                get_some_more = 0;
                                continue;
                        }

                        /* Get the next buffer */
                        usb_offset += amount;
                        common->usb_amount_left -= amount;
                        amount_left_to_req -= amount;
                        if (amount_left_to_req == 0)
                                get_some_more = 0;

                        /* amount is always divisible by 512, hence by
                         * the bulk-out maxpacket size */
                        bh->outreq->length = amount;
                        bh->bulk_out_intended_length = amount;
                        bh->outreq->short_not_ok = 1;
                        START_TRANSFER_OR(common, bulk_out, bh->outreq,
                                          &bh->outreq_busy, &bh->state)
                                /* Don't know what to do if
                                 * common->fsg is NULL */
                                return -EIO;
                        common->next_buffhd_to_fill = bh->next;
                        continue;
                }

                /* Write the received data to the backing file */
                bh = common->next_buffhd_to_drain;
                if (bh->state == BUF_STATE_EMPTY && !get_some_more)
                        break;			/* We stopped early */
                if (bh->state == BUF_STATE_FULL) {
                        smp_rmb();
                        common->next_buffhd_to_drain = bh->next;
                        bh->state = BUF_STATE_EMPTY;

                        /* Did something go wrong with the transfer? */
                        if (bh->outreq->status != 0) {
                                curlun->sense_data = SS_COMMUNICATION_FAILURE;
                                curlun->sense_data_info = file_offset >> 9;
                                curlun->info_valid = 1;
                                break;
                        }

                        amount = bh->outreq->actual;
                        if (curlun->file_length - file_offset < amount) {
                                LERROR(curlun,
                                       "write %u @ %llu beyond end %llu\n",
                                       amount, (unsigned long long) file_offset,
                                       (unsigned long long) curlun->file_length);
                                amount = curlun->file_length - file_offset;
                        }

                        /* Perform the write */
                        file_offset_tmp = file_offset;
                        nwritten = vfs_write(curlun->filp,
                                             (char __user *) bh->buf,
                                             amount, &file_offset_tmp);
                        VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
                              (unsigned long long) file_offset,
                              (int) nwritten);
                        if (signal_pending(current))
                                return -EINTR;		/* Interrupted! */

                        if (nwritten < 0) {
                                LDBG(curlun, "error in file write: %d\n",
                                     (int) nwritten);
                                nwritten = 0;
                        } else if (nwritten < amount) {
                                LDBG(curlun, "partial file write: %d/%u\n",
                                     (int) nwritten, amount);
                                nwritten -= (nwritten & 511);
                                /* Round down to a block */
                        }
                        file_offset += nwritten;
                        amount_left_to_write -= nwritten;
                        common->residue -= nwritten;

                        /* If an error occurred, report it and its position */
                        if (nwritten < amount) {
                                curlun->sense_data = SS_WRITE_ERROR;
                                curlun->sense_data_info = file_offset >> 9;
                                curlun->info_valid = 1;
                                break;
                        }

                        /* Did the host decide to stop early? */
                        if (bh->outreq->actual != bh->outreq->length) {
                                common->short_packet_received = 1;
                                break;
                        }
                        continue;
                }

                /* Wait for something to happen */
                rc = sleep_thread(common);
                if (rc)
                        return rc;
        }

        return -EIO;		/* No default reply */
}


/*-------------------------------------------------------------------------*/

static int do_synchronize_cache(struct fsg_common *common)
{
        struct fsg_lun *curlun = common->curlun;
        int rc;

        /* We ignore the requested LBA and write out all file's
         * dirty data buffers. */
        rc = fsg_lun_fsync_sub(curlun);
        if (rc)
                curlun->sense_data = SS_WRITE_ERROR;
        return 0;
}


/*-------------------------------------------------------------------------*/

static void invalidate_sub(struct fsg_lun *curlun)
{
        struct file *filp = curlun->filp;
        struct inode *inode = filp->f_path.dentry->d_inode;
        unsigned long rc;

        rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
        VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
}

static int do_verify(struct fsg_common *common)
{
        struct fsg_lun *curlun = common->curlun;
        u32 lba;
        u32 verification_length;
        struct fsg_buffhd *bh = common->next_buffhd_to_fill;
        loff_t file_offset, file_offset_tmp;
        u32 amount_left;
        unsigned int amount;
        ssize_t nread;

        /* Get the starting Logical Block Address and check that it's
         * not too big */
        lba = get_unaligned_be32(&common->cmnd[2]);
        if (lba >= curlun->num_sectors) {
                curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                return -EINVAL;
        }

        /* We allow DPO (Disable Page Out = don't save data in the
         * cache) but we don't implement it. */
        if (common->cmnd[1] & ~0x10) {
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }

        verification_length = get_unaligned_be16(&common->cmnd[7]);
        if (unlikely(verification_length == 0))
                return -EIO;		/* No default reply */

        /* Prepare to carry out the file verify */
        amount_left = verification_length << 9;
        file_offset = ((loff_t) lba) << 9;

        /* Write out all the dirty buffers before invalidating them */
        fsg_lun_fsync_sub(curlun);
        if (signal_pending(current))
                return -EINTR;

        invalidate_sub(curlun);
        if (signal_pending(current))
                return -EINTR;

        /* Just try to read the requested blocks */
        while (amount_left > 0) {

                /* Figure out how much we need to read:
                 * Try to read the remaining amount, but not more than
                 * the buffer size.
                 * And don't try to read past the end of the file.
                 * If this means reading 0 then we were asked to read
                 * past the end of file. */
                amount = min(amount_left, FSG_BUFLEN);
                amount = min((loff_t) amount,
                             curlun->file_length - file_offset);
                if (amount == 0) {
                        curlun->sense_data =
                                SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                        curlun->sense_data_info = file_offset >> 9;
                        curlun->info_valid = 1;
                        break;
                }

                /* Perform the read */
                file_offset_tmp = file_offset;
                nread = vfs_read(curlun->filp,
                                 (char __user *) bh->buf,
                                 amount, &file_offset_tmp);
                VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
                      (unsigned long long) file_offset,
                      (int) nread);
                if (signal_pending(current))
                        return -EINTR;

                if (nread < 0) {
                        LDBG(curlun, "error in file verify: %d\n",
                             (int) nread);
                        nread = 0;
                } else if (nread < amount) {
                        LDBG(curlun, "partial file verify: %d/%u\n",
                             (int) nread, amount);
                        nread -= (nread & 511);	/* Round down to a sector */
                }
                if (nread == 0) {
                        curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
                        curlun->sense_data_info = file_offset >> 9;
                        curlun->info_valid = 1;
                        break;
                }
                file_offset += nread;
                amount_left -= nread;
        }
        return 0;
}


/*-------------------------------------------------------------------------*/

static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        u8 *buf = (u8 *) bh->buf;

        if (!curlun) {		/* Unsupported LUNs are okay */
                common->bad_lun_okay = 1;
                memset(buf, 0, 36);
                buf[0] = 0x7f;		/* Unsupported, no device-type */
                buf[4] = 31;		/* Additional length */
                return 36;
        }

        buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
        buf[1] = curlun->removable ? 0x80 : 0;
        buf[2] = 2;		/* ANSI SCSI level 2 */
        buf[3] = 2;		/* SCSI-2 INQUIRY data format */
        buf[4] = 31;		/* Additional length */
        buf[5] = 0;		/* No special options */
        buf[6] = 0;
        buf[7] = 0;
        memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
        return 36;
}
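
/*
 * Illustrative example: for a removable disk LUN the 36-byte INQUIRY
 * reply built above begins 0x00 0x80 0x02 0x02 0x1f 0x00 0x00 0x00
 * (TYPE_DISK, removable bit, SCSI-2 level and format, 31 bytes of
 * additional data), followed by 28 bytes of inquiry_string: the 8-char
 * vendor, 16-char product and 4 hex release digits.
 */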


static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        u8 *buf = (u8 *) bh->buf;
        u32 sd, sdinfo;
        int valid;

        /*
         * From the SCSI-2 spec., section 7.9 (Unit attention condition):
         *
         * If a REQUEST SENSE command is received from an initiator
         * with a pending unit attention condition (before the target
         * generates the contingent allegiance condition), then the
         * target shall either:
         *   a) report any pending sense data and preserve the unit
         *	attention condition on the logical unit, or,
         *   b) report the unit attention condition, may discard any
         *	pending sense data, and clear the unit attention
         *	condition on the logical unit for that initiator.
         *
         * FSG normally uses option a); enable this code to use option b).
         */
#if 0
        if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
                curlun->sense_data = curlun->unit_attention_data;
                curlun->unit_attention_data = SS_NO_SENSE;
        }
#endif

        if (!curlun) {		/* Unsupported LUNs are okay */
                common->bad_lun_okay = 1;
                sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
                sdinfo = 0;
                valid = 0;
        } else {
                sd = curlun->sense_data;
                sdinfo = curlun->sense_data_info;
                valid = curlun->info_valid << 7;
                curlun->sense_data = SS_NO_SENSE;
                curlun->sense_data_info = 0;
                curlun->info_valid = 0;
        }

        memset(buf, 0, 18);
        buf[0] = valid | 0x70;			/* Valid, current error */
        buf[2] = SK(sd);
        put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
        buf[7] = 18 - 8;			/* Additional sense length */
        buf[12] = ASC(sd);
        buf[13] = ASCQ(sd);
        return 18;
}


static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        u32 lba = get_unaligned_be32(&common->cmnd[2]);
        int pmi = common->cmnd[8];
        u8 *buf = (u8 *) bh->buf;

        /* Check the PMI and LBA fields */
        if (pmi > 1 || (pmi == 0 && lba != 0)) {
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }

        put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
                                                /* Max logical block */
        put_unaligned_be32(512, &buf[4]);	/* Block length */
        return 8;
}
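
/*
 * Worked example (size assumed): a 1 GiB backing file gives
 * num_sectors == 2097152 at the 512-byte block size used here, so the
 * 8-byte READ CAPACITY response above reports a last LBA of 2097151 and
 * a block length of 512, both big-endian.
 */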


static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        int msf = common->cmnd[1] & 0x02;
        u32 lba = get_unaligned_be32(&common->cmnd[2]);
        u8 *buf = (u8 *) bh->buf;

        if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }
        if (lba >= curlun->num_sectors) {
                curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
                return -EINVAL;
        }

        memset(buf, 0, 8);
        buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
        store_cdrom_address(&buf[4], msf, lba);
        return 8;
}


static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        int msf = common->cmnd[1] & 0x02;
        int start_track = common->cmnd[6];
        u8 *buf = (u8 *) bh->buf;

        if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
            start_track > 1) {
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }

        memset(buf, 0, 20);
        buf[1] = (20-2);		/* TOC data length */
        buf[2] = 1;			/* First track number */
        buf[3] = 1;			/* Last track number */
        buf[5] = 0x16;			/* Data track, copying allowed */
        buf[6] = 0x01;			/* Only track is number 1 */
        store_cdrom_address(&buf[8], msf, 0);

        buf[13] = 0x16;			/* Lead-out track is data */
        buf[14] = 0xAA;			/* Lead-out track number */
        store_cdrom_address(&buf[16], msf, curlun->num_sectors);
        return 20;
}


static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        int mscmnd = common->cmnd[0];
        u8 *buf = (u8 *) bh->buf;
        u8 *buf0 = buf;
        int pc, page_code;
        int changeable_values, all_pages;
        int valid_page = 0;
        int len, limit;

        if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }
        pc = common->cmnd[2] >> 6;
        page_code = common->cmnd[2] & 0x3f;
        if (pc == 3) {
                curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
                return -EINVAL;
        }
        changeable_values = (pc == 1);
        all_pages = (page_code == 0x3f);

        /* Write the mode parameter header.  Fixed values are: default
         * medium type, no cache control (DPOFUA), and no block descriptors.
         * The only variable value is the WriteProtect bit.  We will fill in
         * the mode data length later. */
        memset(buf, 0, 8);
        if (mscmnd == SC_MODE_SENSE_6) {
                buf[2] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
                buf += 4;
                limit = 255;
        } else {			/* SC_MODE_SENSE_10 */
                buf[3] = (curlun->ro ? 0x80 : 0x00);	/* WP, DPOFUA */
                buf += 8;
                limit = 65535;		/* Should really be FSG_BUFLEN */
        }

        /* No block descriptors */

        /* The mode pages, in numerical order.  The only page we support
         * is the Caching page. */
        if (page_code == 0x08 || all_pages) {
                valid_page = 1;
                buf[0] = 0x08;		/* Page code */
                buf[1] = 10;		/* Page length */
                memset(buf+2, 0, 10);	/* None of the fields are changeable */

                if (!changeable_values) {
                        buf[2] = 0x04;	/* Write cache enable, */
                                        /* Read cache not disabled */
                                        /* No cache retention priorities */
                        put_unaligned_be16(0xffff, &buf[4]);
                                        /* Don't disable prefetch */
                                        /* Minimum prefetch = 0 */
                        put_unaligned_be16(0xffff, &buf[8]);
                                        /* Maximum prefetch */
                        put_unaligned_be16(0xffff, &buf[10]);
                                        /* Maximum prefetch ceiling */
                }
                buf += 12;
        }

        /* Check that a valid page was requested and the mode data length
         * isn't too long. */
        len = buf - buf0;
        if (!valid_page || len > limit) {
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }

        /* Store the mode data length */
        if (mscmnd == SC_MODE_SENSE_6)
                buf0[0] = len - 1;
        else
                put_unaligned_be16(len - 2, buf0);
        return len;
}


static int do_start_stop(struct fsg_common *common)
{
        if (!common->curlun) {
                return -EINVAL;
        } else if (!common->curlun->removable) {
                common->curlun->sense_data = SS_INVALID_COMMAND;
                return -EINVAL;
        }
        return 0;
}


static int do_prevent_allow(struct fsg_common *common)
{
        struct fsg_lun *curlun = common->curlun;
        int prevent;

        if (!common->curlun) {
                return -EINVAL;
        } else if (!common->curlun->removable) {
                common->curlun->sense_data = SS_INVALID_COMMAND;
                return -EINVAL;
        }

        prevent = common->cmnd[4] & 0x01;
        if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
                curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
                return -EINVAL;
        }

        if (curlun->prevent_medium_removal && !prevent)
                fsg_lun_fsync_sub(curlun);
        curlun->prevent_medium_removal = prevent;
        return 0;
}


static int do_read_format_capacities(struct fsg_common *common,
                                     struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;
        u8 *buf = (u8 *) bh->buf;

        buf[0] = buf[1] = buf[2] = 0;
        buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
        buf += 4;

        put_unaligned_be32(curlun->num_sectors, &buf[0]);
                                                /* Number of blocks */
        put_unaligned_be32(512, &buf[4]);	/* Block length */
        buf[4] = 0x02;				/* Current capacity */
        return 12;
}


static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
{
        struct fsg_lun *curlun = common->curlun;

        /* We don't support MODE SELECT */
        if (curlun)
                curlun->sense_data = SS_INVALID_COMMAND;
        return -EINVAL;
}


/*-------------------------------------------------------------------------*/

static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
        int rc;

        rc = fsg_set_halt(fsg, fsg->bulk_in);
        if (rc == -EAGAIN)
                VDBG(fsg, "delayed bulk-in endpoint halt\n");
        while (rc != 0) {
                if (rc != -EAGAIN) {
                        WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
                        rc = 0;
                        break;
                }

                /* Wait for a short time and then try again */
                if (msleep_interruptible(100) != 0)
                        return -EINTR;
                rc = usb_ep_set_halt(fsg->bulk_in);
        }
        return rc;
}

static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
        int rc;

        DBG(fsg, "bulk-in set wedge\n");
        rc = usb_ep_set_wedge(fsg->bulk_in);
        if (rc == -EAGAIN)
                VDBG(fsg, "delayed bulk-in endpoint wedge\n");
        while (rc != 0) {
                if (rc != -EAGAIN) {
                        WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
                        rc = 0;
                        break;
                }

                /* Wait for a short time and then try again */
                if (msleep_interruptible(100) != 0)
                        return -EINTR;
                rc = usb_ep_set_wedge(fsg->bulk_in);
        }
        return rc;
}

static int pad_with_zeros(struct fsg_dev *fsg)
{
        struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
        u32 nkeep = bh->inreq->length;
        u32 nsend;
        int rc;

        bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
        fsg->common->usb_amount_left = nkeep + fsg->common->residue;
        while (fsg->common->usb_amount_left > 0) {

                /* Wait for the next buffer to be free */
                while (bh->state != BUF_STATE_EMPTY) {
                        rc = sleep_thread(fsg->common);
                        if (rc)
                                return rc;
                }

                nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
                memset(bh->buf + nkeep, 0, nsend - nkeep);
                bh->inreq->length = nsend;
                bh->inreq->zero = 0;
                start_transfer(fsg, fsg->bulk_in, bh->inreq,
                               &bh->inreq_busy, &bh->state);
                bh = fsg->common->next_buffhd_to_fill = bh->next;
                fsg->common->usb_amount_left -= nsend;
                nkeep = 0;
        }
        return 0;
}

static int throw_away_data(struct fsg_common *common)
{
        struct fsg_buffhd *bh;
        u32 amount;
        int rc;

        for (bh = common->next_buffhd_to_drain;
             bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
             bh = common->next_buffhd_to_drain) {

                /* Throw away the data in a filled buffer */
                if (bh->state == BUF_STATE_FULL) {
                        smp_rmb();
                        bh->state = BUF_STATE_EMPTY;
                        common->next_buffhd_to_drain = bh->next;

                        /* A short packet or an error ends everything */
                        if (bh->outreq->actual != bh->outreq->length ||
                            bh->outreq->status != 0) {
                                raise_exception(common,
                                                FSG_STATE_ABORT_BULK_OUT);
                                return -EINTR;
                        }
                        continue;
                }

                /* Try to submit another request if we need one */
                bh = common->next_buffhd_to_fill;
                if (bh->state == BUF_STATE_EMPTY
                    && common->usb_amount_left > 0) {
                        amount = min(common->usb_amount_left, FSG_BUFLEN);

                        /* amount is always divisible by 512, hence by
                         * the bulk-out maxpacket size */
                        bh->outreq->length = amount;
                        bh->bulk_out_intended_length = amount;
                        bh->outreq->short_not_ok = 1;
                        START_TRANSFER_OR(common, bulk_out, bh->outreq,
                                          &bh->outreq_busy, &bh->state)
                                /* Don't know what to do if
                                 * common->fsg is NULL */
                                return -EIO;
                        common->next_buffhd_to_fill = bh->next;
                        common->usb_amount_left -= amount;
                        continue;
                }

                /* Otherwise wait for something to happen */
                rc = sleep_thread(common);
                if (rc)
                        return rc;
        }
        return 0;
}


static int finish_reply(struct fsg_common *common)
{
        struct fsg_buffhd *bh = common->next_buffhd_to_fill;
        int rc = 0;

        switch (common->data_dir) {
        case DATA_DIR_NONE:
                break;			/* Nothing to send */

        /* If we don't know whether the host wants to read or write,
         * this must be CB or CBI with an unknown command.  We mustn't
         * try to send or receive any data.  So stall both bulk pipes
         * if we can and wait for a reset. */
        case DATA_DIR_UNKNOWN:
                if (!common->can_stall) {
                        /* Nothing */
                } else if (fsg_is_set(common)) {
                        fsg_set_halt(common->fsg, common->fsg->bulk_out);
                        rc = halt_bulk_in_endpoint(common->fsg);
                } else {
                        /* Don't know what to do if common->fsg is NULL */
                        rc = -EIO;
                }
                break;

        /* All but the last buffer of data must have already been sent */
        case DATA_DIR_TO_HOST:
                if (common->data_size == 0) {
                        /* Nothing to send */

                /* If there's no residue, simply send the last buffer */
                } else if (common->residue == 0) {
                        bh->inreq->zero = 0;
                        START_TRANSFER_OR(common, bulk_in, bh->inreq,
                                          &bh->inreq_busy, &bh->state)
                                return -EIO;
                        common->next_buffhd_to_fill = bh->next;

                /* For Bulk-only, if we're allowed to stall then send the
                 * short packet and halt the bulk-in endpoint.  If we can't
                 * stall, pad out the remaining data with 0's. */
                } else if (common->can_stall) {
                        bh->inreq->zero = 1;
                        START_TRANSFER_OR(common, bulk_in, bh->inreq,
                                          &bh->inreq_busy, &bh->state)
                                /* Don't know what to do if
                                 * common->fsg is NULL */
                                rc = -EIO;
                        common->next_buffhd_to_fill = bh->next;
                        if (common->fsg)
                                rc = halt_bulk_in_endpoint(common->fsg);
                } else if (fsg_is_set(common)) {
                        rc = pad_with_zeros(common->fsg);
                } else {
                        /* Don't know what to do if common->fsg is NULL */
                        rc = -EIO;
                }
                break;

        /* We have processed all we want from the data the host has sent.
         * There may still be outstanding bulk-out requests. */
        case DATA_DIR_FROM_HOST:
                if (common->residue == 0) {
                        /* Nothing to receive */

                /* Did the host stop sending unexpectedly early? */
                } else if (common->short_packet_received) {
                        raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
                        rc = -EINTR;

                /* We haven't processed all the incoming data.  Even though
                 * we may be allowed to stall, doing so would cause a race.
                 * The controller may already have ACK'ed all the remaining
                 * bulk-out packets, in which case the host wouldn't see a
                 * STALL.  Not realizing the endpoint was halted, it wouldn't
                 * clear the halt -- leading to problems later on. */
#if 0
                } else if (common->can_stall) {
                        if (fsg_is_set(common))
                                fsg_set_halt(common->fsg,
                                             common->fsg->bulk_out);
                        raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
                        rc = -EINTR;
#endif

                /* We can't stall.  Read in the excess data and throw it
                 * all away. */
                } else {
                        rc = throw_away_data(common);
                }
                break;
        }
        return rc;
}


static int send_status(struct fsg_common *common)
{
        struct fsg_lun *curlun = common->curlun;
        struct fsg_buffhd *bh;
        struct bulk_cs_wrap *csw;
        int rc;
        u8 status = USB_STATUS_PASS;
        u32 sd, sdinfo = 0;

        /* Wait for the next buffer to become available */
        bh = common->next_buffhd_to_fill;
        while (bh->state != BUF_STATE_EMPTY) {
                rc = sleep_thread(common);
                if (rc)
                        return rc;
        }

        if (curlun) {
                sd = curlun->sense_data;
                sdinfo = curlun->sense_data_info;
        } else if (common->bad_lun_okay)
                sd = SS_NO_SENSE;
        else
                sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

        if (common->phase_error) {
                DBG(common, "sending phase-error status\n");
                status = USB_STATUS_PHASE_ERROR;
                sd = SS_INVALID_COMMAND;
        } else if (sd != SS_NO_SENSE) {
                DBG(common, "sending command-failure status\n");
                status = USB_STATUS_FAIL;
                VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
                     " info x%x\n",
                     SK(sd), ASC(sd), ASCQ(sd), sdinfo);
        }

        /* Store and send the Bulk-only CSW */
        csw = (void *)bh->buf;

        csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
MN
1704 csw->Tag = common->tag;
1705 csw->Residue = cpu_to_le32(common->residue);
93bcf12e 1706 csw->Status = status;
d5e2b67a 1707
93bcf12e
MN
1708 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1709 bh->inreq->zero = 0;
8ea864cf
MN
1710 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1711 &bh->inreq_busy, &bh->state)
1712 /* Don't know what to do if common->fsg is NULL */
1713 return -EIO;
d5e2b67a 1714
8ea864cf 1715 common->next_buffhd_to_fill = bh->next;
d5e2b67a
MN
1716 return 0;
1717}
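/*
 * Reference sketch, taken from the USB Mass Storage Bulk-Only Transport
 * spec rather than from this file: the CSW that send_status() fills in
 * is a fixed 13-byte little-endian packet,
 *
 *	offset  0, 4 bytes: dCSWSignature   = 0x53425355 ("USBS")
 *	offset  4, 4 bytes: dCSWTag         = tag echoed from the CBW
 *	offset  8, 4 bytes: dCSWDataResidue = bytes expected but not moved
 *	offset 12, 1 byte : bCSWStatus      = 0 pass, 1 fail, 2 phase error
 *
 * hence USB_BULK_CS_WRAP_LEN == 13 and the three status values used
 * above (USB_STATUS_PASS, USB_STATUS_FAIL, USB_STATUS_PHASE_ERROR).
 */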
1718
1719
1720/*-------------------------------------------------------------------------*/
1721
1722/* Check whether the command is properly formed and whether its data size
1723 * and direction agree with the values we already have. */
8ea864cf 1724static int check_command(struct fsg_common *common, int cmnd_size,
d5e2b67a
MN
1725 enum data_direction data_dir, unsigned int mask,
1726 int needs_medium, const char *name)
1727{
1728 int i;
8ea864cf 1729 int lun = common->cmnd[1] >> 5;
d5e2b67a
MN
1730 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1731 char hdlen[20];
1732 struct fsg_lun *curlun;
1733
d5e2b67a 1734 hdlen[0] = 0;
8ea864cf
MN
1735 if (common->data_dir != DATA_DIR_UNKNOWN)
1736 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1737 common->data_size);
1738 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
d26a6aa0 1739 name, cmnd_size, dirletter[(int) data_dir],
8ea864cf 1740 common->data_size_from_cmnd, common->cmnd_size, hdlen);
d5e2b67a
MN
1741
1742 /* We can't reply at all until we know the correct data direction
1743 * and size. */
8ea864cf 1744 if (common->data_size_from_cmnd == 0)
d5e2b67a 1745 data_dir = DATA_DIR_NONE;
8ea864cf
MN
1746 if (common->data_size < common->data_size_from_cmnd) {
1747 /* Host data size < Device data size is a phase error.
1748 * Carry out the command, but only transfer as much as
1749 * we are allowed. */
1750 common->data_size_from_cmnd = common->data_size;
1751 common->phase_error = 1;
d5e2b67a 1752 }
8ea864cf
MN
1753 common->residue = common->data_size;
1754 common->usb_amount_left = common->data_size;
d5e2b67a
MN
1755
1756 /* Conflicting data directions is a phase error */
8ea864cf
MN
1757 if (common->data_dir != data_dir
1758 && common->data_size_from_cmnd > 0) {
1759 common->phase_error = 1;
d5e2b67a
MN
1760 return -EINVAL;
1761 }
1762
1763 /* Verify the length of the command itself */
8ea864cf 1764 if (cmnd_size != common->cmnd_size) {
d5e2b67a
MN
1765
1766 /* Special case workaround: There are plenty of buggy SCSI
1767 * implementations. Many have issues with cbw->Length
1768 * field passing a wrong command size. For those cases we
1769 * always try to work around the problem by using the length
1770 * sent by the host side provided it is at least as large
1771 * as the correct command length.
1772 * Examples of such cases would be MS-Windows, which issues
1773 * REQUEST SENSE with cbw->Length == 12 where it should
1774 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1775 * REQUEST SENSE with cbw->Length == 10 where it should
1776 * be 6 as well.
1777 */
8ea864cf
MN
1778 if (cmnd_size <= common->cmnd_size) {
1779 DBG(common, "%s is buggy! Expected length %d "
a41ae418 1780 "but we got %d\n", name,
8ea864cf
MN
1781 cmnd_size, common->cmnd_size);
1782 cmnd_size = common->cmnd_size;
d5e2b67a 1783 } else {
8ea864cf 1784 common->phase_error = 1;
d5e2b67a
MN
1785 return -EINVAL;
1786 }
1787 }
1788
1789 /* Check that the LUN values are consistent */
8ea864cf
MN
1790 if (common->lun != lun)
1791 DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1792 common->lun, lun);
d5e2b67a
MN
1793
1794 /* Check the LUN */
8ea864cf
MN
1795 if (common->lun >= 0 && common->lun < common->nluns) {
1796 curlun = &common->luns[common->lun];
1797 common->curlun = curlun;
1798 if (common->cmnd[0] != SC_REQUEST_SENSE) {
d5e2b67a
MN
1799 curlun->sense_data = SS_NO_SENSE;
1800 curlun->sense_data_info = 0;
1801 curlun->info_valid = 0;
1802 }
1803 } else {
8ea864cf
MN
1804 common->curlun = NULL;
1805 curlun = NULL;
1806 common->bad_lun_okay = 0;
d5e2b67a
MN
1807
1808 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1809 * to use unsupported LUNs; all others may not. */
8ea864cf
MN
1810 if (common->cmnd[0] != SC_INQUIRY &&
1811 common->cmnd[0] != SC_REQUEST_SENSE) {
1812 DBG(common, "unsupported LUN %d\n", common->lun);
d5e2b67a
MN
1813 return -EINVAL;
1814 }
1815 }
1816
1817 /* If a unit attention condition exists, only INQUIRY and
1818 * REQUEST SENSE commands are allowed; anything else must fail. */
1819 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
8ea864cf
MN
1820 common->cmnd[0] != SC_INQUIRY &&
1821 common->cmnd[0] != SC_REQUEST_SENSE) {
d5e2b67a
MN
1822 curlun->sense_data = curlun->unit_attention_data;
1823 curlun->unit_attention_data = SS_NO_SENSE;
1824 return -EINVAL;
1825 }
1826
1827 /* Check that only command bytes listed in the mask are non-zero */
8ea864cf 1828 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
d5e2b67a 1829 for (i = 1; i < cmnd_size; ++i) {
8ea864cf 1830 if (common->cmnd[i] && !(mask & (1 << i))) {
d5e2b67a
MN
1831 if (curlun)
1832 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1833 return -EINVAL;
1834 }
1835 }
1836
1837 /* If the medium isn't mounted and the command needs to access
1838 * it, return an error. */
1839 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1840 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1841 return -EINVAL;
1842 }
1843
1844 return 0;
1845}
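/*
 * Worked example (not driver code): the "mask" argument marks which CDB
 * bytes may legitimately be non-zero, one bit per byte index (byte 0,
 * the opcode, is never checked).  For READ(10) the caller passes
 * (1<<1) | (0xf<<2) | (3<<7), meaning:
 *
 *	byte 1       flag bits (the LUN bits have already been masked off)
 *	bytes 2..5   32-bit logical block address
 *	bytes 7..8   16-bit transfer length
 *
 * Any other non-zero byte makes check_command() fail with
 * SS_INVALID_FIELD_IN_CDB, which the host would normally retrieve with
 * a later REQUEST SENSE.
 */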
1846
1847
8ea864cf 1848static int do_scsi_command(struct fsg_common *common)
d5e2b67a
MN
1849{
1850 struct fsg_buffhd *bh;
1851 int rc;
1852 int reply = -EINVAL;
1853 int i;
1854 static char unknown[16];
1855
8ea864cf 1856 dump_cdb(common);
d5e2b67a
MN
1857
1858 /* Wait for the next buffer to become available for data or status */
8ea864cf
MN
1859 bh = common->next_buffhd_to_fill;
1860 common->next_buffhd_to_drain = bh;
d5e2b67a 1861 while (bh->state != BUF_STATE_EMPTY) {
8ea864cf 1862 rc = sleep_thread(common);
d5e2b67a
MN
1863 if (rc)
1864 return rc;
1865 }
8ea864cf
MN
1866 common->phase_error = 0;
1867 common->short_packet_received = 0;
d5e2b67a 1868
8ea864cf
MN
1869 down_read(&common->filesem); /* We're using the backing file */
1870 switch (common->cmnd[0]) {
d5e2b67a
MN
1871
1872 case SC_INQUIRY:
8ea864cf
MN
1873 common->data_size_from_cmnd = common->cmnd[4];
1874 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
1875 (1<<4), 0,
1876 "INQUIRY");
1877 if (reply == 0)
8ea864cf 1878 reply = do_inquiry(common, bh);
d5e2b67a
MN
1879 break;
1880
1881 case SC_MODE_SELECT_6:
8ea864cf
MN
1882 common->data_size_from_cmnd = common->cmnd[4];
1883 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
d26a6aa0
MN
1884 (1<<1) | (1<<4), 0,
1885 "MODE SELECT(6)");
1886 if (reply == 0)
8ea864cf 1887 reply = do_mode_select(common, bh);
d5e2b67a
MN
1888 break;
1889
1890 case SC_MODE_SELECT_10:
8ea864cf
MN
1891 common->data_size_from_cmnd =
1892 get_unaligned_be16(&common->cmnd[7]);
1893 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
d26a6aa0
MN
1894 (1<<1) | (3<<7), 0,
1895 "MODE SELECT(10)");
1896 if (reply == 0)
8ea864cf 1897 reply = do_mode_select(common, bh);
d5e2b67a
MN
1898 break;
1899
1900 case SC_MODE_SENSE_6:
8ea864cf
MN
1901 common->data_size_from_cmnd = common->cmnd[4];
1902 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
1903 (1<<1) | (1<<2) | (1<<4), 0,
1904 "MODE SENSE(6)");
1905 if (reply == 0)
8ea864cf 1906 reply = do_mode_sense(common, bh);
d5e2b67a
MN
1907 break;
1908
1909 case SC_MODE_SENSE_10:
8ea864cf
MN
1910 common->data_size_from_cmnd =
1911 get_unaligned_be16(&common->cmnd[7]);
1912 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1913 (1<<1) | (1<<2) | (3<<7), 0,
1914 "MODE SENSE(10)");
1915 if (reply == 0)
8ea864cf 1916 reply = do_mode_sense(common, bh);
d5e2b67a
MN
1917 break;
1918
1919 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
8ea864cf
MN
1920 common->data_size_from_cmnd = 0;
1921 reply = check_command(common, 6, DATA_DIR_NONE,
d26a6aa0
MN
1922 (1<<4), 0,
1923 "PREVENT-ALLOW MEDIUM REMOVAL");
1924 if (reply == 0)
8ea864cf 1925 reply = do_prevent_allow(common);
d5e2b67a
MN
1926 break;
1927
1928 case SC_READ_6:
8ea864cf
MN
1929 i = common->cmnd[4];
1930 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1931 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
1932 (7<<1) | (1<<4), 1,
1933 "READ(6)");
1934 if (reply == 0)
8ea864cf 1935 reply = do_read(common);
d5e2b67a
MN
1936 break;
1937
1938 case SC_READ_10:
8ea864cf
MN
1939 common->data_size_from_cmnd =
1940 get_unaligned_be16(&common->cmnd[7]) << 9;
1941 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1942 (1<<1) | (0xf<<2) | (3<<7), 1,
1943 "READ(10)");
1944 if (reply == 0)
8ea864cf 1945 reply = do_read(common);
d5e2b67a
MN
1946 break;
1947
1948 case SC_READ_12:
8ea864cf
MN
1949 common->data_size_from_cmnd =
1950 get_unaligned_be32(&common->cmnd[6]) << 9;
1951 reply = check_command(common, 12, DATA_DIR_TO_HOST,
d26a6aa0
MN
1952 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1953 "READ(12)");
1954 if (reply == 0)
8ea864cf 1955 reply = do_read(common);
d5e2b67a
MN
1956 break;
1957
1958 case SC_READ_CAPACITY:
8ea864cf
MN
1959 common->data_size_from_cmnd = 8;
1960 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1961 (0xf<<2) | (1<<8), 1,
1962 "READ CAPACITY");
1963 if (reply == 0)
8ea864cf 1964 reply = do_read_capacity(common, bh);
d5e2b67a
MN
1965 break;
1966
1967 case SC_READ_HEADER:
8ea864cf 1968 if (!common->curlun || !common->curlun->cdrom)
d5e2b67a 1969 goto unknown_cmnd;
8ea864cf
MN
1970 common->data_size_from_cmnd =
1971 get_unaligned_be16(&common->cmnd[7]);
1972 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1973 (3<<7) | (0x1f<<1), 1,
1974 "READ HEADER");
1975 if (reply == 0)
8ea864cf 1976 reply = do_read_header(common, bh);
d5e2b67a
MN
1977 break;
1978
1979 case SC_READ_TOC:
8ea864cf 1980 if (!common->curlun || !common->curlun->cdrom)
d5e2b67a 1981 goto unknown_cmnd;
8ea864cf
MN
1982 common->data_size_from_cmnd =
1983 get_unaligned_be16(&common->cmnd[7]);
1984 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1985 (7<<6) | (1<<1), 1,
1986 "READ TOC");
1987 if (reply == 0)
8ea864cf 1988 reply = do_read_toc(common, bh);
d5e2b67a
MN
1989 break;
1990
1991 case SC_READ_FORMAT_CAPACITIES:
8ea864cf
MN
1992 common->data_size_from_cmnd =
1993 get_unaligned_be16(&common->cmnd[7]);
1994 reply = check_command(common, 10, DATA_DIR_TO_HOST,
d26a6aa0
MN
1995 (3<<7), 1,
1996 "READ FORMAT CAPACITIES");
1997 if (reply == 0)
8ea864cf 1998 reply = do_read_format_capacities(common, bh);
d5e2b67a
MN
1999 break;
2000
2001 case SC_REQUEST_SENSE:
8ea864cf
MN
2002 common->data_size_from_cmnd = common->cmnd[4];
2003 reply = check_command(common, 6, DATA_DIR_TO_HOST,
d26a6aa0
MN
2004 (1<<4), 0,
2005 "REQUEST SENSE");
2006 if (reply == 0)
8ea864cf 2007 reply = do_request_sense(common, bh);
d5e2b67a
MN
2008 break;
2009
2010 case SC_START_STOP_UNIT:
8ea864cf
MN
2011 common->data_size_from_cmnd = 0;
2012 reply = check_command(common, 6, DATA_DIR_NONE,
d26a6aa0
MN
2013 (1<<1) | (1<<4), 0,
2014 "START-STOP UNIT");
2015 if (reply == 0)
8ea864cf 2016 reply = do_start_stop(common);
d5e2b67a
MN
2017 break;
2018
2019 case SC_SYNCHRONIZE_CACHE:
8ea864cf
MN
2020 common->data_size_from_cmnd = 0;
2021 reply = check_command(common, 10, DATA_DIR_NONE,
d26a6aa0
MN
2022 (0xf<<2) | (3<<7), 1,
2023 "SYNCHRONIZE CACHE");
2024 if (reply == 0)
8ea864cf 2025 reply = do_synchronize_cache(common);
d5e2b67a
MN
2026 break;
2027
2028 case SC_TEST_UNIT_READY:
8ea864cf
MN
2029 common->data_size_from_cmnd = 0;
2030 reply = check_command(common, 6, DATA_DIR_NONE,
d5e2b67a
MN
2031 0, 1,
2032 "TEST UNIT READY");
2033 break;
2034
2035 /* Although optional, this command is used by MS-Windows. We
2036 * support a minimal version: BytChk must be 0. */
2037 case SC_VERIFY:
8ea864cf
MN
2038 common->data_size_from_cmnd = 0;
2039 reply = check_command(common, 10, DATA_DIR_NONE,
d26a6aa0
MN
2040 (1<<1) | (0xf<<2) | (3<<7), 1,
2041 "VERIFY");
2042 if (reply == 0)
8ea864cf 2043 reply = do_verify(common);
d5e2b67a
MN
2044 break;
2045
2046 case SC_WRITE_6:
8ea864cf
MN
2047 i = common->cmnd[4];
2048 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2049 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
d26a6aa0
MN
2050 (7<<1) | (1<<4), 1,
2051 "WRITE(6)");
2052 if (reply == 0)
8ea864cf 2053 reply = do_write(common);
d5e2b67a
MN
2054 break;
2055
2056 case SC_WRITE_10:
8ea864cf
MN
2057 common->data_size_from_cmnd =
2058 get_unaligned_be16(&common->cmnd[7]) << 9;
2059 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
d26a6aa0
MN
2060 (1<<1) | (0xf<<2) | (3<<7), 1,
2061 "WRITE(10)");
2062 if (reply == 0)
8ea864cf 2063 reply = do_write(common);
d5e2b67a
MN
2064 break;
2065
2066 case SC_WRITE_12:
8ea864cf
MN
2067 common->data_size_from_cmnd =
2068 get_unaligned_be32(&common->cmnd[6]) << 9;
2069 reply = check_command(common, 12, DATA_DIR_FROM_HOST,
d26a6aa0
MN
2070 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2071 "WRITE(12)");
2072 if (reply == 0)
8ea864cf 2073 reply = do_write(common);
d5e2b67a
MN
2074 break;
2075
2076 /* Some mandatory commands that we recognize but don't implement.
2077 * They don't mean much in this setting. It's left as an exercise
2078 * for anyone interested to implement RESERVE and RELEASE in terms
2079 * of Posix locks. */
2080 case SC_FORMAT_UNIT:
2081 case SC_RELEASE:
2082 case SC_RESERVE:
2083 case SC_SEND_DIAGNOSTIC:
d26a6aa0 2084 /* Fall through */
d5e2b67a
MN
2085
2086 default:
d26a6aa0 2087unknown_cmnd:
8ea864cf
MN
2088 common->data_size_from_cmnd = 0;
2089 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2090 reply = check_command(common, common->cmnd_size,
d26a6aa0
MN
2091 DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2092 if (reply == 0) {
8ea864cf 2093 common->curlun->sense_data = SS_INVALID_COMMAND;
d5e2b67a
MN
2094 reply = -EINVAL;
2095 }
2096 break;
2097 }
8ea864cf 2098 up_read(&common->filesem);
d5e2b67a
MN
2099
2100 if (reply == -EINTR || signal_pending(current))
2101 return -EINTR;
2102
2103 /* Set up the single reply buffer for finish_reply() */
2104 if (reply == -EINVAL)
d26a6aa0 2105 reply = 0; /* Error reply length */
8ea864cf
MN
2106 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2107 reply = min((u32) reply, common->data_size_from_cmnd);
d5e2b67a
MN
2108 bh->inreq->length = reply;
2109 bh->state = BUF_STATE_FULL;
8ea864cf 2110 common->residue -= reply;
d26a6aa0 2111 } /* Otherwise it's already set */
d5e2b67a
MN
2112
2113 return 0;
2114}
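/*
 * Arithmetic example only: for the READ and WRITE cases above,
 * data_size_from_cmnd is the SCSI transfer length converted to bytes,
 * assuming the usual 512-byte logical blocks (that is what the "<< 9"
 * shifts do):
 *
 *	READ(6), cmnd[4] == 0   ->  256 blocks -> 131072 bytes
 *	READ(6), cmnd[4] == 8   ->    8 blocks ->   4096 bytes
 *	READ(10)                ->  16-bit length from bytes 7..8, << 9
 *	READ(12)                ->  32-bit length from bytes 6..9, << 9
 *
 * The zero-means-256 special case applies only to the 6-byte commands;
 * for READ(10)/READ(12) a zero transfer length simply moves no data.
 */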
2115
2116
2117/*-------------------------------------------------------------------------*/
2118
2119static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2120{
8ea864cf 2121 struct usb_request *req = bh->outreq;
d5e2b67a 2122 struct fsg_bulk_cb_wrap *cbw = req->buf;
8ea864cf 2123 struct fsg_common *common = fsg->common;
d5e2b67a
MN
2124
2125 /* Was this a real packet? Should it be ignored? */
2126 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2127 return -EINVAL;
2128
2129 /* Is the CBW valid? */
2130 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2131 cbw->Signature != cpu_to_le32(
2132 USB_BULK_CB_SIG)) {
2133 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2134 req->actual,
2135 le32_to_cpu(cbw->Signature));
2136
2137 /* The Bulk-only spec says we MUST stall the IN endpoint
2138 * (6.6.1), so it's unavoidable. It also says we must
2139 * retain this state until the next reset, but there's
2140 * no way to tell the controller driver it should ignore
2141 * Clear-Feature(HALT) requests.
2142 *
2143 * We aren't required to halt the OUT endpoint; instead
2144 * we can simply accept and discard any data received
2145 * until the next reset. */
2146 wedge_bulk_in_endpoint(fsg);
2147 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2148 return -EINVAL;
2149 }
2150
2151 /* Is the CBW meaningful? */
2152 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2153 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2154 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2155 "cmdlen %u\n",
2156 cbw->Lun, cbw->Flags, cbw->Length);
2157
2158 /* We can do anything we want here, so let's stall the
2159 * bulk pipes if we are allowed to. */
8ea864cf 2160 if (common->can_stall) {
d5e2b67a
MN
2161 fsg_set_halt(fsg, fsg->bulk_out);
2162 halt_bulk_in_endpoint(fsg);
2163 }
2164 return -EINVAL;
2165 }
2166
2167 /* Save the command for later */
8ea864cf
MN
2168 common->cmnd_size = cbw->Length;
2169 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
d5e2b67a 2170 if (cbw->Flags & USB_BULK_IN_FLAG)
8ea864cf 2171 common->data_dir = DATA_DIR_TO_HOST;
d5e2b67a 2172 else
8ea864cf
MN
2173 common->data_dir = DATA_DIR_FROM_HOST;
2174 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2175 if (common->data_size == 0)
2176 common->data_dir = DATA_DIR_NONE;
2177 common->lun = cbw->Lun;
2178 common->tag = cbw->Tag;
d5e2b67a
MN
2179 return 0;
2180}
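/*
 * Reference sketch, again from the Bulk-Only Transport spec: the CBW
 * validated above is a fixed 31-byte little-endian packet,
 *
 *	offset  0,  4 bytes: dCBWSignature          = 0x43425355 ("USBC")
 *	offset  4,  4 bytes: dCBWTag                (echoed back in the CSW)
 *	offset  8,  4 bytes: dCBWDataTransferLength
 *	offset 12,  1 byte : bmCBWFlags             (bit 7: 1 = IN, 0 = OUT)
 *	offset 13,  1 byte : bCBWLUN
 *	offset 14,  1 byte : bCBWCBLength           (valid range 1..16)
 *	offset 15, 16 bytes: CBWCB                  (the SCSI CDB)
 *
 * which is what the checks above enforce: the request must be exactly
 * USB_BULK_CB_WRAP_LEN (31) bytes, the signature must match, and Length
 * must lie between 1 and MAX_COMMAND_SIZE.
 */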
2181
2182
8ea864cf 2183static int get_next_command(struct fsg_common *common)
d5e2b67a
MN
2184{
2185 struct fsg_buffhd *bh;
2186 int rc = 0;
2187
93bcf12e 2188 /* Wait for the next buffer to become available */
8ea864cf 2189 bh = common->next_buffhd_to_fill;
93bcf12e 2190 while (bh->state != BUF_STATE_EMPTY) {
8ea864cf 2191 rc = sleep_thread(common);
93bcf12e
MN
2192 if (rc)
2193 return rc;
2194 }
d5e2b67a 2195
93bcf12e 2196 /* Queue a request to read a Bulk-only CBW */
8ea864cf 2197 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
93bcf12e 2198 bh->outreq->short_not_ok = 1;
8ea864cf
MN
2199 START_TRANSFER_OR(common, bulk_out, bh->outreq,
2200 &bh->outreq_busy, &bh->state)
2201 /* Don't know what to do if common->fsg is NULL */
2202 return -EIO;
d5e2b67a 2203
93bcf12e
MN
2204 /* We will drain the buffer in software, which means we
2205 * can reuse it for the next filling. No need to advance
2206 * next_buffhd_to_fill. */
d5e2b67a 2207
93bcf12e
MN
2208 /* Wait for the CBW to arrive */
2209 while (bh->state != BUF_STATE_FULL) {
8ea864cf 2210 rc = sleep_thread(common);
93bcf12e
MN
2211 if (rc)
2212 return rc;
d5e2b67a 2213 }
93bcf12e 2214 smp_rmb();
8ea864cf 2215 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
93bcf12e
MN
2216 bh->state = BUF_STATE_EMPTY;
2217
d5e2b67a
MN
2218 return rc;
2219}
2220
2221
2222/*-------------------------------------------------------------------------*/
2223
8ea864cf 2224static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
d5e2b67a
MN
2225 const struct usb_endpoint_descriptor *d)
2226{
2227 int rc;
2228
8ea864cf 2229 ep->driver_data = common;
d5e2b67a
MN
2230 rc = usb_ep_enable(ep, d);
2231 if (rc)
8ea864cf 2232 ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
d5e2b67a
MN
2233 return rc;
2234}
2235
8ea864cf 2236static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
d5e2b67a
MN
2237 struct usb_request **preq)
2238{
2239 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2240 if (*preq)
2241 return 0;
8ea864cf 2242 ERROR(common, "can't allocate request for %s\n", ep->name);
d5e2b67a
MN
2243 return -ENOMEM;
2244}
2245
2246/*
2247 * Reset interface setting and re-init endpoint state (toggle etc).
2248 * Call with altsetting < 0 to disable the interface. The only other
2249 * available altsetting is 0, which enables the interface.
2250 */
8ea864cf 2251static int do_set_interface(struct fsg_common *common, int altsetting)
d5e2b67a
MN
2252{
2253 int rc = 0;
2254 int i;
2255 const struct usb_endpoint_descriptor *d;
2256
8ea864cf
MN
2257 if (common->running)
2258 DBG(common, "reset interface\n");
d5e2b67a
MN
2259
2260reset:
2261 /* Deallocate the requests */
8ea864cf
MN
2262 if (common->prev_fsg) {
2263 struct fsg_dev *fsg = common->prev_fsg;
2264
2265 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2266 struct fsg_buffhd *bh = &common->buffhds[i];
d5e2b67a 2267
8ea864cf
MN
2268 if (bh->inreq) {
2269 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2270 bh->inreq = NULL;
2271 }
2272 if (bh->outreq) {
2273 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2274 bh->outreq = NULL;
2275 }
d5e2b67a 2276 }
8ea864cf
MN
2277
2278 /* Disable the endpoints */
2279 if (fsg->bulk_in_enabled) {
2280 usb_ep_disable(fsg->bulk_in);
2281 fsg->bulk_in_enabled = 0;
2282 }
2283 if (fsg->bulk_out_enabled) {
2284 usb_ep_disable(fsg->bulk_out);
2285 fsg->bulk_out_enabled = 0;
d5e2b67a 2286 }
d5e2b67a 2287
8ea864cf 2288 common->prev_fsg = NULL;
d5e2b67a 2289 }
d5e2b67a 2290
8ea864cf 2291 common->running = 0;
d5e2b67a
MN
2292 if (altsetting < 0 || rc != 0)
2293 return rc;
2294
8ea864cf 2295 DBG(common, "set interface %d\n", altsetting);
d5e2b67a 2296
8ea864cf
MN
2297 if (fsg_is_set(common)) {
2298 struct fsg_dev *fsg = common->fsg;
2299 common->prev_fsg = common->fsg;
2300
2301 /* Enable the endpoints */
2302 d = fsg_ep_desc(common->gadget,
2303 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2304 rc = enable_endpoint(common, fsg->bulk_in, d);
2305 if (rc)
d5e2b67a 2306 goto reset;
8ea864cf
MN
2307 fsg->bulk_in_enabled = 1;
2308
2309 d = fsg_ep_desc(common->gadget,
2310 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2311 rc = enable_endpoint(common, fsg->bulk_out, d);
2312 if (rc)
d5e2b67a 2313 goto reset;
8ea864cf
MN
2314 fsg->bulk_out_enabled = 1;
2315 common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2316 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
d5e2b67a 2317
8ea864cf
MN
2318 /* Allocate the requests */
2319 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2320 struct fsg_buffhd *bh = &common->buffhds[i];
2321
2322 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2323 if (rc)
2324 goto reset;
2325 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2326 if (rc)
2327 goto reset;
2328 bh->inreq->buf = bh->outreq->buf = bh->buf;
2329 bh->inreq->context = bh->outreq->context = bh;
2330 bh->inreq->complete = bulk_in_complete;
2331 bh->outreq->complete = bulk_out_complete;
2332 }
2333
2334 common->running = 1;
2335 for (i = 0; i < common->nluns; ++i)
2336 common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2337 return rc;
2338 } else {
2339 return -EIO;
2340 }
d5e2b67a
MN
2341}
2342
2343
2344/*
2345 * Change our operational configuration. This code must agree with the code
2346 * that returns config descriptors, and with interface altsetting code.
2347 *
2348 * It's also responsible for power management interactions. Some
2349 * configurations might not work with our current power sources.
2350 * For now we just assume the gadget is always self-powered.
2351 */
8ea864cf 2352static int do_set_config(struct fsg_common *common, u8 new_config)
d5e2b67a
MN
2353{
2354 int rc = 0;
2355
2356 /* Disable the single interface */
8ea864cf
MN
2357 if (common->config != 0) {
2358 DBG(common, "reset config\n");
2359 common->config = 0;
2360 rc = do_set_interface(common, -1);
d5e2b67a
MN
2361 }
2362
2363 /* Enable the interface */
2364 if (new_config != 0) {
8ea864cf
MN
2365 common->config = new_config;
2366 rc = do_set_interface(common, 0);
d23b0f08 2367 if (rc != 0)
8ea864cf 2368 common->config = 0; /* Reset on errors */
d5e2b67a
MN
2369 }
2370 return rc;
2371}
2372
2373
d23b0f08
MN
2374/****************************** ALT CONFIGS ******************************/
2375
2376
2377static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2378{
2379 struct fsg_dev *fsg = fsg_from_func(f);
8ea864cf
MN
2380 fsg->common->prev_fsg = fsg->common->fsg;
2381 fsg->common->fsg = fsg;
2382 fsg->common->new_config = 1;
2383 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
d23b0f08
MN
2384 return 0;
2385}
2386
2387static void fsg_disable(struct usb_function *f)
2388{
2389 struct fsg_dev *fsg = fsg_from_func(f);
8ea864cf
MN
2390 fsg->common->prev_fsg = fsg->common->fsg;
2391 fsg->common->fsg = fsg;
2392 fsg->common->new_config = 0;
2393 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
d23b0f08
MN
2394}
2395
2396
d5e2b67a
MN
2397/*-------------------------------------------------------------------------*/
2398
8ea864cf 2399static void handle_exception(struct fsg_common *common)
d5e2b67a
MN
2400{
2401 siginfo_t info;
2402 int sig;
2403 int i;
d5e2b67a
MN
2404 struct fsg_buffhd *bh;
2405 enum fsg_state old_state;
2406 u8 new_config;
2407 struct fsg_lun *curlun;
2408 unsigned int exception_req_tag;
2409 int rc;
2410
2411 /* Clear the existing signals. Anything but SIGUSR1 is converted
2412 * into a high-priority EXIT exception. */
2413 for (;;) {
2414 sig = dequeue_signal_lock(current, &current->blocked, &info);
2415 if (!sig)
2416 break;
2417 if (sig != SIGUSR1) {
8ea864cf
MN
2418 if (common->state < FSG_STATE_EXIT)
2419 DBG(common, "Main thread exiting on signal\n");
2420 raise_exception(common, FSG_STATE_EXIT);
d5e2b67a
MN
2421 }
2422 }
2423
2424 /* Cancel all the pending transfers */
8ea864cf 2425 if (fsg_is_set(common)) {
d5e2b67a 2426 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
8ea864cf
MN
2427 bh = &common->buffhds[i];
2428 if (bh->inreq_busy)
2429 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2430 if (bh->outreq_busy)
2431 usb_ep_dequeue(common->fsg->bulk_out,
2432 bh->outreq);
d5e2b67a 2433 }
d5e2b67a 2434
8ea864cf
MN
2435 /* Wait until everything is idle */
2436 for (;;) {
2437 int num_active = 0;
2438 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2439 bh = &common->buffhds[i];
2440 num_active += bh->inreq_busy + bh->outreq_busy;
2441 }
2442 if (num_active == 0)
2443 break;
2444 if (sleep_thread(common))
2445 return;
2446 }
2447
2448 /* Clear out the controller's fifos */
2449 if (common->fsg->bulk_in_enabled)
2450 usb_ep_fifo_flush(common->fsg->bulk_in);
2451 if (common->fsg->bulk_out_enabled)
2452 usb_ep_fifo_flush(common->fsg->bulk_out);
2453 }
d5e2b67a
MN
2454
2455 /* Reset the I/O buffer states and pointers, the SCSI
2456 * state, and the exception. Then invoke the handler. */
8ea864cf 2457 spin_lock_irq(&common->lock);
d5e2b67a
MN
2458
2459 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
8ea864cf 2460 bh = &common->buffhds[i];
d5e2b67a
MN
2461 bh->state = BUF_STATE_EMPTY;
2462 }
8ea864cf
MN
2463 common->next_buffhd_to_fill = &common->buffhds[0];
2464 common->next_buffhd_to_drain = &common->buffhds[0];
2465 exception_req_tag = common->exception_req_tag;
2466 new_config = common->new_config;
2467 old_state = common->state;
d5e2b67a
MN
2468
2469 if (old_state == FSG_STATE_ABORT_BULK_OUT)
8ea864cf 2470 common->state = FSG_STATE_STATUS_PHASE;
d5e2b67a 2471 else {
8ea864cf
MN
2472 for (i = 0; i < common->nluns; ++i) {
2473 curlun = &common->luns[i];
d5e2b67a 2474 curlun->prevent_medium_removal = 0;
d26a6aa0
MN
2475 curlun->sense_data = SS_NO_SENSE;
2476 curlun->unit_attention_data = SS_NO_SENSE;
d5e2b67a
MN
2477 curlun->sense_data_info = 0;
2478 curlun->info_valid = 0;
2479 }
8ea864cf 2480 common->state = FSG_STATE_IDLE;
d5e2b67a 2481 }
8ea864cf 2482 spin_unlock_irq(&common->lock);
d5e2b67a
MN
2483
2484 /* Carry out any extra actions required for the exception */
2485 switch (old_state) {
d5e2b67a 2486 case FSG_STATE_ABORT_BULK_OUT:
8ea864cf
MN
2487 send_status(common);
2488 spin_lock_irq(&common->lock);
2489 if (common->state == FSG_STATE_STATUS_PHASE)
2490 common->state = FSG_STATE_IDLE;
2491 spin_unlock_irq(&common->lock);
d5e2b67a
MN
2492 break;
2493
2494 case FSG_STATE_RESET:
2495 /* In case we were forced against our will to halt a
2496 * bulk endpoint, clear the halt now. (The SuperH UDC
2497 * requires this.) */
8ea864cf
MN
2498 if (!fsg_is_set(common))
2499 break;
2500 if (test_and_clear_bit(IGNORE_BULK_OUT,
2501 &common->fsg->atomic_bitflags))
2502 usb_ep_clear_halt(common->fsg->bulk_in);
d5e2b67a 2503
8ea864cf
MN
2504 if (common->ep0_req_tag == exception_req_tag)
2505 ep0_queue(common); /* Complete the status stage */
d5e2b67a
MN
2506
2507 /* Technically this should go here, but it would only be
2508 * a waste of time. Ditto for the INTERFACE_CHANGE and
2509 * CONFIG_CHANGE cases. */
8ea864cf
MN
2510 /* for (i = 0; i < common->nluns; ++i) */
2511 /* common->luns[i].unit_attention_data = */
d26a6aa0 2512 /* SS_RESET_OCCURRED; */
d5e2b67a
MN
2513 break;
2514
d5e2b67a 2515 case FSG_STATE_CONFIG_CHANGE:
8ea864cf
MN
2516 rc = do_set_config(common, new_config);
2517 if (common->ep0_req_tag != exception_req_tag)
d5e2b67a 2518 break;
8ea864cf
MN
2519 if (rc != 0) { /* STALL on errors */
2520 DBG(common, "ep0 set halt\n");
2521 usb_ep_set_halt(common->ep0);
2522 } else { /* Complete the status stage */
2523 ep0_queue(common);
2524 }
d5e2b67a
MN
2525 break;
2526
d5e2b67a
MN
2527 case FSG_STATE_EXIT:
2528 case FSG_STATE_TERMINATED:
8ea864cf
MN
2529 do_set_config(common, 0); /* Free resources */
2530 spin_lock_irq(&common->lock);
2531 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2532 spin_unlock_irq(&common->lock);
d5e2b67a 2533 break;
d23b0f08
MN
2534
2535 case FSG_STATE_INTERFACE_CHANGE:
2536 case FSG_STATE_DISCONNECT:
2537 case FSG_STATE_COMMAND_PHASE:
2538 case FSG_STATE_DATA_PHASE:
2539 case FSG_STATE_STATUS_PHASE:
2540 case FSG_STATE_IDLE:
2541 break;
d5e2b67a
MN
2542 }
2543}
2544
2545
2546/*-------------------------------------------------------------------------*/
2547
8ea864cf 2548static int fsg_main_thread(void *common_)
d5e2b67a 2549{
8ea864cf 2550 struct fsg_common *common = common_;
d5e2b67a
MN
2551
2552 /* Allow the thread to be killed by a signal, but set the signal mask
2553 * to block everything but INT, TERM, KILL, and USR1. */
2554 allow_signal(SIGINT);
2555 allow_signal(SIGTERM);
2556 allow_signal(SIGKILL);
2557 allow_signal(SIGUSR1);
2558
2559 /* Allow the thread to be frozen */
2560 set_freezable();
2561
2562 /* Arrange for userspace references to be interpreted as kernel
2563 * pointers. That way we can pass a kernel pointer to a routine
2564 * that expects a __user pointer and it will work okay. */
2565 set_fs(get_ds());
2566
2567 /* The main loop */
8ea864cf
MN
2568 while (common->state != FSG_STATE_TERMINATED) {
2569 if (exception_in_progress(common) || signal_pending(current)) {
2570 handle_exception(common);
d5e2b67a
MN
2571 continue;
2572 }
2573
8ea864cf
MN
2574 if (!common->running) {
2575 sleep_thread(common);
d5e2b67a
MN
2576 continue;
2577 }
2578
8ea864cf 2579 if (get_next_command(common))
d5e2b67a
MN
2580 continue;
2581
8ea864cf
MN
2582 spin_lock_irq(&common->lock);
2583 if (!exception_in_progress(common))
2584 common->state = FSG_STATE_DATA_PHASE;
2585 spin_unlock_irq(&common->lock);
d5e2b67a 2586
8ea864cf 2587 if (do_scsi_command(common) || finish_reply(common))
d5e2b67a
MN
2588 continue;
2589
8ea864cf
MN
2590 spin_lock_irq(&common->lock);
2591 if (!exception_in_progress(common))
2592 common->state = FSG_STATE_STATUS_PHASE;
2593 spin_unlock_irq(&common->lock);
d5e2b67a 2594
8ea864cf 2595 if (send_status(common))
d5e2b67a
MN
2596 continue;
2597
8ea864cf
MN
2598 spin_lock_irq(&common->lock);
2599 if (!exception_in_progress(common))
2600 common->state = FSG_STATE_IDLE;
2601 spin_unlock_irq(&common->lock);
d23b0f08 2602 }
d5e2b67a 2603
8ea864cf
MN
2604 spin_lock_irq(&common->lock);
2605 common->thread_task = NULL;
2606 spin_unlock_irq(&common->lock);
d5e2b67a 2607
d23b0f08 2608 /* XXX */
d5e2b67a
MN
2609 /* If we are exiting because of a signal, unregister the
2610 * gadget driver. */
d23b0f08
MN
2611 /* if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags)) */
2612 /* usb_gadget_unregister_driver(&fsg_driver); */
d5e2b67a
MN
2613
2614 /* Let the unbind and cleanup routines know the thread has exited */
8ea864cf 2615 complete_and_exit(&common->thread_notifier, 0);
d5e2b67a
MN
2616}
2617
2618
9c610213 2619/*************************** DEVICE ATTRIBUTES ***************************/
d5e2b67a 2620
d23b0f08
MN
2621/* Write permission is checked per LUN in store_*() functions. */
2622static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2623static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
d5e2b67a
MN
2624
2625
9c610213
MN
2626/****************************** FSG COMMON ******************************/
2627
2628static void fsg_common_release(struct kref *ref);
d5e2b67a 2629
9c610213 2630static void fsg_lun_release(struct device *dev)
d5e2b67a 2631{
9c610213 2632 /* Nothing needs to be done */
d5e2b67a
MN
2633}
2634
9c610213 2635static inline void fsg_common_get(struct fsg_common *common)
d5e2b67a 2636{
9c610213 2637 kref_get(&common->ref);
d5e2b67a
MN
2638}
2639
9c610213
MN
2640static inline void fsg_common_put(struct fsg_common *common)
2641{
2642 kref_put(&common->ref, fsg_common_release);
2643}
2644
2645
2646static struct fsg_common *fsg_common_init(struct fsg_common *common,
481e4929
MN
2647 struct usb_composite_dev *cdev,
2648 struct fsg_config *cfg)
9c610213 2649{
d23b0f08 2650 struct usb_gadget *gadget = cdev->gadget;
9c610213
MN
2651 struct fsg_buffhd *bh;
2652 struct fsg_lun *curlun;
481e4929 2653 struct fsg_lun_config *lcfg;
9c610213 2654 int nluns, i, rc;
d23b0f08 2655 char *pathbuf;
9c610213
MN
2656
2657 /* Find out how many LUNs there should be */
481e4929 2658 nluns = cfg->nluns;
9c610213
MN
2659 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2660 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2661 return ERR_PTR(-EINVAL);
2662 }
2663
2664 /* Allocate? */
2665 if (!common) {
2666 common = kzalloc(sizeof *common, GFP_KERNEL);
2667 if (!common)
2668 return ERR_PTR(-ENOMEM);
2669 common->free_storage_on_release = 1;
2670 } else {
2671 memset(common, 0, sizeof *common);
2672 common->free_storage_on_release = 0;
2673 }
8ea864cf 2674
9c610213 2675 common->gadget = gadget;
8ea864cf
MN
2676 common->ep0 = gadget->ep0;
2677 common->ep0req = cdev->req;
2678
2679 /* Maybe allocate device-global string IDs, and patch descriptors */
2680 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2681 rc = usb_string_id(cdev);
2682 if (rc < 0) {
2683 kfree(common);
2684 return ERR_PTR(rc);
2685 }
2686 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2687 fsg_intf_desc.iInterface = rc;
2688 }
9c610213
MN
2689
2690 /* Create the LUNs, open their backing files, and register the
2691 * LUN devices in sysfs. */
2692 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2693 if (!curlun) {
2694 kfree(common);
2695 return ERR_PTR(-ENOMEM);
2696 }
2697 common->luns = curlun;
2698
2699 init_rwsem(&common->filesem);
2700
481e4929
MN
2701 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
2702 curlun->cdrom = !!lcfg->cdrom;
2703 curlun->ro = lcfg->cdrom || lcfg->ro;
2704 curlun->removable = lcfg->removable;
9c610213
MN
2705 curlun->dev.release = fsg_lun_release;
2706 curlun->dev.parent = &gadget->dev;
d23b0f08 2707 /* curlun->dev.driver = &fsg_driver.driver; XXX */
9c610213 2708 dev_set_drvdata(&curlun->dev, &common->filesem);
e8b6f8c5
MN
2709 dev_set_name(&curlun->dev,
2710 cfg->lun_name_format
2711 ? cfg->lun_name_format
2712 : "lun%d",
2713 i);
9c610213
MN
2714
2715 rc = device_register(&curlun->dev);
2716 if (rc) {
2717 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2718 common->nluns = i;
2719 goto error_release;
2720 }
2721
2722 rc = device_create_file(&curlun->dev, &dev_attr_ro);
2723 if (rc)
2724 goto error_luns;
2725 rc = device_create_file(&curlun->dev, &dev_attr_file);
2726 if (rc)
2727 goto error_luns;
2728
481e4929
MN
2729 if (lcfg->filename) {
2730 rc = fsg_lun_open(curlun, lcfg->filename);
9c610213
MN
2731 if (rc)
2732 goto error_luns;
481e4929 2733 } else if (!curlun->removable) {
9c610213
MN
2734 ERROR(common, "no file given for LUN%d\n", i);
2735 rc = -EINVAL;
2736 goto error_luns;
2737 }
2738 }
2739 common->nluns = nluns;
2740
2741
2742 /* Data buffers cyclic list */
2743 /* Buffers in buffhds are static -- no need for additional
2744 * allocation. */
2745 bh = common->buffhds;
2746 i = FSG_NUM_BUFFERS - 1;
2747 do {
2748 bh->next = bh + 1;
2749 } while (++bh, --i);
2750 bh->next = common->buffhds;
2751
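	/*
	 * For the usual FSG_NUM_BUFFERS == 2 the loop above simply yields
	 * buffhds[0].next == &buffhds[1] and buffhds[1].next == &buffhds[0]:
	 * a singly linked ring the fill/drain cursors can walk without any
	 * bounds checking.
	 */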
2752
481e4929
MN
2753 /* Prepare the inquiry string */
2754 if (cfg->release != 0xffff) {
2755 i = cfg->release;
2756 } else {
9c610213 2757 /* The sa1100 controller is not supported */
481e4929
MN
2758 i = gadget_is_sa1100(gadget)
2759 ? -1
2760 : usb_gadget_controller_number(gadget);
2761 if (i >= 0) {
2762 i = 0x0300 + i;
2763 } else {
9c610213
MN
2764 WARNING(common, "controller '%s' not recognized\n",
2765 gadget->name);
481e4929 2766 i = 0x0399;
9c610213
MN
2767 }
2768 }
481e4929
MN
2769#define OR(x, y) ((x) ? (x) : (y))
2770 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2771 "%-8s%-16s%04x",
2772 OR(cfg->vendor_name, "Linux "),
2773 /* Assume product name dependent on the first LUN */
2774 OR(cfg->product_name, common->luns->cdrom
2775 ? "File-CD Gadget"
2776 : "File-Stor Gadget"),
2777 i);
9c610213
MN
2778
2779
2780 /* Some peripheral controllers are known not to be able to
2781 * halt bulk endpoints correctly. If one of them is present,
2782 * disable stalls.
2783 */
481e4929 2784 common->can_stall = cfg->can_stall &&
8ea864cf
MN
2785 !(gadget_is_sh(common->gadget) ||
2786 gadget_is_at91(common->gadget));
9c610213
MN
2787
2788
8ea864cf 2789 spin_lock_init(&common->lock);
9c610213 2790 kref_init(&common->ref);
8ea864cf
MN
2791
2792
2793 /* Tell the thread to start working */
2794 common->thread_task =
2795 kthread_create(fsg_main_thread, common,
2796 OR(cfg->thread_name, "file-storage"));
2797 if (IS_ERR(common->thread_task)) {
2798 rc = PTR_ERR(common->thread_task);
2799 goto error_release;
2800 }
2801 init_completion(&common->thread_notifier);
e8b6f8c5
MN
2802#undef OR
2803
d23b0f08
MN
2804
2805 /* Information */
2806 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2807 INFO(common, "Number of LUNs=%d\n", common->nluns);
2808
2809 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2810 for (i = 0, nluns = common->nluns, curlun = common->luns;
2811 i < nluns;
2812 ++curlun, ++i) {
2813 char *p = "(no medium)";
2814 if (fsg_lun_is_open(curlun)) {
2815 p = "(error)";
2816 if (pathbuf) {
2817 p = d_path(&curlun->filp->f_path,
2818 pathbuf, PATH_MAX);
2819 if (IS_ERR(p))
2820 p = "(error)";
2821 }
2822 }
2823 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2824 curlun->removable ? "removable " : "",
2825 curlun->ro ? "read only " : "",
2826 curlun->cdrom ? "CD-ROM " : "",
2827 p);
2828 }
2829 kfree(pathbuf);
2830
8ea864cf
MN
2831 DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2832
2833 wake_up_process(common->thread_task);
2834
9c610213
MN
2835 return common;
2836
2837
2838error_luns:
2839 common->nluns = i + 1;
2840error_release:
8ea864cf 2841 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
d26a6aa0
MN
2842 /* Call fsg_common_release() directly; the ref might not be
2843 * initialised yet. */
9c610213 2844 fsg_common_release(&common->ref);
8ea864cf 2845 complete(&common->thread_notifier);
9c610213
MN
2846 return ERR_PTR(rc);
2847}
2848
2849
2850static void fsg_common_release(struct kref *ref)
2851{
2852 struct fsg_common *common =
2853 container_of(ref, struct fsg_common, ref);
2854 unsigned i = common->nluns;
2855 struct fsg_lun *lun = common->luns;
2856
8ea864cf
MN
2857 /* If the thread isn't already dead, tell it to exit now */
2858 if (common->state != FSG_STATE_TERMINATED) {
2859 raise_exception(common, FSG_STATE_EXIT);
2860 wait_for_completion(&common->thread_notifier);
2861
2862 /* The cleanup routine waits for this completion also */
2863 complete(&common->thread_notifier);
2864 }
2865
9c610213
MN
2866 /* Beware of the temptation to turn this into a do-while loop:
2867 * when in error recovery nluns may be zero. */
2868
2869 for (; i; --i, ++lun) {
2870 device_remove_file(&lun->dev, &dev_attr_ro);
2871 device_remove_file(&lun->dev, &dev_attr_file);
2872 fsg_lun_close(lun);
2873 device_unregister(&lun->dev);
2874 }
2875
2876 kfree(common->luns);
2877 if (common->free_storage_on_release)
2878 kfree(common);
2879}
2880
2881
2882/*-------------------------------------------------------------------------*/
2883
2884
d23b0f08 2885static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
d5e2b67a 2886{
d23b0f08 2887 struct fsg_dev *fsg = fsg_from_func(f);
d5e2b67a
MN
2888
2889 DBG(fsg, "unbind\n");
9c610213
MN
2890 fsg_common_put(fsg->common);
2891 kfree(fsg);
d5e2b67a
MN
2892}
2893
2894
d23b0f08 2895static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
d5e2b67a 2896{
d23b0f08
MN
2897 struct fsg_dev *fsg = fsg_from_func(f);
2898 struct usb_gadget *gadget = c->cdev->gadget;
d5e2b67a
MN
2899 int rc;
2900 int i;
d5e2b67a 2901 struct usb_ep *ep;
d5e2b67a
MN
2902
2903 fsg->gadget = gadget;
d5e2b67a 2904
d23b0f08
MN
2905 /* New interface */
2906 i = usb_interface_id(c, f);
2907 if (i < 0)
2908 return i;
2909 fsg_intf_desc.bInterfaceNumber = i;
2910 fsg->interface_number = i;
d5e2b67a 2911
d5e2b67a 2912 /* Find all the endpoints we will use */
d5e2b67a
MN
2913 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2914 if (!ep)
2915 goto autoconf_fail;
8ea864cf 2916 ep->driver_data = fsg->common; /* claim the endpoint */
d5e2b67a
MN
2917 fsg->bulk_in = ep;
2918
2919 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2920 if (!ep)
2921 goto autoconf_fail;
8ea864cf 2922 ep->driver_data = fsg->common; /* claim the endpoint */
d5e2b67a
MN
2923 fsg->bulk_out = ep;
2924
d5e2b67a 2925 if (gadget_is_dualspeed(gadget)) {
d5e2b67a
MN
2926 /* Assume endpoint addresses are the same for both speeds */
2927 fsg_hs_bulk_in_desc.bEndpointAddress =
2928 fsg_fs_bulk_in_desc.bEndpointAddress;
2929 fsg_hs_bulk_out_desc.bEndpointAddress =
2930 fsg_fs_bulk_out_desc.bEndpointAddress;
d23b0f08 2931 f->hs_descriptors = fsg_hs_function;
d5e2b67a
MN
2932 }
2933
d5e2b67a
MN
2934 return 0;
2935
2936autoconf_fail:
2937 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2938 rc = -ENOTSUPP;
d23b0f08 2939 fsg_unbind(c, f);
d5e2b67a
MN
2940 return rc;
2941}
2942
2943
d23b0f08 2944/****************************** ADD FUNCTION ******************************/
d5e2b67a 2945
d23b0f08
MN
2946static struct usb_gadget_strings *fsg_strings_array[] = {
2947 &fsg_stringtab,
2948 NULL,
d5e2b67a
MN
2949};
2950
d23b0f08
MN
2951static int fsg_add(struct usb_composite_dev *cdev,
2952 struct usb_configuration *c,
2953 struct fsg_common *common)
d5e2b67a 2954{
d23b0f08
MN
2955 struct fsg_dev *fsg;
2956 int rc;
2957
2958 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2959 if (unlikely(!fsg))
2960 return -ENOMEM;
d5e2b67a 2961
d23b0f08
MN
2962 fsg->function.name = FSG_DRIVER_DESC;
2963 fsg->function.strings = fsg_strings_array;
2964 fsg->function.descriptors = fsg_fs_function;
2965 fsg->function.bind = fsg_bind;
2966 fsg->function.unbind = fsg_unbind;
2967 fsg->function.setup = fsg_setup;
2968 fsg->function.set_alt = fsg_set_alt;
2969 fsg->function.disable = fsg_disable;
2970
2971 fsg->common = common;
2972 /* Our caller holds a reference to the common structure, so we
2973 * don't have to worry about it being freed until we return
2974 * from this function. Instead of incrementing the counter now
2975 * and decrementing it in error recovery, we increment it only
2976 * when the call to usb_add_function() succeeds. */
2977
2978 rc = usb_add_function(c, &fsg->function);
2979
2980 if (likely(rc == 0))
2981 fsg_common_get(fsg->common);
2982 else
2983 kfree(fsg);
2984
2985 return rc;
d5e2b67a 2986}
481e4929
MN
2987
2988
2989
2990/************************* Module parameters *************************/
2991
2992
2993struct fsg_module_parameters {
2994 char *file[FSG_MAX_LUNS];
2995 int ro[FSG_MAX_LUNS];
2996 int removable[FSG_MAX_LUNS];
2997 int cdrom[FSG_MAX_LUNS];
2998
2999 unsigned int file_count, ro_count, removable_count, cdrom_count;
3000 unsigned int luns; /* nluns */
3001 int stall; /* can_stall */
3002};
3003
3004
3005#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
3006 module_param_array_named(prefix ## name, params.name, type, \
3007 &prefix ## params.name ## _count, \
3008 S_IRUGO); \
3009 MODULE_PARM_DESC(prefix ## name, desc)
3010
3011#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
3012 module_param_named(prefix ## name, params.name, type, \
3013 S_IRUGO); \
3014 MODULE_PARM_DESC(prefix ## name, desc)
3015
3016#define FSG_MODULE_PARAMETERS(prefix, params) \
3017 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
3018 "names of backing files or devices"); \
3019 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
3020 "true to force read-only"); \
3021 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
3022 "true to simulate removable media"); \
3023 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
3024 "true to simulate CD-ROM instead of disk"); \
3025 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
3026 "number of LUNs"); \
3027 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
3028 "false to prevent bulk stalls")
3029
3030
3031static void
3032fsg_config_from_params(struct fsg_config *cfg,
3033 const struct fsg_module_parameters *params)
3034{
3035 struct fsg_lun_config *lun;
d26a6aa0 3036 unsigned i;
481e4929
MN
3037
3038 /* Configure LUNs */
d26a6aa0
MN
3039 cfg->nluns =
3040 min(params->luns ?: (params->file_count ?: 1u),
3041 (unsigned)FSG_MAX_LUNS);
3042 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
481e4929
MN
3043 lun->ro = !!params->ro[i];
3044 lun->cdrom = !!params->cdrom[i];
d26a6aa0 3045 lun->removable = /* Removable by default */
481e4929
MN
3046 params->removable_count <= i || params->removable[i];
3047 lun->filename =
3048 params->file_count > i && params->file[i][0]
3049 ? params->file[i]
3050 : 0;
3051 }
3052
d26a6aa0 3053 /* Let MSF use defaults */
e8b6f8c5
MN
3054 cfg->lun_name_format = 0;
3055 cfg->thread_name = 0;
481e4929
MN
3056 cfg->vendor_name = 0;
3057 cfg->product_name = 0;
3058 cfg->release = 0xffff;
3059
3060 /* Finalise */
3061 cfg->can_stall = params->stall;
3062}
3063
3064static inline struct fsg_common *
3065fsg_common_from_params(struct fsg_common *common,
3066 struct usb_composite_dev *cdev,
3067 const struct fsg_module_parameters *params)
3068 __attribute__((unused));
3069static inline struct fsg_common *
3070fsg_common_from_params(struct fsg_common *common,
3071 struct usb_composite_dev *cdev,
3072 const struct fsg_module_parameters *params)
3073{
3074 struct fsg_config cfg;
3075 fsg_config_from_params(&cfg, params);
3076 return fsg_common_init(common, cdev, &cfg);
3077}
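/*
 * Usage sketch: how a composite gadget is expected to combine the module
 * parameters, fsg_common_from_params() and fsg_add().  The names below
 * (mod_data, msg_do_config) are hypothetical and the block is left
 * uncompiled on purpose.
 */
#if 0
static struct fsg_module_parameters mod_data = {
	.stall = 1,			/* allow bulk stalls by default */
};
FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);

static int msg_do_config(struct usb_configuration *c)
{
	struct fsg_common *common;
	int ret;

	common = fsg_common_from_params(NULL, c->cdev, &mod_data);
	if (IS_ERR(common))
		return PTR_ERR(common);

	ret = fsg_add(c->cdev, c, common);	/* takes its own reference */
	fsg_common_put(common);			/* drop the initial one */
	return ret;
}
#endif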
3078