2 * f_mass_storage.c -- Mass Storage USB Composite Function
4 * Copyright (C) 2003-2008 Alan Stern
5 * Copyright (C) 2009 Samsung Electronics
6 * Author: Michal Nazarewicz <mina86@mina86.com>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") as published by the Free Software
24 * Foundation, either version 2 of that License or (at your option) any
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * The Mass Storage Function acts as a USB Mass Storage device,
42 * appearing to the host as a disk drive or as a CD-ROM drive. In
43 * addition to providing an example of a genuinely useful composite
44 * function for a USB device, it also illustrates a technique of
45 * double-buffering for increased throughput.
47 * For more information about MSF and in particular its module
48 * parameters and sysfs interface read the
49 * <Documentation/usb/mass-storage.txt> file.
53 * MSF is configured by specifying a fsg_config structure. It has the
56 * nluns Number of LUNs function have (anywhere from 1
58 * luns An array of LUN configuration values. This
59 * should be filled for each LUN that
60 * function will include (ie. for "nluns"
61 * LUNs). Each element of the array has
62 * the following fields:
63 * ->filename The path to the backing file for the LUN.
64 * Required if LUN is not marked as
66 * ->ro Flag specifying access to the LUN shall be
67 * read-only. This is implied if CD-ROM
68 * emulation is enabled as well as when
69 * it was impossible to open "filename"
71 * ->removable Flag specifying that LUN shall be indicated as
73 * ->cdrom Flag specifying that LUN shall be reported as
75 * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12)
76 * commands for this LUN shall be ignored.
80 * release Information used as a reply to INQUIRY
81 * request. To use default set to NULL,
82 * NULL, 0xffff respectively. The first
83 * field should be 8 and the second 16
86 * can_stall Set to permit function to halt bulk endpoints.
87 * Disabled on some USB devices known not
88 * to work correctly. You should set it
91 * If "removable" is not set for a LUN then a backing file must be
92 * specified. If it is set, then NULL filename means the LUN's medium
93 * is not loaded (an empty string as "filename" in the fsg_config
94 * structure causes error). The CD-ROM emulation includes a single
95 * data track and no audio tracks; hence there need be only one
96 * backing file per LUN.
98 * This function is heavily based on "File-backed Storage Gadget" by
99 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
100 * Brownell. The driver's SCSI command interface was based on the
101 * "Information technology - Small Computer System Interface - 2"
102 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
103 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
104 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
105 * was based on the "Universal Serial Bus Mass Storage Class UFI
106 * Command Specification" document, Revision 1.0, December 14, 1998,
108 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
114 * The MSF is fairly straightforward. There is a main kernel
115 * thread that handles most of the work. Interrupt routines field
116 * callbacks from the controller driver: bulk- and interrupt-request
117 * completion notifications, endpoint-0 events, and disconnect events.
118 * Completion events are passed to the main thread by wakeup calls. Many
119 * ep0 requests are handled at interrupt time, but SetInterface,
120 * SetConfiguration, and device reset requests are forwarded to the
121 * thread in the form of "exceptions" using SIGUSR1 signals (since they
122 * should interrupt any ongoing file I/O operations).
124 * The thread's main routine implements the standard command/data/status
125 * parts of a SCSI interaction. It and its subroutines are full of tests
126 * for pending signals/exceptions -- all this polling is necessary since
127 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
128 * indication that the driver really wants to be running in userspace.)
129 * An important point is that so long as the thread is alive it keeps an
130 * open reference to the backing file. This will prevent unmounting
131 * the backing file's underlying filesystem and could cause problems
132 * during system shutdown, for example. To prevent such problems, the
133 * thread catches INT, TERM, and KILL signals and converts them into
136 * In normal operation the main thread is started during the gadget's
137 * fsg_bind() callback and stopped during fsg_unbind(). But it can
138 * also exit when it receives a signal, and there's no point leaving
139 * the gadget running when the thread is dead. As of this moment, MSF
140 * provides no way to deregister the gadget when thread dies -- maybe
141 * a callback functions is needed.
143 * To provide maximum throughput, the driver uses a circular pipeline of
144 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
145 * arbitrarily long; in practice the benefits don't justify having more
146 * than 2 stages (i.e., double buffering). But it helps to think of the
147 * pipeline as being a long one. Each buffer head contains a bulk-in and
148 * a bulk-out request pointer (since the buffer can be used for both
149 * output and input -- directions always are given from the host's
150 * point of view) as well as a pointer to the buffer and various state
153 * Use of the pipeline follows a simple protocol. There is a variable
154 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
155 * At any time that buffer head may still be in use from an earlier
156 * request, so each buffer head has a state variable indicating whether
157 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
158 * buffer head to be EMPTY, filling the buffer either by file I/O or by
159 * USB I/O (during which the buffer head is BUSY), and marking the buffer
160 * head FULL when the I/O is complete. Then the buffer will be emptied
161 * (again possibly by USB I/O, during which it is marked BUSY) and
162 * finally marked EMPTY again (possibly by a completion routine).
164 * A module parameter tells the driver to avoid stalling the bulk
165 * endpoints wherever the transport specification allows. This is
166 * necessary for some UDCs like the SuperH, which cannot reliably clear a
167 * halt on a bulk endpoint. However, under certain circumstances the
168 * Bulk-only specification requires a stall. In such cases the driver
169 * will halt the endpoint and set a flag indicating that it should clear
170 * the halt in software during the next device reset. Hopefully this
171 * will permit everything to work correctly. Furthermore, although the
172 * specification allows the bulk-out endpoint to halt when the host sends
173 * too much data, implementing this would cause an unavoidable race.
174 * The driver will always use the "no-stall" approach for OUT transfers.
176 * One subtle point concerns sending status-stage responses for ep0
177 * requests. Some of these requests, such as device reset, can involve
178 * interrupting an ongoing file I/O operation, which might take an
179 * arbitrarily long time. During that delay the host might give up on
180 * the original ep0 request and issue a new one. When that happens the
181 * driver should not notify the host about completion of the original
182 * request, as the host will no longer be waiting for it. So the driver
183 * assigns to each ep0 request a unique tag, and it keeps track of the
184 * tag value of the request associated with a long-running exception
185 * (device-reset, interface-change, or configuration-change). When the
186 * exception handler is finished, the status-stage response is submitted
187 * only if the current ep0 request tag is equal to the exception request
188 * tag. Thus only the most recently received ep0 request will get a
189 * status-stage response.
191 * Warning: This driver source file is too long. It ought to be split up
192 * into a header file plus about 3 separate .c files, to handle the details
193 * of the Gadget, USB Mass Storage, and SCSI protocols.
197 /* #define VERBOSE_DEBUG */
198 /* #define DUMP_MSGS */
200 #include <linux/blkdev.h>
201 #include <linux/completion.h>
202 #include <linux/dcache.h>
203 #include <linux/delay.h>
204 #include <linux/device.h>
205 #include <linux/fcntl.h>
206 #include <linux/file.h>
207 #include <linux/fs.h>
208 #include <linux/kref.h>
209 #include <linux/kthread.h>
210 #include <linux/sched/signal.h>
211 #include <linux/limits.h>
212 #include <linux/rwsem.h>
213 #include <linux/slab.h>
214 #include <linux/spinlock.h>
215 #include <linux/string.h>
216 #include <linux/freezer.h>
217 #include <linux/module.h>
218 #include <linux/uaccess.h>
220 #include <linux/usb/ch9.h>
221 #include <linux/usb/gadget.h>
222 #include <linux/usb/composite.h>
224 #include "configfs.h"
227 /*------------------------------------------------------------------------*/
229 #define FSG_DRIVER_DESC "Mass Storage Function"
230 #define FSG_DRIVER_VERSION "2009/09/11"
/* Interface string reported to the host via the string descriptor table. */
static const char fsg_string_interface[] = "Mass Storage";
234 #include "storage_common.h"
235 #include "f_mass_storage.h"
237 /* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
238 static struct usb_string fsg_strings
[] = {
239 {FSG_STRING_INTERFACE
, fsg_string_interface
},
243 static struct usb_gadget_strings fsg_stringtab
= {
244 .language
= 0x0409, /* en-us */
245 .strings
= fsg_strings
,
248 static struct usb_gadget_strings
*fsg_strings_array
[] = {
253 /*-------------------------------------------------------------------------*/
258 /* Data shared by all the FSG instances. */
260 struct usb_gadget
*gadget
;
261 struct usb_composite_dev
*cdev
;
262 struct fsg_dev
*fsg
, *new_fsg
;
263 wait_queue_head_t fsg_wait
;
265 /* filesem protects: backing files in use */
266 struct rw_semaphore filesem
;
268 /* lock protects: state, all the req_busy's */
271 struct usb_ep
*ep0
; /* Copy of gadget->ep0 */
272 struct usb_request
*ep0req
; /* Copy of cdev->req */
273 unsigned int ep0_req_tag
;
275 struct fsg_buffhd
*next_buffhd_to_fill
;
276 struct fsg_buffhd
*next_buffhd_to_drain
;
277 struct fsg_buffhd
*buffhds
;
278 unsigned int fsg_num_buffers
;
281 u8 cmnd
[MAX_COMMAND_SIZE
];
284 struct fsg_lun
*luns
[FSG_MAX_LUNS
];
285 struct fsg_lun
*curlun
;
287 unsigned int bulk_out_maxpacket
;
288 enum fsg_state state
; /* For exception handling */
289 unsigned int exception_req_tag
;
291 enum data_direction data_dir
;
293 u32 data_size_from_cmnd
;
298 unsigned int can_stall
:1;
299 unsigned int free_storage_on_release
:1;
300 unsigned int phase_error
:1;
301 unsigned int short_packet_received
:1;
302 unsigned int bad_lun_okay
:1;
303 unsigned int running
:1;
304 unsigned int sysfs
:1;
306 int thread_wakeup_needed
;
307 struct completion thread_notifier
;
308 struct task_struct
*thread_task
;
310 /* Callback functions. */
311 const struct fsg_operations
*ops
;
312 /* Gadget's private data. */
315 char inquiry_string
[INQUIRY_STRING_LEN
];
321 struct usb_function function
;
322 struct usb_gadget
*gadget
; /* Copy of cdev->gadget */
323 struct fsg_common
*common
;
325 u16 interface_number
;
327 unsigned int bulk_in_enabled
:1;
328 unsigned int bulk_out_enabled
:1;
330 unsigned long atomic_bitflags
;
331 #define IGNORE_BULK_OUT 0
333 struct usb_ep
*bulk_in
;
334 struct usb_ep
*bulk_out
;
337 static inline int __fsg_is_set(struct fsg_common
*common
,
338 const char *func
, unsigned line
)
342 ERROR(common
, "common->fsg is NULL in %s at %u\n", func
, line
);
/* Check common->fsg, logging the caller's location when it is NULL. */
#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
349 static inline struct fsg_dev
*fsg_from_func(struct usb_function
*f
)
351 return container_of(f
, struct fsg_dev
, function
);
/* Pointer to a routine operating on a struct fsg_dev. */
typedef void (*fsg_routine_t)(struct fsg_dev *);
356 static int exception_in_progress(struct fsg_common
*common
)
358 return common
->state
> FSG_STATE_IDLE
;
361 /* Make bulk-out requests be divisible by the maxpacket size */
362 static void set_bulk_out_req_length(struct fsg_common
*common
,
363 struct fsg_buffhd
*bh
, unsigned int length
)
367 bh
->bulk_out_intended_length
= length
;
368 rem
= length
% common
->bulk_out_maxpacket
;
370 length
+= common
->bulk_out_maxpacket
- rem
;
371 bh
->outreq
->length
= length
;
375 /*-------------------------------------------------------------------------*/
377 static int fsg_set_halt(struct fsg_dev
*fsg
, struct usb_ep
*ep
)
381 if (ep
== fsg
->bulk_in
)
383 else if (ep
== fsg
->bulk_out
)
387 DBG(fsg
, "%s set halt\n", name
);
388 return usb_ep_set_halt(ep
);
392 /*-------------------------------------------------------------------------*/
394 /* These routines may be called in process context or in_irq */
396 /* Caller must hold fsg->lock */
397 static void wakeup_thread(struct fsg_common
*common
)
400 * Ensure the reading of thread_wakeup_needed
401 * and the writing of bh->state are completed
404 /* Tell the main thread that something has happened */
405 common
->thread_wakeup_needed
= 1;
406 if (common
->thread_task
)
407 wake_up_process(common
->thread_task
);
410 static void raise_exception(struct fsg_common
*common
, enum fsg_state new_state
)
415 * Do nothing if a higher-priority exception is already in progress.
416 * If a lower-or-equal priority exception is in progress, preempt it
417 * and notify the main thread by sending it a signal.
419 spin_lock_irqsave(&common
->lock
, flags
);
420 if (common
->state
<= new_state
) {
421 common
->exception_req_tag
= common
->ep0_req_tag
;
422 common
->state
= new_state
;
423 if (common
->thread_task
)
424 send_sig_info(SIGUSR1
, SEND_SIG_FORCED
,
425 common
->thread_task
);
427 spin_unlock_irqrestore(&common
->lock
, flags
);
431 /*-------------------------------------------------------------------------*/
433 static int ep0_queue(struct fsg_common
*common
)
437 rc
= usb_ep_queue(common
->ep0
, common
->ep0req
, GFP_ATOMIC
);
438 common
->ep0
->driver_data
= common
;
439 if (rc
!= 0 && rc
!= -ESHUTDOWN
) {
440 /* We can't do much more than wait for a reset */
441 WARNING(common
, "error in submission: %s --> %d\n",
442 common
->ep0
->name
, rc
);
448 /*-------------------------------------------------------------------------*/
450 /* Completion handlers. These always run in_irq. */
452 static void bulk_in_complete(struct usb_ep
*ep
, struct usb_request
*req
)
454 struct fsg_common
*common
= ep
->driver_data
;
455 struct fsg_buffhd
*bh
= req
->context
;
457 if (req
->status
|| req
->actual
!= req
->length
)
458 DBG(common
, "%s --> %d, %u/%u\n", __func__
,
459 req
->status
, req
->actual
, req
->length
);
460 if (req
->status
== -ECONNRESET
) /* Request was cancelled */
461 usb_ep_fifo_flush(ep
);
463 /* Hold the lock while we update the request and buffer states */
465 spin_lock(&common
->lock
);
467 bh
->state
= BUF_STATE_EMPTY
;
468 wakeup_thread(common
);
469 spin_unlock(&common
->lock
);
472 static void bulk_out_complete(struct usb_ep
*ep
, struct usb_request
*req
)
474 struct fsg_common
*common
= ep
->driver_data
;
475 struct fsg_buffhd
*bh
= req
->context
;
477 dump_msg(common
, "bulk-out", req
->buf
, req
->actual
);
478 if (req
->status
|| req
->actual
!= bh
->bulk_out_intended_length
)
479 DBG(common
, "%s --> %d, %u/%u\n", __func__
,
480 req
->status
, req
->actual
, bh
->bulk_out_intended_length
);
481 if (req
->status
== -ECONNRESET
) /* Request was cancelled */
482 usb_ep_fifo_flush(ep
);
484 /* Hold the lock while we update the request and buffer states */
486 spin_lock(&common
->lock
);
488 bh
->state
= BUF_STATE_FULL
;
489 wakeup_thread(common
);
490 spin_unlock(&common
->lock
);
493 static int _fsg_common_get_max_lun(struct fsg_common
*common
)
495 int i
= ARRAY_SIZE(common
->luns
) - 1;
497 while (i
>= 0 && !common
->luns
[i
])
503 static int fsg_setup(struct usb_function
*f
,
504 const struct usb_ctrlrequest
*ctrl
)
506 struct fsg_dev
*fsg
= fsg_from_func(f
);
507 struct usb_request
*req
= fsg
->common
->ep0req
;
508 u16 w_index
= le16_to_cpu(ctrl
->wIndex
);
509 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
510 u16 w_length
= le16_to_cpu(ctrl
->wLength
);
512 if (!fsg_is_set(fsg
->common
))
515 ++fsg
->common
->ep0_req_tag
; /* Record arrival of a new request */
518 dump_msg(fsg
, "ep0-setup", (u8
*) ctrl
, sizeof(*ctrl
));
520 switch (ctrl
->bRequest
) {
522 case US_BULK_RESET_REQUEST
:
523 if (ctrl
->bRequestType
!=
524 (USB_DIR_OUT
| USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
526 if (w_index
!= fsg
->interface_number
|| w_value
!= 0 ||
531 * Raise an exception to stop the current operation
532 * and reinitialize our state.
534 DBG(fsg
, "bulk reset request\n");
535 raise_exception(fsg
->common
, FSG_STATE_RESET
);
536 return USB_GADGET_DELAYED_STATUS
;
538 case US_BULK_GET_MAX_LUN
:
539 if (ctrl
->bRequestType
!=
540 (USB_DIR_IN
| USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
542 if (w_index
!= fsg
->interface_number
|| w_value
!= 0 ||
545 VDBG(fsg
, "get max LUN\n");
546 *(u8
*)req
->buf
= _fsg_common_get_max_lun(fsg
->common
);
548 /* Respond with data/status */
549 req
->length
= min((u16
)1, w_length
);
550 return ep0_queue(fsg
->common
);
554 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
555 ctrl
->bRequestType
, ctrl
->bRequest
,
556 le16_to_cpu(ctrl
->wValue
), w_index
, w_length
);
561 /*-------------------------------------------------------------------------*/
563 /* All the following routines run in process context */
565 /* Use this for bulk or interrupt transfers, not ep0 */
566 static void start_transfer(struct fsg_dev
*fsg
, struct usb_ep
*ep
,
567 struct usb_request
*req
, int *pbusy
,
568 enum fsg_buffer_state
*state
)
572 if (ep
== fsg
->bulk_in
)
573 dump_msg(fsg
, "bulk-in", req
->buf
, req
->length
);
575 spin_lock_irq(&fsg
->common
->lock
);
577 *state
= BUF_STATE_BUSY
;
578 spin_unlock_irq(&fsg
->common
->lock
);
580 rc
= usb_ep_queue(ep
, req
, GFP_KERNEL
);
582 return; /* All good, we're done */
585 *state
= BUF_STATE_EMPTY
;
587 /* We can't do much more than wait for a reset */
590 * Note: currently the net2280 driver fails zero-length
591 * submissions if DMA is enabled.
593 if (rc
!= -ESHUTDOWN
&& !(rc
== -EOPNOTSUPP
&& req
->length
== 0))
594 WARNING(fsg
, "error in submission: %s --> %d\n", ep
->name
, rc
);
597 static bool start_in_transfer(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
599 if (!fsg_is_set(common
))
601 start_transfer(common
->fsg
, common
->fsg
->bulk_in
,
602 bh
->inreq
, &bh
->inreq_busy
, &bh
->state
);
606 static bool start_out_transfer(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
608 if (!fsg_is_set(common
))
610 start_transfer(common
->fsg
, common
->fsg
->bulk_out
,
611 bh
->outreq
, &bh
->outreq_busy
, &bh
->state
);
615 static int sleep_thread(struct fsg_common
*common
, bool can_freeze
)
619 /* Wait until a signal arrives or we are woken up */
623 set_current_state(TASK_INTERRUPTIBLE
);
624 if (signal_pending(current
)) {
628 if (common
->thread_wakeup_needed
)
632 __set_current_state(TASK_RUNNING
);
633 common
->thread_wakeup_needed
= 0;
636 * Ensure the writing of thread_wakeup_needed
637 * and the reading of bh->state are completed
644 /*-------------------------------------------------------------------------*/
646 static int do_read(struct fsg_common
*common
)
648 struct fsg_lun
*curlun
= common
->curlun
;
650 struct fsg_buffhd
*bh
;
653 loff_t file_offset
, file_offset_tmp
;
658 * Get the starting Logical Block Address and check that it's
661 if (common
->cmnd
[0] == READ_6
)
662 lba
= get_unaligned_be24(&common
->cmnd
[1]);
664 lba
= get_unaligned_be32(&common
->cmnd
[2]);
667 * We allow DPO (Disable Page Out = don't save data in the
668 * cache) and FUA (Force Unit Access = don't read from the
669 * cache), but we don't implement them.
671 if ((common
->cmnd
[1] & ~0x18) != 0) {
672 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
676 if (lba
>= curlun
->num_sectors
) {
677 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
680 file_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
682 /* Carry out the file reads */
683 amount_left
= common
->data_size_from_cmnd
;
684 if (unlikely(amount_left
== 0))
685 return -EIO
; /* No default reply */
689 * Figure out how much we need to read:
690 * Try to read the remaining amount.
691 * But don't read more than the buffer size.
692 * And don't try to read past the end of the file.
694 amount
= min(amount_left
, FSG_BUFLEN
);
695 amount
= min((loff_t
)amount
,
696 curlun
->file_length
- file_offset
);
698 /* Wait for the next buffer to become available */
699 bh
= common
->next_buffhd_to_fill
;
700 while (bh
->state
!= BUF_STATE_EMPTY
) {
701 rc
= sleep_thread(common
, false);
707 * If we were asked to read past the end of file,
708 * end with an empty buffer.
712 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
713 curlun
->sense_data_info
=
714 file_offset
>> curlun
->blkbits
;
715 curlun
->info_valid
= 1;
716 bh
->inreq
->length
= 0;
717 bh
->state
= BUF_STATE_FULL
;
721 /* Perform the read */
722 file_offset_tmp
= file_offset
;
723 nread
= vfs_read(curlun
->filp
,
724 (char __user
*)bh
->buf
,
725 amount
, &file_offset_tmp
);
726 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
727 (unsigned long long)file_offset
, (int)nread
);
728 if (signal_pending(current
))
732 LDBG(curlun
, "error in file read: %d\n", (int)nread
);
734 } else if (nread
< amount
) {
735 LDBG(curlun
, "partial file read: %d/%u\n",
737 nread
= round_down(nread
, curlun
->blksize
);
739 file_offset
+= nread
;
740 amount_left
-= nread
;
741 common
->residue
-= nread
;
744 * Except at the end of the transfer, nread will be
745 * equal to the buffer size, which is divisible by the
746 * bulk-in maxpacket size.
748 bh
->inreq
->length
= nread
;
749 bh
->state
= BUF_STATE_FULL
;
751 /* If an error occurred, report it and its position */
752 if (nread
< amount
) {
753 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
754 curlun
->sense_data_info
=
755 file_offset
>> curlun
->blkbits
;
756 curlun
->info_valid
= 1;
760 if (amount_left
== 0)
761 break; /* No more left to read */
763 /* Send this buffer and go read some more */
765 if (!start_in_transfer(common
, bh
))
766 /* Don't know what to do if common->fsg is NULL */
768 common
->next_buffhd_to_fill
= bh
->next
;
771 return -EIO
; /* No default reply */
775 /*-------------------------------------------------------------------------*/
777 static int do_write(struct fsg_common
*common
)
779 struct fsg_lun
*curlun
= common
->curlun
;
781 struct fsg_buffhd
*bh
;
783 u32 amount_left_to_req
, amount_left_to_write
;
784 loff_t usb_offset
, file_offset
, file_offset_tmp
;
790 curlun
->sense_data
= SS_WRITE_PROTECTED
;
793 spin_lock(&curlun
->filp
->f_lock
);
794 curlun
->filp
->f_flags
&= ~O_SYNC
; /* Default is not to wait */
795 spin_unlock(&curlun
->filp
->f_lock
);
798 * Get the starting Logical Block Address and check that it's
801 if (common
->cmnd
[0] == WRITE_6
)
802 lba
= get_unaligned_be24(&common
->cmnd
[1]);
804 lba
= get_unaligned_be32(&common
->cmnd
[2]);
807 * We allow DPO (Disable Page Out = don't save data in the
808 * cache) and FUA (Force Unit Access = write directly to the
809 * medium). We don't implement DPO; we implement FUA by
810 * performing synchronous output.
812 if (common
->cmnd
[1] & ~0x18) {
813 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
816 if (!curlun
->nofua
&& (common
->cmnd
[1] & 0x08)) { /* FUA */
817 spin_lock(&curlun
->filp
->f_lock
);
818 curlun
->filp
->f_flags
|= O_SYNC
;
819 spin_unlock(&curlun
->filp
->f_lock
);
822 if (lba
>= curlun
->num_sectors
) {
823 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
827 /* Carry out the file writes */
829 file_offset
= usb_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
830 amount_left_to_req
= common
->data_size_from_cmnd
;
831 amount_left_to_write
= common
->data_size_from_cmnd
;
833 while (amount_left_to_write
> 0) {
835 /* Queue a request for more data from the host */
836 bh
= common
->next_buffhd_to_fill
;
837 if (bh
->state
== BUF_STATE_EMPTY
&& get_some_more
) {
840 * Figure out how much we want to get:
841 * Try to get the remaining amount,
842 * but not more than the buffer size.
844 amount
= min(amount_left_to_req
, FSG_BUFLEN
);
846 /* Beyond the end of the backing file? */
847 if (usb_offset
>= curlun
->file_length
) {
850 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
851 curlun
->sense_data_info
=
852 usb_offset
>> curlun
->blkbits
;
853 curlun
->info_valid
= 1;
857 /* Get the next buffer */
858 usb_offset
+= amount
;
859 common
->usb_amount_left
-= amount
;
860 amount_left_to_req
-= amount
;
861 if (amount_left_to_req
== 0)
865 * Except at the end of the transfer, amount will be
866 * equal to the buffer size, which is divisible by
867 * the bulk-out maxpacket size.
869 set_bulk_out_req_length(common
, bh
, amount
);
870 if (!start_out_transfer(common
, bh
))
871 /* Dunno what to do if common->fsg is NULL */
873 common
->next_buffhd_to_fill
= bh
->next
;
877 /* Write the received data to the backing file */
878 bh
= common
->next_buffhd_to_drain
;
879 if (bh
->state
== BUF_STATE_EMPTY
&& !get_some_more
)
880 break; /* We stopped early */
881 if (bh
->state
== BUF_STATE_FULL
) {
883 common
->next_buffhd_to_drain
= bh
->next
;
884 bh
->state
= BUF_STATE_EMPTY
;
886 /* Did something go wrong with the transfer? */
887 if (bh
->outreq
->status
!= 0) {
888 curlun
->sense_data
= SS_COMMUNICATION_FAILURE
;
889 curlun
->sense_data_info
=
890 file_offset
>> curlun
->blkbits
;
891 curlun
->info_valid
= 1;
895 amount
= bh
->outreq
->actual
;
896 if (curlun
->file_length
- file_offset
< amount
) {
898 "write %u @ %llu beyond end %llu\n",
899 amount
, (unsigned long long)file_offset
,
900 (unsigned long long)curlun
->file_length
);
901 amount
= curlun
->file_length
- file_offset
;
904 /* Don't accept excess data. The spec doesn't say
905 * what to do in this case. We'll ignore the error.
907 amount
= min(amount
, bh
->bulk_out_intended_length
);
909 /* Don't write a partial block */
910 amount
= round_down(amount
, curlun
->blksize
);
914 /* Perform the write */
915 file_offset_tmp
= file_offset
;
916 nwritten
= vfs_write(curlun
->filp
,
917 (char __user
*)bh
->buf
,
918 amount
, &file_offset_tmp
);
919 VLDBG(curlun
, "file write %u @ %llu -> %d\n", amount
,
920 (unsigned long long)file_offset
, (int)nwritten
);
921 if (signal_pending(current
))
922 return -EINTR
; /* Interrupted! */
925 LDBG(curlun
, "error in file write: %d\n",
928 } else if (nwritten
< amount
) {
929 LDBG(curlun
, "partial file write: %d/%u\n",
930 (int)nwritten
, amount
);
931 nwritten
= round_down(nwritten
, curlun
->blksize
);
933 file_offset
+= nwritten
;
934 amount_left_to_write
-= nwritten
;
935 common
->residue
-= nwritten
;
937 /* If an error occurred, report it and its position */
938 if (nwritten
< amount
) {
939 curlun
->sense_data
= SS_WRITE_ERROR
;
940 curlun
->sense_data_info
=
941 file_offset
>> curlun
->blkbits
;
942 curlun
->info_valid
= 1;
947 /* Did the host decide to stop early? */
948 if (bh
->outreq
->actual
< bh
->bulk_out_intended_length
) {
949 common
->short_packet_received
= 1;
955 /* Wait for something to happen */
956 rc
= sleep_thread(common
, false);
961 return -EIO
; /* No default reply */
965 /*-------------------------------------------------------------------------*/
967 static int do_synchronize_cache(struct fsg_common
*common
)
969 struct fsg_lun
*curlun
= common
->curlun
;
972 /* We ignore the requested LBA and write out all file's
973 * dirty data buffers. */
974 rc
= fsg_lun_fsync_sub(curlun
);
976 curlun
->sense_data
= SS_WRITE_ERROR
;
981 /*-------------------------------------------------------------------------*/
983 static void invalidate_sub(struct fsg_lun
*curlun
)
985 struct file
*filp
= curlun
->filp
;
986 struct inode
*inode
= file_inode(filp
);
989 rc
= invalidate_mapping_pages(inode
->i_mapping
, 0, -1);
990 VLDBG(curlun
, "invalidate_mapping_pages -> %ld\n", rc
);
993 static int do_verify(struct fsg_common
*common
)
995 struct fsg_lun
*curlun
= common
->curlun
;
997 u32 verification_length
;
998 struct fsg_buffhd
*bh
= common
->next_buffhd_to_fill
;
999 loff_t file_offset
, file_offset_tmp
;
1001 unsigned int amount
;
1005 * Get the starting Logical Block Address and check that it's
1008 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1009 if (lba
>= curlun
->num_sectors
) {
1010 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1015 * We allow DPO (Disable Page Out = don't save data in the
1016 * cache) but we don't implement it.
1018 if (common
->cmnd
[1] & ~0x10) {
1019 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1023 verification_length
= get_unaligned_be16(&common
->cmnd
[7]);
1024 if (unlikely(verification_length
== 0))
1025 return -EIO
; /* No default reply */
1027 /* Prepare to carry out the file verify */
1028 amount_left
= verification_length
<< curlun
->blkbits
;
1029 file_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
1031 /* Write out all the dirty buffers before invalidating them */
1032 fsg_lun_fsync_sub(curlun
);
1033 if (signal_pending(current
))
1036 invalidate_sub(curlun
);
1037 if (signal_pending(current
))
1040 /* Just try to read the requested blocks */
1041 while (amount_left
> 0) {
1043 * Figure out how much we need to read:
1044 * Try to read the remaining amount, but not more than
1046 * And don't try to read past the end of the file.
1048 amount
= min(amount_left
, FSG_BUFLEN
);
1049 amount
= min((loff_t
)amount
,
1050 curlun
->file_length
- file_offset
);
1052 curlun
->sense_data
=
1053 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1054 curlun
->sense_data_info
=
1055 file_offset
>> curlun
->blkbits
;
1056 curlun
->info_valid
= 1;
1060 /* Perform the read */
1061 file_offset_tmp
= file_offset
;
1062 nread
= vfs_read(curlun
->filp
,
1063 (char __user
*) bh
->buf
,
1064 amount
, &file_offset_tmp
);
1065 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1066 (unsigned long long) file_offset
,
1068 if (signal_pending(current
))
1072 LDBG(curlun
, "error in file verify: %d\n", (int)nread
);
1074 } else if (nread
< amount
) {
1075 LDBG(curlun
, "partial file verify: %d/%u\n",
1076 (int)nread
, amount
);
1077 nread
= round_down(nread
, curlun
->blksize
);
1080 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1081 curlun
->sense_data_info
=
1082 file_offset
>> curlun
->blkbits
;
1083 curlun
->info_valid
= 1;
1086 file_offset
+= nread
;
1087 amount_left
-= nread
;
/*-------------------------------------------------------------------------*/
1095 static int do_inquiry(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1097 struct fsg_lun
*curlun
= common
->curlun
;
1098 u8
*buf
= (u8
*) bh
->buf
;
1100 if (!curlun
) { /* Unsupported LUNs are okay */
1101 common
->bad_lun_okay
= 1;
1103 buf
[0] = TYPE_NO_LUN
; /* Unsupported, no device-type */
1104 buf
[4] = 31; /* Additional length */
1108 buf
[0] = curlun
->cdrom
? TYPE_ROM
: TYPE_DISK
;
1109 buf
[1] = curlun
->removable
? 0x80 : 0;
1110 buf
[2] = 2; /* ANSI SCSI level 2 */
1111 buf
[3] = 2; /* SCSI-2 INQUIRY data format */
1112 buf
[4] = 31; /* Additional length */
1113 buf
[5] = 0; /* No special options */
1116 if (curlun
->inquiry_string
[0])
1117 memcpy(buf
+ 8, curlun
->inquiry_string
,
1118 sizeof(curlun
->inquiry_string
));
1120 memcpy(buf
+ 8, common
->inquiry_string
,
1121 sizeof(common
->inquiry_string
));
1125 static int do_request_sense(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1127 struct fsg_lun
*curlun
= common
->curlun
;
1128 u8
*buf
= (u8
*) bh
->buf
;
1133 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1135 * If a REQUEST SENSE command is received from an initiator
1136 * with a pending unit attention condition (before the target
1137 * generates the contingent allegiance condition), then the
1138 * target shall either:
1139 * a) report any pending sense data and preserve the unit
1140 * attention condition on the logical unit, or,
1141 * b) report the unit attention condition, may discard any
1142 * pending sense data, and clear the unit attention
1143 * condition on the logical unit for that initiator.
1145 * FSG normally uses option a); enable this code to use option b).
1148 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
) {
1149 curlun
->sense_data
= curlun
->unit_attention_data
;
1150 curlun
->unit_attention_data
= SS_NO_SENSE
;
1154 if (!curlun
) { /* Unsupported LUNs are okay */
1155 common
->bad_lun_okay
= 1;
1156 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
1160 sd
= curlun
->sense_data
;
1161 sdinfo
= curlun
->sense_data_info
;
1162 valid
= curlun
->info_valid
<< 7;
1163 curlun
->sense_data
= SS_NO_SENSE
;
1164 curlun
->sense_data_info
= 0;
1165 curlun
->info_valid
= 0;
1169 buf
[0] = valid
| 0x70; /* Valid, current error */
1171 put_unaligned_be32(sdinfo
, &buf
[3]); /* Sense information */
1172 buf
[7] = 18 - 8; /* Additional sense length */
1178 static int do_read_capacity(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1180 struct fsg_lun
*curlun
= common
->curlun
;
1181 u32 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1182 int pmi
= common
->cmnd
[8];
1183 u8
*buf
= (u8
*)bh
->buf
;
1185 /* Check the PMI and LBA fields */
1186 if (pmi
> 1 || (pmi
== 0 && lba
!= 0)) {
1187 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1191 put_unaligned_be32(curlun
->num_sectors
- 1, &buf
[0]);
1192 /* Max logical block */
1193 put_unaligned_be32(curlun
->blksize
, &buf
[4]);/* Block length */
1197 static int do_read_header(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1199 struct fsg_lun
*curlun
= common
->curlun
;
1200 int msf
= common
->cmnd
[1] & 0x02;
1201 u32 lba
= get_unaligned_be32(&common
->cmnd
[2]);
1202 u8
*buf
= (u8
*)bh
->buf
;
1204 if (common
->cmnd
[1] & ~0x02) { /* Mask away MSF */
1205 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1208 if (lba
>= curlun
->num_sectors
) {
1209 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1214 buf
[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1215 store_cdrom_address(&buf
[4], msf
, lba
);
1219 static int do_read_toc(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1221 struct fsg_lun
*curlun
= common
->curlun
;
1222 int msf
= common
->cmnd
[1] & 0x02;
1223 int start_track
= common
->cmnd
[6];
1224 u8
*buf
= (u8
*)bh
->buf
;
1226 if ((common
->cmnd
[1] & ~0x02) != 0 || /* Mask away MSF */
1228 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1233 buf
[1] = (20-2); /* TOC data length */
1234 buf
[2] = 1; /* First track number */
1235 buf
[3] = 1; /* Last track number */
1236 buf
[5] = 0x16; /* Data track, copying allowed */
1237 buf
[6] = 0x01; /* Only track is number 1 */
1238 store_cdrom_address(&buf
[8], msf
, 0);
1240 buf
[13] = 0x16; /* Lead-out track is data */
1241 buf
[14] = 0xAA; /* Lead-out track number */
1242 store_cdrom_address(&buf
[16], msf
, curlun
->num_sectors
);
1246 static int do_mode_sense(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1248 struct fsg_lun
*curlun
= common
->curlun
;
1249 int mscmnd
= common
->cmnd
[0];
1250 u8
*buf
= (u8
*) bh
->buf
;
1253 int changeable_values
, all_pages
;
1257 if ((common
->cmnd
[1] & ~0x08) != 0) { /* Mask away DBD */
1258 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1261 pc
= common
->cmnd
[2] >> 6;
1262 page_code
= common
->cmnd
[2] & 0x3f;
1264 curlun
->sense_data
= SS_SAVING_PARAMETERS_NOT_SUPPORTED
;
1267 changeable_values
= (pc
== 1);
1268 all_pages
= (page_code
== 0x3f);
1271 * Write the mode parameter header. Fixed values are: default
1272 * medium type, no cache control (DPOFUA), and no block descriptors.
1273 * The only variable value is the WriteProtect bit. We will fill in
1274 * the mode data length later.
1277 if (mscmnd
== MODE_SENSE
) {
1278 buf
[2] = (curlun
->ro
? 0x80 : 0x00); /* WP, DPOFUA */
1281 } else { /* MODE_SENSE_10 */
1282 buf
[3] = (curlun
->ro
? 0x80 : 0x00); /* WP, DPOFUA */
1284 limit
= 65535; /* Should really be FSG_BUFLEN */
1287 /* No block descriptors */
1290 * The mode pages, in numerical order. The only page we support
1291 * is the Caching page.
1293 if (page_code
== 0x08 || all_pages
) {
1295 buf
[0] = 0x08; /* Page code */
1296 buf
[1] = 10; /* Page length */
1297 memset(buf
+2, 0, 10); /* None of the fields are changeable */
1299 if (!changeable_values
) {
1300 buf
[2] = 0x04; /* Write cache enable, */
1301 /* Read cache not disabled */
1302 /* No cache retention priorities */
1303 put_unaligned_be16(0xffff, &buf
[4]);
1304 /* Don't disable prefetch */
1305 /* Minimum prefetch = 0 */
1306 put_unaligned_be16(0xffff, &buf
[8]);
1307 /* Maximum prefetch */
1308 put_unaligned_be16(0xffff, &buf
[10]);
1309 /* Maximum prefetch ceiling */
1315 * Check that a valid page was requested and the mode data length
1319 if (!valid_page
|| len
> limit
) {
1320 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1324 /* Store the mode data length */
1325 if (mscmnd
== MODE_SENSE
)
1328 put_unaligned_be16(len
- 2, buf0
);
1332 static int do_start_stop(struct fsg_common
*common
)
1334 struct fsg_lun
*curlun
= common
->curlun
;
1339 } else if (!curlun
->removable
) {
1340 curlun
->sense_data
= SS_INVALID_COMMAND
;
1342 } else if ((common
->cmnd
[1] & ~0x01) != 0 || /* Mask away Immed */
1343 (common
->cmnd
[4] & ~0x03) != 0) { /* Mask LoEj, Start */
1344 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1348 loej
= common
->cmnd
[4] & 0x02;
1349 start
= common
->cmnd
[4] & 0x01;
1352 * Our emulation doesn't support mounting; the medium is
1353 * available for use as soon as it is loaded.
1356 if (!fsg_lun_is_open(curlun
)) {
1357 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
1363 /* Are we allowed to unload the media? */
1364 if (curlun
->prevent_medium_removal
) {
1365 LDBG(curlun
, "unload attempt prevented\n");
1366 curlun
->sense_data
= SS_MEDIUM_REMOVAL_PREVENTED
;
1373 up_read(&common
->filesem
);
1374 down_write(&common
->filesem
);
1375 fsg_lun_close(curlun
);
1376 up_write(&common
->filesem
);
1377 down_read(&common
->filesem
);
1382 static int do_prevent_allow(struct fsg_common
*common
)
1384 struct fsg_lun
*curlun
= common
->curlun
;
1387 if (!common
->curlun
) {
1389 } else if (!common
->curlun
->removable
) {
1390 common
->curlun
->sense_data
= SS_INVALID_COMMAND
;
1394 prevent
= common
->cmnd
[4] & 0x01;
1395 if ((common
->cmnd
[4] & ~0x01) != 0) { /* Mask away Prevent */
1396 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1400 if (curlun
->prevent_medium_removal
&& !prevent
)
1401 fsg_lun_fsync_sub(curlun
);
1402 curlun
->prevent_medium_removal
= prevent
;
1406 static int do_read_format_capacities(struct fsg_common
*common
,
1407 struct fsg_buffhd
*bh
)
1409 struct fsg_lun
*curlun
= common
->curlun
;
1410 u8
*buf
= (u8
*) bh
->buf
;
1412 buf
[0] = buf
[1] = buf
[2] = 0;
1413 buf
[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1416 put_unaligned_be32(curlun
->num_sectors
, &buf
[0]);
1417 /* Number of blocks */
1418 put_unaligned_be32(curlun
->blksize
, &buf
[4]);/* Block length */
1419 buf
[4] = 0x02; /* Current capacity */
1423 static int do_mode_select(struct fsg_common
*common
, struct fsg_buffhd
*bh
)
1425 struct fsg_lun
*curlun
= common
->curlun
;
1427 /* We don't support MODE SELECT */
1429 curlun
->sense_data
= SS_INVALID_COMMAND
;
/*-------------------------------------------------------------------------*/
1436 static int halt_bulk_in_endpoint(struct fsg_dev
*fsg
)
1440 rc
= fsg_set_halt(fsg
, fsg
->bulk_in
);
1442 VDBG(fsg
, "delayed bulk-in endpoint halt\n");
1444 if (rc
!= -EAGAIN
) {
1445 WARNING(fsg
, "usb_ep_set_halt -> %d\n", rc
);
1450 /* Wait for a short time and then try again */
1451 if (msleep_interruptible(100) != 0)
1453 rc
= usb_ep_set_halt(fsg
->bulk_in
);
1458 static int wedge_bulk_in_endpoint(struct fsg_dev
*fsg
)
1462 DBG(fsg
, "bulk-in set wedge\n");
1463 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1465 VDBG(fsg
, "delayed bulk-in endpoint wedge\n");
1467 if (rc
!= -EAGAIN
) {
1468 WARNING(fsg
, "usb_ep_set_wedge -> %d\n", rc
);
1473 /* Wait for a short time and then try again */
1474 if (msleep_interruptible(100) != 0)
1476 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1481 static int throw_away_data(struct fsg_common
*common
)
1483 struct fsg_buffhd
*bh
;
1487 for (bh
= common
->next_buffhd_to_drain
;
1488 bh
->state
!= BUF_STATE_EMPTY
|| common
->usb_amount_left
> 0;
1489 bh
= common
->next_buffhd_to_drain
) {
1491 /* Throw away the data in a filled buffer */
1492 if (bh
->state
== BUF_STATE_FULL
) {
1494 bh
->state
= BUF_STATE_EMPTY
;
1495 common
->next_buffhd_to_drain
= bh
->next
;
1497 /* A short packet or an error ends everything */
1498 if (bh
->outreq
->actual
< bh
->bulk_out_intended_length
||
1499 bh
->outreq
->status
!= 0) {
1500 raise_exception(common
,
1501 FSG_STATE_ABORT_BULK_OUT
);
1507 /* Try to submit another request if we need one */
1508 bh
= common
->next_buffhd_to_fill
;
1509 if (bh
->state
== BUF_STATE_EMPTY
1510 && common
->usb_amount_left
> 0) {
1511 amount
= min(common
->usb_amount_left
, FSG_BUFLEN
);
1514 * Except at the end of the transfer, amount will be
1515 * equal to the buffer size, which is divisible by
1516 * the bulk-out maxpacket size.
1518 set_bulk_out_req_length(common
, bh
, amount
);
1519 if (!start_out_transfer(common
, bh
))
1520 /* Dunno what to do if common->fsg is NULL */
1522 common
->next_buffhd_to_fill
= bh
->next
;
1523 common
->usb_amount_left
-= amount
;
1527 /* Otherwise wait for something to happen */
1528 rc
= sleep_thread(common
, true);
1535 static int finish_reply(struct fsg_common
*common
)
1537 struct fsg_buffhd
*bh
= common
->next_buffhd_to_fill
;
1540 switch (common
->data_dir
) {
1542 break; /* Nothing to send */
1545 * If we don't know whether the host wants to read or write,
1546 * this must be CB or CBI with an unknown command. We mustn't
1547 * try to send or receive any data. So stall both bulk pipes
1548 * if we can and wait for a reset.
1550 case DATA_DIR_UNKNOWN
:
1551 if (!common
->can_stall
) {
1553 } else if (fsg_is_set(common
)) {
1554 fsg_set_halt(common
->fsg
, common
->fsg
->bulk_out
);
1555 rc
= halt_bulk_in_endpoint(common
->fsg
);
1557 /* Don't know what to do if common->fsg is NULL */
1562 /* All but the last buffer of data must have already been sent */
1563 case DATA_DIR_TO_HOST
:
1564 if (common
->data_size
== 0) {
1565 /* Nothing to send */
1567 /* Don't know what to do if common->fsg is NULL */
1568 } else if (!fsg_is_set(common
)) {
1571 /* If there's no residue, simply send the last buffer */
1572 } else if (common
->residue
== 0) {
1573 bh
->inreq
->zero
= 0;
1574 if (!start_in_transfer(common
, bh
))
1576 common
->next_buffhd_to_fill
= bh
->next
;
1579 * For Bulk-only, mark the end of the data with a short
1580 * packet. If we are allowed to stall, halt the bulk-in
1581 * endpoint. (Note: This violates the Bulk-Only Transport
1582 * specification, which requires us to pad the data if we
1583 * don't halt the endpoint. Presumably nobody will mind.)
1586 bh
->inreq
->zero
= 1;
1587 if (!start_in_transfer(common
, bh
))
1589 common
->next_buffhd_to_fill
= bh
->next
;
1590 if (common
->can_stall
)
1591 rc
= halt_bulk_in_endpoint(common
->fsg
);
1596 * We have processed all we want from the data the host has sent.
1597 * There may still be outstanding bulk-out requests.
1599 case DATA_DIR_FROM_HOST
:
1600 if (common
->residue
== 0) {
1601 /* Nothing to receive */
1603 /* Did the host stop sending unexpectedly early? */
1604 } else if (common
->short_packet_received
) {
1605 raise_exception(common
, FSG_STATE_ABORT_BULK_OUT
);
1609 * We haven't processed all the incoming data. Even though
1610 * we may be allowed to stall, doing so would cause a race.
1611 * The controller may already have ACK'ed all the remaining
1612 * bulk-out packets, in which case the host wouldn't see a
1613 * STALL. Not realizing the endpoint was halted, it wouldn't
1614 * clear the halt -- leading to problems later on.
1617 } else if (common
->can_stall
) {
1618 if (fsg_is_set(common
))
1619 fsg_set_halt(common
->fsg
,
1620 common
->fsg
->bulk_out
);
1621 raise_exception(common
, FSG_STATE_ABORT_BULK_OUT
);
1626 * We can't stall. Read in the excess data and throw it
1630 rc
= throw_away_data(common
);
1637 static int send_status(struct fsg_common
*common
)
1639 struct fsg_lun
*curlun
= common
->curlun
;
1640 struct fsg_buffhd
*bh
;
1641 struct bulk_cs_wrap
*csw
;
1643 u8 status
= US_BULK_STAT_OK
;
1646 /* Wait for the next buffer to become available */
1647 bh
= common
->next_buffhd_to_fill
;
1648 while (bh
->state
!= BUF_STATE_EMPTY
) {
1649 rc
= sleep_thread(common
, true);
1655 sd
= curlun
->sense_data
;
1656 sdinfo
= curlun
->sense_data_info
;
1657 } else if (common
->bad_lun_okay
)
1660 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
1662 if (common
->phase_error
) {
1663 DBG(common
, "sending phase-error status\n");
1664 status
= US_BULK_STAT_PHASE
;
1665 sd
= SS_INVALID_COMMAND
;
1666 } else if (sd
!= SS_NO_SENSE
) {
1667 DBG(common
, "sending command-failure status\n");
1668 status
= US_BULK_STAT_FAIL
;
1669 VDBG(common
, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1671 SK(sd
), ASC(sd
), ASCQ(sd
), sdinfo
);
1674 /* Store and send the Bulk-only CSW */
1675 csw
= (void *)bh
->buf
;
1677 csw
->Signature
= cpu_to_le32(US_BULK_CS_SIGN
);
1678 csw
->Tag
= common
->tag
;
1679 csw
->Residue
= cpu_to_le32(common
->residue
);
1680 csw
->Status
= status
;
1682 bh
->inreq
->length
= US_BULK_CS_WRAP_LEN
;
1683 bh
->inreq
->zero
= 0;
1684 if (!start_in_transfer(common
, bh
))
1685 /* Don't know what to do if common->fsg is NULL */
1688 common
->next_buffhd_to_fill
= bh
->next
;
/*-------------------------------------------------------------------------*/
1696 * Check whether the command is properly formed and whether its data size
1697 * and direction agree with the values we already have.
1699 static int check_command(struct fsg_common
*common
, int cmnd_size
,
1700 enum data_direction data_dir
, unsigned int mask
,
1701 int needs_medium
, const char *name
)
1704 unsigned int lun
= common
->cmnd
[1] >> 5;
1705 static const char dirletter
[4] = {'u', 'o', 'i', 'n'};
1707 struct fsg_lun
*curlun
;
1710 if (common
->data_dir
!= DATA_DIR_UNKNOWN
)
1711 sprintf(hdlen
, ", H%c=%u", dirletter
[(int) common
->data_dir
],
1713 VDBG(common
, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1714 name
, cmnd_size
, dirletter
[(int) data_dir
],
1715 common
->data_size_from_cmnd
, common
->cmnd_size
, hdlen
);
1718 * We can't reply at all until we know the correct data direction
1721 if (common
->data_size_from_cmnd
== 0)
1722 data_dir
= DATA_DIR_NONE
;
1723 if (common
->data_size
< common
->data_size_from_cmnd
) {
1725 * Host data size < Device data size is a phase error.
1726 * Carry out the command, but only transfer as much as
1729 common
->data_size_from_cmnd
= common
->data_size
;
1730 common
->phase_error
= 1;
1732 common
->residue
= common
->data_size
;
1733 common
->usb_amount_left
= common
->data_size
;
1735 /* Conflicting data directions is a phase error */
1736 if (common
->data_dir
!= data_dir
&& common
->data_size_from_cmnd
> 0) {
1737 common
->phase_error
= 1;
1741 /* Verify the length of the command itself */
1742 if (cmnd_size
!= common
->cmnd_size
) {
1745 * Special case workaround: There are plenty of buggy SCSI
1746 * implementations. Many have issues with cbw->Length
1747 * field passing a wrong command size. For those cases we
1748 * always try to work around the problem by using the length
1749 * sent by the host side provided it is at least as large
1750 * as the correct command length.
1751 * Examples of such cases would be MS-Windows, which issues
1752 * REQUEST SENSE with cbw->Length == 12 where it should
1753 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1754 * REQUEST SENSE with cbw->Length == 10 where it should
1757 if (cmnd_size
<= common
->cmnd_size
) {
1758 DBG(common
, "%s is buggy! Expected length %d "
1759 "but we got %d\n", name
,
1760 cmnd_size
, common
->cmnd_size
);
1761 cmnd_size
= common
->cmnd_size
;
1763 common
->phase_error
= 1;
1768 /* Check that the LUN values are consistent */
1769 if (common
->lun
!= lun
)
1770 DBG(common
, "using LUN %u from CBW, not LUN %u from CDB\n",
1774 curlun
= common
->curlun
;
1776 if (common
->cmnd
[0] != REQUEST_SENSE
) {
1777 curlun
->sense_data
= SS_NO_SENSE
;
1778 curlun
->sense_data_info
= 0;
1779 curlun
->info_valid
= 0;
1782 common
->bad_lun_okay
= 0;
1785 * INQUIRY and REQUEST SENSE commands are explicitly allowed
1786 * to use unsupported LUNs; all others may not.
1788 if (common
->cmnd
[0] != INQUIRY
&&
1789 common
->cmnd
[0] != REQUEST_SENSE
) {
1790 DBG(common
, "unsupported LUN %u\n", common
->lun
);
1796 * If a unit attention condition exists, only INQUIRY and
1797 * REQUEST SENSE commands are allowed; anything else must fail.
1799 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
&&
1800 common
->cmnd
[0] != INQUIRY
&&
1801 common
->cmnd
[0] != REQUEST_SENSE
) {
1802 curlun
->sense_data
= curlun
->unit_attention_data
;
1803 curlun
->unit_attention_data
= SS_NO_SENSE
;
1807 /* Check that only command bytes listed in the mask are non-zero */
1808 common
->cmnd
[1] &= 0x1f; /* Mask away the LUN */
1809 for (i
= 1; i
< cmnd_size
; ++i
) {
1810 if (common
->cmnd
[i
] && !(mask
& (1 << i
))) {
1812 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1817 /* If the medium isn't mounted and the command needs to access
1818 * it, return an error. */
1819 if (curlun
&& !fsg_lun_is_open(curlun
) && needs_medium
) {
1820 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
1827 /* wrapper of check_command for data size in blocks handling */
1828 static int check_command_size_in_blocks(struct fsg_common
*common
,
1829 int cmnd_size
, enum data_direction data_dir
,
1830 unsigned int mask
, int needs_medium
, const char *name
)
1833 common
->data_size_from_cmnd
<<= common
->curlun
->blkbits
;
1834 return check_command(common
, cmnd_size
, data_dir
,
1835 mask
, needs_medium
, name
);
1838 static int do_scsi_command(struct fsg_common
*common
)
1840 struct fsg_buffhd
*bh
;
1842 int reply
= -EINVAL
;
1844 static char unknown
[16];
1848 /* Wait for the next buffer to become available for data or status */
1849 bh
= common
->next_buffhd_to_fill
;
1850 common
->next_buffhd_to_drain
= bh
;
1851 while (bh
->state
!= BUF_STATE_EMPTY
) {
1852 rc
= sleep_thread(common
, true);
1856 common
->phase_error
= 0;
1857 common
->short_packet_received
= 0;
1859 down_read(&common
->filesem
); /* We're using the backing file */
1860 switch (common
->cmnd
[0]) {
1863 common
->data_size_from_cmnd
= common
->cmnd
[4];
1864 reply
= check_command(common
, 6, DATA_DIR_TO_HOST
,
1868 reply
= do_inquiry(common
, bh
);
1872 common
->data_size_from_cmnd
= common
->cmnd
[4];
1873 reply
= check_command(common
, 6, DATA_DIR_FROM_HOST
,
1877 reply
= do_mode_select(common
, bh
);
1880 case MODE_SELECT_10
:
1881 common
->data_size_from_cmnd
=
1882 get_unaligned_be16(&common
->cmnd
[7]);
1883 reply
= check_command(common
, 10, DATA_DIR_FROM_HOST
,
1887 reply
= do_mode_select(common
, bh
);
1891 common
->data_size_from_cmnd
= common
->cmnd
[4];
1892 reply
= check_command(common
, 6, DATA_DIR_TO_HOST
,
1893 (1<<1) | (1<<2) | (1<<4), 0,
1896 reply
= do_mode_sense(common
, bh
);
1900 common
->data_size_from_cmnd
=
1901 get_unaligned_be16(&common
->cmnd
[7]);
1902 reply
= check_command(common
, 10, DATA_DIR_TO_HOST
,
1903 (1<<1) | (1<<2) | (3<<7), 0,
1906 reply
= do_mode_sense(common
, bh
);
1909 case ALLOW_MEDIUM_REMOVAL
:
1910 common
->data_size_from_cmnd
= 0;
1911 reply
= check_command(common
, 6, DATA_DIR_NONE
,
1913 "PREVENT-ALLOW MEDIUM REMOVAL");
1915 reply
= do_prevent_allow(common
);
1919 i
= common
->cmnd
[4];
1920 common
->data_size_from_cmnd
= (i
== 0) ? 256 : i
;
1921 reply
= check_command_size_in_blocks(common
, 6,
1926 reply
= do_read(common
);
1930 common
->data_size_from_cmnd
=
1931 get_unaligned_be16(&common
->cmnd
[7]);
1932 reply
= check_command_size_in_blocks(common
, 10,
1934 (1<<1) | (0xf<<2) | (3<<7), 1,
1937 reply
= do_read(common
);
1941 common
->data_size_from_cmnd
=
1942 get_unaligned_be32(&common
->cmnd
[6]);
1943 reply
= check_command_size_in_blocks(common
, 12,
1945 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1948 reply
= do_read(common
);
1952 common
->data_size_from_cmnd
= 8;
1953 reply
= check_command(common
, 10, DATA_DIR_TO_HOST
,
1954 (0xf<<2) | (1<<8), 1,
1957 reply
= do_read_capacity(common
, bh
);
1961 if (!common
->curlun
|| !common
->curlun
->cdrom
)
1963 common
->data_size_from_cmnd
=
1964 get_unaligned_be16(&common
->cmnd
[7]);
1965 reply
= check_command(common
, 10, DATA_DIR_TO_HOST
,
1966 (3<<7) | (0x1f<<1), 1,
1969 reply
= do_read_header(common
, bh
);
1973 if (!common
->curlun
|| !common
->curlun
->cdrom
)
1975 common
->data_size_from_cmnd
=
1976 get_unaligned_be16(&common
->cmnd
[7]);
1977 reply
= check_command(common
, 10, DATA_DIR_TO_HOST
,
1981 reply
= do_read_toc(common
, bh
);
1984 case READ_FORMAT_CAPACITIES
:
1985 common
->data_size_from_cmnd
=
1986 get_unaligned_be16(&common
->cmnd
[7]);
1987 reply
= check_command(common
, 10, DATA_DIR_TO_HOST
,
1989 "READ FORMAT CAPACITIES");
1991 reply
= do_read_format_capacities(common
, bh
);
1995 common
->data_size_from_cmnd
= common
->cmnd
[4];
1996 reply
= check_command(common
, 6, DATA_DIR_TO_HOST
,
2000 reply
= do_request_sense(common
, bh
);
2004 common
->data_size_from_cmnd
= 0;
2005 reply
= check_command(common
, 6, DATA_DIR_NONE
,
2009 reply
= do_start_stop(common
);
2012 case SYNCHRONIZE_CACHE
:
2013 common
->data_size_from_cmnd
= 0;
2014 reply
= check_command(common
, 10, DATA_DIR_NONE
,
2015 (0xf<<2) | (3<<7), 1,
2016 "SYNCHRONIZE CACHE");
2018 reply
= do_synchronize_cache(common
);
2021 case TEST_UNIT_READY
:
2022 common
->data_size_from_cmnd
= 0;
2023 reply
= check_command(common
, 6, DATA_DIR_NONE
,
2029 * Although optional, this command is used by MS-Windows. We
2030 * support a minimal version: BytChk must be 0.
2033 common
->data_size_from_cmnd
= 0;
2034 reply
= check_command(common
, 10, DATA_DIR_NONE
,
2035 (1<<1) | (0xf<<2) | (3<<7), 1,
2038 reply
= do_verify(common
);
2042 i
= common
->cmnd
[4];
2043 common
->data_size_from_cmnd
= (i
== 0) ? 256 : i
;
2044 reply
= check_command_size_in_blocks(common
, 6,
2049 reply
= do_write(common
);
2053 common
->data_size_from_cmnd
=
2054 get_unaligned_be16(&common
->cmnd
[7]);
2055 reply
= check_command_size_in_blocks(common
, 10,
2057 (1<<1) | (0xf<<2) | (3<<7), 1,
2060 reply
= do_write(common
);
2064 common
->data_size_from_cmnd
=
2065 get_unaligned_be32(&common
->cmnd
[6]);
2066 reply
= check_command_size_in_blocks(common
, 12,
2068 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2071 reply
= do_write(common
);
2075 * Some mandatory commands that we recognize but don't implement.
2076 * They don't mean much in this setting. It's left as an exercise
2077 * for anyone interested to implement RESERVE and RELEASE in terms
2083 case SEND_DIAGNOSTIC
:
2088 common
->data_size_from_cmnd
= 0;
2089 sprintf(unknown
, "Unknown x%02x", common
->cmnd
[0]);
2090 reply
= check_command(common
, common
->cmnd_size
,
2091 DATA_DIR_UNKNOWN
, ~0, 0, unknown
);
2093 common
->curlun
->sense_data
= SS_INVALID_COMMAND
;
2098 up_read(&common
->filesem
);
2100 if (reply
== -EINTR
|| signal_pending(current
))
2103 /* Set up the single reply buffer for finish_reply() */
2104 if (reply
== -EINVAL
)
2105 reply
= 0; /* Error reply length */
2106 if (reply
>= 0 && common
->data_dir
== DATA_DIR_TO_HOST
) {
2107 reply
= min((u32
)reply
, common
->data_size_from_cmnd
);
2108 bh
->inreq
->length
= reply
;
2109 bh
->state
= BUF_STATE_FULL
;
2110 common
->residue
-= reply
;
2111 } /* Otherwise it's already set */
/*-------------------------------------------------------------------------*/
2119 static int received_cbw(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
2121 struct usb_request
*req
= bh
->outreq
;
2122 struct bulk_cb_wrap
*cbw
= req
->buf
;
2123 struct fsg_common
*common
= fsg
->common
;
2125 /* Was this a real packet? Should it be ignored? */
2126 if (req
->status
|| test_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
))
2129 /* Is the CBW valid? */
2130 if (req
->actual
!= US_BULK_CB_WRAP_LEN
||
2131 cbw
->Signature
!= cpu_to_le32(
2133 DBG(fsg
, "invalid CBW: len %u sig 0x%x\n",
2135 le32_to_cpu(cbw
->Signature
));
2138 * The Bulk-only spec says we MUST stall the IN endpoint
2139 * (6.6.1), so it's unavoidable. It also says we must
2140 * retain this state until the next reset, but there's
2141 * no way to tell the controller driver it should ignore
2142 * Clear-Feature(HALT) requests.
2144 * We aren't required to halt the OUT endpoint; instead
2145 * we can simply accept and discard any data received
2146 * until the next reset.
2148 wedge_bulk_in_endpoint(fsg
);
2149 set_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
);
2153 /* Is the CBW meaningful? */
2154 if (cbw
->Lun
>= ARRAY_SIZE(common
->luns
) ||
2155 cbw
->Flags
& ~US_BULK_FLAG_IN
|| cbw
->Length
<= 0 ||
2156 cbw
->Length
> MAX_COMMAND_SIZE
) {
2157 DBG(fsg
, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2159 cbw
->Lun
, cbw
->Flags
, cbw
->Length
);
2162 * We can do anything we want here, so let's stall the
2163 * bulk pipes if we are allowed to.
2165 if (common
->can_stall
) {
2166 fsg_set_halt(fsg
, fsg
->bulk_out
);
2167 halt_bulk_in_endpoint(fsg
);
2172 /* Save the command for later */
2173 common
->cmnd_size
= cbw
->Length
;
2174 memcpy(common
->cmnd
, cbw
->CDB
, common
->cmnd_size
);
2175 if (cbw
->Flags
& US_BULK_FLAG_IN
)
2176 common
->data_dir
= DATA_DIR_TO_HOST
;
2178 common
->data_dir
= DATA_DIR_FROM_HOST
;
2179 common
->data_size
= le32_to_cpu(cbw
->DataTransferLength
);
2180 if (common
->data_size
== 0)
2181 common
->data_dir
= DATA_DIR_NONE
;
2182 common
->lun
= cbw
->Lun
;
2183 if (common
->lun
< ARRAY_SIZE(common
->luns
))
2184 common
->curlun
= common
->luns
[common
->lun
];
2186 common
->curlun
= NULL
;
2187 common
->tag
= cbw
->Tag
;
2191 static int get_next_command(struct fsg_common
*common
)
2193 struct fsg_buffhd
*bh
;
2196 /* Wait for the next buffer to become available */
2197 bh
= common
->next_buffhd_to_fill
;
2198 while (bh
->state
!= BUF_STATE_EMPTY
) {
2199 rc
= sleep_thread(common
, true);
2204 /* Queue a request to read a Bulk-only CBW */
2205 set_bulk_out_req_length(common
, bh
, US_BULK_CB_WRAP_LEN
);
2206 if (!start_out_transfer(common
, bh
))
2207 /* Don't know what to do if common->fsg is NULL */
2211 * We will drain the buffer in software, which means we
2212 * can reuse it for the next filling. No need to advance
2213 * next_buffhd_to_fill.
2216 /* Wait for the CBW to arrive */
2217 while (bh
->state
!= BUF_STATE_FULL
) {
2218 rc
= sleep_thread(common
, true);
2223 rc
= fsg_is_set(common
) ? received_cbw(common
->fsg
, bh
) : -EIO
;
2224 bh
->state
= BUF_STATE_EMPTY
;
/*-------------------------------------------------------------------------*/
2232 static int alloc_request(struct fsg_common
*common
, struct usb_ep
*ep
,
2233 struct usb_request
**preq
)
2235 *preq
= usb_ep_alloc_request(ep
, GFP_ATOMIC
);
2238 ERROR(common
, "can't allocate request for %s\n", ep
->name
);
2242 /* Reset interface setting and re-init endpoint state (toggle etc). */
2243 static int do_set_interface(struct fsg_common
*common
, struct fsg_dev
*new_fsg
)
2245 struct fsg_dev
*fsg
;
2248 if (common
->running
)
2249 DBG(common
, "reset interface\n");
2252 /* Deallocate the requests */
2256 for (i
= 0; i
< common
->fsg_num_buffers
; ++i
) {
2257 struct fsg_buffhd
*bh
= &common
->buffhds
[i
];
2260 usb_ep_free_request(fsg
->bulk_in
, bh
->inreq
);
2264 usb_ep_free_request(fsg
->bulk_out
, bh
->outreq
);
2269 /* Disable the endpoints */
2270 if (fsg
->bulk_in_enabled
) {
2271 usb_ep_disable(fsg
->bulk_in
);
2272 fsg
->bulk_in_enabled
= 0;
2274 if (fsg
->bulk_out_enabled
) {
2275 usb_ep_disable(fsg
->bulk_out
);
2276 fsg
->bulk_out_enabled
= 0;
2280 wake_up(&common
->fsg_wait
);
2283 common
->running
= 0;
2287 common
->fsg
= new_fsg
;
2290 /* Enable the endpoints */
2291 rc
= config_ep_by_speed(common
->gadget
, &(fsg
->function
), fsg
->bulk_in
);
2294 rc
= usb_ep_enable(fsg
->bulk_in
);
2297 fsg
->bulk_in
->driver_data
= common
;
2298 fsg
->bulk_in_enabled
= 1;
2300 rc
= config_ep_by_speed(common
->gadget
, &(fsg
->function
),
2304 rc
= usb_ep_enable(fsg
->bulk_out
);
2307 fsg
->bulk_out
->driver_data
= common
;
2308 fsg
->bulk_out_enabled
= 1;
2309 common
->bulk_out_maxpacket
= usb_endpoint_maxp(fsg
->bulk_out
->desc
);
2310 clear_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
);
2312 /* Allocate the requests */
2313 for (i
= 0; i
< common
->fsg_num_buffers
; ++i
) {
2314 struct fsg_buffhd
*bh
= &common
->buffhds
[i
];
2316 rc
= alloc_request(common
, fsg
->bulk_in
, &bh
->inreq
);
2319 rc
= alloc_request(common
, fsg
->bulk_out
, &bh
->outreq
);
2322 bh
->inreq
->buf
= bh
->outreq
->buf
= bh
->buf
;
2323 bh
->inreq
->context
= bh
->outreq
->context
= bh
;
2324 bh
->inreq
->complete
= bulk_in_complete
;
2325 bh
->outreq
->complete
= bulk_out_complete
;
2328 common
->running
= 1;
2329 for (i
= 0; i
< ARRAY_SIZE(common
->luns
); ++i
)
2330 if (common
->luns
[i
])
2331 common
->luns
[i
]->unit_attention_data
=
/****************************** ALT CONFIGS ******************************/
2339 static int fsg_set_alt(struct usb_function
*f
, unsigned intf
, unsigned alt
)
2341 struct fsg_dev
*fsg
= fsg_from_func(f
);
2342 fsg
->common
->new_fsg
= fsg
;
2343 raise_exception(fsg
->common
, FSG_STATE_CONFIG_CHANGE
);
2344 return USB_GADGET_DELAYED_STATUS
;
2347 static void fsg_disable(struct usb_function
*f
)
2349 struct fsg_dev
*fsg
= fsg_from_func(f
);
2350 fsg
->common
->new_fsg
= NULL
;
2351 raise_exception(fsg
->common
, FSG_STATE_CONFIG_CHANGE
);
/*-------------------------------------------------------------------------*/
2357 static void handle_exception(struct fsg_common
*common
)
2360 struct fsg_buffhd
*bh
;
2361 enum fsg_state old_state
;
2362 struct fsg_lun
*curlun
;
2363 unsigned int exception_req_tag
;
2366 * Clear the existing signals. Anything but SIGUSR1 is converted
2367 * into a high-priority EXIT exception.
2370 int sig
= kernel_dequeue_signal(NULL
);
2373 if (sig
!= SIGUSR1
) {
2374 if (common
->state
< FSG_STATE_EXIT
)
2375 DBG(common
, "Main thread exiting on signal\n");
2376 raise_exception(common
, FSG_STATE_EXIT
);
2380 /* Cancel all the pending transfers */
2381 if (likely(common
->fsg
)) {
2382 for (i
= 0; i
< common
->fsg_num_buffers
; ++i
) {
2383 bh
= &common
->buffhds
[i
];
2385 usb_ep_dequeue(common
->fsg
->bulk_in
, bh
->inreq
);
2386 if (bh
->outreq_busy
)
2387 usb_ep_dequeue(common
->fsg
->bulk_out
,
2391 /* Wait until everything is idle */
2394 for (i
= 0; i
< common
->fsg_num_buffers
; ++i
) {
2395 bh
= &common
->buffhds
[i
];
2396 num_active
+= bh
->inreq_busy
+ bh
->outreq_busy
;
2398 if (num_active
== 0)
2400 if (sleep_thread(common
, true))
2404 /* Clear out the controller's fifos */
2405 if (common
->fsg
->bulk_in_enabled
)
2406 usb_ep_fifo_flush(common
->fsg
->bulk_in
);
2407 if (common
->fsg
->bulk_out_enabled
)
2408 usb_ep_fifo_flush(common
->fsg
->bulk_out
);
2412 * Reset the I/O buffer states and pointers, the SCSI
2413 * state, and the exception. Then invoke the handler.
2415 spin_lock_irq(&common
->lock
);
2417 for (i
= 0; i
< common
->fsg_num_buffers
; ++i
) {
2418 bh
= &common
->buffhds
[i
];
2419 bh
->state
= BUF_STATE_EMPTY
;
2421 common
->next_buffhd_to_fill
= &common
->buffhds
[0];
2422 common
->next_buffhd_to_drain
= &common
->buffhds
[0];
2423 exception_req_tag
= common
->exception_req_tag
;
2424 old_state
= common
->state
;
2426 if (old_state
== FSG_STATE_ABORT_BULK_OUT
)
2427 common
->state
= FSG_STATE_STATUS_PHASE
;
2429 for (i
= 0; i
< ARRAY_SIZE(common
->luns
); ++i
) {
2430 curlun
= common
->luns
[i
];
2433 curlun
->prevent_medium_removal
= 0;
2434 curlun
->sense_data
= SS_NO_SENSE
;
2435 curlun
->unit_attention_data
= SS_NO_SENSE
;
2436 curlun
->sense_data_info
= 0;
2437 curlun
->info_valid
= 0;
2439 common
->state
= FSG_STATE_IDLE
;
2441 spin_unlock_irq(&common
->lock
);
2443 /* Carry out any extra actions required for the exception */
2444 switch (old_state
) {
2445 case FSG_STATE_ABORT_BULK_OUT
:
2446 send_status(common
);
2447 spin_lock_irq(&common
->lock
);
2448 if (common
->state
== FSG_STATE_STATUS_PHASE
)
2449 common
->state
= FSG_STATE_IDLE
;
2450 spin_unlock_irq(&common
->lock
);
2453 case FSG_STATE_RESET
:
2455 * In case we were forced against our will to halt a
2456 * bulk endpoint, clear the halt now. (The SuperH UDC
2459 if (!fsg_is_set(common
))
2461 if (test_and_clear_bit(IGNORE_BULK_OUT
,
2462 &common
->fsg
->atomic_bitflags
))
2463 usb_ep_clear_halt(common
->fsg
->bulk_in
);
2465 if (common
->ep0_req_tag
== exception_req_tag
)
2466 ep0_queue(common
); /* Complete the status stage */
2469 * Technically this should go here, but it would only be
2470 * a waste of time. Ditto for the INTERFACE_CHANGE and
2471 * CONFIG_CHANGE cases.
2473 /* for (i = 0; i < common->ARRAY_SIZE(common->luns); ++i) */
2474 /* if (common->luns[i]) */
2475 /* common->luns[i]->unit_attention_data = */
2476 /* SS_RESET_OCCURRED; */
2479 case FSG_STATE_CONFIG_CHANGE
:
2480 do_set_interface(common
, common
->new_fsg
);
2481 if (common
->new_fsg
)
2482 usb_composite_setup_continue(common
->cdev
);
2485 case FSG_STATE_EXIT
:
2486 case FSG_STATE_TERMINATED
:
2487 do_set_interface(common
, NULL
); /* Free resources */
2488 spin_lock_irq(&common
->lock
);
2489 common
->state
= FSG_STATE_TERMINATED
; /* Stop the thread */
2490 spin_unlock_irq(&common
->lock
);
2493 case FSG_STATE_INTERFACE_CHANGE
:
2494 case FSG_STATE_DISCONNECT
:
2495 case FSG_STATE_COMMAND_PHASE
:
2496 case FSG_STATE_DATA_PHASE
:
2497 case FSG_STATE_STATUS_PHASE
:
2498 case FSG_STATE_IDLE
:
2504 /*-------------------------------------------------------------------------*/
2506 static int fsg_main_thread(void *common_
)
2508 struct fsg_common
*common
= common_
;
2511 * Allow the thread to be killed by a signal, but set the signal mask
2512 * to block everything but INT, TERM, KILL, and USR1.
2514 allow_signal(SIGINT
);
2515 allow_signal(SIGTERM
);
2516 allow_signal(SIGKILL
);
2517 allow_signal(SIGUSR1
);
2519 /* Allow the thread to be frozen */
2523 * Arrange for userspace references to be interpreted as kernel
2524 * pointers. That way we can pass a kernel pointer to a routine
2525 * that expects a __user pointer and it will work okay.
2530 while (common
->state
!= FSG_STATE_TERMINATED
) {
2531 if (exception_in_progress(common
) || signal_pending(current
)) {
2532 handle_exception(common
);
2536 if (!common
->running
) {
2537 sleep_thread(common
, true);
2541 if (get_next_command(common
))
2544 spin_lock_irq(&common
->lock
);
2545 if (!exception_in_progress(common
))
2546 common
->state
= FSG_STATE_DATA_PHASE
;
2547 spin_unlock_irq(&common
->lock
);
2549 if (do_scsi_command(common
) || finish_reply(common
))
2552 spin_lock_irq(&common
->lock
);
2553 if (!exception_in_progress(common
))
2554 common
->state
= FSG_STATE_STATUS_PHASE
;
2555 spin_unlock_irq(&common
->lock
);
2557 if (send_status(common
))
2560 spin_lock_irq(&common
->lock
);
2561 if (!exception_in_progress(common
))
2562 common
->state
= FSG_STATE_IDLE
;
2563 spin_unlock_irq(&common
->lock
);
2566 spin_lock_irq(&common
->lock
);
2567 common
->thread_task
= NULL
;
2568 spin_unlock_irq(&common
->lock
);
2570 if (!common
->ops
|| !common
->ops
->thread_exits
2571 || common
->ops
->thread_exits(common
) < 0) {
2574 down_write(&common
->filesem
);
2575 for (i
= 0; i
< ARRAY_SIZE(common
->luns
); --i
) {
2576 struct fsg_lun
*curlun
= common
->luns
[i
];
2577 if (!curlun
|| !fsg_lun_is_open(curlun
))
2580 fsg_lun_close(curlun
);
2581 curlun
->unit_attention_data
= SS_MEDIUM_NOT_PRESENT
;
2583 up_write(&common
->filesem
);
2586 /* Let fsg_unbind() know the thread has exited */
2587 complete_and_exit(&common
->thread_notifier
, 0);
2591 /*************************** DEVICE ATTRIBUTES ***************************/
2593 static ssize_t
ro_show(struct device
*dev
, struct device_attribute
*attr
, char *buf
)
2595 struct fsg_lun
*curlun
= fsg_lun_from_dev(dev
);
2597 return fsg_show_ro(curlun
, buf
);
2600 static ssize_t
nofua_show(struct device
*dev
, struct device_attribute
*attr
,
2603 struct fsg_lun
*curlun
= fsg_lun_from_dev(dev
);
2605 return fsg_show_nofua(curlun
, buf
);
2608 static ssize_t
file_show(struct device
*dev
, struct device_attribute
*attr
,
2611 struct fsg_lun
*curlun
= fsg_lun_from_dev(dev
);
2612 struct rw_semaphore
*filesem
= dev_get_drvdata(dev
);
2614 return fsg_show_file(curlun
, filesem
, buf
);
2617 static ssize_t
ro_store(struct device
*dev
, struct device_attribute
*attr
,
2618 const char *buf
, size_t count
)
2620 struct fsg_lun
*curlun
= fsg_lun_from_dev(dev
);
2621 struct rw_semaphore
*filesem
= dev_get_drvdata(dev
);
2623 return fsg_store_ro(curlun
, filesem
, buf
, count
);
2626 static ssize_t
nofua_store(struct device
*dev
, struct device_attribute
*attr
,
2627 const char *buf
, size_t count
)
2629 struct fsg_lun
*curlun
= fsg_lun_from_dev(dev
);
2631 return fsg_store_nofua(curlun
, buf
, count
);
2634 static ssize_t
file_store(struct device
*dev
, struct device_attribute
*attr
,
2635 const char *buf
, size_t count
)
2637 struct fsg_lun
*curlun
= fsg_lun_from_dev(dev
);
2638 struct rw_semaphore
*filesem
= dev_get_drvdata(dev
);
2640 return fsg_store_file(curlun
, filesem
, buf
, count
);
static DEVICE_ATTR_RW(nofua);
/* mode will be set in fsg_lun_attr_is_visible() */
static DEVICE_ATTR(ro, 0, ro_show, ro_store);
static DEVICE_ATTR(file, 0, file_show, file_store);
2648 /****************************** FSG COMMON ******************************/
static void fsg_common_release(struct kref *ref);

/* device release callback for a LUN device; LUN memory is freed elsewhere */
static void fsg_lun_release(struct device *dev)
{
	/* Nothing needs to be done */
}
2657 void fsg_common_get(struct fsg_common
*common
)
2659 kref_get(&common
->ref
);
2661 EXPORT_SYMBOL_GPL(fsg_common_get
);
2663 void fsg_common_put(struct fsg_common
*common
)
2665 kref_put(&common
->ref
, fsg_common_release
);
2667 EXPORT_SYMBOL_GPL(fsg_common_put
);
2669 static struct fsg_common
*fsg_common_setup(struct fsg_common
*common
)
2672 common
= kzalloc(sizeof(*common
), GFP_KERNEL
);
2674 return ERR_PTR(-ENOMEM
);
2675 common
->free_storage_on_release
= 1;
2677 common
->free_storage_on_release
= 0;
2679 init_rwsem(&common
->filesem
);
2680 spin_lock_init(&common
->lock
);
2681 kref_init(&common
->ref
);
2682 init_completion(&common
->thread_notifier
);
2683 init_waitqueue_head(&common
->fsg_wait
);
2684 common
->state
= FSG_STATE_TERMINATED
;
2685 memset(common
->luns
, 0, sizeof(common
->luns
));
2690 void fsg_common_set_sysfs(struct fsg_common
*common
, bool sysfs
)
2692 common
->sysfs
= sysfs
;
2694 EXPORT_SYMBOL_GPL(fsg_common_set_sysfs
);
2696 static void _fsg_common_free_buffers(struct fsg_buffhd
*buffhds
, unsigned n
)
2699 struct fsg_buffhd
*bh
= buffhds
;
2708 int fsg_common_set_num_buffers(struct fsg_common
*common
, unsigned int n
)
2710 struct fsg_buffhd
*bh
, *buffhds
;
2713 buffhds
= kcalloc(n
, sizeof(*buffhds
), GFP_KERNEL
);
2717 /* Data buffers cyclic list */
2720 goto buffhds_first_it
;
2725 bh
->buf
= kmalloc(FSG_BUFLEN
, GFP_KERNEL
);
2726 if (unlikely(!bh
->buf
))
2731 _fsg_common_free_buffers(common
->buffhds
, common
->fsg_num_buffers
);
2732 common
->fsg_num_buffers
= n
;
2733 common
->buffhds
= buffhds
;
2739 * "buf"s pointed to by heads after n - i are NULL
2740 * so releasing them won't hurt
2742 _fsg_common_free_buffers(buffhds
, n
);
2746 EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers
);
2748 void fsg_common_remove_lun(struct fsg_lun
*lun
)
2750 if (device_is_registered(&lun
->dev
))
2751 device_unregister(&lun
->dev
);
2755 EXPORT_SYMBOL_GPL(fsg_common_remove_lun
);
2757 static void _fsg_common_remove_luns(struct fsg_common
*common
, int n
)
2761 for (i
= 0; i
< n
; ++i
)
2762 if (common
->luns
[i
]) {
2763 fsg_common_remove_lun(common
->luns
[i
]);
2764 common
->luns
[i
] = NULL
;
2768 void fsg_common_remove_luns(struct fsg_common
*common
)
2770 _fsg_common_remove_luns(common
, ARRAY_SIZE(common
->luns
));
2772 EXPORT_SYMBOL_GPL(fsg_common_remove_luns
);
2774 void fsg_common_set_ops(struct fsg_common
*common
,
2775 const struct fsg_operations
*ops
)
2779 EXPORT_SYMBOL_GPL(fsg_common_set_ops
);
2781 void fsg_common_free_buffers(struct fsg_common
*common
)
2783 _fsg_common_free_buffers(common
->buffhds
, common
->fsg_num_buffers
);
2784 common
->buffhds
= NULL
;
2786 EXPORT_SYMBOL_GPL(fsg_common_free_buffers
);
2788 int fsg_common_set_cdev(struct fsg_common
*common
,
2789 struct usb_composite_dev
*cdev
, bool can_stall
)
2791 struct usb_string
*us
;
2793 common
->gadget
= cdev
->gadget
;
2794 common
->ep0
= cdev
->gadget
->ep0
;
2795 common
->ep0req
= cdev
->req
;
2796 common
->cdev
= cdev
;
2798 us
= usb_gstrings_attach(cdev
, fsg_strings_array
,
2799 ARRAY_SIZE(fsg_strings
));
2803 fsg_intf_desc
.iInterface
= us
[FSG_STRING_INTERFACE
].id
;
2806 * Some peripheral controllers are known not to be able to
2807 * halt bulk endpoints correctly. If one of them is present,
2810 common
->can_stall
= can_stall
&&
2811 gadget_is_stall_supported(common
->gadget
);
2815 EXPORT_SYMBOL_GPL(fsg_common_set_cdev
);
2817 static struct attribute
*fsg_lun_dev_attrs
[] = {
2819 &dev_attr_file
.attr
,
2820 &dev_attr_nofua
.attr
,
2824 static umode_t
fsg_lun_dev_is_visible(struct kobject
*kobj
,
2825 struct attribute
*attr
, int idx
)
2827 struct device
*dev
= kobj_to_dev(kobj
);
2828 struct fsg_lun
*lun
= fsg_lun_from_dev(dev
);
2830 if (attr
== &dev_attr_ro
.attr
)
2831 return lun
->cdrom
? S_IRUGO
: (S_IWUSR
| S_IRUGO
);
2832 if (attr
== &dev_attr_file
.attr
)
2833 return lun
->removable
? (S_IWUSR
| S_IRUGO
) : S_IRUGO
;
2837 static const struct attribute_group fsg_lun_dev_group
= {
2838 .attrs
= fsg_lun_dev_attrs
,
2839 .is_visible
= fsg_lun_dev_is_visible
,
2842 static const struct attribute_group
*fsg_lun_dev_groups
[] = {
2847 int fsg_common_create_lun(struct fsg_common
*common
, struct fsg_lun_config
*cfg
,
2848 unsigned int id
, const char *name
,
2849 const char **name_pfx
)
2851 struct fsg_lun
*lun
;
2855 if (id
>= ARRAY_SIZE(common
->luns
))
2858 if (common
->luns
[id
])
2861 if (!cfg
->filename
&& !cfg
->removable
) {
2862 pr_err("no file given for LUN%d\n", id
);
2866 lun
= kzalloc(sizeof(*lun
), GFP_KERNEL
);
2870 lun
->name_pfx
= name_pfx
;
2872 lun
->cdrom
= !!cfg
->cdrom
;
2873 lun
->ro
= cfg
->cdrom
|| cfg
->ro
;
2874 lun
->initially_ro
= lun
->ro
;
2875 lun
->removable
= !!cfg
->removable
;
2877 if (!common
->sysfs
) {
2878 /* we DON'T own the name!*/
2881 lun
->dev
.release
= fsg_lun_release
;
2882 lun
->dev
.parent
= &common
->gadget
->dev
;
2883 lun
->dev
.groups
= fsg_lun_dev_groups
;
2884 dev_set_drvdata(&lun
->dev
, &common
->filesem
);
2885 dev_set_name(&lun
->dev
, "%s", name
);
2886 lun
->name
= dev_name(&lun
->dev
);
2888 rc
= device_register(&lun
->dev
);
2890 pr_info("failed to register LUN%d: %d\n", id
, rc
);
2891 put_device(&lun
->dev
);
2896 common
->luns
[id
] = lun
;
2898 if (cfg
->filename
) {
2899 rc
= fsg_lun_open(lun
, cfg
->filename
);
2904 pathbuf
= kmalloc(PATH_MAX
, GFP_KERNEL
);
2906 if (fsg_lun_is_open(lun
)) {
2909 p
= file_path(lun
->filp
, pathbuf
, PATH_MAX
);
2914 pr_info("LUN: %s%s%sfile: %s\n",
2915 lun
->removable
? "removable " : "",
2916 lun
->ro
? "read only " : "",
2917 lun
->cdrom
? "CD-ROM " : "",
2924 if (device_is_registered(&lun
->dev
))
2925 device_unregister(&lun
->dev
);
2927 common
->luns
[id
] = NULL
;
2932 EXPORT_SYMBOL_GPL(fsg_common_create_lun
);
2934 int fsg_common_create_luns(struct fsg_common
*common
, struct fsg_config
*cfg
)
2936 char buf
[8]; /* enough for 100000000 different numbers, decimal */
2939 fsg_common_remove_luns(common
);
2941 for (i
= 0; i
< cfg
->nluns
; ++i
) {
2942 snprintf(buf
, sizeof(buf
), "lun%d", i
);
2943 rc
= fsg_common_create_lun(common
, &cfg
->luns
[i
], i
, buf
, NULL
);
2948 pr_info("Number of LUNs=%d\n", cfg
->nluns
);
2953 _fsg_common_remove_luns(common
, i
);
2956 EXPORT_SYMBOL_GPL(fsg_common_create_luns
);
2958 void fsg_common_set_inquiry_string(struct fsg_common
*common
, const char *vn
,
2963 /* Prepare inquiryString */
2964 i
= get_default_bcdDevice();
2965 snprintf(common
->inquiry_string
, sizeof(common
->inquiry_string
),
2966 "%-8s%-16s%04x", vn
?: "Linux",
2967 /* Assume product name dependent on the first LUN */
2968 pn
?: ((*common
->luns
)->cdrom
2970 : "File-Stor Gadget"),
2973 EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string
);
2975 static void fsg_common_release(struct kref
*ref
)
2977 struct fsg_common
*common
= container_of(ref
, struct fsg_common
, ref
);
2980 /* If the thread isn't already dead, tell it to exit now */
2981 if (common
->state
!= FSG_STATE_TERMINATED
) {
2982 raise_exception(common
, FSG_STATE_EXIT
);
2983 wait_for_completion(&common
->thread_notifier
);
2984 common
->thread_task
= NULL
;
2987 for (i
= 0; i
< ARRAY_SIZE(common
->luns
); ++i
) {
2988 struct fsg_lun
*lun
= common
->luns
[i
];
2992 if (device_is_registered(&lun
->dev
))
2993 device_unregister(&lun
->dev
);
2997 _fsg_common_free_buffers(common
->buffhds
, common
->fsg_num_buffers
);
2998 if (common
->free_storage_on_release
)
3003 /*-------------------------------------------------------------------------*/
3005 static int fsg_bind(struct usb_configuration
*c
, struct usb_function
*f
)
3007 struct fsg_dev
*fsg
= fsg_from_func(f
);
3008 struct fsg_common
*common
= fsg
->common
;
3009 struct usb_gadget
*gadget
= c
->cdev
->gadget
;
3014 struct fsg_opts
*opts
;
3016 /* Don't allow to bind if we don't have at least one LUN */
3017 ret
= _fsg_common_get_max_lun(common
);
3019 pr_err("There should be at least one LUN.\n");
3023 opts
= fsg_opts_from_func_inst(f
->fi
);
3024 if (!opts
->no_configfs
) {
3025 ret
= fsg_common_set_cdev(fsg
->common
, c
->cdev
,
3026 fsg
->common
->can_stall
);
3029 fsg_common_set_inquiry_string(fsg
->common
, NULL
, NULL
);
3032 if (!common
->thread_task
) {
3033 common
->state
= FSG_STATE_IDLE
;
3034 common
->thread_task
=
3035 kthread_create(fsg_main_thread
, common
, "file-storage");
3036 if (IS_ERR(common
->thread_task
)) {
3037 int ret
= PTR_ERR(common
->thread_task
);
3038 common
->thread_task
= NULL
;
3039 common
->state
= FSG_STATE_TERMINATED
;
3042 DBG(common
, "I/O thread pid: %d\n",
3043 task_pid_nr(common
->thread_task
));
3044 wake_up_process(common
->thread_task
);
3047 fsg
->gadget
= gadget
;
3050 i
= usb_interface_id(c
, f
);
3053 fsg_intf_desc
.bInterfaceNumber
= i
;
3054 fsg
->interface_number
= i
;
3056 /* Find all the endpoints we will use */
3057 ep
= usb_ep_autoconfig(gadget
, &fsg_fs_bulk_in_desc
);
3062 ep
= usb_ep_autoconfig(gadget
, &fsg_fs_bulk_out_desc
);
3067 /* Assume endpoint addresses are the same for both speeds */
3068 fsg_hs_bulk_in_desc
.bEndpointAddress
=
3069 fsg_fs_bulk_in_desc
.bEndpointAddress
;
3070 fsg_hs_bulk_out_desc
.bEndpointAddress
=
3071 fsg_fs_bulk_out_desc
.bEndpointAddress
;
3073 /* Calculate bMaxBurst, we know packet size is 1024 */
3074 max_burst
= min_t(unsigned, FSG_BUFLEN
/ 1024, 15);
3076 fsg_ss_bulk_in_desc
.bEndpointAddress
=
3077 fsg_fs_bulk_in_desc
.bEndpointAddress
;
3078 fsg_ss_bulk_in_comp_desc
.bMaxBurst
= max_burst
;
3080 fsg_ss_bulk_out_desc
.bEndpointAddress
=
3081 fsg_fs_bulk_out_desc
.bEndpointAddress
;
3082 fsg_ss_bulk_out_comp_desc
.bMaxBurst
= max_burst
;
3084 ret
= usb_assign_descriptors(f
, fsg_fs_function
, fsg_hs_function
,
3085 fsg_ss_function
, fsg_ss_function
);
3092 ERROR(fsg
, "unable to autoconfigure all endpoints\n");
3095 /* terminate the thread */
3096 if (fsg
->common
->state
!= FSG_STATE_TERMINATED
) {
3097 raise_exception(fsg
->common
, FSG_STATE_EXIT
);
3098 wait_for_completion(&fsg
->common
->thread_notifier
);
3103 /****************************** ALLOCATE FUNCTION *************************/
3105 static void fsg_unbind(struct usb_configuration
*c
, struct usb_function
*f
)
3107 struct fsg_dev
*fsg
= fsg_from_func(f
);
3108 struct fsg_common
*common
= fsg
->common
;
3110 DBG(fsg
, "unbind\n");
3111 if (fsg
->common
->fsg
== fsg
) {
3112 fsg
->common
->new_fsg
= NULL
;
3113 raise_exception(fsg
->common
, FSG_STATE_CONFIG_CHANGE
);
3114 /* FIXME: make interruptible or killable somehow? */
3115 wait_event(common
->fsg_wait
, common
->fsg
!= fsg
);
3118 usb_free_all_descriptors(&fsg
->function
);
3121 static inline struct fsg_lun_opts
*to_fsg_lun_opts(struct config_item
*item
)
3123 return container_of(to_config_group(item
), struct fsg_lun_opts
, group
);
3126 static inline struct fsg_opts
*to_fsg_opts(struct config_item
*item
)
3128 return container_of(to_config_group(item
), struct fsg_opts
,
3132 static void fsg_lun_attr_release(struct config_item
*item
)
3134 struct fsg_lun_opts
*lun_opts
;
3136 lun_opts
= to_fsg_lun_opts(item
);
3140 static struct configfs_item_operations fsg_lun_item_ops
= {
3141 .release
= fsg_lun_attr_release
,
3144 static ssize_t
fsg_lun_opts_file_show(struct config_item
*item
, char *page
)
3146 struct fsg_lun_opts
*opts
= to_fsg_lun_opts(item
);
3147 struct fsg_opts
*fsg_opts
= to_fsg_opts(opts
->group
.cg_item
.ci_parent
);
3149 return fsg_show_file(opts
->lun
, &fsg_opts
->common
->filesem
, page
);
3152 static ssize_t
fsg_lun_opts_file_store(struct config_item
*item
,
3153 const char *page
, size_t len
)
3155 struct fsg_lun_opts
*opts
= to_fsg_lun_opts(item
);
3156 struct fsg_opts
*fsg_opts
= to_fsg_opts(opts
->group
.cg_item
.ci_parent
);
3158 return fsg_store_file(opts
->lun
, &fsg_opts
->common
->filesem
, page
, len
);
3161 CONFIGFS_ATTR(fsg_lun_opts_
, file
);
3163 static ssize_t
fsg_lun_opts_ro_show(struct config_item
*item
, char *page
)
3165 return fsg_show_ro(to_fsg_lun_opts(item
)->lun
, page
);
3168 static ssize_t
fsg_lun_opts_ro_store(struct config_item
*item
,
3169 const char *page
, size_t len
)
3171 struct fsg_lun_opts
*opts
= to_fsg_lun_opts(item
);
3172 struct fsg_opts
*fsg_opts
= to_fsg_opts(opts
->group
.cg_item
.ci_parent
);
3174 return fsg_store_ro(opts
->lun
, &fsg_opts
->common
->filesem
, page
, len
);
3177 CONFIGFS_ATTR(fsg_lun_opts_
, ro
);
3179 static ssize_t
fsg_lun_opts_removable_show(struct config_item
*item
,
3182 return fsg_show_removable(to_fsg_lun_opts(item
)->lun
, page
);
3185 static ssize_t
fsg_lun_opts_removable_store(struct config_item
*item
,
3186 const char *page
, size_t len
)
3188 return fsg_store_removable(to_fsg_lun_opts(item
)->lun
, page
, len
);
3191 CONFIGFS_ATTR(fsg_lun_opts_
, removable
);
3193 static ssize_t
fsg_lun_opts_cdrom_show(struct config_item
*item
, char *page
)
3195 return fsg_show_cdrom(to_fsg_lun_opts(item
)->lun
, page
);
3198 static ssize_t
fsg_lun_opts_cdrom_store(struct config_item
*item
,
3199 const char *page
, size_t len
)
3201 struct fsg_lun_opts
*opts
= to_fsg_lun_opts(item
);
3202 struct fsg_opts
*fsg_opts
= to_fsg_opts(opts
->group
.cg_item
.ci_parent
);
3204 return fsg_store_cdrom(opts
->lun
, &fsg_opts
->common
->filesem
, page
,
3208 CONFIGFS_ATTR(fsg_lun_opts_
, cdrom
);
3210 static ssize_t
fsg_lun_opts_nofua_show(struct config_item
*item
, char *page
)
3212 return fsg_show_nofua(to_fsg_lun_opts(item
)->lun
, page
);
3215 static ssize_t
fsg_lun_opts_nofua_store(struct config_item
*item
,
3216 const char *page
, size_t len
)
3218 return fsg_store_nofua(to_fsg_lun_opts(item
)->lun
, page
, len
);
3221 CONFIGFS_ATTR(fsg_lun_opts_
, nofua
);
3223 static ssize_t
fsg_lun_opts_inquiry_string_show(struct config_item
*item
,
3226 return fsg_show_inquiry_string(to_fsg_lun_opts(item
)->lun
, page
);
3229 static ssize_t
fsg_lun_opts_inquiry_string_store(struct config_item
*item
,
3230 const char *page
, size_t len
)
3232 return fsg_store_inquiry_string(to_fsg_lun_opts(item
)->lun
, page
, len
);
3235 CONFIGFS_ATTR(fsg_lun_opts_
, inquiry_string
);
3237 static struct configfs_attribute
*fsg_lun_attrs
[] = {
3238 &fsg_lun_opts_attr_file
,
3239 &fsg_lun_opts_attr_ro
,
3240 &fsg_lun_opts_attr_removable
,
3241 &fsg_lun_opts_attr_cdrom
,
3242 &fsg_lun_opts_attr_nofua
,
3243 &fsg_lun_opts_attr_inquiry_string
,
3247 static struct config_item_type fsg_lun_type
= {
3248 .ct_item_ops
= &fsg_lun_item_ops
,
3249 .ct_attrs
= fsg_lun_attrs
,
3250 .ct_owner
= THIS_MODULE
,
3253 static struct config_group
*fsg_lun_make(struct config_group
*group
,
3256 struct fsg_lun_opts
*opts
;
3257 struct fsg_opts
*fsg_opts
;
3258 struct fsg_lun_config config
;
3263 num_str
= strchr(name
, '.');
3265 pr_err("Unable to locate . in LUN.NUMBER\n");
3266 return ERR_PTR(-EINVAL
);
3270 ret
= kstrtou8(num_str
, 0, &num
);
3272 return ERR_PTR(ret
);
3274 fsg_opts
= to_fsg_opts(&group
->cg_item
);
3275 if (num
>= FSG_MAX_LUNS
)
3276 return ERR_PTR(-ERANGE
);
3278 mutex_lock(&fsg_opts
->lock
);
3279 if (fsg_opts
->refcnt
|| fsg_opts
->common
->luns
[num
]) {
3284 opts
= kzalloc(sizeof(*opts
), GFP_KERNEL
);
3290 memset(&config
, 0, sizeof(config
));
3291 config
.removable
= true;
3293 ret
= fsg_common_create_lun(fsg_opts
->common
, &config
, num
, name
,
3294 (const char **)&group
->cg_item
.ci_name
);
3299 opts
->lun
= fsg_opts
->common
->luns
[num
];
3301 mutex_unlock(&fsg_opts
->lock
);
3303 config_group_init_type_name(&opts
->group
, name
, &fsg_lun_type
);
3305 return &opts
->group
;
3307 mutex_unlock(&fsg_opts
->lock
);
3308 return ERR_PTR(ret
);
3311 static void fsg_lun_drop(struct config_group
*group
, struct config_item
*item
)
3313 struct fsg_lun_opts
*lun_opts
;
3314 struct fsg_opts
*fsg_opts
;
3316 lun_opts
= to_fsg_lun_opts(item
);
3317 fsg_opts
= to_fsg_opts(&group
->cg_item
);
3319 mutex_lock(&fsg_opts
->lock
);
3320 if (fsg_opts
->refcnt
) {
3321 struct config_item
*gadget
;
3323 gadget
= group
->cg_item
.ci_parent
->ci_parent
;
3324 unregister_gadget_item(gadget
);
3327 fsg_common_remove_lun(lun_opts
->lun
);
3328 fsg_opts
->common
->luns
[lun_opts
->lun_id
] = NULL
;
3329 lun_opts
->lun_id
= 0;
3330 mutex_unlock(&fsg_opts
->lock
);
3332 config_item_put(item
);
3335 static void fsg_attr_release(struct config_item
*item
)
3337 struct fsg_opts
*opts
= to_fsg_opts(item
);
3339 usb_put_function_instance(&opts
->func_inst
);
3342 static struct configfs_item_operations fsg_item_ops
= {
3343 .release
= fsg_attr_release
,
3346 static ssize_t
fsg_opts_stall_show(struct config_item
*item
, char *page
)
3348 struct fsg_opts
*opts
= to_fsg_opts(item
);
3351 mutex_lock(&opts
->lock
);
3352 result
= sprintf(page
, "%d", opts
->common
->can_stall
);
3353 mutex_unlock(&opts
->lock
);
3358 static ssize_t
fsg_opts_stall_store(struct config_item
*item
, const char *page
,
3361 struct fsg_opts
*opts
= to_fsg_opts(item
);
3365 mutex_lock(&opts
->lock
);
3368 mutex_unlock(&opts
->lock
);
3372 ret
= strtobool(page
, &stall
);
3374 opts
->common
->can_stall
= stall
;
3378 mutex_unlock(&opts
->lock
);
3383 CONFIGFS_ATTR(fsg_opts_
, stall
);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
/* configfs "num_buffers" attribute (debug builds only). */
static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int result;

	mutex_lock(&opts->lock);
	result = sprintf(page, "%d", opts->common->fsg_num_buffers);
	mutex_unlock(&opts->lock);

	return result;
}

static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
					  const char *page, size_t len)
{
	struct fsg_opts *opts = to_fsg_opts(item);
	int ret;
	u8 num;

	mutex_lock(&opts->lock);
	if (opts->refcnt) {
		ret = -EBUSY;
		goto end;
	}
	ret = kstrtou8(page, 0, &num);
	if (ret)
		goto end;

	fsg_common_set_num_buffers(opts->common, num);
	ret = len;

end:
	mutex_unlock(&opts->lock);
	return ret;
}

CONFIGFS_ATTR(fsg_opts_, num_buffers);
#endif
3425 static struct configfs_attribute
*fsg_attrs
[] = {
3426 &fsg_opts_attr_stall
,
3427 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
3428 &fsg_opts_attr_num_buffers
,
3433 static struct configfs_group_operations fsg_group_ops
= {
3434 .make_group
= fsg_lun_make
,
3435 .drop_item
= fsg_lun_drop
,
3438 static struct config_item_type fsg_func_type
= {
3439 .ct_item_ops
= &fsg_item_ops
,
3440 .ct_group_ops
= &fsg_group_ops
,
3441 .ct_attrs
= fsg_attrs
,
3442 .ct_owner
= THIS_MODULE
,
3445 static void fsg_free_inst(struct usb_function_instance
*fi
)
3447 struct fsg_opts
*opts
;
3449 opts
= fsg_opts_from_func_inst(fi
);
3450 fsg_common_put(opts
->common
);
3454 static struct usb_function_instance
*fsg_alloc_inst(void)
3456 struct fsg_opts
*opts
;
3457 struct fsg_lun_config config
;
3460 opts
= kzalloc(sizeof(*opts
), GFP_KERNEL
);
3462 return ERR_PTR(-ENOMEM
);
3463 mutex_init(&opts
->lock
);
3464 opts
->func_inst
.free_func_inst
= fsg_free_inst
;
3465 opts
->common
= fsg_common_setup(opts
->common
);
3466 if (IS_ERR(opts
->common
)) {
3467 rc
= PTR_ERR(opts
->common
);
3471 rc
= fsg_common_set_num_buffers(opts
->common
,
3472 CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
);
3476 pr_info(FSG_DRIVER_DESC
", version: " FSG_DRIVER_VERSION
"\n");
3478 memset(&config
, 0, sizeof(config
));
3479 config
.removable
= true;
3480 rc
= fsg_common_create_lun(opts
->common
, &config
, 0, "lun.0",
3481 (const char **)&opts
->func_inst
.group
.cg_item
.ci_name
);
3483 goto release_buffers
;
3485 opts
->lun0
.lun
= opts
->common
->luns
[0];
3486 opts
->lun0
.lun_id
= 0;
3488 config_group_init_type_name(&opts
->func_inst
.group
, "", &fsg_func_type
);
3490 config_group_init_type_name(&opts
->lun0
.group
, "lun.0", &fsg_lun_type
);
3491 configfs_add_default_group(&opts
->lun0
.group
, &opts
->func_inst
.group
);
3493 return &opts
->func_inst
;
3496 fsg_common_free_buffers(opts
->common
);
3502 static void fsg_free(struct usb_function
*f
)
3504 struct fsg_dev
*fsg
;
3505 struct fsg_opts
*opts
;
3507 fsg
= container_of(f
, struct fsg_dev
, function
);
3508 opts
= container_of(f
->fi
, struct fsg_opts
, func_inst
);
3510 mutex_lock(&opts
->lock
);
3512 mutex_unlock(&opts
->lock
);
3517 static struct usb_function
*fsg_alloc(struct usb_function_instance
*fi
)
3519 struct fsg_opts
*opts
= fsg_opts_from_func_inst(fi
);
3520 struct fsg_common
*common
= opts
->common
;
3521 struct fsg_dev
*fsg
;
3523 fsg
= kzalloc(sizeof(*fsg
), GFP_KERNEL
);
3525 return ERR_PTR(-ENOMEM
);
3527 mutex_lock(&opts
->lock
);
3529 mutex_unlock(&opts
->lock
);
3531 fsg
->function
.name
= FSG_DRIVER_DESC
;
3532 fsg
->function
.bind
= fsg_bind
;
3533 fsg
->function
.unbind
= fsg_unbind
;
3534 fsg
->function
.setup
= fsg_setup
;
3535 fsg
->function
.set_alt
= fsg_set_alt
;
3536 fsg
->function
.disable
= fsg_disable
;
3537 fsg
->function
.free_func
= fsg_free
;
3539 fsg
->common
= common
;
3541 return &fsg
->function
;
3544 DECLARE_USB_FUNCTION_INIT(mass_storage
, fsg_alloc_inst
, fsg_alloc
);
3545 MODULE_LICENSE("GPL");
3546 MODULE_AUTHOR("Michal Nazarewicz");
3548 /************************* Module parameters *************************/
3551 void fsg_config_from_params(struct fsg_config
*cfg
,
3552 const struct fsg_module_parameters
*params
,
3553 unsigned int fsg_num_buffers
)
3555 struct fsg_lun_config
*lun
;
3558 /* Configure LUNs */
3560 min(params
->luns
?: (params
->file_count
?: 1u),
3561 (unsigned)FSG_MAX_LUNS
);
3562 for (i
= 0, lun
= cfg
->luns
; i
< cfg
->nluns
; ++i
, ++lun
) {
3563 lun
->ro
= !!params
->ro
[i
];
3564 lun
->cdrom
= !!params
->cdrom
[i
];
3565 lun
->removable
= !!params
->removable
[i
];
3567 params
->file_count
> i
&& params
->file
[i
][0]
3572 /* Let MSF use defaults */
3573 cfg
->vendor_name
= NULL
;
3574 cfg
->product_name
= NULL
;
3577 cfg
->private_data
= NULL
;
3580 cfg
->can_stall
= params
->stall
;
3581 cfg
->fsg_num_buffers
= fsg_num_buffers
;
3583 EXPORT_SYMBOL_GPL(fsg_config_from_params
);