2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
4 * Copyright (C) 2003-2008 Alan Stern
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * The File-backed Storage Gadget acts as a USB Mass Storage device,
41 * appearing to the host as a disk drive or as a CD-ROM drive. In addition
42 * to providing an example of a genuinely useful gadget driver for a USB
43 * device, it also illustrates a technique of double-buffering for increased
44 * throughput. Last but not least, it gives an easy way to probe the
45 * behavior of the Mass Storage drivers in a USB host.
47 * Backing storage is provided by a regular file or a block device, specified
48 * by the "file" module parameter. Access can be limited to read-only by
49 * setting the optional "ro" module parameter. (For CD-ROM emulation,
50 * access is always read-only.) The gadget will indicate that it has
51 * removable media if the optional "removable" module parameter is set.
53 * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
54 * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
55 * by the optional "transport" module parameter. It also supports the
56 * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
57 * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
58 * the optional "protocol" module parameter. In addition, the default
59 * Vendor ID, Product ID, release number and serial number can be overridden.
61 * There is support for multiple logical units (LUNs), each of which has
62 * its own backing file. The number of LUNs can be set using the optional
63 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
64 * files are specified using comma-separated lists for "file" and "ro".
65 * The default number of LUNs is taken from the number of "file" elements;
66 * it is 1 if "file" is not given. If "removable" is not set then a backing
67 * file must be specified for each LUN. If it is set, then an unspecified
68 * or empty backing filename means the LUN's medium is not loaded. Ideally
69 * each LUN would be settable independently as a disk drive or a CD-ROM
70 * drive, but currently all LUNs have to be the same type. The CD-ROM
71 * emulation includes a single data track and no audio tracks; hence there
72 * need be only one backing file per LUN.
74 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
75 * needed (an interrupt-out endpoint is also needed for CBI). The memory
76 * requirement amounts to two 16K buffers, size configurable by a parameter.
77 * Support is included for both full-speed and high-speed operation.
79 * Note that the driver is slightly non-portable in that it assumes a
80 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
81 * interrupt-in endpoints. With most device controllers this isn't an
82 * issue, but there may be some with hardware restrictions that prevent
83 * a buffer from being used by more than one endpoint.
87 * file=filename[,filename...]
88 * Required if "removable" is not set, names of
89 * the files or block devices used for
91 * serial=HHHH... Required serial number (string of hex chars)
92 * ro=b[,b...] Default false, booleans for read-only access
93 * removable Default false, boolean for removable media
94 * luns=N Default N = number of filenames, number of
96 * nofua=b[,b...] Default false, booleans for ignore FUA flag
97 * in SCSI WRITE(10,12) commands
98 * stall Default determined according to the type of
99 * USB device controller (usually true),
100 * boolean to permit the driver to halt
102 * cdrom Default false, boolean for whether to emulate
104 * transport=XXX Default BBB, transport name (CB, CBI, or BBB)
105 * protocol=YYY Default SCSI, protocol name (RBC, 8020 or
106 * ATAPI, QIC, UFI, 8070, or SCSI;
108 * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID
109 * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID
110 * release=0xRRRR Override the USB release number (bcdDevice)
111 * buflen=N Default N=16384, buffer size used (will be
112 * rounded down to a multiple of
115 * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro",
116 * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
117 * default values are used for everything else.
119 * The pathnames of the backing files and the ro settings are available in
120 * the attribute files "file", "nofua", and "ro" in the lun<n> subdirectory of
121 * the gadget's sysfs directory. If the "removable" option is set, writing to
122 * these files will simulate ejecting/loading the medium (writing an empty
123 * line means eject) and adjusting a write-enable tab. Changes to the ro
124 * setting are not allowed when the medium is loaded or if CD-ROM emulation
127 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
128 * The driver's SCSI command interface was based on the "Information
129 * technology - Small Computer System Interface - 2" document from
130 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
131 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
132 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
133 * "Universal Serial Bus Mass Storage Class UFI Command Specification"
134 * document, Revision 1.0, December 14, 1998, available at
135 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
142 * The FSG driver is fairly straightforward. There is a main kernel
143 * thread that handles most of the work. Interrupt routines field
144 * callbacks from the controller driver: bulk- and interrupt-request
145 * completion notifications, endpoint-0 events, and disconnect events.
146 * Completion events are passed to the main thread by wakeup calls. Many
147 * ep0 requests are handled at interrupt time, but SetInterface,
148 * SetConfiguration, and device reset requests are forwarded to the
149 * thread in the form of "exceptions" using SIGUSR1 signals (since they
150 * should interrupt any ongoing file I/O operations).
152 * The thread's main routine implements the standard command/data/status
153 * parts of a SCSI interaction. It and its subroutines are full of tests
154 * for pending signals/exceptions -- all this polling is necessary since
155 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
156 * indication that the driver really wants to be running in userspace.)
157 * An important point is that so long as the thread is alive it keeps an
158 * open reference to the backing file. This will prevent unmounting
159 * the backing file's underlying filesystem and could cause problems
160 * during system shutdown, for example. To prevent such problems, the
161 * thread catches INT, TERM, and KILL signals and converts them into
164 * In normal operation the main thread is started during the gadget's
165 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
166 * exit when it receives a signal, and there's no point leaving the
167 * gadget running when the thread is dead. So just before the thread
168 * exits, it deregisters the gadget driver. This makes things a little
169 * tricky: The driver is deregistered at two places, and the exiting
170 * thread can indirectly call fsg_unbind() which in turn can tell the
171 * thread to exit. The first problem is resolved through the use of the
172 * REGISTERED atomic bitflag; the driver will only be deregistered once.
173 * The second problem is resolved by having fsg_unbind() check
174 * fsg->state; it won't try to stop the thread if the state is already
175 * FSG_STATE_TERMINATED.
177 * To provide maximum throughput, the driver uses a circular pipeline of
178 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
179 * arbitrarily long; in practice the benefits don't justify having more
180 * than 2 stages (i.e., double buffering). But it helps to think of the
181 * pipeline as being a long one. Each buffer head contains a bulk-in and
182 * a bulk-out request pointer (since the buffer can be used for both
183 * output and input -- directions always are given from the host's
184 * point of view) as well as a pointer to the buffer and various state
187 * Use of the pipeline follows a simple protocol. There is a variable
188 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
189 * At any time that buffer head may still be in use from an earlier
190 * request, so each buffer head has a state variable indicating whether
191 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
192 * buffer head to be EMPTY, filling the buffer either by file I/O or by
193 * USB I/O (during which the buffer head is BUSY), and marking the buffer
194 * head FULL when the I/O is complete. Then the buffer will be emptied
195 * (again possibly by USB I/O, during which it is marked BUSY) and
196 * finally marked EMPTY again (possibly by a completion routine).
198 * A module parameter tells the driver to avoid stalling the bulk
199 * endpoints wherever the transport specification allows. This is
200 * necessary for some UDCs like the SuperH, which cannot reliably clear a
201 * halt on a bulk endpoint. However, under certain circumstances the
202 * Bulk-only specification requires a stall. In such cases the driver
203 * will halt the endpoint and set a flag indicating that it should clear
204 * the halt in software during the next device reset. Hopefully this
205 * will permit everything to work correctly. Furthermore, although the
206 * specification allows the bulk-out endpoint to halt when the host sends
207 * too much data, implementing this would cause an unavoidable race.
208 * The driver will always use the "no-stall" approach for OUT transfers.
210 * One subtle point concerns sending status-stage responses for ep0
211 * requests. Some of these requests, such as device reset, can involve
212 * interrupting an ongoing file I/O operation, which might take an
213 * arbitrarily long time. During that delay the host might give up on
214 * the original ep0 request and issue a new one. When that happens the
215 * driver should not notify the host about completion of the original
216 * request, as the host will no longer be waiting for it. So the driver
217 * assigns to each ep0 request a unique tag, and it keeps track of the
218 * tag value of the request associated with a long-running exception
219 * (device-reset, interface-change, or configuration-change). When the
220 * exception handler is finished, the status-stage response is submitted
221 * only if the current ep0 request tag is equal to the exception request
222 * tag. Thus only the most recently received ep0 request will get a
223 * status-stage response.
225 * Warning: This driver source file is too long. It ought to be split up
226 * into a header file plus about 3 separate .c files, to handle the details
227 * of the Gadget, USB Mass Storage, and SCSI protocols.
231 /* #define VERBOSE_DEBUG */
232 /* #define DUMP_MSGS */
235 #include <linux/blkdev.h>
236 #include <linux/completion.h>
237 #include <linux/dcache.h>
238 #include <linux/delay.h>
239 #include <linux/device.h>
240 #include <linux/fcntl.h>
241 #include <linux/file.h>
242 #include <linux/fs.h>
243 #include <linux/kref.h>
244 #include <linux/kthread.h>
245 #include <linux/limits.h>
246 #include <linux/module.h>
247 #include <linux/rwsem.h>
248 #include <linux/slab.h>
249 #include <linux/spinlock.h>
250 #include <linux/string.h>
251 #include <linux/freezer.h>
252 #include <linux/utsname.h>
254 #include <linux/usb/composite.h>
255 #include <linux/usb/ch9.h>
256 #include <linux/usb/gadget.h>
258 #include "gadget_chips.h"
260 #define DRIVER_DESC "File-backed Storage Gadget"
261 #define DRIVER_NAME "g_file_storage"
262 #define DRIVER_VERSION "1 September 2010"
264 static char fsg_string_manufacturer
[64];
265 static const char fsg_string_product
[] = DRIVER_DESC
;
266 static const char fsg_string_config
[] = "Self-powered";
267 static const char fsg_string_interface
[] = "Mass Storage";
270 #include "storage_common.c"
273 MODULE_DESCRIPTION(DRIVER_DESC
);
274 MODULE_AUTHOR("Alan Stern");
275 MODULE_LICENSE("Dual BSD/GPL");
278 * This driver assumes self-powered hardware and has no way for users to
279 * trigger remote wakeup. It uses autoconfiguration to select endpoints
280 * and endpoint addresses.
284 /*-------------------------------------------------------------------------*/
287 /* Encapsulate the module parameter settings */
290 char *file
[FSG_MAX_LUNS
];
292 bool ro
[FSG_MAX_LUNS
];
293 bool nofua
[FSG_MAX_LUNS
];
294 unsigned int num_filenames
;
295 unsigned int num_ros
;
296 unsigned int num_nofuas
;
303 char *transport_parm
;
305 unsigned short vendor
;
306 unsigned short product
;
307 unsigned short release
;
311 char *transport_name
;
315 } mod_data
= { // Default values
316 .transport_parm
= "BBB",
317 .protocol_parm
= "SCSI",
321 .vendor
= FSG_VENDOR_ID
,
322 .product
= FSG_PRODUCT_ID
,
323 .release
= 0xffff, // Use controller chip type
328 module_param_array_named(file
, mod_data
.file
, charp
, &mod_data
.num_filenames
,
330 MODULE_PARM_DESC(file
, "names of backing files or devices");
332 module_param_named(serial
, mod_data
.serial
, charp
, S_IRUGO
);
333 MODULE_PARM_DESC(serial
, "USB serial number");
335 module_param_array_named(ro
, mod_data
.ro
, bool, &mod_data
.num_ros
, S_IRUGO
);
336 MODULE_PARM_DESC(ro
, "true to force read-only");
338 module_param_array_named(nofua
, mod_data
.nofua
, bool, &mod_data
.num_nofuas
,
340 MODULE_PARM_DESC(nofua
, "true to ignore SCSI WRITE(10,12) FUA bit");
342 module_param_named(luns
, mod_data
.nluns
, uint
, S_IRUGO
);
343 MODULE_PARM_DESC(luns
, "number of LUNs");
345 module_param_named(removable
, mod_data
.removable
, bool, S_IRUGO
);
346 MODULE_PARM_DESC(removable
, "true to simulate removable media");
348 module_param_named(stall
, mod_data
.can_stall
, bool, S_IRUGO
);
349 MODULE_PARM_DESC(stall
, "false to prevent bulk stalls");
351 module_param_named(cdrom
, mod_data
.cdrom
, bool, S_IRUGO
);
352 MODULE_PARM_DESC(cdrom
, "true to emulate cdrom instead of disk");
/* In the non-TEST version, only the module parameters listed above
 * are available. */
#ifdef CONFIG_USB_FILE_STORAGE_TEST

module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");

module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
		"8070, or SCSI)");

module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
MODULE_PARM_DESC(vendor, "USB Vendor ID");

module_param_named(product, mod_data.product, ushort, S_IRUGO);
MODULE_PARM_DESC(product, "USB Product ID");

module_param_named(release, mod_data.release, ushort, S_IRUGO);
MODULE_PARM_DESC(release, "USB release number");

module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
MODULE_PARM_DESC(buflen, "I/O buffer size");

#endif /* CONFIG_USB_FILE_STORAGE_TEST */
/*
 * These definitions will permit the compiler to avoid generating code for
 * parts of the driver that aren't used in the non-TEST version.  Even gcc
 * can recognize when a test of a constant expression yields a dead code
 * path.
 */

#ifdef CONFIG_USB_FILE_STORAGE_TEST

#define transport_is_bbb()	(mod_data.transport_type == USB_PR_BULK)
#define transport_is_cbi()	(mod_data.transport_type == USB_PR_CBI)
#define protocol_is_scsi()	(mod_data.protocol_type == USB_SC_SCSI)

#else

/* Non-TEST builds support only Bulk-only transport with the SCSI
 * protocol, so these tests collapse to compile-time constants. */
#define transport_is_bbb()	1
#define transport_is_cbi()	0
#define protocol_is_scsi()	1

#endif /* CONFIG_USB_FILE_STORAGE_TEST */
402 /*-------------------------------------------------------------------------*/
406 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
408 struct usb_gadget
*gadget
;
410 /* filesem protects: backing files in use */
411 struct rw_semaphore filesem
;
413 /* reference counting: wait until all LUNs are released */
416 struct usb_ep
*ep0
; // Handy copy of gadget->ep0
417 struct usb_request
*ep0req
; // For control responses
418 unsigned int ep0_req_tag
;
419 const char *ep0req_name
;
421 struct usb_request
*intreq
; // For interrupt responses
423 struct fsg_buffhd
*intr_buffhd
;
425 unsigned int bulk_out_maxpacket
;
426 enum fsg_state state
; // For exception handling
427 unsigned int exception_req_tag
;
429 u8 config
, new_config
;
431 unsigned int running
: 1;
432 unsigned int bulk_in_enabled
: 1;
433 unsigned int bulk_out_enabled
: 1;
434 unsigned int intr_in_enabled
: 1;
435 unsigned int phase_error
: 1;
436 unsigned int short_packet_received
: 1;
437 unsigned int bad_lun_okay
: 1;
439 unsigned long atomic_bitflags
;
441 #define IGNORE_BULK_OUT 1
444 struct usb_ep
*bulk_in
;
445 struct usb_ep
*bulk_out
;
446 struct usb_ep
*intr_in
;
448 struct fsg_buffhd
*next_buffhd_to_fill
;
449 struct fsg_buffhd
*next_buffhd_to_drain
;
451 int thread_wakeup_needed
;
452 struct completion thread_notifier
;
453 struct task_struct
*thread_task
;
456 u8 cmnd
[MAX_COMMAND_SIZE
];
457 enum data_direction data_dir
;
459 u32 data_size_from_cmnd
;
465 /* The CB protocol offers no way for a host to know when a command
466 * has completed. As a result the next command may arrive early,
467 * and we will still have to handle it. For that reason we need
468 * a buffer to store new commands when using CB (or CBI, which
469 * does not oblige a host to wait for command completion either). */
471 u8 cbbuf_cmnd
[MAX_COMMAND_SIZE
];
474 struct fsg_lun
*luns
;
475 struct fsg_lun
*curlun
;
476 /* Must be the last entry */
477 struct fsg_buffhd buffhds
[];
480 typedef void (*fsg_routine_t
)(struct fsg_dev
*);
482 static int exception_in_progress(struct fsg_dev
*fsg
)
484 return (fsg
->state
> FSG_STATE_IDLE
);
487 /* Make bulk-out requests be divisible by the maxpacket size */
488 static void set_bulk_out_req_length(struct fsg_dev
*fsg
,
489 struct fsg_buffhd
*bh
, unsigned int length
)
493 bh
->bulk_out_intended_length
= length
;
494 rem
= length
% fsg
->bulk_out_maxpacket
;
496 length
+= fsg
->bulk_out_maxpacket
- rem
;
497 bh
->outreq
->length
= length
;
500 static struct fsg_dev
*the_fsg
;
501 static struct usb_gadget_driver fsg_driver
;
504 /*-------------------------------------------------------------------------*/
506 static int fsg_set_halt(struct fsg_dev
*fsg
, struct usb_ep
*ep
)
510 if (ep
== fsg
->bulk_in
)
512 else if (ep
== fsg
->bulk_out
)
516 DBG(fsg
, "%s set halt\n", name
);
517 return usb_ep_set_halt(ep
);
521 /*-------------------------------------------------------------------------*/
524 * DESCRIPTORS ... most are static, but strings and (full) configuration
525 * descriptors are built on demand. Also the (static) config and interface
526 * descriptors are adjusted during fsg_bind().
529 /* There is only one configuration. */
530 #define CONFIG_VALUE 1
532 static struct usb_device_descriptor
534 .bLength
= sizeof device_desc
,
535 .bDescriptorType
= USB_DT_DEVICE
,
537 .bcdUSB
= cpu_to_le16(0x0200),
538 .bDeviceClass
= USB_CLASS_PER_INTERFACE
,
540 /* The next three values can be overridden by module parameters */
541 .idVendor
= cpu_to_le16(FSG_VENDOR_ID
),
542 .idProduct
= cpu_to_le16(FSG_PRODUCT_ID
),
543 .bcdDevice
= cpu_to_le16(0xffff),
545 .iManufacturer
= FSG_STRING_MANUFACTURER
,
546 .iProduct
= FSG_STRING_PRODUCT
,
547 .iSerialNumber
= FSG_STRING_SERIAL
,
548 .bNumConfigurations
= 1,
551 static struct usb_config_descriptor
553 .bLength
= sizeof config_desc
,
554 .bDescriptorType
= USB_DT_CONFIG
,
556 /* wTotalLength computed by usb_gadget_config_buf() */
558 .bConfigurationValue
= CONFIG_VALUE
,
559 .iConfiguration
= FSG_STRING_CONFIG
,
560 .bmAttributes
= USB_CONFIG_ATT_ONE
| USB_CONFIG_ATT_SELFPOWER
,
561 .bMaxPower
= CONFIG_USB_GADGET_VBUS_DRAW
/ 2,
565 static struct usb_qualifier_descriptor
567 .bLength
= sizeof dev_qualifier
,
568 .bDescriptorType
= USB_DT_DEVICE_QUALIFIER
,
570 .bcdUSB
= cpu_to_le16(0x0200),
571 .bDeviceClass
= USB_CLASS_PER_INTERFACE
,
573 .bNumConfigurations
= 1,
576 static int populate_bos(struct fsg_dev
*fsg
, u8
*buf
)
578 memcpy(buf
, &fsg_bos_desc
, USB_DT_BOS_SIZE
);
579 buf
+= USB_DT_BOS_SIZE
;
581 memcpy(buf
, &fsg_ext_cap_desc
, USB_DT_USB_EXT_CAP_SIZE
);
582 buf
+= USB_DT_USB_EXT_CAP_SIZE
;
584 memcpy(buf
, &fsg_ss_cap_desc
, USB_DT_USB_SS_CAP_SIZE
);
586 return USB_DT_BOS_SIZE
+ USB_DT_USB_SS_CAP_SIZE
587 + USB_DT_USB_EXT_CAP_SIZE
;
591 * Config descriptors must agree with the code that sets configurations
592 * and with code managing interfaces and their altsettings. They must
593 * also handle different speeds and other-speed requests.
595 static int populate_config_buf(struct usb_gadget
*gadget
,
596 u8
*buf
, u8 type
, unsigned index
)
598 enum usb_device_speed speed
= gadget
->speed
;
600 const struct usb_descriptor_header
**function
;
605 if (gadget_is_dualspeed(gadget
) && type
== USB_DT_OTHER_SPEED_CONFIG
)
606 speed
= (USB_SPEED_FULL
+ USB_SPEED_HIGH
) - speed
;
607 function
= gadget_is_dualspeed(gadget
) && speed
== USB_SPEED_HIGH
608 ? (const struct usb_descriptor_header
**)fsg_hs_function
609 : (const struct usb_descriptor_header
**)fsg_fs_function
;
611 /* for now, don't advertise srp-only devices */
612 if (!gadget_is_otg(gadget
))
615 len
= usb_gadget_config_buf(&config_desc
, buf
, EP0_BUFSIZE
, function
);
616 ((struct usb_config_descriptor
*) buf
)->bDescriptorType
= type
;
621 /*-------------------------------------------------------------------------*/
623 /* These routines may be called in process context or in_irq */
625 /* Caller must hold fsg->lock */
626 static void wakeup_thread(struct fsg_dev
*fsg
)
628 /* Tell the main thread that something has happened */
629 fsg
->thread_wakeup_needed
= 1;
630 if (fsg
->thread_task
)
631 wake_up_process(fsg
->thread_task
);
635 static void raise_exception(struct fsg_dev
*fsg
, enum fsg_state new_state
)
639 /* Do nothing if a higher-priority exception is already in progress.
640 * If a lower-or-equal priority exception is in progress, preempt it
641 * and notify the main thread by sending it a signal. */
642 spin_lock_irqsave(&fsg
->lock
, flags
);
643 if (fsg
->state
<= new_state
) {
644 fsg
->exception_req_tag
= fsg
->ep0_req_tag
;
645 fsg
->state
= new_state
;
646 if (fsg
->thread_task
)
647 send_sig_info(SIGUSR1
, SEND_SIG_FORCED
,
650 spin_unlock_irqrestore(&fsg
->lock
, flags
);
654 /*-------------------------------------------------------------------------*/
656 /* The disconnect callback and ep0 routines. These always run in_irq,
657 * except that ep0_queue() is called in the main thread to acknowledge
658 * completion of various requests: set config, set interface, and
659 * Bulk-only device reset. */
661 static void fsg_disconnect(struct usb_gadget
*gadget
)
663 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
665 DBG(fsg
, "disconnect or port reset\n");
666 raise_exception(fsg
, FSG_STATE_DISCONNECT
);
670 static int ep0_queue(struct fsg_dev
*fsg
)
674 rc
= usb_ep_queue(fsg
->ep0
, fsg
->ep0req
, GFP_ATOMIC
);
675 if (rc
!= 0 && rc
!= -ESHUTDOWN
) {
677 /* We can't do much more than wait for a reset */
678 WARNING(fsg
, "error in submission: %s --> %d\n",
684 static void ep0_complete(struct usb_ep
*ep
, struct usb_request
*req
)
686 struct fsg_dev
*fsg
= ep
->driver_data
;
689 dump_msg(fsg
, fsg
->ep0req_name
, req
->buf
, req
->actual
);
690 if (req
->status
|| req
->actual
!= req
->length
)
691 DBG(fsg
, "%s --> %d, %u/%u\n", __func__
,
692 req
->status
, req
->actual
, req
->length
);
693 if (req
->status
== -ECONNRESET
) // Request was cancelled
694 usb_ep_fifo_flush(ep
);
696 if (req
->status
== 0 && req
->context
)
697 ((fsg_routine_t
) (req
->context
))(fsg
);
701 /*-------------------------------------------------------------------------*/
703 /* Bulk and interrupt endpoint completion handlers.
704 * These always run in_irq. */
706 static void bulk_in_complete(struct usb_ep
*ep
, struct usb_request
*req
)
708 struct fsg_dev
*fsg
= ep
->driver_data
;
709 struct fsg_buffhd
*bh
= req
->context
;
711 if (req
->status
|| req
->actual
!= req
->length
)
712 DBG(fsg
, "%s --> %d, %u/%u\n", __func__
,
713 req
->status
, req
->actual
, req
->length
);
714 if (req
->status
== -ECONNRESET
) // Request was cancelled
715 usb_ep_fifo_flush(ep
);
717 /* Hold the lock while we update the request and buffer states */
719 spin_lock(&fsg
->lock
);
721 bh
->state
= BUF_STATE_EMPTY
;
723 spin_unlock(&fsg
->lock
);
726 static void bulk_out_complete(struct usb_ep
*ep
, struct usb_request
*req
)
728 struct fsg_dev
*fsg
= ep
->driver_data
;
729 struct fsg_buffhd
*bh
= req
->context
;
731 dump_msg(fsg
, "bulk-out", req
->buf
, req
->actual
);
732 if (req
->status
|| req
->actual
!= bh
->bulk_out_intended_length
)
733 DBG(fsg
, "%s --> %d, %u/%u\n", __func__
,
734 req
->status
, req
->actual
,
735 bh
->bulk_out_intended_length
);
736 if (req
->status
== -ECONNRESET
) // Request was cancelled
737 usb_ep_fifo_flush(ep
);
739 /* Hold the lock while we update the request and buffer states */
741 spin_lock(&fsg
->lock
);
743 bh
->state
= BUF_STATE_FULL
;
745 spin_unlock(&fsg
->lock
);
#ifdef CONFIG_USB_FILE_STORAGE_TEST
/* Completion handler for interrupt-in transfers (CBI status reports). */
static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&fsg->lock);
	fsg->intreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}

#else
/* Interrupt endpoint is used only by CBI, which is TEST-only */
static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
{}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
776 /*-------------------------------------------------------------------------*/
/* Ep0 class-specific handlers.  These always run in_irq. */

#ifdef CONFIG_USB_FILE_STORAGE_TEST
/* Handle a CBI Accept Device-Specific Command: either trigger a reset
 * or save the new command for the main thread to process later. */
static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = fsg->ep0req;
	static u8		cbi_reset_cmnd[6] = {
			SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};

	/* Error in command transfer? */
	if (req->status || req->length != req->actual ||
			req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {

		/* Not all controllers allow a protocol stall after
		 * receiving control-out data, but we'll try anyway. */
		fsg_set_halt(fsg, fsg->ep0);
		return;			// Wait for reset
	}

	/* Is it the special reset command? */
	if (req->actual >= sizeof cbi_reset_cmnd &&
			memcmp(req->buf, cbi_reset_cmnd,
				sizeof cbi_reset_cmnd) == 0) {

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "cbi reset request\n");
		raise_exception(fsg, FSG_STATE_RESET);
		return;
	}

	VDBG(fsg, "CB[I] accept device-specific command\n");
	spin_lock(&fsg->lock);

	/* Save the command for later */
	if (fsg->cbbuf_cmnd_size)
		WARNING(fsg, "CB[I] overwriting previous command\n");
	fsg->cbbuf_cmnd_size = req->actual;
	memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);

	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}

#else
static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
828 static int class_setup_req(struct fsg_dev
*fsg
,
829 const struct usb_ctrlrequest
*ctrl
)
831 struct usb_request
*req
= fsg
->ep0req
;
832 int value
= -EOPNOTSUPP
;
833 u16 w_index
= le16_to_cpu(ctrl
->wIndex
);
834 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
835 u16 w_length
= le16_to_cpu(ctrl
->wLength
);
840 /* Handle Bulk-only class-specific requests */
841 if (transport_is_bbb()) {
842 switch (ctrl
->bRequest
) {
844 case US_BULK_RESET_REQUEST
:
845 if (ctrl
->bRequestType
!= (USB_DIR_OUT
|
846 USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
848 if (w_index
!= 0 || w_value
!= 0 || w_length
!= 0) {
853 /* Raise an exception to stop the current operation
854 * and reinitialize our state. */
855 DBG(fsg
, "bulk reset request\n");
856 raise_exception(fsg
, FSG_STATE_RESET
);
857 value
= DELAYED_STATUS
;
860 case US_BULK_GET_MAX_LUN
:
861 if (ctrl
->bRequestType
!= (USB_DIR_IN
|
862 USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
864 if (w_index
!= 0 || w_value
!= 0 || w_length
!= 1) {
868 VDBG(fsg
, "get max LUN\n");
869 *(u8
*) req
->buf
= fsg
->nluns
- 1;
875 /* Handle CBI class-specific requests */
877 switch (ctrl
->bRequest
) {
879 case USB_CBI_ADSC_REQUEST
:
880 if (ctrl
->bRequestType
!= (USB_DIR_OUT
|
881 USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
883 if (w_index
!= 0 || w_value
!= 0) {
887 if (w_length
> MAX_COMMAND_SIZE
) {
892 fsg
->ep0req
->context
= received_cbi_adsc
;
897 if (value
== -EOPNOTSUPP
)
899 "unknown class-specific control req "
900 "%02x.%02x v%04x i%04x l%u\n",
901 ctrl
->bRequestType
, ctrl
->bRequest
,
902 le16_to_cpu(ctrl
->wValue
), w_index
, w_length
);
907 /*-------------------------------------------------------------------------*/
909 /* Ep0 standard request handlers. These always run in_irq. */
911 static int standard_setup_req(struct fsg_dev
*fsg
,
912 const struct usb_ctrlrequest
*ctrl
)
914 struct usb_request
*req
= fsg
->ep0req
;
915 int value
= -EOPNOTSUPP
;
916 u16 w_index
= le16_to_cpu(ctrl
->wIndex
);
917 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
919 /* Usually this just stores reply data in the pre-allocated ep0 buffer,
920 * but config change events will also reconfigure hardware. */
921 switch (ctrl
->bRequest
) {
923 case USB_REQ_GET_DESCRIPTOR
:
924 if (ctrl
->bRequestType
!= (USB_DIR_IN
| USB_TYPE_STANDARD
|
927 switch (w_value
>> 8) {
930 VDBG(fsg
, "get device descriptor\n");
931 device_desc
.bMaxPacketSize0
= fsg
->ep0
->maxpacket
;
932 value
= sizeof device_desc
;
933 memcpy(req
->buf
, &device_desc
, value
);
935 case USB_DT_DEVICE_QUALIFIER
:
936 VDBG(fsg
, "get device qualifier\n");
937 if (!gadget_is_dualspeed(fsg
->gadget
) ||
938 fsg
->gadget
->speed
== USB_SPEED_SUPER
)
941 * Assume ep0 uses the same maxpacket value for both
944 dev_qualifier
.bMaxPacketSize0
= fsg
->ep0
->maxpacket
;
945 value
= sizeof dev_qualifier
;
946 memcpy(req
->buf
, &dev_qualifier
, value
);
949 case USB_DT_OTHER_SPEED_CONFIG
:
950 VDBG(fsg
, "get other-speed config descriptor\n");
951 if (!gadget_is_dualspeed(fsg
->gadget
) ||
952 fsg
->gadget
->speed
== USB_SPEED_SUPER
)
956 VDBG(fsg
, "get configuration descriptor\n");
958 value
= populate_config_buf(fsg
->gadget
,
965 VDBG(fsg
, "get string descriptor\n");
967 /* wIndex == language code */
968 value
= usb_gadget_get_string(&fsg_stringtab
,
969 w_value
& 0xff, req
->buf
);
973 VDBG(fsg
, "get bos descriptor\n");
975 if (gadget_is_superspeed(fsg
->gadget
))
976 value
= populate_bos(fsg
, req
->buf
);
982 /* One config, two speeds */
983 case USB_REQ_SET_CONFIGURATION
:
984 if (ctrl
->bRequestType
!= (USB_DIR_OUT
| USB_TYPE_STANDARD
|
987 VDBG(fsg
, "set configuration\n");
988 if (w_value
== CONFIG_VALUE
|| w_value
== 0) {
989 fsg
->new_config
= w_value
;
991 /* Raise an exception to wipe out previous transaction
992 * state (queued bufs, etc) and set the new config. */
993 raise_exception(fsg
, FSG_STATE_CONFIG_CHANGE
);
994 value
= DELAYED_STATUS
;
997 case USB_REQ_GET_CONFIGURATION
:
998 if (ctrl
->bRequestType
!= (USB_DIR_IN
| USB_TYPE_STANDARD
|
1001 VDBG(fsg
, "get configuration\n");
1002 *(u8
*) req
->buf
= fsg
->config
;
1006 case USB_REQ_SET_INTERFACE
:
1007 if (ctrl
->bRequestType
!= (USB_DIR_OUT
| USB_TYPE_STANDARD
|
1008 USB_RECIP_INTERFACE
))
1010 if (fsg
->config
&& w_index
== 0) {
1012 /* Raise an exception to wipe out previous transaction
1013 * state (queued bufs, etc) and install the new
1014 * interface altsetting. */
1015 raise_exception(fsg
, FSG_STATE_INTERFACE_CHANGE
);
1016 value
= DELAYED_STATUS
;
1019 case USB_REQ_GET_INTERFACE
:
1020 if (ctrl
->bRequestType
!= (USB_DIR_IN
| USB_TYPE_STANDARD
|
1021 USB_RECIP_INTERFACE
))
1029 VDBG(fsg
, "get interface\n");
1030 *(u8
*) req
->buf
= 0;
1036 "unknown control req %02x.%02x v%04x i%04x l%u\n",
1037 ctrl
->bRequestType
, ctrl
->bRequest
,
1038 w_value
, w_index
, le16_to_cpu(ctrl
->wLength
));
1045 static int fsg_setup(struct usb_gadget
*gadget
,
1046 const struct usb_ctrlrequest
*ctrl
)
1048 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
1050 int w_length
= le16_to_cpu(ctrl
->wLength
);
1052 ++fsg
->ep0_req_tag
; // Record arrival of a new request
1053 fsg
->ep0req
->context
= NULL
;
1054 fsg
->ep0req
->length
= 0;
1055 dump_msg(fsg
, "ep0-setup", (u8
*) ctrl
, sizeof(*ctrl
));
1057 if ((ctrl
->bRequestType
& USB_TYPE_MASK
) == USB_TYPE_CLASS
)
1058 rc
= class_setup_req(fsg
, ctrl
);
1060 rc
= standard_setup_req(fsg
, ctrl
);
1062 /* Respond with data/status or defer until later? */
1063 if (rc
>= 0 && rc
!= DELAYED_STATUS
) {
1064 rc
= min(rc
, w_length
);
1065 fsg
->ep0req
->length
= rc
;
1066 fsg
->ep0req
->zero
= rc
< w_length
;
1067 fsg
->ep0req_name
= (ctrl
->bRequestType
& USB_DIR_IN
?
1068 "ep0-in" : "ep0-out");
1069 rc
= ep0_queue(fsg
);
1072 /* Device either stalls (rc < 0) or reports success */
1077 /*-------------------------------------------------------------------------*/
1079 /* All the following routines run in process context */
1082 /* Use this for bulk or interrupt transfers, not ep0 */
1083 static void start_transfer(struct fsg_dev
*fsg
, struct usb_ep
*ep
,
1084 struct usb_request
*req
, int *pbusy
,
1085 enum fsg_buffer_state
*state
)
1089 if (ep
== fsg
->bulk_in
)
1090 dump_msg(fsg
, "bulk-in", req
->buf
, req
->length
);
1091 else if (ep
== fsg
->intr_in
)
1092 dump_msg(fsg
, "intr-in", req
->buf
, req
->length
);
1094 spin_lock_irq(&fsg
->lock
);
1096 *state
= BUF_STATE_BUSY
;
1097 spin_unlock_irq(&fsg
->lock
);
1098 rc
= usb_ep_queue(ep
, req
, GFP_KERNEL
);
1101 *state
= BUF_STATE_EMPTY
;
1103 /* We can't do much more than wait for a reset */
1105 /* Note: currently the net2280 driver fails zero-length
1106 * submissions if DMA is enabled. */
1107 if (rc
!= -ESHUTDOWN
&& !(rc
== -EOPNOTSUPP
&&
1109 WARNING(fsg
, "error in submission: %s --> %d\n",
1115 static int sleep_thread(struct fsg_dev
*fsg
)
1119 /* Wait until a signal arrives or we are woken up */
1122 set_current_state(TASK_INTERRUPTIBLE
);
1123 if (signal_pending(current
)) {
1127 if (fsg
->thread_wakeup_needed
)
1131 __set_current_state(TASK_RUNNING
);
1132 fsg
->thread_wakeup_needed
= 0;
1137 /*-------------------------------------------------------------------------*/
1139 static int do_read(struct fsg_dev
*fsg
)
1141 struct fsg_lun
*curlun
= fsg
->curlun
;
1143 struct fsg_buffhd
*bh
;
1146 loff_t file_offset
, file_offset_tmp
;
1147 unsigned int amount
;
1150 /* Get the starting Logical Block Address and check that it's
1152 if (fsg
->cmnd
[0] == READ_6
)
1153 lba
= get_unaligned_be24(&fsg
->cmnd
[1]);
1155 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1157 /* We allow DPO (Disable Page Out = don't save data in the
1158 * cache) and FUA (Force Unit Access = don't read from the
1159 * cache), but we don't implement them. */
1160 if ((fsg
->cmnd
[1] & ~0x18) != 0) {
1161 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1165 if (lba
>= curlun
->num_sectors
) {
1166 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1169 file_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
1171 /* Carry out the file reads */
1172 amount_left
= fsg
->data_size_from_cmnd
;
1173 if (unlikely(amount_left
== 0))
1174 return -EIO
; // No default reply
1178 /* Figure out how much we need to read:
1179 * Try to read the remaining amount.
1180 * But don't read more than the buffer size.
1181 * And don't try to read past the end of the file.
1183 amount
= min((unsigned int) amount_left
, mod_data
.buflen
);
1184 amount
= min((loff_t
) amount
,
1185 curlun
->file_length
- file_offset
);
1187 /* Wait for the next buffer to become available */
1188 bh
= fsg
->next_buffhd_to_fill
;
1189 while (bh
->state
!= BUF_STATE_EMPTY
) {
1190 rc
= sleep_thread(fsg
);
1195 /* If we were asked to read past the end of file,
1196 * end with an empty buffer. */
1198 curlun
->sense_data
=
1199 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1200 curlun
->sense_data_info
= file_offset
>> curlun
->blkbits
;
1201 curlun
->info_valid
= 1;
1202 bh
->inreq
->length
= 0;
1203 bh
->state
= BUF_STATE_FULL
;
1207 /* Perform the read */
1208 file_offset_tmp
= file_offset
;
1209 nread
= vfs_read(curlun
->filp
,
1210 (char __user
*) bh
->buf
,
1211 amount
, &file_offset_tmp
);
1212 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1213 (unsigned long long) file_offset
,
1215 if (signal_pending(current
))
1219 LDBG(curlun
, "error in file read: %d\n",
1222 } else if (nread
< amount
) {
1223 LDBG(curlun
, "partial file read: %d/%u\n",
1224 (int) nread
, amount
);
1225 nread
= round_down(nread
, curlun
->blksize
);
1227 file_offset
+= nread
;
1228 amount_left
-= nread
;
1229 fsg
->residue
-= nread
;
1231 /* Except at the end of the transfer, nread will be
1232 * equal to the buffer size, which is divisible by the
1233 * bulk-in maxpacket size.
1235 bh
->inreq
->length
= nread
;
1236 bh
->state
= BUF_STATE_FULL
;
1238 /* If an error occurred, report it and its position */
1239 if (nread
< amount
) {
1240 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1241 curlun
->sense_data_info
= file_offset
>> curlun
->blkbits
;
1242 curlun
->info_valid
= 1;
1246 if (amount_left
== 0)
1247 break; // No more left to read
1249 /* Send this buffer and go read some more */
1250 bh
->inreq
->zero
= 0;
1251 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
1252 &bh
->inreq_busy
, &bh
->state
);
1253 fsg
->next_buffhd_to_fill
= bh
->next
;
1256 return -EIO
; // No default reply
1260 /*-------------------------------------------------------------------------*/
1262 static int do_write(struct fsg_dev
*fsg
)
1264 struct fsg_lun
*curlun
= fsg
->curlun
;
1266 struct fsg_buffhd
*bh
;
1268 u32 amount_left_to_req
, amount_left_to_write
;
1269 loff_t usb_offset
, file_offset
, file_offset_tmp
;
1270 unsigned int amount
;
1275 curlun
->sense_data
= SS_WRITE_PROTECTED
;
1278 spin_lock(&curlun
->filp
->f_lock
);
1279 curlun
->filp
->f_flags
&= ~O_SYNC
; // Default is not to wait
1280 spin_unlock(&curlun
->filp
->f_lock
);
1282 /* Get the starting Logical Block Address and check that it's
1284 if (fsg
->cmnd
[0] == WRITE_6
)
1285 lba
= get_unaligned_be24(&fsg
->cmnd
[1]);
1287 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1289 /* We allow DPO (Disable Page Out = don't save data in the
1290 * cache) and FUA (Force Unit Access = write directly to the
1291 * medium). We don't implement DPO; we implement FUA by
1292 * performing synchronous output. */
1293 if ((fsg
->cmnd
[1] & ~0x18) != 0) {
1294 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1298 if (!curlun
->nofua
&& (fsg
->cmnd
[1] & 0x08)) {
1299 spin_lock(&curlun
->filp
->f_lock
);
1300 curlun
->filp
->f_flags
|= O_DSYNC
;
1301 spin_unlock(&curlun
->filp
->f_lock
);
1304 if (lba
>= curlun
->num_sectors
) {
1305 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1309 /* Carry out the file writes */
1311 file_offset
= usb_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
1312 amount_left_to_req
= amount_left_to_write
= fsg
->data_size_from_cmnd
;
1314 while (amount_left_to_write
> 0) {
1316 /* Queue a request for more data from the host */
1317 bh
= fsg
->next_buffhd_to_fill
;
1318 if (bh
->state
== BUF_STATE_EMPTY
&& get_some_more
) {
1320 /* Figure out how much we want to get:
1321 * Try to get the remaining amount,
1322 * but not more than the buffer size.
1324 amount
= min(amount_left_to_req
, mod_data
.buflen
);
1326 /* Beyond the end of the backing file? */
1327 if (usb_offset
>= curlun
->file_length
) {
1329 curlun
->sense_data
=
1330 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1331 curlun
->sense_data_info
= usb_offset
>> curlun
->blkbits
;
1332 curlun
->info_valid
= 1;
1336 /* Get the next buffer */
1337 usb_offset
+= amount
;
1338 fsg
->usb_amount_left
-= amount
;
1339 amount_left_to_req
-= amount
;
1340 if (amount_left_to_req
== 0)
1343 /* Except at the end of the transfer, amount will be
1344 * equal to the buffer size, which is divisible by
1345 * the bulk-out maxpacket size.
1347 set_bulk_out_req_length(fsg
, bh
, amount
);
1348 start_transfer(fsg
, fsg
->bulk_out
, bh
->outreq
,
1349 &bh
->outreq_busy
, &bh
->state
);
1350 fsg
->next_buffhd_to_fill
= bh
->next
;
1354 /* Write the received data to the backing file */
1355 bh
= fsg
->next_buffhd_to_drain
;
1356 if (bh
->state
== BUF_STATE_EMPTY
&& !get_some_more
)
1357 break; // We stopped early
1358 if (bh
->state
== BUF_STATE_FULL
) {
1360 fsg
->next_buffhd_to_drain
= bh
->next
;
1361 bh
->state
= BUF_STATE_EMPTY
;
1363 /* Did something go wrong with the transfer? */
1364 if (bh
->outreq
->status
!= 0) {
1365 curlun
->sense_data
= SS_COMMUNICATION_FAILURE
;
1366 curlun
->sense_data_info
= file_offset
>> curlun
->blkbits
;
1367 curlun
->info_valid
= 1;
1371 amount
= bh
->outreq
->actual
;
1372 if (curlun
->file_length
- file_offset
< amount
) {
1374 "write %u @ %llu beyond end %llu\n",
1375 amount
, (unsigned long long) file_offset
,
1376 (unsigned long long) curlun
->file_length
);
1377 amount
= curlun
->file_length
- file_offset
;
1380 /* Don't accept excess data. The spec doesn't say
1381 * what to do in this case. We'll ignore the error.
1383 amount
= min(amount
, bh
->bulk_out_intended_length
);
1385 /* Don't write a partial block */
1386 amount
= round_down(amount
, curlun
->blksize
);
1390 /* Perform the write */
1391 file_offset_tmp
= file_offset
;
1392 nwritten
= vfs_write(curlun
->filp
,
1393 (char __user
*) bh
->buf
,
1394 amount
, &file_offset_tmp
);
1395 VLDBG(curlun
, "file write %u @ %llu -> %d\n", amount
,
1396 (unsigned long long) file_offset
,
1398 if (signal_pending(current
))
1399 return -EINTR
; // Interrupted!
1402 LDBG(curlun
, "error in file write: %d\n",
1405 } else if (nwritten
< amount
) {
1406 LDBG(curlun
, "partial file write: %d/%u\n",
1407 (int) nwritten
, amount
);
1408 nwritten
= round_down(nwritten
, curlun
->blksize
);
1410 file_offset
+= nwritten
;
1411 amount_left_to_write
-= nwritten
;
1412 fsg
->residue
-= nwritten
;
1414 /* If an error occurred, report it and its position */
1415 if (nwritten
< amount
) {
1416 curlun
->sense_data
= SS_WRITE_ERROR
;
1417 curlun
->sense_data_info
= file_offset
>> curlun
->blkbits
;
1418 curlun
->info_valid
= 1;
1423 /* Did the host decide to stop early? */
1424 if (bh
->outreq
->actual
< bh
->bulk_out_intended_length
) {
1425 fsg
->short_packet_received
= 1;
1431 /* Wait for something to happen */
1432 rc
= sleep_thread(fsg
);
1437 return -EIO
; // No default reply
1441 /*-------------------------------------------------------------------------*/
1443 static int do_synchronize_cache(struct fsg_dev
*fsg
)
1445 struct fsg_lun
*curlun
= fsg
->curlun
;
1448 /* We ignore the requested LBA and write out all file's
1449 * dirty data buffers. */
1450 rc
= fsg_lun_fsync_sub(curlun
);
1452 curlun
->sense_data
= SS_WRITE_ERROR
;
1457 /*-------------------------------------------------------------------------*/
1459 static void invalidate_sub(struct fsg_lun
*curlun
)
1461 struct file
*filp
= curlun
->filp
;
1462 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
1465 rc
= invalidate_mapping_pages(inode
->i_mapping
, 0, -1);
1466 VLDBG(curlun
, "invalidate_mapping_pages -> %ld\n", rc
);
1469 static int do_verify(struct fsg_dev
*fsg
)
1471 struct fsg_lun
*curlun
= fsg
->curlun
;
1473 u32 verification_length
;
1474 struct fsg_buffhd
*bh
= fsg
->next_buffhd_to_fill
;
1475 loff_t file_offset
, file_offset_tmp
;
1477 unsigned int amount
;
1480 /* Get the starting Logical Block Address and check that it's
1482 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1483 if (lba
>= curlun
->num_sectors
) {
1484 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1488 /* We allow DPO (Disable Page Out = don't save data in the
1489 * cache) but we don't implement it. */
1490 if ((fsg
->cmnd
[1] & ~0x10) != 0) {
1491 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1495 verification_length
= get_unaligned_be16(&fsg
->cmnd
[7]);
1496 if (unlikely(verification_length
== 0))
1497 return -EIO
; // No default reply
1499 /* Prepare to carry out the file verify */
1500 amount_left
= verification_length
<< curlun
->blkbits
;
1501 file_offset
= ((loff_t
) lba
) << curlun
->blkbits
;
1503 /* Write out all the dirty buffers before invalidating them */
1504 fsg_lun_fsync_sub(curlun
);
1505 if (signal_pending(current
))
1508 invalidate_sub(curlun
);
1509 if (signal_pending(current
))
1512 /* Just try to read the requested blocks */
1513 while (amount_left
> 0) {
1515 /* Figure out how much we need to read:
1516 * Try to read the remaining amount, but not more than
1518 * And don't try to read past the end of the file.
1520 amount
= min((unsigned int) amount_left
, mod_data
.buflen
);
1521 amount
= min((loff_t
) amount
,
1522 curlun
->file_length
- file_offset
);
1524 curlun
->sense_data
=
1525 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1526 curlun
->sense_data_info
= file_offset
>> curlun
->blkbits
;
1527 curlun
->info_valid
= 1;
1531 /* Perform the read */
1532 file_offset_tmp
= file_offset
;
1533 nread
= vfs_read(curlun
->filp
,
1534 (char __user
*) bh
->buf
,
1535 amount
, &file_offset_tmp
);
1536 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1537 (unsigned long long) file_offset
,
1539 if (signal_pending(current
))
1543 LDBG(curlun
, "error in file verify: %d\n",
1546 } else if (nread
< amount
) {
1547 LDBG(curlun
, "partial file verify: %d/%u\n",
1548 (int) nread
, amount
);
1549 nread
= round_down(nread
, curlun
->blksize
);
1552 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1553 curlun
->sense_data_info
= file_offset
>> curlun
->blkbits
;
1554 curlun
->info_valid
= 1;
1557 file_offset
+= nread
;
1558 amount_left
-= nread
;
1564 /*-------------------------------------------------------------------------*/
1566 static int do_inquiry(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1568 u8
*buf
= (u8
*) bh
->buf
;
1570 static char vendor_id
[] = "Linux ";
1571 static char product_disk_id
[] = "File-Stor Gadget";
1572 static char product_cdrom_id
[] = "File-CD Gadget ";
1574 if (!fsg
->curlun
) { // Unsupported LUNs are okay
1575 fsg
->bad_lun_okay
= 1;
1577 buf
[0] = 0x7f; // Unsupported, no device-type
1578 buf
[4] = 31; // Additional length
1583 buf
[0] = (mod_data
.cdrom
? TYPE_ROM
: TYPE_DISK
);
1584 if (mod_data
.removable
)
1586 buf
[2] = 2; // ANSI SCSI level 2
1587 buf
[3] = 2; // SCSI-2 INQUIRY data format
1588 buf
[4] = 31; // Additional length
1589 // No special options
1590 sprintf(buf
+ 8, "%-8s%-16s%04x", vendor_id
,
1591 (mod_data
.cdrom
? product_cdrom_id
:
1598 static int do_request_sense(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1600 struct fsg_lun
*curlun
= fsg
->curlun
;
1601 u8
*buf
= (u8
*) bh
->buf
;
1606 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1608 * If a REQUEST SENSE command is received from an initiator
1609 * with a pending unit attention condition (before the target
1610 * generates the contingent allegiance condition), then the
1611 * target shall either:
1612 * a) report any pending sense data and preserve the unit
1613 * attention condition on the logical unit, or,
1614 * b) report the unit attention condition, may discard any
1615 * pending sense data, and clear the unit attention
1616 * condition on the logical unit for that initiator.
1618 * FSG normally uses option a); enable this code to use option b).
1621 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
) {
1622 curlun
->sense_data
= curlun
->unit_attention_data
;
1623 curlun
->unit_attention_data
= SS_NO_SENSE
;
1627 if (!curlun
) { // Unsupported LUNs are okay
1628 fsg
->bad_lun_okay
= 1;
1629 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
1633 sd
= curlun
->sense_data
;
1634 sdinfo
= curlun
->sense_data_info
;
1635 valid
= curlun
->info_valid
<< 7;
1636 curlun
->sense_data
= SS_NO_SENSE
;
1637 curlun
->sense_data_info
= 0;
1638 curlun
->info_valid
= 0;
1642 buf
[0] = valid
| 0x70; // Valid, current error
1644 put_unaligned_be32(sdinfo
, &buf
[3]); /* Sense information */
1645 buf
[7] = 18 - 8; // Additional sense length
1652 static int do_read_capacity(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1654 struct fsg_lun
*curlun
= fsg
->curlun
;
1655 u32 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1656 int pmi
= fsg
->cmnd
[8];
1657 u8
*buf
= (u8
*) bh
->buf
;
1659 /* Check the PMI and LBA fields */
1660 if (pmi
> 1 || (pmi
== 0 && lba
!= 0)) {
1661 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1665 put_unaligned_be32(curlun
->num_sectors
- 1, &buf
[0]);
1666 /* Max logical block */
1667 put_unaligned_be32(curlun
->blksize
, &buf
[4]); /* Block length */
1672 static int do_read_header(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1674 struct fsg_lun
*curlun
= fsg
->curlun
;
1675 int msf
= fsg
->cmnd
[1] & 0x02;
1676 u32 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1677 u8
*buf
= (u8
*) bh
->buf
;
1679 if ((fsg
->cmnd
[1] & ~0x02) != 0) { /* Mask away MSF */
1680 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1683 if (lba
>= curlun
->num_sectors
) {
1684 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1689 buf
[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1690 store_cdrom_address(&buf
[4], msf
, lba
);
1695 static int do_read_toc(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1697 struct fsg_lun
*curlun
= fsg
->curlun
;
1698 int msf
= fsg
->cmnd
[1] & 0x02;
1699 int start_track
= fsg
->cmnd
[6];
1700 u8
*buf
= (u8
*) bh
->buf
;
1702 if ((fsg
->cmnd
[1] & ~0x02) != 0 || /* Mask away MSF */
1704 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1709 buf
[1] = (20-2); /* TOC data length */
1710 buf
[2] = 1; /* First track number */
1711 buf
[3] = 1; /* Last track number */
1712 buf
[5] = 0x16; /* Data track, copying allowed */
1713 buf
[6] = 0x01; /* Only track is number 1 */
1714 store_cdrom_address(&buf
[8], msf
, 0);
1716 buf
[13] = 0x16; /* Lead-out track is data */
1717 buf
[14] = 0xAA; /* Lead-out track number */
1718 store_cdrom_address(&buf
[16], msf
, curlun
->num_sectors
);
1723 static int do_mode_sense(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1725 struct fsg_lun
*curlun
= fsg
->curlun
;
1726 int mscmnd
= fsg
->cmnd
[0];
1727 u8
*buf
= (u8
*) bh
->buf
;
1730 int changeable_values
, all_pages
;
1734 if ((fsg
->cmnd
[1] & ~0x08) != 0) { // Mask away DBD
1735 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1738 pc
= fsg
->cmnd
[2] >> 6;
1739 page_code
= fsg
->cmnd
[2] & 0x3f;
1741 curlun
->sense_data
= SS_SAVING_PARAMETERS_NOT_SUPPORTED
;
1744 changeable_values
= (pc
== 1);
1745 all_pages
= (page_code
== 0x3f);
1747 /* Write the mode parameter header. Fixed values are: default
1748 * medium type, no cache control (DPOFUA), and no block descriptors.
1749 * The only variable value is the WriteProtect bit. We will fill in
1750 * the mode data length later. */
1752 if (mscmnd
== MODE_SENSE
) {
1753 buf
[2] = (curlun
->ro
? 0x80 : 0x00); // WP, DPOFUA
1756 } else { // MODE_SENSE_10
1757 buf
[3] = (curlun
->ro
? 0x80 : 0x00); // WP, DPOFUA
1759 limit
= 65535; // Should really be mod_data.buflen
1762 /* No block descriptors */
1764 /* The mode pages, in numerical order. The only page we support
1765 * is the Caching page. */
1766 if (page_code
== 0x08 || all_pages
) {
1768 buf
[0] = 0x08; // Page code
1769 buf
[1] = 10; // Page length
1770 memset(buf
+2, 0, 10); // None of the fields are changeable
1772 if (!changeable_values
) {
1773 buf
[2] = 0x04; // Write cache enable,
1774 // Read cache not disabled
1775 // No cache retention priorities
1776 put_unaligned_be16(0xffff, &buf
[4]);
1777 /* Don't disable prefetch */
1778 /* Minimum prefetch = 0 */
1779 put_unaligned_be16(0xffff, &buf
[8]);
1780 /* Maximum prefetch */
1781 put_unaligned_be16(0xffff, &buf
[10]);
1782 /* Maximum prefetch ceiling */
1787 /* Check that a valid page was requested and the mode data length
1788 * isn't too long. */
1790 if (!valid_page
|| len
> limit
) {
1791 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1795 /* Store the mode data length */
1796 if (mscmnd
== MODE_SENSE
)
1799 put_unaligned_be16(len
- 2, buf0
);
1804 static int do_start_stop(struct fsg_dev
*fsg
)
1806 struct fsg_lun
*curlun
= fsg
->curlun
;
1809 if (!mod_data
.removable
) {
1810 curlun
->sense_data
= SS_INVALID_COMMAND
;
1814 // int immed = fsg->cmnd[1] & 0x01;
1815 loej
= fsg
->cmnd
[4] & 0x02;
1816 start
= fsg
->cmnd
[4] & 0x01;
1818 #ifdef CONFIG_USB_FILE_STORAGE_TEST
1819 if ((fsg
->cmnd
[1] & ~0x01) != 0 || // Mask away Immed
1820 (fsg
->cmnd
[4] & ~0x03) != 0) { // Mask LoEj, Start
1821 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1827 /* Are we allowed to unload the media? */
1828 if (curlun
->prevent_medium_removal
) {
1829 LDBG(curlun
, "unload attempt prevented\n");
1830 curlun
->sense_data
= SS_MEDIUM_REMOVAL_PREVENTED
;
1833 if (loej
) { // Simulate an unload/eject
1834 up_read(&fsg
->filesem
);
1835 down_write(&fsg
->filesem
);
1836 fsg_lun_close(curlun
);
1837 up_write(&fsg
->filesem
);
1838 down_read(&fsg
->filesem
);
1842 /* Our emulation doesn't support mounting; the medium is
1843 * available for use as soon as it is loaded. */
1844 if (!fsg_lun_is_open(curlun
)) {
1845 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
1854 static int do_prevent_allow(struct fsg_dev
*fsg
)
1856 struct fsg_lun
*curlun
= fsg
->curlun
;
1859 if (!mod_data
.removable
) {
1860 curlun
->sense_data
= SS_INVALID_COMMAND
;
1864 prevent
= fsg
->cmnd
[4] & 0x01;
1865 if ((fsg
->cmnd
[4] & ~0x01) != 0) { // Mask away Prevent
1866 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1870 if (curlun
->prevent_medium_removal
&& !prevent
)
1871 fsg_lun_fsync_sub(curlun
);
1872 curlun
->prevent_medium_removal
= prevent
;
1877 static int do_read_format_capacities(struct fsg_dev
*fsg
,
1878 struct fsg_buffhd
*bh
)
1880 struct fsg_lun
*curlun
= fsg
->curlun
;
1881 u8
*buf
= (u8
*) bh
->buf
;
1883 buf
[0] = buf
[1] = buf
[2] = 0;
1884 buf
[3] = 8; // Only the Current/Maximum Capacity Descriptor
1887 put_unaligned_be32(curlun
->num_sectors
, &buf
[0]);
1888 /* Number of blocks */
1889 put_unaligned_be32(curlun
->blksize
, &buf
[4]); /* Block length */
1890 buf
[4] = 0x02; /* Current capacity */
1895 static int do_mode_select(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1897 struct fsg_lun
*curlun
= fsg
->curlun
;
1899 /* We don't support MODE SELECT */
1900 curlun
->sense_data
= SS_INVALID_COMMAND
;
1905 /*-------------------------------------------------------------------------*/
1907 static int halt_bulk_in_endpoint(struct fsg_dev
*fsg
)
1911 rc
= fsg_set_halt(fsg
, fsg
->bulk_in
);
1913 VDBG(fsg
, "delayed bulk-in endpoint halt\n");
1915 if (rc
!= -EAGAIN
) {
1916 WARNING(fsg
, "usb_ep_set_halt -> %d\n", rc
);
1921 /* Wait for a short time and then try again */
1922 if (msleep_interruptible(100) != 0)
1924 rc
= usb_ep_set_halt(fsg
->bulk_in
);
1929 static int wedge_bulk_in_endpoint(struct fsg_dev
*fsg
)
1933 DBG(fsg
, "bulk-in set wedge\n");
1934 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1936 VDBG(fsg
, "delayed bulk-in endpoint wedge\n");
1938 if (rc
!= -EAGAIN
) {
1939 WARNING(fsg
, "usb_ep_set_wedge -> %d\n", rc
);
1944 /* Wait for a short time and then try again */
1945 if (msleep_interruptible(100) != 0)
1947 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1952 static int throw_away_data(struct fsg_dev
*fsg
)
1954 struct fsg_buffhd
*bh
;
1958 while ((bh
= fsg
->next_buffhd_to_drain
)->state
!= BUF_STATE_EMPTY
||
1959 fsg
->usb_amount_left
> 0) {
1961 /* Throw away the data in a filled buffer */
1962 if (bh
->state
== BUF_STATE_FULL
) {
1964 bh
->state
= BUF_STATE_EMPTY
;
1965 fsg
->next_buffhd_to_drain
= bh
->next
;
1967 /* A short packet or an error ends everything */
1968 if (bh
->outreq
->actual
< bh
->bulk_out_intended_length
||
1969 bh
->outreq
->status
!= 0) {
1970 raise_exception(fsg
, FSG_STATE_ABORT_BULK_OUT
);
1976 /* Try to submit another request if we need one */
1977 bh
= fsg
->next_buffhd_to_fill
;
1978 if (bh
->state
== BUF_STATE_EMPTY
&& fsg
->usb_amount_left
> 0) {
1979 amount
= min(fsg
->usb_amount_left
,
1980 (u32
) mod_data
.buflen
);
1982 /* Except at the end of the transfer, amount will be
1983 * equal to the buffer size, which is divisible by
1984 * the bulk-out maxpacket size.
1986 set_bulk_out_req_length(fsg
, bh
, amount
);
1987 start_transfer(fsg
, fsg
->bulk_out
, bh
->outreq
,
1988 &bh
->outreq_busy
, &bh
->state
);
1989 fsg
->next_buffhd_to_fill
= bh
->next
;
1990 fsg
->usb_amount_left
-= amount
;
1994 /* Otherwise wait for something to happen */
1995 rc
= sleep_thread(fsg
);
2003 static int finish_reply(struct fsg_dev
*fsg
)
2005 struct fsg_buffhd
*bh
= fsg
->next_buffhd_to_fill
;
2008 switch (fsg
->data_dir
) {
2010 break; // Nothing to send
2012 /* If we don't know whether the host wants to read or write,
2013 * this must be CB or CBI with an unknown command. We mustn't
2014 * try to send or receive any data. So stall both bulk pipes
2015 * if we can and wait for a reset. */
2016 case DATA_DIR_UNKNOWN
:
2017 if (mod_data
.can_stall
) {
2018 fsg_set_halt(fsg
, fsg
->bulk_out
);
2019 rc
= halt_bulk_in_endpoint(fsg
);
2023 /* All but the last buffer of data must have already been sent */
2024 case DATA_DIR_TO_HOST
:
2025 if (fsg
->data_size
== 0)
2026 ; // Nothing to send
2028 /* If there's no residue, simply send the last buffer */
2029 else if (fsg
->residue
== 0) {
2030 bh
->inreq
->zero
= 0;
2031 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
2032 &bh
->inreq_busy
, &bh
->state
);
2033 fsg
->next_buffhd_to_fill
= bh
->next
;
2036 /* There is a residue. For CB and CBI, simply mark the end
2037 * of the data with a short packet. However, if we are
2038 * allowed to stall, there was no data at all (residue ==
2039 * data_size), and the command failed (invalid LUN or
2040 * sense data is set), then halt the bulk-in endpoint
2042 else if (!transport_is_bbb()) {
2043 if (mod_data
.can_stall
&&
2044 fsg
->residue
== fsg
->data_size
&&
2045 (!fsg
->curlun
|| fsg
->curlun
->sense_data
!= SS_NO_SENSE
)) {
2046 bh
->state
= BUF_STATE_EMPTY
;
2047 rc
= halt_bulk_in_endpoint(fsg
);
2049 bh
->inreq
->zero
= 1;
2050 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
2051 &bh
->inreq_busy
, &bh
->state
);
2052 fsg
->next_buffhd_to_fill
= bh
->next
;
2057 * For Bulk-only, mark the end of the data with a short
2058 * packet. If we are allowed to stall, halt the bulk-in
2059 * endpoint. (Note: This violates the Bulk-Only Transport
2060 * specification, which requires us to pad the data if we
2061 * don't halt the endpoint. Presumably nobody will mind.)
2064 bh
->inreq
->zero
= 1;
2065 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
2066 &bh
->inreq_busy
, &bh
->state
);
2067 fsg
->next_buffhd_to_fill
= bh
->next
;
2068 if (mod_data
.can_stall
)
2069 rc
= halt_bulk_in_endpoint(fsg
);
2073 /* We have processed all we want from the data the host has sent.
2074 * There may still be outstanding bulk-out requests. */
2075 case DATA_DIR_FROM_HOST
:
2076 if (fsg
->residue
== 0)
2077 ; // Nothing to receive
2079 /* Did the host stop sending unexpectedly early? */
2080 else if (fsg
->short_packet_received
) {
2081 raise_exception(fsg
, FSG_STATE_ABORT_BULK_OUT
);
2085 /* We haven't processed all the incoming data. Even though
2086 * we may be allowed to stall, doing so would cause a race.
2087 * The controller may already have ACK'ed all the remaining
2088 * bulk-out packets, in which case the host wouldn't see a
2089 * STALL. Not realizing the endpoint was halted, it wouldn't
2090 * clear the halt -- leading to problems later on. */
2092 else if (mod_data
.can_stall
) {
2093 fsg_set_halt(fsg
, fsg
->bulk_out
);
2094 raise_exception(fsg
, FSG_STATE_ABORT_BULK_OUT
);
2099 /* We can't stall. Read in the excess data and throw it
2102 rc
= throw_away_data(fsg
);
2109 static int send_status(struct fsg_dev
*fsg
)
2111 struct fsg_lun
*curlun
= fsg
->curlun
;
2112 struct fsg_buffhd
*bh
;
2114 u8 status
= US_BULK_STAT_OK
;
2117 /* Wait for the next buffer to become available */
2118 bh
= fsg
->next_buffhd_to_fill
;
2119 while (bh
->state
!= BUF_STATE_EMPTY
) {
2120 rc
= sleep_thread(fsg
);
2126 sd
= curlun
->sense_data
;
2127 sdinfo
= curlun
->sense_data_info
;
2128 } else if (fsg
->bad_lun_okay
)
2131 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
2133 if (fsg
->phase_error
) {
2134 DBG(fsg
, "sending phase-error status\n");
2135 status
= US_BULK_STAT_PHASE
;
2136 sd
= SS_INVALID_COMMAND
;
2137 } else if (sd
!= SS_NO_SENSE
) {
2138 DBG(fsg
, "sending command-failure status\n");
2139 status
= US_BULK_STAT_FAIL
;
2140 VDBG(fsg
, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2142 SK(sd
), ASC(sd
), ASCQ(sd
), sdinfo
);
2145 if (transport_is_bbb()) {
2146 struct bulk_cs_wrap
*csw
= bh
->buf
;
2148 /* Store and send the Bulk-only CSW */
2149 csw
->Signature
= cpu_to_le32(US_BULK_CS_SIGN
);
2150 csw
->Tag
= fsg
->tag
;
2151 csw
->Residue
= cpu_to_le32(fsg
->residue
);
2152 csw
->Status
= status
;
2154 bh
->inreq
->length
= US_BULK_CS_WRAP_LEN
;
2155 bh
->inreq
->zero
= 0;
2156 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
2157 &bh
->inreq_busy
, &bh
->state
);
2159 } else if (mod_data
.transport_type
== USB_PR_CB
) {
2161 /* Control-Bulk transport has no status phase! */
2164 } else { // USB_PR_CBI
2165 struct interrupt_data
*buf
= bh
->buf
;
2167 /* Store and send the Interrupt data. UFI sends the ASC
2168 * and ASCQ bytes. Everything else sends a Type (which
2169 * is always 0) and the status Value. */
2170 if (mod_data
.protocol_type
== USB_SC_UFI
) {
2171 buf
->bType
= ASC(sd
);
2172 buf
->bValue
= ASCQ(sd
);
2175 buf
->bValue
= status
;
2177 fsg
->intreq
->length
= CBI_INTERRUPT_DATA_LEN
;
2179 fsg
->intr_buffhd
= bh
; // Point to the right buffhd
2180 fsg
->intreq
->buf
= bh
->inreq
->buf
;
2181 fsg
->intreq
->context
= bh
;
2182 start_transfer(fsg
, fsg
->intr_in
, fsg
->intreq
,
2183 &fsg
->intreq_busy
, &bh
->state
);
2186 fsg
->next_buffhd_to_fill
= bh
->next
;
2191 /*-------------------------------------------------------------------------*/
2193 /* Check whether the command is properly formed and whether its data size
2194 * and direction agree with the values we already have. */
2195 static int check_command(struct fsg_dev
*fsg
, int cmnd_size
,
2196 enum data_direction data_dir
, unsigned int mask
,
2197 int needs_medium
, const char *name
)
2200 int lun
= fsg
->cmnd
[1] >> 5;
2201 static const char dirletter
[4] = {'u', 'o', 'i', 'n'};
2203 struct fsg_lun
*curlun
;
2205 /* Adjust the expected cmnd_size for protocol encapsulation padding.
2206 * Transparent SCSI doesn't pad. */
2207 if (protocol_is_scsi())
2210 /* There's some disagreement as to whether RBC pads commands or not.
2211 * We'll play it safe and accept either form. */
2212 else if (mod_data
.protocol_type
== USB_SC_RBC
) {
2213 if (fsg
->cmnd_size
== 12)
2216 /* All the other protocols pad to 12 bytes */
2221 if (fsg
->data_dir
!= DATA_DIR_UNKNOWN
)
2222 sprintf(hdlen
, ", H%c=%u", dirletter
[(int) fsg
->data_dir
],
2224 VDBG(fsg
, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
2225 name
, cmnd_size
, dirletter
[(int) data_dir
],
2226 fsg
->data_size_from_cmnd
, fsg
->cmnd_size
, hdlen
);
2228 /* We can't reply at all until we know the correct data direction
2230 if (fsg
->data_size_from_cmnd
== 0)
2231 data_dir
= DATA_DIR_NONE
;
2232 if (fsg
->data_dir
== DATA_DIR_UNKNOWN
) { // CB or CBI
2233 fsg
->data_dir
= data_dir
;
2234 fsg
->data_size
= fsg
->data_size_from_cmnd
;
2236 } else { // Bulk-only
2237 if (fsg
->data_size
< fsg
->data_size_from_cmnd
) {
2239 /* Host data size < Device data size is a phase error.
2240 * Carry out the command, but only transfer as much
2241 * as we are allowed. */
2242 fsg
->data_size_from_cmnd
= fsg
->data_size
;
2243 fsg
->phase_error
= 1;
2246 fsg
->residue
= fsg
->usb_amount_left
= fsg
->data_size
;
2248 /* Conflicting data directions is a phase error */
2249 if (fsg
->data_dir
!= data_dir
&& fsg
->data_size_from_cmnd
> 0) {
2250 fsg
->phase_error
= 1;
2254 /* Verify the length of the command itself */
2255 if (cmnd_size
!= fsg
->cmnd_size
) {
2257 /* Special case workaround: There are plenty of buggy SCSI
2258 * implementations. Many have issues with cbw->Length
2259 * field passing a wrong command size. For those cases we
2260 * always try to work around the problem by using the length
2261 * sent by the host side provided it is at least as large
2262 * as the correct command length.
2263 * Examples of such cases would be MS-Windows, which issues
2264 * REQUEST SENSE with cbw->Length == 12 where it should
2265 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
2266 * REQUEST SENSE with cbw->Length == 10 where it should
2269 if (cmnd_size
<= fsg
->cmnd_size
) {
2270 DBG(fsg
, "%s is buggy! Expected length %d "
2271 "but we got %d\n", name
,
2272 cmnd_size
, fsg
->cmnd_size
);
2273 cmnd_size
= fsg
->cmnd_size
;
2275 fsg
->phase_error
= 1;
2280 /* Check that the LUN values are consistent */
2281 if (transport_is_bbb()) {
2282 if (fsg
->lun
!= lun
)
2283 DBG(fsg
, "using LUN %d from CBW, "
2284 "not LUN %d from CDB\n",
2289 curlun
= fsg
->curlun
;
2291 if (fsg
->cmnd
[0] != REQUEST_SENSE
) {
2292 curlun
->sense_data
= SS_NO_SENSE
;
2293 curlun
->sense_data_info
= 0;
2294 curlun
->info_valid
= 0;
2297 fsg
->bad_lun_okay
= 0;
2299 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2300 * to use unsupported LUNs; all others may not. */
2301 if (fsg
->cmnd
[0] != INQUIRY
&&
2302 fsg
->cmnd
[0] != REQUEST_SENSE
) {
2303 DBG(fsg
, "unsupported LUN %d\n", fsg
->lun
);
2308 /* If a unit attention condition exists, only INQUIRY and
2309 * REQUEST SENSE commands are allowed; anything else must fail. */
2310 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
&&
2311 fsg
->cmnd
[0] != INQUIRY
&&
2312 fsg
->cmnd
[0] != REQUEST_SENSE
) {
2313 curlun
->sense_data
= curlun
->unit_attention_data
;
2314 curlun
->unit_attention_data
= SS_NO_SENSE
;
2318 /* Check that only command bytes listed in the mask are non-zero */
2319 fsg
->cmnd
[1] &= 0x1f; // Mask away the LUN
2320 for (i
= 1; i
< cmnd_size
; ++i
) {
2321 if (fsg
->cmnd
[i
] && !(mask
& (1 << i
))) {
2323 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
2328 /* If the medium isn't mounted and the command needs to access
2329 * it, return an error. */
2330 if (curlun
&& !fsg_lun_is_open(curlun
) && needs_medium
) {
2331 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
2338 /* wrapper of check_command for data size in blocks handling */
2339 static int check_command_size_in_blocks(struct fsg_dev
*fsg
, int cmnd_size
,
2340 enum data_direction data_dir
, unsigned int mask
,
2341 int needs_medium
, const char *name
)
2344 fsg
->data_size_from_cmnd
<<= fsg
->curlun
->blkbits
;
2345 return check_command(fsg
, cmnd_size
, data_dir
,
2346 mask
, needs_medium
, name
);
2349 static int do_scsi_command(struct fsg_dev
*fsg
)
2351 struct fsg_buffhd
*bh
;
2353 int reply
= -EINVAL
;
2355 static char unknown
[16];
2359 /* Wait for the next buffer to become available for data or status */
2360 bh
= fsg
->next_buffhd_to_drain
= fsg
->next_buffhd_to_fill
;
2361 while (bh
->state
!= BUF_STATE_EMPTY
) {
2362 rc
= sleep_thread(fsg
);
2366 fsg
->phase_error
= 0;
2367 fsg
->short_packet_received
= 0;
2369 down_read(&fsg
->filesem
); // We're using the backing file
2370 switch (fsg
->cmnd
[0]) {
2373 fsg
->data_size_from_cmnd
= fsg
->cmnd
[4];
2374 if ((reply
= check_command(fsg
, 6, DATA_DIR_TO_HOST
,
2377 reply
= do_inquiry(fsg
, bh
);
2381 fsg
->data_size_from_cmnd
= fsg
->cmnd
[4];
2382 if ((reply
= check_command(fsg
, 6, DATA_DIR_FROM_HOST
,
2384 "MODE SELECT(6)")) == 0)
2385 reply
= do_mode_select(fsg
, bh
);
2388 case MODE_SELECT_10
:
2389 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2390 if ((reply
= check_command(fsg
, 10, DATA_DIR_FROM_HOST
,
2392 "MODE SELECT(10)")) == 0)
2393 reply
= do_mode_select(fsg
, bh
);
2397 fsg
->data_size_from_cmnd
= fsg
->cmnd
[4];
2398 if ((reply
= check_command(fsg
, 6, DATA_DIR_TO_HOST
,
2399 (1<<1) | (1<<2) | (1<<4), 0,
2400 "MODE SENSE(6)")) == 0)
2401 reply
= do_mode_sense(fsg
, bh
);
2405 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2406 if ((reply
= check_command(fsg
, 10, DATA_DIR_TO_HOST
,
2407 (1<<1) | (1<<2) | (3<<7), 0,
2408 "MODE SENSE(10)")) == 0)
2409 reply
= do_mode_sense(fsg
, bh
);
2412 case ALLOW_MEDIUM_REMOVAL
:
2413 fsg
->data_size_from_cmnd
= 0;
2414 if ((reply
= check_command(fsg
, 6, DATA_DIR_NONE
,
2416 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2417 reply
= do_prevent_allow(fsg
);
2422 fsg
->data_size_from_cmnd
= (i
== 0) ? 256 : i
;
2423 if ((reply
= check_command_size_in_blocks(fsg
, 6,
2427 reply
= do_read(fsg
);
2431 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2432 if ((reply
= check_command_size_in_blocks(fsg
, 10,
2434 (1<<1) | (0xf<<2) | (3<<7), 1,
2436 reply
= do_read(fsg
);
2440 fsg
->data_size_from_cmnd
= get_unaligned_be32(&fsg
->cmnd
[6]);
2441 if ((reply
= check_command_size_in_blocks(fsg
, 12,
2443 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2445 reply
= do_read(fsg
);
2449 fsg
->data_size_from_cmnd
= 8;
2450 if ((reply
= check_command(fsg
, 10, DATA_DIR_TO_HOST
,
2451 (0xf<<2) | (1<<8), 1,
2452 "READ CAPACITY")) == 0)
2453 reply
= do_read_capacity(fsg
, bh
);
2457 if (!mod_data
.cdrom
)
2459 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2460 if ((reply
= check_command(fsg
, 10, DATA_DIR_TO_HOST
,
2461 (3<<7) | (0x1f<<1), 1,
2462 "READ HEADER")) == 0)
2463 reply
= do_read_header(fsg
, bh
);
2467 if (!mod_data
.cdrom
)
2469 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2470 if ((reply
= check_command(fsg
, 10, DATA_DIR_TO_HOST
,
2473 reply
= do_read_toc(fsg
, bh
);
2476 case READ_FORMAT_CAPACITIES
:
2477 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2478 if ((reply
= check_command(fsg
, 10, DATA_DIR_TO_HOST
,
2480 "READ FORMAT CAPACITIES")) == 0)
2481 reply
= do_read_format_capacities(fsg
, bh
);
2485 fsg
->data_size_from_cmnd
= fsg
->cmnd
[4];
2486 if ((reply
= check_command(fsg
, 6, DATA_DIR_TO_HOST
,
2488 "REQUEST SENSE")) == 0)
2489 reply
= do_request_sense(fsg
, bh
);
2493 fsg
->data_size_from_cmnd
= 0;
2494 if ((reply
= check_command(fsg
, 6, DATA_DIR_NONE
,
2496 "START-STOP UNIT")) == 0)
2497 reply
= do_start_stop(fsg
);
2500 case SYNCHRONIZE_CACHE
:
2501 fsg
->data_size_from_cmnd
= 0;
2502 if ((reply
= check_command(fsg
, 10, DATA_DIR_NONE
,
2503 (0xf<<2) | (3<<7), 1,
2504 "SYNCHRONIZE CACHE")) == 0)
2505 reply
= do_synchronize_cache(fsg
);
2508 case TEST_UNIT_READY
:
2509 fsg
->data_size_from_cmnd
= 0;
2510 reply
= check_command(fsg
, 6, DATA_DIR_NONE
,
2515 /* Although optional, this command is used by MS-Windows. We
2516 * support a minimal version: BytChk must be 0. */
2518 fsg
->data_size_from_cmnd
= 0;
2519 if ((reply
= check_command(fsg
, 10, DATA_DIR_NONE
,
2520 (1<<1) | (0xf<<2) | (3<<7), 1,
2522 reply
= do_verify(fsg
);
2527 fsg
->data_size_from_cmnd
= (i
== 0) ? 256 : i
;
2528 if ((reply
= check_command_size_in_blocks(fsg
, 6,
2532 reply
= do_write(fsg
);
2536 fsg
->data_size_from_cmnd
= get_unaligned_be16(&fsg
->cmnd
[7]);
2537 if ((reply
= check_command_size_in_blocks(fsg
, 10,
2539 (1<<1) | (0xf<<2) | (3<<7), 1,
2541 reply
= do_write(fsg
);
2545 fsg
->data_size_from_cmnd
= get_unaligned_be32(&fsg
->cmnd
[6]);
2546 if ((reply
= check_command_size_in_blocks(fsg
, 12,
2548 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2550 reply
= do_write(fsg
);
2553 /* Some mandatory commands that we recognize but don't implement.
2554 * They don't mean much in this setting. It's left as an exercise
2555 * for anyone interested to implement RESERVE and RELEASE in terms
2556 * of Posix locks. */
2560 case SEND_DIAGNOSTIC
:
2565 fsg
->data_size_from_cmnd
= 0;
2566 sprintf(unknown
, "Unknown x%02x", fsg
->cmnd
[0]);
2567 if ((reply
= check_command(fsg
, fsg
->cmnd_size
,
2568 DATA_DIR_UNKNOWN
, ~0, 0, unknown
)) == 0) {
2569 fsg
->curlun
->sense_data
= SS_INVALID_COMMAND
;
2574 up_read(&fsg
->filesem
);
2576 if (reply
== -EINTR
|| signal_pending(current
))
2579 /* Set up the single reply buffer for finish_reply() */
2580 if (reply
== -EINVAL
)
2581 reply
= 0; // Error reply length
2582 if (reply
>= 0 && fsg
->data_dir
== DATA_DIR_TO_HOST
) {
2583 reply
= min((u32
) reply
, fsg
->data_size_from_cmnd
);
2584 bh
->inreq
->length
= reply
;
2585 bh
->state
= BUF_STATE_FULL
;
2586 fsg
->residue
-= reply
;
2587 } // Otherwise it's already set
2593 /*-------------------------------------------------------------------------*/
2595 static int received_cbw(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
2597 struct usb_request
*req
= bh
->outreq
;
2598 struct bulk_cb_wrap
*cbw
= req
->buf
;
2600 /* Was this a real packet? Should it be ignored? */
2601 if (req
->status
|| test_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
))
2604 /* Is the CBW valid? */
2605 if (req
->actual
!= US_BULK_CB_WRAP_LEN
||
2606 cbw
->Signature
!= cpu_to_le32(
2608 DBG(fsg
, "invalid CBW: len %u sig 0x%x\n",
2610 le32_to_cpu(cbw
->Signature
));
2612 /* The Bulk-only spec says we MUST stall the IN endpoint
2613 * (6.6.1), so it's unavoidable. It also says we must
2614 * retain this state until the next reset, but there's
2615 * no way to tell the controller driver it should ignore
2616 * Clear-Feature(HALT) requests.
2618 * We aren't required to halt the OUT endpoint; instead
2619 * we can simply accept and discard any data received
2620 * until the next reset. */
2621 wedge_bulk_in_endpoint(fsg
);
2622 set_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
);
2626 /* Is the CBW meaningful? */
2627 if (cbw
->Lun
>= FSG_MAX_LUNS
|| cbw
->Flags
& ~US_BULK_FLAG_IN
||
2628 cbw
->Length
<= 0 || cbw
->Length
> MAX_COMMAND_SIZE
) {
2629 DBG(fsg
, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2631 cbw
->Lun
, cbw
->Flags
, cbw
->Length
);
2633 /* We can do anything we want here, so let's stall the
2634 * bulk pipes if we are allowed to. */
2635 if (mod_data
.can_stall
) {
2636 fsg_set_halt(fsg
, fsg
->bulk_out
);
2637 halt_bulk_in_endpoint(fsg
);
2642 /* Save the command for later */
2643 fsg
->cmnd_size
= cbw
->Length
;
2644 memcpy(fsg
->cmnd
, cbw
->CDB
, fsg
->cmnd_size
);
2645 if (cbw
->Flags
& US_BULK_FLAG_IN
)
2646 fsg
->data_dir
= DATA_DIR_TO_HOST
;
2648 fsg
->data_dir
= DATA_DIR_FROM_HOST
;
2649 fsg
->data_size
= le32_to_cpu(cbw
->DataTransferLength
);
2650 if (fsg
->data_size
== 0)
2651 fsg
->data_dir
= DATA_DIR_NONE
;
2652 fsg
->lun
= cbw
->Lun
;
2653 fsg
->tag
= cbw
->Tag
;
2658 static int get_next_command(struct fsg_dev
*fsg
)
2660 struct fsg_buffhd
*bh
;
2663 if (transport_is_bbb()) {
2665 /* Wait for the next buffer to become available */
2666 bh
= fsg
->next_buffhd_to_fill
;
2667 while (bh
->state
!= BUF_STATE_EMPTY
) {
2668 rc
= sleep_thread(fsg
);
2673 /* Queue a request to read a Bulk-only CBW */
2674 set_bulk_out_req_length(fsg
, bh
, US_BULK_CB_WRAP_LEN
);
2675 start_transfer(fsg
, fsg
->bulk_out
, bh
->outreq
,
2676 &bh
->outreq_busy
, &bh
->state
);
2678 /* We will drain the buffer in software, which means we
2679 * can reuse it for the next filling. No need to advance
2680 * next_buffhd_to_fill. */
2682 /* Wait for the CBW to arrive */
2683 while (bh
->state
!= BUF_STATE_FULL
) {
2684 rc
= sleep_thread(fsg
);
2689 rc
= received_cbw(fsg
, bh
);
2690 bh
->state
= BUF_STATE_EMPTY
;
2692 } else { // USB_PR_CB or USB_PR_CBI
2694 /* Wait for the next command to arrive */
2695 while (fsg
->cbbuf_cmnd_size
== 0) {
2696 rc
= sleep_thread(fsg
);
2701 /* Is the previous status interrupt request still busy?
2702 * The host is allowed to skip reading the status,
2703 * so we must cancel it. */
2704 if (fsg
->intreq_busy
)
2705 usb_ep_dequeue(fsg
->intr_in
, fsg
->intreq
);
2707 /* Copy the command and mark the buffer empty */
2708 fsg
->data_dir
= DATA_DIR_UNKNOWN
;
2709 spin_lock_irq(&fsg
->lock
);
2710 fsg
->cmnd_size
= fsg
->cbbuf_cmnd_size
;
2711 memcpy(fsg
->cmnd
, fsg
->cbbuf_cmnd
, fsg
->cmnd_size
);
2712 fsg
->cbbuf_cmnd_size
= 0;
2713 spin_unlock_irq(&fsg
->lock
);
2715 /* Use LUN from the command */
2716 fsg
->lun
= fsg
->cmnd
[1] >> 5;
2719 /* Update current lun */
2720 if (fsg
->lun
>= 0 && fsg
->lun
< fsg
->nluns
)
2721 fsg
->curlun
= &fsg
->luns
[fsg
->lun
];
2729 /*-------------------------------------------------------------------------*/
2731 static int enable_endpoint(struct fsg_dev
*fsg
, struct usb_ep
*ep
,
2732 const struct usb_endpoint_descriptor
*d
)
2736 ep
->driver_data
= fsg
;
2738 rc
= usb_ep_enable(ep
);
2740 ERROR(fsg
, "can't enable %s, result %d\n", ep
->name
, rc
);
2744 static int alloc_request(struct fsg_dev
*fsg
, struct usb_ep
*ep
,
2745 struct usb_request
**preq
)
2747 *preq
= usb_ep_alloc_request(ep
, GFP_ATOMIC
);
2750 ERROR(fsg
, "can't allocate request for %s\n", ep
->name
);
2755 * Reset interface setting and re-init endpoint state (toggle etc).
2756 * Call with altsetting < 0 to disable the interface. The only other
2757 * available altsetting is 0, which enables the interface.
2759 static int do_set_interface(struct fsg_dev
*fsg
, int altsetting
)
2763 const struct usb_endpoint_descriptor
*d
;
2766 DBG(fsg
, "reset interface\n");
2769 /* Deallocate the requests */
2770 for (i
= 0; i
< fsg_num_buffers
; ++i
) {
2771 struct fsg_buffhd
*bh
= &fsg
->buffhds
[i
];
2774 usb_ep_free_request(fsg
->bulk_in
, bh
->inreq
);
2778 usb_ep_free_request(fsg
->bulk_out
, bh
->outreq
);
2783 usb_ep_free_request(fsg
->intr_in
, fsg
->intreq
);
2787 /* Disable the endpoints */
2788 if (fsg
->bulk_in_enabled
) {
2789 usb_ep_disable(fsg
->bulk_in
);
2790 fsg
->bulk_in_enabled
= 0;
2792 if (fsg
->bulk_out_enabled
) {
2793 usb_ep_disable(fsg
->bulk_out
);
2794 fsg
->bulk_out_enabled
= 0;
2796 if (fsg
->intr_in_enabled
) {
2797 usb_ep_disable(fsg
->intr_in
);
2798 fsg
->intr_in_enabled
= 0;
2802 if (altsetting
< 0 || rc
!= 0)
2805 DBG(fsg
, "set interface %d\n", altsetting
);
2807 /* Enable the endpoints */
2808 d
= fsg_ep_desc(fsg
->gadget
,
2809 &fsg_fs_bulk_in_desc
, &fsg_hs_bulk_in_desc
,
2810 &fsg_ss_bulk_in_desc
);
2811 if ((rc
= enable_endpoint(fsg
, fsg
->bulk_in
, d
)) != 0)
2813 fsg
->bulk_in_enabled
= 1;
2815 d
= fsg_ep_desc(fsg
->gadget
,
2816 &fsg_fs_bulk_out_desc
, &fsg_hs_bulk_out_desc
,
2817 &fsg_ss_bulk_out_desc
);
2818 if ((rc
= enable_endpoint(fsg
, fsg
->bulk_out
, d
)) != 0)
2820 fsg
->bulk_out_enabled
= 1;
2821 fsg
->bulk_out_maxpacket
= usb_endpoint_maxp(d
);
2822 clear_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
);
2824 if (transport_is_cbi()) {
2825 d
= fsg_ep_desc(fsg
->gadget
,
2826 &fsg_fs_intr_in_desc
, &fsg_hs_intr_in_desc
,
2827 &fsg_ss_intr_in_desc
);
2828 if ((rc
= enable_endpoint(fsg
, fsg
->intr_in
, d
)) != 0)
2830 fsg
->intr_in_enabled
= 1;
2833 /* Allocate the requests */
2834 for (i
= 0; i
< fsg_num_buffers
; ++i
) {
2835 struct fsg_buffhd
*bh
= &fsg
->buffhds
[i
];
2837 if ((rc
= alloc_request(fsg
, fsg
->bulk_in
, &bh
->inreq
)) != 0)
2839 if ((rc
= alloc_request(fsg
, fsg
->bulk_out
, &bh
->outreq
)) != 0)
2841 bh
->inreq
->buf
= bh
->outreq
->buf
= bh
->buf
;
2842 bh
->inreq
->context
= bh
->outreq
->context
= bh
;
2843 bh
->inreq
->complete
= bulk_in_complete
;
2844 bh
->outreq
->complete
= bulk_out_complete
;
2846 if (transport_is_cbi()) {
2847 if ((rc
= alloc_request(fsg
, fsg
->intr_in
, &fsg
->intreq
)) != 0)
2849 fsg
->intreq
->complete
= intr_in_complete
;
2853 for (i
= 0; i
< fsg
->nluns
; ++i
)
2854 fsg
->luns
[i
].unit_attention_data
= SS_RESET_OCCURRED
;
2860 * Change our operational configuration. This code must agree with the code
2861 * that returns config descriptors, and with interface altsetting code.
2863 * It's also responsible for power management interactions. Some
2864 * configurations might not work with our current power sources.
2865 * For now we just assume the gadget is always self-powered.
2867 static int do_set_config(struct fsg_dev
*fsg
, u8 new_config
)
2871 /* Disable the single interface */
2872 if (fsg
->config
!= 0) {
2873 DBG(fsg
, "reset config\n");
2875 rc
= do_set_interface(fsg
, -1);
2878 /* Enable the interface */
2879 if (new_config
!= 0) {
2880 fsg
->config
= new_config
;
2881 if ((rc
= do_set_interface(fsg
, 0)) != 0)
2882 fsg
->config
= 0; // Reset on errors
2884 INFO(fsg
, "%s config #%d\n",
2885 usb_speed_string(fsg
->gadget
->speed
),
2892 /*-------------------------------------------------------------------------*/
2894 static void handle_exception(struct fsg_dev
*fsg
)
2900 struct fsg_buffhd
*bh
;
2901 enum fsg_state old_state
;
2903 struct fsg_lun
*curlun
;
2904 unsigned int exception_req_tag
;
2907 /* Clear the existing signals. Anything but SIGUSR1 is converted
2908 * into a high-priority EXIT exception. */
2910 sig
= dequeue_signal_lock(current
, ¤t
->blocked
, &info
);
2913 if (sig
!= SIGUSR1
) {
2914 if (fsg
->state
< FSG_STATE_EXIT
)
2915 DBG(fsg
, "Main thread exiting on signal\n");
2916 raise_exception(fsg
, FSG_STATE_EXIT
);
2920 /* Cancel all the pending transfers */
2921 if (fsg
->intreq_busy
)
2922 usb_ep_dequeue(fsg
->intr_in
, fsg
->intreq
);
2923 for (i
= 0; i
< fsg_num_buffers
; ++i
) {
2924 bh
= &fsg
->buffhds
[i
];
2926 usb_ep_dequeue(fsg
->bulk_in
, bh
->inreq
);
2927 if (bh
->outreq_busy
)
2928 usb_ep_dequeue(fsg
->bulk_out
, bh
->outreq
);
2931 /* Wait until everything is idle */
2933 num_active
= fsg
->intreq_busy
;
2934 for (i
= 0; i
< fsg_num_buffers
; ++i
) {
2935 bh
= &fsg
->buffhds
[i
];
2936 num_active
+= bh
->inreq_busy
+ bh
->outreq_busy
;
2938 if (num_active
== 0)
2940 if (sleep_thread(fsg
))
2944 /* Clear out the controller's fifos */
2945 if (fsg
->bulk_in_enabled
)
2946 usb_ep_fifo_flush(fsg
->bulk_in
);
2947 if (fsg
->bulk_out_enabled
)
2948 usb_ep_fifo_flush(fsg
->bulk_out
);
2949 if (fsg
->intr_in_enabled
)
2950 usb_ep_fifo_flush(fsg
->intr_in
);
2952 /* Reset the I/O buffer states and pointers, the SCSI
2953 * state, and the exception. Then invoke the handler. */
2954 spin_lock_irq(&fsg
->lock
);
2956 for (i
= 0; i
< fsg_num_buffers
; ++i
) {
2957 bh
= &fsg
->buffhds
[i
];
2958 bh
->state
= BUF_STATE_EMPTY
;
2960 fsg
->next_buffhd_to_fill
= fsg
->next_buffhd_to_drain
=
2963 exception_req_tag
= fsg
->exception_req_tag
;
2964 new_config
= fsg
->new_config
;
2965 old_state
= fsg
->state
;
2967 if (old_state
== FSG_STATE_ABORT_BULK_OUT
)
2968 fsg
->state
= FSG_STATE_STATUS_PHASE
;
2970 for (i
= 0; i
< fsg
->nluns
; ++i
) {
2971 curlun
= &fsg
->luns
[i
];
2972 curlun
->prevent_medium_removal
= 0;
2973 curlun
->sense_data
= curlun
->unit_attention_data
=
2975 curlun
->sense_data_info
= 0;
2976 curlun
->info_valid
= 0;
2978 fsg
->state
= FSG_STATE_IDLE
;
2980 spin_unlock_irq(&fsg
->lock
);
2982 /* Carry out any extra actions required for the exception */
2983 switch (old_state
) {
2987 case FSG_STATE_ABORT_BULK_OUT
:
2989 spin_lock_irq(&fsg
->lock
);
2990 if (fsg
->state
== FSG_STATE_STATUS_PHASE
)
2991 fsg
->state
= FSG_STATE_IDLE
;
2992 spin_unlock_irq(&fsg
->lock
);
2995 case FSG_STATE_RESET
:
2996 /* In case we were forced against our will to halt a
2997 * bulk endpoint, clear the halt now. (The SuperH UDC
2998 * requires this.) */
2999 if (test_and_clear_bit(IGNORE_BULK_OUT
, &fsg
->atomic_bitflags
))
3000 usb_ep_clear_halt(fsg
->bulk_in
);
3002 if (transport_is_bbb()) {
3003 if (fsg
->ep0_req_tag
== exception_req_tag
)
3004 ep0_queue(fsg
); // Complete the status stage
3006 } else if (transport_is_cbi())
3007 send_status(fsg
); // Status by interrupt pipe
3009 /* Technically this should go here, but it would only be
3010 * a waste of time. Ditto for the INTERFACE_CHANGE and
3011 * CONFIG_CHANGE cases. */
3012 // for (i = 0; i < fsg->nluns; ++i)
3013 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3016 case FSG_STATE_INTERFACE_CHANGE
:
3017 rc
= do_set_interface(fsg
, 0);
3018 if (fsg
->ep0_req_tag
!= exception_req_tag
)
3020 if (rc
!= 0) // STALL on errors
3021 fsg_set_halt(fsg
, fsg
->ep0
);
3022 else // Complete the status stage
3026 case FSG_STATE_CONFIG_CHANGE
:
3027 rc
= do_set_config(fsg
, new_config
);
3028 if (fsg
->ep0_req_tag
!= exception_req_tag
)
3030 if (rc
!= 0) // STALL on errors
3031 fsg_set_halt(fsg
, fsg
->ep0
);
3032 else // Complete the status stage
3036 case FSG_STATE_DISCONNECT
:
3037 for (i
= 0; i
< fsg
->nluns
; ++i
)
3038 fsg_lun_fsync_sub(fsg
->luns
+ i
);
3039 do_set_config(fsg
, 0); // Unconfigured state
3042 case FSG_STATE_EXIT
:
3043 case FSG_STATE_TERMINATED
:
3044 do_set_config(fsg
, 0); // Free resources
3045 spin_lock_irq(&fsg
->lock
);
3046 fsg
->state
= FSG_STATE_TERMINATED
; // Stop the thread
3047 spin_unlock_irq(&fsg
->lock
);
3053 /*-------------------------------------------------------------------------*/
3055 static int fsg_main_thread(void *fsg_
)
3057 struct fsg_dev
*fsg
= fsg_
;
3059 /* Allow the thread to be killed by a signal, but set the signal mask
3060 * to block everything but INT, TERM, KILL, and USR1. */
3061 allow_signal(SIGINT
);
3062 allow_signal(SIGTERM
);
3063 allow_signal(SIGKILL
);
3064 allow_signal(SIGUSR1
);
3066 /* Allow the thread to be frozen */
3069 /* Arrange for userspace references to be interpreted as kernel
3070 * pointers. That way we can pass a kernel pointer to a routine
3071 * that expects a __user pointer and it will work okay. */
3075 while (fsg
->state
!= FSG_STATE_TERMINATED
) {
3076 if (exception_in_progress(fsg
) || signal_pending(current
)) {
3077 handle_exception(fsg
);
3081 if (!fsg
->running
) {
3086 if (get_next_command(fsg
))
3089 spin_lock_irq(&fsg
->lock
);
3090 if (!exception_in_progress(fsg
))
3091 fsg
->state
= FSG_STATE_DATA_PHASE
;
3092 spin_unlock_irq(&fsg
->lock
);
3094 if (do_scsi_command(fsg
) || finish_reply(fsg
))
3097 spin_lock_irq(&fsg
->lock
);
3098 if (!exception_in_progress(fsg
))
3099 fsg
->state
= FSG_STATE_STATUS_PHASE
;
3100 spin_unlock_irq(&fsg
->lock
);
3102 if (send_status(fsg
))
3105 spin_lock_irq(&fsg
->lock
);
3106 if (!exception_in_progress(fsg
))
3107 fsg
->state
= FSG_STATE_IDLE
;
3108 spin_unlock_irq(&fsg
->lock
);
3111 spin_lock_irq(&fsg
->lock
);
3112 fsg
->thread_task
= NULL
;
3113 spin_unlock_irq(&fsg
->lock
);
3115 /* If we are exiting because of a signal, unregister the
3117 if (test_and_clear_bit(REGISTERED
, &fsg
->atomic_bitflags
))
3118 usb_gadget_unregister_driver(&fsg_driver
);
3120 /* Let the unbind and cleanup routines know the thread has exited */
3121 complete_and_exit(&fsg
->thread_notifier
, 0);
3125 /*-------------------------------------------------------------------------*/
3128 /* The write permissions and store_xxx pointers are set in fsg_bind() */
3129 static DEVICE_ATTR(ro
, 0444, fsg_show_ro
, NULL
);
3130 static DEVICE_ATTR(nofua
, 0644, fsg_show_nofua
, NULL
);
3131 static DEVICE_ATTR(file
, 0444, fsg_show_file
, NULL
);
3134 /*-------------------------------------------------------------------------*/
3136 static void fsg_release(struct kref
*ref
)
3138 struct fsg_dev
*fsg
= container_of(ref
, struct fsg_dev
, ref
);
3144 static void lun_release(struct device
*dev
)
3146 struct rw_semaphore
*filesem
= dev_get_drvdata(dev
);
3147 struct fsg_dev
*fsg
=
3148 container_of(filesem
, struct fsg_dev
, filesem
);
3150 kref_put(&fsg
->ref
, fsg_release
);
3153 static void /* __init_or_exit */ fsg_unbind(struct usb_gadget
*gadget
)
3155 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
3157 struct fsg_lun
*curlun
;
3158 struct usb_request
*req
= fsg
->ep0req
;
3160 DBG(fsg
, "unbind\n");
3161 clear_bit(REGISTERED
, &fsg
->atomic_bitflags
);
3163 /* If the thread isn't already dead, tell it to exit now */
3164 if (fsg
->state
!= FSG_STATE_TERMINATED
) {
3165 raise_exception(fsg
, FSG_STATE_EXIT
);
3166 wait_for_completion(&fsg
->thread_notifier
);
3168 /* The cleanup routine waits for this completion also */
3169 complete(&fsg
->thread_notifier
);
3172 /* Unregister the sysfs attribute files and the LUNs */
3173 for (i
= 0; i
< fsg
->nluns
; ++i
) {
3174 curlun
= &fsg
->luns
[i
];
3175 if (curlun
->registered
) {
3176 device_remove_file(&curlun
->dev
, &dev_attr_nofua
);
3177 device_remove_file(&curlun
->dev
, &dev_attr_ro
);
3178 device_remove_file(&curlun
->dev
, &dev_attr_file
);
3179 fsg_lun_close(curlun
);
3180 device_unregister(&curlun
->dev
);
3181 curlun
->registered
= 0;
3185 /* Free the data buffers */
3186 for (i
= 0; i
< fsg_num_buffers
; ++i
)
3187 kfree(fsg
->buffhds
[i
].buf
);
3189 /* Free the request and buffer for endpoint 0 */
3192 usb_ep_free_request(fsg
->ep0
, req
);
3195 set_gadget_data(gadget
, NULL
);
3199 static int __init
check_parameters(struct fsg_dev
*fsg
)
3203 /* Store the default values */
3204 mod_data
.transport_type
= USB_PR_BULK
;
3205 mod_data
.transport_name
= "Bulk-only";
3206 mod_data
.protocol_type
= USB_SC_SCSI
;
3207 mod_data
.protocol_name
= "Transparent SCSI";
3209 /* Some peripheral controllers are known not to be able to
3210 * halt bulk endpoints correctly. If one of them is present,
3213 if (gadget_is_at91(fsg
->gadget
))
3214 mod_data
.can_stall
= 0;
3216 if (mod_data
.release
== 0xffff)
3217 mod_data
.release
= get_default_bcdDevice();
3219 prot
= simple_strtol(mod_data
.protocol_parm
, NULL
, 0);
3221 #ifdef CONFIG_USB_FILE_STORAGE_TEST
3222 if (strnicmp(mod_data
.transport_parm
, "BBB", 10) == 0) {
3223 ; // Use default setting
3224 } else if (strnicmp(mod_data
.transport_parm
, "CB", 10) == 0) {
3225 mod_data
.transport_type
= USB_PR_CB
;
3226 mod_data
.transport_name
= "Control-Bulk";
3227 } else if (strnicmp(mod_data
.transport_parm
, "CBI", 10) == 0) {
3228 mod_data
.transport_type
= USB_PR_CBI
;
3229 mod_data
.transport_name
= "Control-Bulk-Interrupt";
3231 ERROR(fsg
, "invalid transport: %s\n", mod_data
.transport_parm
);
3235 if (strnicmp(mod_data
.protocol_parm
, "SCSI", 10) == 0 ||
3236 prot
== USB_SC_SCSI
) {
3237 ; // Use default setting
3238 } else if (strnicmp(mod_data
.protocol_parm
, "RBC", 10) == 0 ||
3239 prot
== USB_SC_RBC
) {
3240 mod_data
.protocol_type
= USB_SC_RBC
;
3241 mod_data
.protocol_name
= "RBC";
3242 } else if (strnicmp(mod_data
.protocol_parm
, "8020", 4) == 0 ||
3243 strnicmp(mod_data
.protocol_parm
, "ATAPI", 10) == 0 ||
3244 prot
== USB_SC_8020
) {
3245 mod_data
.protocol_type
= USB_SC_8020
;
3246 mod_data
.protocol_name
= "8020i (ATAPI)";
3247 } else if (strnicmp(mod_data
.protocol_parm
, "QIC", 3) == 0 ||
3248 prot
== USB_SC_QIC
) {
3249 mod_data
.protocol_type
= USB_SC_QIC
;
3250 mod_data
.protocol_name
= "QIC-157";
3251 } else if (strnicmp(mod_data
.protocol_parm
, "UFI", 10) == 0 ||
3252 prot
== USB_SC_UFI
) {
3253 mod_data
.protocol_type
= USB_SC_UFI
;
3254 mod_data
.protocol_name
= "UFI";
3255 } else if (strnicmp(mod_data
.protocol_parm
, "8070", 4) == 0 ||
3256 prot
== USB_SC_8070
) {
3257 mod_data
.protocol_type
= USB_SC_8070
;
3258 mod_data
.protocol_name
= "8070i";
3260 ERROR(fsg
, "invalid protocol: %s\n", mod_data
.protocol_parm
);
3264 mod_data
.buflen
&= PAGE_CACHE_MASK
;
3265 if (mod_data
.buflen
<= 0) {
3266 ERROR(fsg
, "invalid buflen\n");
3270 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
3272 /* Serial string handling.
3273 * On a real device, the serial string would be loaded
3274 * from permanent storage. */
3275 if (mod_data
.serial
) {
3280 * The CB[I] specification limits the serial string to
3281 * 12 uppercase hexadecimal characters.
3282 * BBB need at least 12 uppercase hexadecimal characters,
3283 * with a maximum of 126. */
3284 for (ch
= mod_data
.serial
; *ch
; ++ch
) {
3286 if ((*ch
< '0' || *ch
> '9') &&
3287 (*ch
< 'A' || *ch
> 'F')) { /* not uppercase hex */
3289 "Invalid serial string character: %c\n",
3295 (mod_data
.transport_type
== USB_PR_BULK
&& len
< 12) ||
3296 (mod_data
.transport_type
!= USB_PR_BULK
&& len
> 12)) {
3297 WARNING(fsg
, "Invalid serial string length!\n");
3300 fsg_strings
[FSG_STRING_SERIAL
- 1].s
= mod_data
.serial
;
3302 WARNING(fsg
, "No serial-number string provided!\n");
3304 device_desc
.iSerialNumber
= 0;
3311 static int __init
fsg_bind(struct usb_gadget
*gadget
,
3312 struct usb_gadget_driver
*driver
)
3314 struct fsg_dev
*fsg
= the_fsg
;
3317 struct fsg_lun
*curlun
;
3319 struct usb_request
*req
;
3322 fsg
->gadget
= gadget
;
3323 set_gadget_data(gadget
, fsg
);
3324 fsg
->ep0
= gadget
->ep0
;
3325 fsg
->ep0
->driver_data
= fsg
;
3327 if ((rc
= check_parameters(fsg
)) != 0)
3330 if (mod_data
.removable
) { // Enable the store_xxx attributes
3331 dev_attr_file
.attr
.mode
= 0644;
3332 dev_attr_file
.store
= fsg_store_file
;
3333 if (!mod_data
.cdrom
) {
3334 dev_attr_ro
.attr
.mode
= 0644;
3335 dev_attr_ro
.store
= fsg_store_ro
;
3339 /* Only for removable media? */
3340 dev_attr_nofua
.attr
.mode
= 0644;
3341 dev_attr_nofua
.store
= fsg_store_nofua
;
3343 /* Find out how many LUNs there should be */
3346 i
= max(mod_data
.num_filenames
, 1u);
3347 if (i
> FSG_MAX_LUNS
) {
3348 ERROR(fsg
, "invalid number of LUNs: %d\n", i
);
3353 /* Create the LUNs, open their backing files, and register the
3354 * LUN devices in sysfs. */
3355 fsg
->luns
= kzalloc(i
* sizeof(struct fsg_lun
), GFP_KERNEL
);
3362 for (i
= 0; i
< fsg
->nluns
; ++i
) {
3363 curlun
= &fsg
->luns
[i
];
3364 curlun
->cdrom
= !!mod_data
.cdrom
;
3365 curlun
->ro
= mod_data
.cdrom
|| mod_data
.ro
[i
];
3366 curlun
->initially_ro
= curlun
->ro
;
3367 curlun
->removable
= mod_data
.removable
;
3368 curlun
->nofua
= mod_data
.nofua
[i
];
3369 curlun
->dev
.release
= lun_release
;
3370 curlun
->dev
.parent
= &gadget
->dev
;
3371 curlun
->dev
.driver
= &fsg_driver
.driver
;
3372 dev_set_drvdata(&curlun
->dev
, &fsg
->filesem
);
3373 dev_set_name(&curlun
->dev
,"%s-lun%d",
3374 dev_name(&gadget
->dev
), i
);
3376 kref_get(&fsg
->ref
);
3377 rc
= device_register(&curlun
->dev
);
3379 INFO(fsg
, "failed to register LUN%d: %d\n", i
, rc
);
3380 put_device(&curlun
->dev
);
3383 curlun
->registered
= 1;
3385 rc
= device_create_file(&curlun
->dev
, &dev_attr_ro
);
3388 rc
= device_create_file(&curlun
->dev
, &dev_attr_nofua
);
3391 rc
= device_create_file(&curlun
->dev
, &dev_attr_file
);
3395 if (mod_data
.file
[i
] && *mod_data
.file
[i
]) {
3396 rc
= fsg_lun_open(curlun
, mod_data
.file
[i
]);
3399 } else if (!mod_data
.removable
) {
3400 ERROR(fsg
, "no file given for LUN%d\n", i
);
3406 /* Find all the endpoints we will use */
3407 usb_ep_autoconfig_reset(gadget
);
3408 ep
= usb_ep_autoconfig(gadget
, &fsg_fs_bulk_in_desc
);
3411 ep
->driver_data
= fsg
; // claim the endpoint
3414 ep
= usb_ep_autoconfig(gadget
, &fsg_fs_bulk_out_desc
);
3417 ep
->driver_data
= fsg
; // claim the endpoint
3420 if (transport_is_cbi()) {
3421 ep
= usb_ep_autoconfig(gadget
, &fsg_fs_intr_in_desc
);
3424 ep
->driver_data
= fsg
; // claim the endpoint
3428 /* Fix up the descriptors */
3429 device_desc
.idVendor
= cpu_to_le16(mod_data
.vendor
);
3430 device_desc
.idProduct
= cpu_to_le16(mod_data
.product
);
3431 device_desc
.bcdDevice
= cpu_to_le16(mod_data
.release
);
3433 i
= (transport_is_cbi() ? 3 : 2); // Number of endpoints
3434 fsg_intf_desc
.bNumEndpoints
= i
;
3435 fsg_intf_desc
.bInterfaceSubClass
= mod_data
.protocol_type
;
3436 fsg_intf_desc
.bInterfaceProtocol
= mod_data
.transport_type
;
3437 fsg_fs_function
[i
+ FSG_FS_FUNCTION_PRE_EP_ENTRIES
] = NULL
;
3439 if (gadget_is_dualspeed(gadget
)) {
3440 fsg_hs_function
[i
+ FSG_HS_FUNCTION_PRE_EP_ENTRIES
] = NULL
;
3442 /* Assume endpoint addresses are the same for both speeds */
3443 fsg_hs_bulk_in_desc
.bEndpointAddress
=
3444 fsg_fs_bulk_in_desc
.bEndpointAddress
;
3445 fsg_hs_bulk_out_desc
.bEndpointAddress
=
3446 fsg_fs_bulk_out_desc
.bEndpointAddress
;
3447 fsg_hs_intr_in_desc
.bEndpointAddress
=
3448 fsg_fs_intr_in_desc
.bEndpointAddress
;
3451 if (gadget_is_superspeed(gadget
)) {
3454 fsg_ss_function
[i
+ FSG_SS_FUNCTION_PRE_EP_ENTRIES
] = NULL
;
3456 /* Calculate bMaxBurst, we know packet size is 1024 */
3457 max_burst
= min_t(unsigned, mod_data
.buflen
/ 1024, 15);
3459 /* Assume endpoint addresses are the same for both speeds */
3460 fsg_ss_bulk_in_desc
.bEndpointAddress
=
3461 fsg_fs_bulk_in_desc
.bEndpointAddress
;
3462 fsg_ss_bulk_in_comp_desc
.bMaxBurst
= max_burst
;
3464 fsg_ss_bulk_out_desc
.bEndpointAddress
=
3465 fsg_fs_bulk_out_desc
.bEndpointAddress
;
3466 fsg_ss_bulk_out_comp_desc
.bMaxBurst
= max_burst
;
3469 if (gadget_is_otg(gadget
))
3470 fsg_otg_desc
.bmAttributes
|= USB_OTG_HNP
;
3474 /* Allocate the request and buffer for endpoint 0 */
3475 fsg
->ep0req
= req
= usb_ep_alloc_request(fsg
->ep0
, GFP_KERNEL
);
3478 req
->buf
= kmalloc(EP0_BUFSIZE
, GFP_KERNEL
);
3481 req
->complete
= ep0_complete
;
3483 /* Allocate the data buffers */
3484 for (i
= 0; i
< fsg_num_buffers
; ++i
) {
3485 struct fsg_buffhd
*bh
= &fsg
->buffhds
[i
];
3487 /* Allocate for the bulk-in endpoint. We assume that
3488 * the buffer will also work with the bulk-out (and
3489 * interrupt-in) endpoint. */
3490 bh
->buf
= kmalloc(mod_data
.buflen
, GFP_KERNEL
);
3495 fsg
->buffhds
[fsg_num_buffers
- 1].next
= &fsg
->buffhds
[0];
3497 /* This should reflect the actual gadget power source */
3498 usb_gadget_set_selfpowered(gadget
);
3500 snprintf(fsg_string_manufacturer
, sizeof fsg_string_manufacturer
,
3502 init_utsname()->sysname
, init_utsname()->release
,
3505 fsg
->thread_task
= kthread_create(fsg_main_thread
, fsg
,
3506 "file-storage-gadget");
3507 if (IS_ERR(fsg
->thread_task
)) {
3508 rc
= PTR_ERR(fsg
->thread_task
);
3512 INFO(fsg
, DRIVER_DESC
", version: " DRIVER_VERSION
"\n");
3513 INFO(fsg
, "NOTE: This driver is deprecated. "
3514 "Consider using g_mass_storage instead.\n");
3515 INFO(fsg
, "Number of LUNs=%d\n", fsg
->nluns
);
3517 pathbuf
= kmalloc(PATH_MAX
, GFP_KERNEL
);
3518 for (i
= 0; i
< fsg
->nluns
; ++i
) {
3519 curlun
= &fsg
->luns
[i
];
3520 if (fsg_lun_is_open(curlun
)) {
3523 p
= d_path(&curlun
->filp
->f_path
,
3528 LINFO(curlun
, "ro=%d, nofua=%d, file: %s\n",
3529 curlun
->ro
, curlun
->nofua
, (p
? p
: "(error)"));
3534 DBG(fsg
, "transport=%s (x%02x)\n",
3535 mod_data
.transport_name
, mod_data
.transport_type
);
3536 DBG(fsg
, "protocol=%s (x%02x)\n",
3537 mod_data
.protocol_name
, mod_data
.protocol_type
);
3538 DBG(fsg
, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
3539 mod_data
.vendor
, mod_data
.product
, mod_data
.release
);
3540 DBG(fsg
, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
3541 mod_data
.removable
, mod_data
.can_stall
,
3542 mod_data
.cdrom
, mod_data
.buflen
);
3543 DBG(fsg
, "I/O thread pid: %d\n", task_pid_nr(fsg
->thread_task
));
3545 set_bit(REGISTERED
, &fsg
->atomic_bitflags
);
3547 /* Tell the thread to start working */
3548 wake_up_process(fsg
->thread_task
);
3552 ERROR(fsg
, "unable to autoconfigure all endpoints\n");
3556 fsg
->state
= FSG_STATE_TERMINATED
; // The thread is dead
3558 complete(&fsg
->thread_notifier
);
/*-------------------------------------------------------------------------*/
3565 static void fsg_suspend(struct usb_gadget
*gadget
)
3567 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
3569 DBG(fsg
, "suspend\n");
3570 set_bit(SUSPENDED
, &fsg
->atomic_bitflags
);
3573 static void fsg_resume(struct usb_gadget
*gadget
)
3575 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
3577 DBG(fsg
, "resume\n");
3578 clear_bit(SUSPENDED
, &fsg
->atomic_bitflags
);
/*-------------------------------------------------------------------------*/
3584 static __refdata
struct usb_gadget_driver fsg_driver
= {
3585 .max_speed
= USB_SPEED_SUPER
,
3586 .function
= (char *) fsg_string_product
,
3588 .unbind
= fsg_unbind
,
3589 .disconnect
= fsg_disconnect
,
3591 .suspend
= fsg_suspend
,
3592 .resume
= fsg_resume
,
3595 .name
= DRIVER_NAME
,
3596 .owner
= THIS_MODULE
,
3604 static int __init
fsg_alloc(void)
3606 struct fsg_dev
*fsg
;
3608 fsg
= kzalloc(sizeof *fsg
+
3609 fsg_num_buffers
* sizeof *(fsg
->buffhds
), GFP_KERNEL
);
3613 spin_lock_init(&fsg
->lock
);
3614 init_rwsem(&fsg
->filesem
);
3615 kref_init(&fsg
->ref
);
3616 init_completion(&fsg
->thread_notifier
);
3623 static int __init
fsg_init(void)
3626 struct fsg_dev
*fsg
;
3628 rc
= fsg_num_buffers_validate();
3632 if ((rc
= fsg_alloc()) != 0)
3635 rc
= usb_gadget_probe_driver(&fsg_driver
);
3637 kref_put(&fsg
->ref
, fsg_release
);
3640 module_init(fsg_init
);
3643 static void __exit
fsg_cleanup(void)
3645 struct fsg_dev
*fsg
= the_fsg
;
3647 /* Unregister the driver iff the thread hasn't already done so */
3648 if (test_and_clear_bit(REGISTERED
, &fsg
->atomic_bitflags
))
3649 usb_gadget_unregister_driver(&fsg_driver
);
3651 /* Wait for the thread to finish up */
3652 wait_for_completion(&fsg
->thread_notifier
);
3654 kref_put(&fsg
->ref
, fsg_release
);
3656 module_exit(fsg_cleanup
);