// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * f_mass_storage.c -- Mass Storage USB Composite Function
 *
 * Copyright (C) 2003-2008 Alan Stern
 * Copyright (C) 2009 Samsung Electronics
 * Author: Michal Nazarewicz <mina86@mina86.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The Mass Storage Function acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In
 * addition to providing an example of a genuinely useful composite
 * function for a USB device, it also illustrates a technique of
 * double-buffering for increased throughput.
 *
 * For more information about MSF and, in particular, its module
 * parameters and sysfs interface, read the
 * <Documentation/usb/mass-storage.rst> file.
 */

/*
 * MSF is configured by specifying a fsg_config structure.  It has the
 * following fields:
 *
 *	nluns		Number of LUNs the function has (anywhere from 1
 *				to FSG_MAX_LUNS).
 *	luns		An array of LUN configuration values.  This
 *				should be filled in for each LUN that the
 *				function will include (i.e. for "nluns"
 *				LUNs).  Each element of the array has
 *				the following fields:
 *	->filename	The path to the backing file for the LUN.
 *				Required if the LUN is not marked as
 *				removable.
 *	->ro		Flag specifying that access to the LUN shall be
 *				read-only.  This is implied if CD-ROM
 *				emulation is enabled, as well as when
 *				"filename" could not be opened in R/W mode.
 *	->removable	Flag specifying that the LUN shall be indicated as
 *				being removable.
 *	->cdrom		Flag specifying that the LUN shall be reported as
 *				being a CD-ROM.
 *	->nofua		Flag specifying that the FUA flag in SCSI WRITE(10,12)
 *				commands for this LUN shall be ignored.
 *
 *	vendor_name
 *	product_name
 *	release		Information used in the reply to the INQUIRY
 *				request.  To use the defaults, set these to
 *				NULL, NULL and 0xffff respectively.  The first
 *				field should be at most 8 and the second at
 *				most 16 characters.
 *
 *	can_stall	Set to permit the function to halt its bulk
 *				endpoints.  Disabled on some USB devices
 *				known not to work correctly.  You should
 *				set it to true.
 *
 * If "removable" is not set for a LUN then a backing file must be
 * specified.  If it is set, then a NULL filename means the LUN's medium
 * is not loaded (an empty string as "filename" in the fsg_config
 * structure causes an error).  The CD-ROM emulation includes a single
 * data track and no audio tracks; hence there need be only one
 * backing file per LUN.
 *
 * (A short example of filling in fsg_config follows this comment block.)
 *
 * This function is heavily based on "File-backed Storage Gadget" by
 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
 * Brownell.  The driver's SCSI command interface was based on the
 * "Information technology - Small Computer System Interface - 2"
 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
 * was based on the "Universal Serial Bus Mass Storage Class UFI
 * Command Specification" document, Revision 1.0, December 14, 1998,
 * available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */

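/*
 * Illustrative sketch only, not part of the driver: a minimal
 * fsg_config filled in according to the description above, assuming a
 * single removable, writable LUN backed by a plain file.  The exact
 * field layout is defined in f_mass_storage.h; real users normally
 * populate the structure via a helper such as fsg_config_from_params()
 * or through configfs rather than open-coding it like this, and the
 * file path below is purely hypothetical.
 *
 *	struct fsg_config cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename	= "/srv/backing.img",
 *			.ro		= false,
 *			.removable	= true,
 *			.cdrom		= false,
 *			.nofua		= false,
 *		},
 *		.can_stall = true,
 *	};
 *
 * vendor_name, product_name and release are left unset here, which
 * (per the description above) selects the default INQUIRY reply.
 */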
/*
 * Driver Design
 *
 * The MSF is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can
 * also exit when it receives a signal, and there's no point leaving
 * the gadget running when the thread is dead.  As of this moment, MSF
 * provides no way to deregister the gadget when the thread dies -- maybe
 * a callback function is needed.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions are always given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine); see
 * the sketch after this comment block.
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */

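/*
 * Illustrative sketch only, not part of the driver: the life of one
 * buffer head during a host READ, using the states mentioned above.
 * The state enum is declared next to struct fsg_buffhd; the "BUSY"
 * phases appear in the code below as BUF_STATE_SENDING and
 * BUF_STATE_RECEIVING.
 *
 *	EMPTY	the main thread may fill the buffer from the backing file
 *	FULL	data is ready to be queued on the bulk-in endpoint
 *	SENDING	the bulk-in request is in flight ("BUSY")
 *	EMPTY	bulk_in_complete() marks the buffer free again
 *
 * do_read() below advances common->next_buffhd_to_fill around the ring,
 * so file I/O into one buffer overlaps USB I/O out of the other.
 */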
198/* #define VERBOSE_DEBUG */
199/* #define DUMP_MSGS */
200
201#include <linux/blkdev.h>
202#include <linux/completion.h>
203#include <linux/dcache.h>
204#include <linux/delay.h>
205#include <linux/device.h>
206#include <linux/fcntl.h>
207#include <linux/file.h>
208#include <linux/fs.h>
209#include <linux/kthread.h>
210#include <linux/sched/signal.h>
211#include <linux/limits.h>
212#include <linux/rwsem.h>
213#include <linux/slab.h>
214#include <linux/spinlock.h>
215#include <linux/string.h>
216#include <linux/freezer.h>
217#include <linux/module.h>
218#include <linux/uaccess.h>
219#include <asm/unaligned.h>
220
221#include <linux/usb/ch9.h>
222#include <linux/usb/gadget.h>
223#include <linux/usb/composite.h>
224
225#include <linux/nospec.h>
226
227#include "configfs.h"
228
229
230/*------------------------------------------------------------------------*/
231
232#define FSG_DRIVER_DESC "Mass Storage Function"
233#define FSG_DRIVER_VERSION "2009/09/11"
234
235static const char fsg_string_interface[] = "Mass Storage";
236
237#include "storage_common.h"
238#include "f_mass_storage.h"
239
240/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
241static struct usb_string fsg_strings[] = {
242 {FSG_STRING_INTERFACE, fsg_string_interface},
243 {}
244};
245
246static struct usb_gadget_strings fsg_stringtab = {
247 .language = 0x0409, /* en-us */
248 .strings = fsg_strings,
249};
250
251static struct usb_gadget_strings *fsg_strings_array[] = {
252 &fsg_stringtab,
253 NULL,
254};
255
256/*-------------------------------------------------------------------------*/
257
258struct fsg_dev;
259struct fsg_common;
260
261/* Data shared by all the FSG instances. */
262struct fsg_common {
263 struct usb_gadget *gadget;
264 struct usb_composite_dev *cdev;
265 struct fsg_dev *fsg;
266 wait_queue_head_t io_wait;
267 wait_queue_head_t fsg_wait;
268
269 /* filesem protects: backing files in use */
270 struct rw_semaphore filesem;
271
272 /* lock protects: state and thread_task */
273 spinlock_t lock;
274
275 struct usb_ep *ep0; /* Copy of gadget->ep0 */
276 struct usb_request *ep0req; /* Copy of cdev->req */
277 unsigned int ep0_req_tag;
278
279 struct fsg_buffhd *next_buffhd_to_fill;
280 struct fsg_buffhd *next_buffhd_to_drain;
281 struct fsg_buffhd *buffhds;
282 unsigned int fsg_num_buffers;
283
284 int cmnd_size;
285 u8 cmnd[MAX_COMMAND_SIZE];
286
287 unsigned int lun;
288 struct fsg_lun *luns[FSG_MAX_LUNS];
289 struct fsg_lun *curlun;
290
291 unsigned int bulk_out_maxpacket;
292 enum fsg_state state; /* For exception handling */
293 unsigned int exception_req_tag;
294 void *exception_arg;
295
296 enum data_direction data_dir;
297 u32 data_size;
298 u32 data_size_from_cmnd;
299 u32 tag;
300 u32 residue;
301 u32 usb_amount_left;
302
303 unsigned int can_stall:1;
304 unsigned int free_storage_on_release:1;
305 unsigned int phase_error:1;
306 unsigned int short_packet_received:1;
307 unsigned int bad_lun_okay:1;
308 unsigned int running:1;
309 unsigned int sysfs:1;
310
311 struct completion thread_notifier;
312 struct task_struct *thread_task;
313
314 /* Gadget's private data. */
315 void *private_data;
316
317 char inquiry_string[INQUIRY_STRING_LEN];
318};
319
320struct fsg_dev {
321 struct usb_function function;
322 struct usb_gadget *gadget; /* Copy of cdev->gadget */
323 struct fsg_common *common;
324
325 u16 interface_number;
326
327 unsigned int bulk_in_enabled:1;
328 unsigned int bulk_out_enabled:1;
329
330 unsigned long atomic_bitflags;
331#define IGNORE_BULK_OUT 0
332
333 struct usb_ep *bulk_in;
334 struct usb_ep *bulk_out;
335};
336
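/*
 * Check that the function is currently bound (common->fsg is non-NULL).
 * If it isn't, complain and return 0; callers use the fsg_is_set()
 * wrapper below so the warning records where the check failed.
 */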
337static inline int __fsg_is_set(struct fsg_common *common,
338 const char *func, unsigned line)
339{
340 if (common->fsg)
341 return 1;
342 ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
343 WARN_ON(1);
344 return 0;
345}
346
347#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
348
349static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
350{
351 return container_of(f, struct fsg_dev, function);
352}
353
354static int exception_in_progress(struct fsg_common *common)
355{
356 return common->state > FSG_STATE_NORMAL;
357}
358
359/* Make bulk-out requests be divisible by the maxpacket size */
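/*
 * For example (illustrative numbers): with a 512-byte bulk-out
 * maxpacket, a 13000-byte transfer is queued as 13312 bytes (26 full
 * packets); bulk_out_intended_length remembers the 13000 bytes the
 * caller actually wants.
 */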
360static void set_bulk_out_req_length(struct fsg_common *common,
361 struct fsg_buffhd *bh, unsigned int length)
362{
363 unsigned int rem;
364
365 bh->bulk_out_intended_length = length;
366 rem = length % common->bulk_out_maxpacket;
367 if (rem > 0)
368 length += common->bulk_out_maxpacket - rem;
369 bh->outreq->length = length;
370}
371
372
373/*-------------------------------------------------------------------------*/
374
375static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
376{
377 const char *name;
378
379 if (ep == fsg->bulk_in)
380 name = "bulk-in";
381 else if (ep == fsg->bulk_out)
382 name = "bulk-out";
383 else
384 name = ep->name;
385 DBG(fsg, "%s set halt\n", name);
386 return usb_ep_set_halt(ep);
387}
388
389
390/*-------------------------------------------------------------------------*/
391
392/* These routines may be called in process context or in_irq */
393
394static void __raise_exception(struct fsg_common *common, enum fsg_state new_state,
395 void *arg)
396{
397 unsigned long flags;
398
399 /*
400 * Do nothing if a higher-priority exception is already in progress.
401 * If a lower-or-equal priority exception is in progress, preempt it
402 * and notify the main thread by sending it a signal.
403 */
404 spin_lock_irqsave(&common->lock, flags);
405 if (common->state <= new_state) {
406 common->exception_req_tag = common->ep0_req_tag;
407 common->state = new_state;
408 common->exception_arg = arg;
409 if (common->thread_task)
410 send_sig_info(SIGUSR1, SEND_SIG_PRIV,
411 common->thread_task);
412 }
413 spin_unlock_irqrestore(&common->lock, flags);
414}
415
416static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
417{
418 __raise_exception(common, new_state, NULL);
419}
420
421/*-------------------------------------------------------------------------*/
422
423static int ep0_queue(struct fsg_common *common)
424{
425 int rc;
426
427 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
428 common->ep0->driver_data = common;
429 if (rc != 0 && rc != -ESHUTDOWN) {
430 /* We can't do much more than wait for a reset */
431 WARNING(common, "error in submission: %s --> %d\n",
432 common->ep0->name, rc);
433 }
434 return rc;
435}
436
437
438/*-------------------------------------------------------------------------*/
439
440/* Completion handlers. These always run in_irq. */
441
442static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
443{
444 struct fsg_common *common = ep->driver_data;
445 struct fsg_buffhd *bh = req->context;
446
447 if (req->status || req->actual != req->length)
448 DBG(common, "%s --> %d, %u/%u\n", __func__,
449 req->status, req->actual, req->length);
450 if (req->status == -ECONNRESET) /* Request was cancelled */
451 usb_ep_fifo_flush(ep);
452
453 /* Synchronize with the smp_load_acquire() in sleep_thread() */
454 smp_store_release(&bh->state, BUF_STATE_EMPTY);
455 wake_up(&common->io_wait);
456}
457
458static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
459{
460 struct fsg_common *common = ep->driver_data;
461 struct fsg_buffhd *bh = req->context;
462
463 dump_msg(common, "bulk-out", req->buf, req->actual);
464 if (req->status || req->actual != bh->bulk_out_intended_length)
465 DBG(common, "%s --> %d, %u/%u\n", __func__,
466 req->status, req->actual, bh->bulk_out_intended_length);
467 if (req->status == -ECONNRESET) /* Request was cancelled */
468 usb_ep_fifo_flush(ep);
469
470 /* Synchronize with the smp_load_acquire() in sleep_thread() */
471 smp_store_release(&bh->state, BUF_STATE_FULL);
472 wake_up(&common->io_wait);
473}
474
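/*
 * Return the index of the highest configured LUN (the value reported
 * for the Bulk-Only "Get Max LUN" request), or -1 if no LUN is set up.
 */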
475static int _fsg_common_get_max_lun(struct fsg_common *common)
476{
477 int i = ARRAY_SIZE(common->luns) - 1;
478
479 while (i >= 0 && !common->luns[i])
480 --i;
481
482 return i;
483}
484
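/*
 * Handle the Bulk-Only class-specific control requests: Reset and
 * Get Max LUN.  Standard control requests are handled by the composite
 * framework, not here.
 */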
485static int fsg_setup(struct usb_function *f,
486 const struct usb_ctrlrequest *ctrl)
487{
488 struct fsg_dev *fsg = fsg_from_func(f);
489 struct usb_request *req = fsg->common->ep0req;
490 u16 w_index = le16_to_cpu(ctrl->wIndex);
491 u16 w_value = le16_to_cpu(ctrl->wValue);
492 u16 w_length = le16_to_cpu(ctrl->wLength);
493
494 if (!fsg_is_set(fsg->common))
495 return -EOPNOTSUPP;
496
497 ++fsg->common->ep0_req_tag; /* Record arrival of a new request */
498 req->context = NULL;
499 req->length = 0;
500 dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
501
502 switch (ctrl->bRequest) {
503
504 case US_BULK_RESET_REQUEST:
505 if (ctrl->bRequestType !=
506 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
507 break;
508 if (w_index != fsg->interface_number || w_value != 0 ||
509 w_length != 0)
510 return -EDOM;
511
512 /*
513 * Raise an exception to stop the current operation
514 * and reinitialize our state.
515 */
516 DBG(fsg, "bulk reset request\n");
517 raise_exception(fsg->common, FSG_STATE_PROTOCOL_RESET);
518 return USB_GADGET_DELAYED_STATUS;
519
520 case US_BULK_GET_MAX_LUN:
521 if (ctrl->bRequestType !=
522 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
523 break;
524 if (w_index != fsg->interface_number || w_value != 0 ||
525 w_length != 1)
526 return -EDOM;
527 VDBG(fsg, "get max LUN\n");
528 *(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
529
530 /* Respond with data/status */
531 req->length = min((u16)1, w_length);
532 return ep0_queue(fsg->common);
533 }
534
535 VDBG(fsg,
536 "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
537 ctrl->bRequestType, ctrl->bRequest,
538 le16_to_cpu(ctrl->wValue), w_index, w_length);
539 return -EOPNOTSUPP;
540}
541
542
543/*-------------------------------------------------------------------------*/
544
545/* All the following routines run in process context */
546
547/* Use this for bulk or interrupt transfers, not ep0 */
548static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
549 struct usb_request *req)
550{
551 int rc;
552
553 if (ep == fsg->bulk_in)
554 dump_msg(fsg, "bulk-in", req->buf, req->length);
555
556 rc = usb_ep_queue(ep, req, GFP_KERNEL);
557 if (rc) {
558
559 /* We can't do much more than wait for a reset */
560 req->status = rc;
561
562 /*
563 * Note: currently the net2280 driver fails zero-length
564 * submissions if DMA is enabled.
565 */
566 if (rc != -ESHUTDOWN &&
567 !(rc == -EOPNOTSUPP && req->length == 0))
568 WARNING(fsg, "error in submission: %s --> %d\n",
569 ep->name, rc);
570 }
571 return rc;
572}
573
574static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
575{
576 if (!fsg_is_set(common))
577 return false;
578 bh->state = BUF_STATE_SENDING;
579 if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
580 bh->state = BUF_STATE_EMPTY;
581 return true;
582}
583
584static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
585{
586 if (!fsg_is_set(common))
587 return false;
588 bh->state = BUF_STATE_RECEIVING;
589 if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq))
590 bh->state = BUF_STATE_FULL;
591 return true;
592}
593
594static int sleep_thread(struct fsg_common *common, bool can_freeze,
595 struct fsg_buffhd *bh)
596{
597 int rc;
598
599 /* Wait until a signal arrives or bh is no longer busy */
600 if (can_freeze)
601 /*
602 * synchronize with the smp_store_release(&bh->state) in
603 * bulk_in_complete() or bulk_out_complete()
604 */
605 rc = wait_event_freezable(common->io_wait,
606 bh && smp_load_acquire(&bh->state) >=
607 BUF_STATE_EMPTY);
608 else
609 rc = wait_event_interruptible(common->io_wait,
610 bh && smp_load_acquire(&bh->state) >=
611 BUF_STATE_EMPTY);
612 return rc ? -EINTR : 0;
613}
614
615
616/*-------------------------------------------------------------------------*/
617
618static int do_read(struct fsg_common *common)
619{
620 struct fsg_lun *curlun = common->curlun;
621 u32 lba;
622 struct fsg_buffhd *bh;
623 int rc;
624 u32 amount_left;
625 loff_t file_offset, file_offset_tmp;
626 unsigned int amount;
627 ssize_t nread;
628
629 /*
630 * Get the starting Logical Block Address and check that it's
631 * not too big.
632 */
633 if (common->cmnd[0] == READ_6)
634 lba = get_unaligned_be24(&common->cmnd[1]);
635 else {
636 lba = get_unaligned_be32(&common->cmnd[2]);
637
638 /*
639 * We allow DPO (Disable Page Out = don't save data in the
640 * cache) and FUA (Force Unit Access = don't read from the
641 * cache), but we don't implement them.
642 */
643 if ((common->cmnd[1] & ~0x18) != 0) {
644 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
645 return -EINVAL;
646 }
647 }
648 if (lba >= curlun->num_sectors) {
649 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
650 return -EINVAL;
651 }
652 file_offset = ((loff_t) lba) << curlun->blkbits;
653
654 /* Carry out the file reads */
655 amount_left = common->data_size_from_cmnd;
656 if (unlikely(amount_left == 0))
657 return -EIO; /* No default reply */
658
659 for (;;) {
660 /*
661 * Figure out how much we need to read:
662 * Try to read the remaining amount.
663 * But don't read more than the buffer size.
664 * And don't try to read past the end of the file.
665 */
666 amount = min(amount_left, FSG_BUFLEN);
667 amount = min((loff_t)amount,
668 curlun->file_length - file_offset);
669
670 /* Wait for the next buffer to become available */
671 bh = common->next_buffhd_to_fill;
672 rc = sleep_thread(common, false, bh);
673 if (rc)
674 return rc;
675
676 /*
677 * If we were asked to read past the end of file,
678 * end with an empty buffer.
679 */
680 if (amount == 0) {
681 curlun->sense_data =
682 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
683 curlun->sense_data_info =
684 file_offset >> curlun->blkbits;
685 curlun->info_valid = 1;
686 bh->inreq->length = 0;
687 bh->state = BUF_STATE_FULL;
688 break;
689 }
690
691 /* Perform the read */
692 file_offset_tmp = file_offset;
693 nread = kernel_read(curlun->filp, bh->buf, amount,
694 &file_offset_tmp);
695 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
696 (unsigned long long)file_offset, (int)nread);
697 if (signal_pending(current))
698 return -EINTR;
699
700 if (nread < 0) {
701 LDBG(curlun, "error in file read: %d\n", (int)nread);
702 nread = 0;
703 } else if (nread < amount) {
704 LDBG(curlun, "partial file read: %d/%u\n",
705 (int)nread, amount);
706 nread = round_down(nread, curlun->blksize);
707 }
708 file_offset += nread;
709 amount_left -= nread;
710 common->residue -= nread;
711
712 /*
713 * Except at the end of the transfer, nread will be
714 * equal to the buffer size, which is divisible by the
715 * bulk-in maxpacket size.
716 */
717 bh->inreq->length = nread;
718 bh->state = BUF_STATE_FULL;
719
720 /* If an error occurred, report it and its position */
721 if (nread < amount) {
722 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
723 curlun->sense_data_info =
724 file_offset >> curlun->blkbits;
725 curlun->info_valid = 1;
726 break;
727 }
728
729 if (amount_left == 0)
730 break; /* No more left to read */
731
732 /* Send this buffer and go read some more */
733 bh->inreq->zero = 0;
734 if (!start_in_transfer(common, bh))
735 /* Don't know what to do if common->fsg is NULL */
736 return -EIO;
737 common->next_buffhd_to_fill = bh->next;
738 }
739
740 return -EIO; /* No default reply */
741}
742
743
744/*-------------------------------------------------------------------------*/
745
746static int do_write(struct fsg_common *common)
747{
748 struct fsg_lun *curlun = common->curlun;
749 u32 lba;
750 struct fsg_buffhd *bh;
751 int get_some_more;
752 u32 amount_left_to_req, amount_left_to_write;
753 loff_t usb_offset, file_offset, file_offset_tmp;
754 unsigned int amount;
755 ssize_t nwritten;
756 int rc;
757
758 if (curlun->ro) {
759 curlun->sense_data = SS_WRITE_PROTECTED;
760 return -EINVAL;
761 }
762 spin_lock(&curlun->filp->f_lock);
763 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
764 spin_unlock(&curlun->filp->f_lock);
765
766 /*
767 * Get the starting Logical Block Address and check that it's
768 * not too big
769 */
770 if (common->cmnd[0] == WRITE_6)
771 lba = get_unaligned_be24(&common->cmnd[1]);
772 else {
773 lba = get_unaligned_be32(&common->cmnd[2]);
774
775 /*
776 * We allow DPO (Disable Page Out = don't save data in the
777 * cache) and FUA (Force Unit Access = write directly to the
778 * medium). We don't implement DPO; we implement FUA by
779 * performing synchronous output.
780 */
781 if (common->cmnd[1] & ~0x18) {
782 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
783 return -EINVAL;
784 }
785 if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
786 spin_lock(&curlun->filp->f_lock);
787 curlun->filp->f_flags |= O_SYNC;
788 spin_unlock(&curlun->filp->f_lock);
789 }
790 }
791 if (lba >= curlun->num_sectors) {
792 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
793 return -EINVAL;
794 }
795
796 /* Carry out the file writes */
797 get_some_more = 1;
798 file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
799 amount_left_to_req = common->data_size_from_cmnd;
800 amount_left_to_write = common->data_size_from_cmnd;
801
802 while (amount_left_to_write > 0) {
803
804 /* Queue a request for more data from the host */
805 bh = common->next_buffhd_to_fill;
806 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
807
808 /*
809 * Figure out how much we want to get:
810 * Try to get the remaining amount,
811 * but not more than the buffer size.
812 */
813 amount = min(amount_left_to_req, FSG_BUFLEN);
814
815 /* Beyond the end of the backing file? */
816 if (usb_offset >= curlun->file_length) {
817 get_some_more = 0;
818 curlun->sense_data =
819 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
820 curlun->sense_data_info =
821 usb_offset >> curlun->blkbits;
822 curlun->info_valid = 1;
823 continue;
824 }
825
826 /* Get the next buffer */
827 usb_offset += amount;
828 common->usb_amount_left -= amount;
829 amount_left_to_req -= amount;
830 if (amount_left_to_req == 0)
831 get_some_more = 0;
832
833 /*
834 * Except at the end of the transfer, amount will be
835 * equal to the buffer size, which is divisible by
836 * the bulk-out maxpacket size.
837 */
838 set_bulk_out_req_length(common, bh, amount);
839 if (!start_out_transfer(common, bh))
840 /* Dunno what to do if common->fsg is NULL */
841 return -EIO;
842 common->next_buffhd_to_fill = bh->next;
843 continue;
844 }
845
846 /* Write the received data to the backing file */
847 bh = common->next_buffhd_to_drain;
848 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
849 break; /* We stopped early */
850
851 /* Wait for the data to be received */
852 rc = sleep_thread(common, false, bh);
853 if (rc)
854 return rc;
855
856 common->next_buffhd_to_drain = bh->next;
857 bh->state = BUF_STATE_EMPTY;
858
859 /* Did something go wrong with the transfer? */
860 if (bh->outreq->status != 0) {
861 curlun->sense_data = SS_COMMUNICATION_FAILURE;
862 curlun->sense_data_info =
863 file_offset >> curlun->blkbits;
864 curlun->info_valid = 1;
865 break;
866 }
867
868 amount = bh->outreq->actual;
869 if (curlun->file_length - file_offset < amount) {
870 LERROR(curlun, "write %u @ %llu beyond end %llu\n",
871 amount, (unsigned long long)file_offset,
872 (unsigned long long)curlun->file_length);
873 amount = curlun->file_length - file_offset;
874 }
875
876 /*
877 * Don't accept excess data. The spec doesn't say
878 * what to do in this case. We'll ignore the error.
879 */
880 amount = min(amount, bh->bulk_out_intended_length);
881
882 /* Don't write a partial block */
883 amount = round_down(amount, curlun->blksize);
884 if (amount == 0)
885 goto empty_write;
886
887 /* Perform the write */
888 file_offset_tmp = file_offset;
889 nwritten = kernel_write(curlun->filp, bh->buf, amount,
890 &file_offset_tmp);
891 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
892 (unsigned long long)file_offset, (int)nwritten);
893 if (signal_pending(current))
894 return -EINTR; /* Interrupted! */
895
896 if (nwritten < 0) {
897 LDBG(curlun, "error in file write: %d\n",
898 (int) nwritten);
899 nwritten = 0;
900 } else if (nwritten < amount) {
901 LDBG(curlun, "partial file write: %d/%u\n",
902 (int) nwritten, amount);
903 nwritten = round_down(nwritten, curlun->blksize);
904 }
905 file_offset += nwritten;
906 amount_left_to_write -= nwritten;
907 common->residue -= nwritten;
908
909 /* If an error occurred, report it and its position */
910 if (nwritten < amount) {
911 curlun->sense_data = SS_WRITE_ERROR;
912 curlun->sense_data_info =
913 file_offset >> curlun->blkbits;
914 curlun->info_valid = 1;
915 break;
916 }
917
918 empty_write:
919 /* Did the host decide to stop early? */
920 if (bh->outreq->actual < bh->bulk_out_intended_length) {
921 common->short_packet_received = 1;
922 break;
923 }
924 }
925
926 return -EIO; /* No default reply */
927}
928
929
930/*-------------------------------------------------------------------------*/
931
932static int do_synchronize_cache(struct fsg_common *common)
933{
934 struct fsg_lun *curlun = common->curlun;
935 int rc;
936
937 /* We ignore the requested LBA and write out all file's
938 * dirty data buffers. */
939 rc = fsg_lun_fsync_sub(curlun);
940 if (rc)
941 curlun->sense_data = SS_WRITE_ERROR;
942 return 0;
943}
944
945
946/*-------------------------------------------------------------------------*/
947
948static void invalidate_sub(struct fsg_lun *curlun)
949{
950 struct file *filp = curlun->filp;
951 struct inode *inode = file_inode(filp);
952 unsigned long rc;
953
954 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
955 VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
956}
957
958static int do_verify(struct fsg_common *common)
959{
960 struct fsg_lun *curlun = common->curlun;
961 u32 lba;
962 u32 verification_length;
963 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
964 loff_t file_offset, file_offset_tmp;
965 u32 amount_left;
966 unsigned int amount;
967 ssize_t nread;
968
969 /*
970 * Get the starting Logical Block Address and check that it's
971 * not too big.
972 */
973 lba = get_unaligned_be32(&common->cmnd[2]);
974 if (lba >= curlun->num_sectors) {
975 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
976 return -EINVAL;
977 }
978
979 /*
980 * We allow DPO (Disable Page Out = don't save data in the
981 * cache) but we don't implement it.
982 */
983 if (common->cmnd[1] & ~0x10) {
984 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
985 return -EINVAL;
986 }
987
988 verification_length = get_unaligned_be16(&common->cmnd[7]);
989 if (unlikely(verification_length == 0))
990 return -EIO; /* No default reply */
991
992 /* Prepare to carry out the file verify */
993 amount_left = verification_length << curlun->blkbits;
994 file_offset = ((loff_t) lba) << curlun->blkbits;
995
996 /* Write out all the dirty buffers before invalidating them */
997 fsg_lun_fsync_sub(curlun);
998 if (signal_pending(current))
999 return -EINTR;
1000
1001 invalidate_sub(curlun);
1002 if (signal_pending(current))
1003 return -EINTR;
1004
1005 /* Just try to read the requested blocks */
1006 while (amount_left > 0) {
1007 /*
1008 * Figure out how much we need to read:
1009 * Try to read the remaining amount, but not more than
1010 * the buffer size.
1011 * And don't try to read past the end of the file.
1012 */
1013 amount = min(amount_left, FSG_BUFLEN);
1014 amount = min((loff_t)amount,
1015 curlun->file_length - file_offset);
1016 if (amount == 0) {
1017 curlun->sense_data =
1018 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1019 curlun->sense_data_info =
1020 file_offset >> curlun->blkbits;
1021 curlun->info_valid = 1;
1022 break;
1023 }
1024
1025 /* Perform the read */
1026 file_offset_tmp = file_offset;
1027 nread = kernel_read(curlun->filp, bh->buf, amount,
1028 &file_offset_tmp);
1029 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1030 (unsigned long long) file_offset,
1031 (int) nread);
1032 if (signal_pending(current))
1033 return -EINTR;
1034
1035 if (nread < 0) {
1036 LDBG(curlun, "error in file verify: %d\n", (int)nread);
1037 nread = 0;
1038 } else if (nread < amount) {
1039 LDBG(curlun, "partial file verify: %d/%u\n",
1040 (int)nread, amount);
1041 nread = round_down(nread, curlun->blksize);
1042 }
1043 if (nread == 0) {
1044 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1045 curlun->sense_data_info =
1046 file_offset >> curlun->blkbits;
1047 curlun->info_valid = 1;
1048 break;
1049 }
1050 file_offset += nread;
1051 amount_left -= nread;
1052 }
1053 return 0;
1054}
1055
1056
1057/*-------------------------------------------------------------------------*/
1058
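/*
 * do_inquiry() below builds the 36-byte standard INQUIRY response:
 * byte 0 is the peripheral device type, byte 1 the removable-medium
 * bit, bytes 2-3 the SCSI version and response data format (both 2),
 * byte 4 the additional length (31), and the vendor/product/revision
 * string (inquiry_string) starts at byte 8.
 */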
1059static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1060{
1061 struct fsg_lun *curlun = common->curlun;
1062 u8 *buf = (u8 *) bh->buf;
1063
1064 if (!curlun) { /* Unsupported LUNs are okay */
1065 common->bad_lun_okay = 1;
1066 memset(buf, 0, 36);
1067 buf[0] = TYPE_NO_LUN; /* Unsupported, no device-type */
1068 buf[4] = 31; /* Additional length */
1069 return 36;
1070 }
1071
1072 buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
1073 buf[1] = curlun->removable ? 0x80 : 0;
1074 buf[2] = 2; /* ANSI SCSI level 2 */
1075 buf[3] = 2; /* SCSI-2 INQUIRY data format */
1076 buf[4] = 31; /* Additional length */
1077 buf[5] = 0; /* No special options */
1078 buf[6] = 0;
1079 buf[7] = 0;
1080 if (curlun->inquiry_string[0])
1081 memcpy(buf + 8, curlun->inquiry_string,
1082 sizeof(curlun->inquiry_string));
1083 else
1084 memcpy(buf + 8, common->inquiry_string,
1085 sizeof(common->inquiry_string));
1086 return 36;
1087}
1088
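/*
 * do_request_sense() below returns 18 bytes of fixed-format sense
 * data: byte 0 is 0x70 (current error) plus the information-valid
 * bit, byte 2 the sense key, bytes 3-6 the sense information (here
 * the failing LBA), byte 7 the additional length (10), and bytes
 * 12-13 the ASC/ASCQ pair.
 */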
1089static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1090{
1091 struct fsg_lun *curlun = common->curlun;
1092 u8 *buf = (u8 *) bh->buf;
1093 u32 sd, sdinfo;
1094 int valid;
1095
1096 /*
1097 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1098 *
1099 * If a REQUEST SENSE command is received from an initiator
1100 * with a pending unit attention condition (before the target
1101 * generates the contingent allegiance condition), then the
1102 * target shall either:
1103 * a) report any pending sense data and preserve the unit
1104 * attention condition on the logical unit, or,
1105 * b) report the unit attention condition, may discard any
1106 * pending sense data, and clear the unit attention
1107 * condition on the logical unit for that initiator.
1108 *
1109 * FSG normally uses option a); enable this code to use option b).
1110 */
1111#if 0
1112 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1113 curlun->sense_data = curlun->unit_attention_data;
1114 curlun->unit_attention_data = SS_NO_SENSE;
1115 }
1116#endif
1117
1118 if (!curlun) { /* Unsupported LUNs are okay */
1119 common->bad_lun_okay = 1;
1120 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1121 sdinfo = 0;
1122 valid = 0;
1123 } else {
1124 sd = curlun->sense_data;
1125 sdinfo = curlun->sense_data_info;
1126 valid = curlun->info_valid << 7;
1127 curlun->sense_data = SS_NO_SENSE;
1128 curlun->sense_data_info = 0;
1129 curlun->info_valid = 0;
1130 }
1131
1132 memset(buf, 0, 18);
1133 buf[0] = valid | 0x70; /* Valid, current error */
1134 buf[2] = SK(sd);
1135 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1136 buf[7] = 18 - 8; /* Additional sense length */
1137 buf[12] = ASC(sd);
1138 buf[13] = ASCQ(sd);
1139 return 18;
1140}
1141
1142static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1143{
1144 struct fsg_lun *curlun = common->curlun;
1145 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1146 int pmi = common->cmnd[8];
1147 u8 *buf = (u8 *)bh->buf;
1148
1149 /* Check the PMI and LBA fields */
1150 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1151 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1152 return -EINVAL;
1153 }
1154
1155 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1156 /* Max logical block */
1157 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1158 return 8;
1159}
1160
1161static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1162{
1163 struct fsg_lun *curlun = common->curlun;
1164 int msf = common->cmnd[1] & 0x02;
1165 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1166 u8 *buf = (u8 *)bh->buf;
1167
1168 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1169 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1170 return -EINVAL;
1171 }
1172 if (lba >= curlun->num_sectors) {
1173 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1174 return -EINVAL;
1175 }
1176
1177 memset(buf, 0, 8);
1178 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1179 store_cdrom_address(&buf[4], msf, lba);
1180 return 8;
1181}
1182
1183static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1184{
1185 struct fsg_lun *curlun = common->curlun;
1186 int msf = common->cmnd[1] & 0x02;
1187 int start_track = common->cmnd[6];
1188 u8 *buf = (u8 *)bh->buf;
1189
1190 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1191 start_track > 1) {
1192 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1193 return -EINVAL;
1194 }
1195
1196 memset(buf, 0, 20);
1197 buf[1] = (20-2); /* TOC data length */
1198 buf[2] = 1; /* First track number */
1199 buf[3] = 1; /* Last track number */
1200 buf[5] = 0x16; /* Data track, copying allowed */
1201 buf[6] = 0x01; /* Only track is number 1 */
1202 store_cdrom_address(&buf[8], msf, 0);
1203
1204 buf[13] = 0x16; /* Lead-out track is data */
1205 buf[14] = 0xAA; /* Lead-out track number */
1206 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1207 return 20;
1208}
1209
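/*
 * do_mode_sense() below emits a mode parameter header (4 bytes for
 * MODE SENSE(6), 8 bytes for MODE SENSE(10), with the WP bit in the
 * device-specific byte), no block descriptors, and at most one mode
 * page: the 12-byte Caching page (0x08).
 */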
1210static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1211{
1212 struct fsg_lun *curlun = common->curlun;
1213 int mscmnd = common->cmnd[0];
1214 u8 *buf = (u8 *) bh->buf;
1215 u8 *buf0 = buf;
1216 int pc, page_code;
1217 int changeable_values, all_pages;
1218 int valid_page = 0;
1219 int len, limit;
1220
1221 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
1222 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1223 return -EINVAL;
1224 }
1225 pc = common->cmnd[2] >> 6;
1226 page_code = common->cmnd[2] & 0x3f;
1227 if (pc == 3) {
1228 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1229 return -EINVAL;
1230 }
1231 changeable_values = (pc == 1);
1232 all_pages = (page_code == 0x3f);
1233
1234 /*
1235 * Write the mode parameter header. Fixed values are: default
1236 * medium type, no cache control (DPOFUA), and no block descriptors.
1237 * The only variable value is the WriteProtect bit. We will fill in
1238 * the mode data length later.
1239 */
1240 memset(buf, 0, 8);
1241 if (mscmnd == MODE_SENSE) {
1242 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1243 buf += 4;
1244 limit = 255;
1245 } else { /* MODE_SENSE_10 */
1246 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1247 buf += 8;
1248 limit = 65535; /* Should really be FSG_BUFLEN */
1249 }
1250
1251 /* No block descriptors */
1252
1253 /*
1254 * The mode pages, in numerical order. The only page we support
1255 * is the Caching page.
1256 */
1257 if (page_code == 0x08 || all_pages) {
1258 valid_page = 1;
1259 buf[0] = 0x08; /* Page code */
1260 buf[1] = 10; /* Page length */
1261 memset(buf+2, 0, 10); /* None of the fields are changeable */
1262
1263 if (!changeable_values) {
1264 buf[2] = 0x04; /* Write cache enable, */
1265 /* Read cache not disabled */
1266 /* No cache retention priorities */
1267 put_unaligned_be16(0xffff, &buf[4]);
1268 /* Don't disable prefetch */
1269 /* Minimum prefetch = 0 */
1270 put_unaligned_be16(0xffff, &buf[8]);
1271 /* Maximum prefetch */
1272 put_unaligned_be16(0xffff, &buf[10]);
1273 /* Maximum prefetch ceiling */
1274 }
1275 buf += 12;
1276 }
1277
1278 /*
1279 * Check that a valid page was requested and the mode data length
1280 * isn't too long.
1281 */
1282 len = buf - buf0;
1283 if (!valid_page || len > limit) {
1284 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1285 return -EINVAL;
1286 }
1287
1288 /* Store the mode data length */
1289 if (mscmnd == MODE_SENSE)
1290 buf0[0] = len - 1;
1291 else
1292 put_unaligned_be16(len - 2, buf0);
1293 return len;
1294}
1295
1296static int do_start_stop(struct fsg_common *common)
1297{
1298 struct fsg_lun *curlun = common->curlun;
1299 int loej, start;
1300
1301 if (!curlun) {
1302 return -EINVAL;
1303 } else if (!curlun->removable) {
1304 curlun->sense_data = SS_INVALID_COMMAND;
1305 return -EINVAL;
1306 } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
1307 (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
1308 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1309 return -EINVAL;
1310 }
1311
1312 loej = common->cmnd[4] & 0x02;
1313 start = common->cmnd[4] & 0x01;
1314
1315 /*
1316 * Our emulation doesn't support mounting; the medium is
1317 * available for use as soon as it is loaded.
1318 */
1319 if (start) {
1320 if (!fsg_lun_is_open(curlun)) {
1321 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1322 return -EINVAL;
1323 }
1324 return 0;
1325 }
1326
1327 /* Are we allowed to unload the media? */
1328 if (curlun->prevent_medium_removal) {
1329 LDBG(curlun, "unload attempt prevented\n");
1330 curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
1331 return -EINVAL;
1332 }
1333
1334 if (!loej)
1335 return 0;
1336
1337 up_read(&common->filesem);
1338 down_write(&common->filesem);
1339 fsg_lun_close(curlun);
1340 up_write(&common->filesem);
1341 down_read(&common->filesem);
1342
1343 return 0;
1344}
1345
1346static int do_prevent_allow(struct fsg_common *common)
1347{
1348 struct fsg_lun *curlun = common->curlun;
1349 int prevent;
1350
1351 if (!common->curlun) {
1352 return -EINVAL;
1353 } else if (!common->curlun->removable) {
1354 common->curlun->sense_data = SS_INVALID_COMMAND;
1355 return -EINVAL;
1356 }
1357
1358 prevent = common->cmnd[4] & 0x01;
1359 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1360 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1361 return -EINVAL;
1362 }
1363
1364 if (curlun->prevent_medium_removal && !prevent)
1365 fsg_lun_fsync_sub(curlun);
1366 curlun->prevent_medium_removal = prevent;
1367 return 0;
1368}
1369
1370static int do_read_format_capacities(struct fsg_common *common,
1371 struct fsg_buffhd *bh)
1372{
1373 struct fsg_lun *curlun = common->curlun;
1374 u8 *buf = (u8 *) bh->buf;
1375
1376 buf[0] = buf[1] = buf[2] = 0;
1377 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1378 buf += 4;
1379
1380 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1381 /* Number of blocks */
1382 put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
1383 buf[4] = 0x02; /* Current capacity */
1384 return 12;
1385}
1386
1387static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1388{
1389 struct fsg_lun *curlun = common->curlun;
1390
1391 /* We don't support MODE SELECT */
1392 if (curlun)
1393 curlun->sense_data = SS_INVALID_COMMAND;
1394 return -EINVAL;
1395}
1396
1397
1398/*-------------------------------------------------------------------------*/
1399
1400static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1401{
1402 int rc;
1403
1404 rc = fsg_set_halt(fsg, fsg->bulk_in);
1405 if (rc == -EAGAIN)
1406 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1407 while (rc != 0) {
1408 if (rc != -EAGAIN) {
1409 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1410 rc = 0;
1411 break;
1412 }
1413
1414 /* Wait for a short time and then try again */
1415 if (msleep_interruptible(100) != 0)
1416 return -EINTR;
1417 rc = usb_ep_set_halt(fsg->bulk_in);
1418 }
1419 return rc;
1420}
1421
1422static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1423{
1424 int rc;
1425
1426 DBG(fsg, "bulk-in set wedge\n");
1427 rc = usb_ep_set_wedge(fsg->bulk_in);
1428 if (rc == -EAGAIN)
1429 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1430 while (rc != 0) {
1431 if (rc != -EAGAIN) {
1432 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1433 rc = 0;
1434 break;
1435 }
1436
1437 /* Wait for a short time and then try again */
1438 if (msleep_interruptible(100) != 0)
1439 return -EINTR;
1440 rc = usb_ep_set_wedge(fsg->bulk_in);
1441 }
1442 return rc;
1443}
1444
1445static int throw_away_data(struct fsg_common *common)
1446{
1447 struct fsg_buffhd *bh, *bh2;
1448 u32 amount;
1449 int rc;
1450
1451 for (bh = common->next_buffhd_to_drain;
1452 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
1453 bh = common->next_buffhd_to_drain) {
1454
1455 /* Try to submit another request if we need one */
1456 bh2 = common->next_buffhd_to_fill;
1457 if (bh2->state == BUF_STATE_EMPTY &&
1458 common->usb_amount_left > 0) {
1459 amount = min(common->usb_amount_left, FSG_BUFLEN);
1460
1461 /*
1462 * Except at the end of the transfer, amount will be
1463 * equal to the buffer size, which is divisible by
1464 * the bulk-out maxpacket size.
1465 */
1466 set_bulk_out_req_length(common, bh2, amount);
1467 if (!start_out_transfer(common, bh2))
1468 /* Dunno what to do if common->fsg is NULL */
1469 return -EIO;
1470 common->next_buffhd_to_fill = bh2->next;
1471 common->usb_amount_left -= amount;
1472 continue;
1473 }
1474
1475 /* Wait for the data to be received */
1476 rc = sleep_thread(common, false, bh);
1477 if (rc)
1478 return rc;
1479
1480 /* Throw away the data in a filled buffer */
1481 bh->state = BUF_STATE_EMPTY;
1482 common->next_buffhd_to_drain = bh->next;
1483
1484 /* A short packet or an error ends everything */
1485 if (bh->outreq->actual < bh->bulk_out_intended_length ||
1486 bh->outreq->status != 0) {
1487 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1488 return -EINTR;
1489 }
1490 }
1491 return 0;
1492}
1493
1494static int finish_reply(struct fsg_common *common)
1495{
1496 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1497 int rc = 0;
1498
1499 switch (common->data_dir) {
1500 case DATA_DIR_NONE:
1501 break; /* Nothing to send */
1502
1503 /*
1504 * If we don't know whether the host wants to read or write,
1505 * this must be CB or CBI with an unknown command. We mustn't
1506 * try to send or receive any data. So stall both bulk pipes
1507 * if we can and wait for a reset.
1508 */
1509 case DATA_DIR_UNKNOWN:
1510 if (!common->can_stall) {
1511 /* Nothing */
1512 } else if (fsg_is_set(common)) {
1513 fsg_set_halt(common->fsg, common->fsg->bulk_out);
1514 rc = halt_bulk_in_endpoint(common->fsg);
1515 } else {
1516 /* Don't know what to do if common->fsg is NULL */
1517 rc = -EIO;
1518 }
1519 break;
1520
1521 /* All but the last buffer of data must have already been sent */
1522 case DATA_DIR_TO_HOST:
1523 if (common->data_size == 0) {
1524 /* Nothing to send */
1525
1526 /* Don't know what to do if common->fsg is NULL */
1527 } else if (!fsg_is_set(common)) {
1528 rc = -EIO;
1529
1530 /* If there's no residue, simply send the last buffer */
1531 } else if (common->residue == 0) {
1532 bh->inreq->zero = 0;
1533 if (!start_in_transfer(common, bh))
1534 return -EIO;
1535 common->next_buffhd_to_fill = bh->next;
1536
1537 /*
1538 * For Bulk-only, mark the end of the data with a short
1539 * packet. If we are allowed to stall, halt the bulk-in
1540 * endpoint. (Note: This violates the Bulk-Only Transport
1541 * specification, which requires us to pad the data if we
1542 * don't halt the endpoint. Presumably nobody will mind.)
1543 */
1544 } else {
1545 bh->inreq->zero = 1;
1546 if (!start_in_transfer(common, bh))
1547 rc = -EIO;
1548 common->next_buffhd_to_fill = bh->next;
1549 if (common->can_stall)
1550 rc = halt_bulk_in_endpoint(common->fsg);
1551 }
1552 break;
1553
1554 /*
1555 * We have processed all we want from the data the host has sent.
1556 * There may still be outstanding bulk-out requests.
1557 */
1558 case DATA_DIR_FROM_HOST:
1559 if (common->residue == 0) {
1560 /* Nothing to receive */
1561
1562 /* Did the host stop sending unexpectedly early? */
1563 } else if (common->short_packet_received) {
1564 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1565 rc = -EINTR;
1566
1567 /*
1568 * We haven't processed all the incoming data. Even though
1569 * we may be allowed to stall, doing so would cause a race.
1570 * The controller may already have ACK'ed all the remaining
1571 * bulk-out packets, in which case the host wouldn't see a
1572 * STALL. Not realizing the endpoint was halted, it wouldn't
1573 * clear the halt -- leading to problems later on.
1574 */
1575#if 0
1576 } else if (common->can_stall) {
1577 if (fsg_is_set(common))
1578 fsg_set_halt(common->fsg,
1579 common->fsg->bulk_out);
1580 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1581 rc = -EINTR;
1582#endif
1583
1584 /*
1585 * We can't stall. Read in the excess data and throw it
1586 * all away.
1587 */
1588 } else {
1589 rc = throw_away_data(common);
1590 }
1591 break;
1592 }
1593 return rc;
1594}
1595
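/*
 * send_status() below builds the 13-byte Bulk-Only Command Status
 * Wrapper: the CSW signature, the tag echoed from the matching CBW,
 * the data residue, and a status byte (pass, fail or phase error).
 */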
1596static void send_status(struct fsg_common *common)
1597{
1598 struct fsg_lun *curlun = common->curlun;
1599 struct fsg_buffhd *bh;
1600 struct bulk_cs_wrap *csw;
1601 int rc;
1602 u8 status = US_BULK_STAT_OK;
1603 u32 sd, sdinfo = 0;
1604
1605 /* Wait for the next buffer to become available */
1606 bh = common->next_buffhd_to_fill;
1607 rc = sleep_thread(common, false, bh);
1608 if (rc)
1609 return;
1610
1611 if (curlun) {
1612 sd = curlun->sense_data;
1613 sdinfo = curlun->sense_data_info;
1614 } else if (common->bad_lun_okay)
1615 sd = SS_NO_SENSE;
1616 else
1617 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1618
1619 if (common->phase_error) {
1620 DBG(common, "sending phase-error status\n");
1621 status = US_BULK_STAT_PHASE;
1622 sd = SS_INVALID_COMMAND;
1623 } else if (sd != SS_NO_SENSE) {
1624 DBG(common, "sending command-failure status\n");
1625 status = US_BULK_STAT_FAIL;
1626 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1627 " info x%x\n",
1628 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1629 }
1630
1631 /* Store and send the Bulk-only CSW */
1632 csw = (void *)bh->buf;
1633
1634 csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
1635 csw->Tag = common->tag;
1636 csw->Residue = cpu_to_le32(common->residue);
1637 csw->Status = status;
1638
1639 bh->inreq->length = US_BULK_CS_WRAP_LEN;
1640 bh->inreq->zero = 0;
1641 if (!start_in_transfer(common, bh))
1642 /* Don't know what to do if common->fsg is NULL */
1643 return;
1644
1645 common->next_buffhd_to_fill = bh->next;
1646 return;
1647}
1648
1649
1650/*-------------------------------------------------------------------------*/
1651
1652/*
1653 * Check whether the command is properly formed and whether its data size
1654 * and direction agree with the values we already have.
1655 */
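/*
 * The "mask" argument marks which CDB bytes, apart from the opcode and
 * the LUN bits masked off from byte 1, may be non-zero.  For example,
 * the READ(10) caller below passes (1<<1) | (0xf<<2) | (3<<7): the
 * flags byte, the four LBA bytes and the two transfer-length bytes;
 * any other non-zero byte makes the command invalid.
 */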
1656static int check_command(struct fsg_common *common, int cmnd_size,
1657 enum data_direction data_dir, unsigned int mask,
1658 int needs_medium, const char *name)
1659{
1660 int i;
1661 unsigned int lun = common->cmnd[1] >> 5;
1662 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1663 char hdlen[20];
1664 struct fsg_lun *curlun;
1665
1666 hdlen[0] = 0;
1667 if (common->data_dir != DATA_DIR_UNKNOWN)
1668 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1669 common->data_size);
1670 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1671 name, cmnd_size, dirletter[(int) data_dir],
1672 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1673
1674 /*
1675 * We can't reply at all until we know the correct data direction
1676 * and size.
1677 */
1678 if (common->data_size_from_cmnd == 0)
1679 data_dir = DATA_DIR_NONE;
1680 if (common->data_size < common->data_size_from_cmnd) {
1681 /*
1682 * Host data size < Device data size is a phase error.
1683 * Carry out the command, but only transfer as much as
1684 * we are allowed.
1685 */
1686 common->data_size_from_cmnd = common->data_size;
1687 common->phase_error = 1;
1688 }
1689 common->residue = common->data_size;
1690 common->usb_amount_left = common->data_size;
1691
1692 /* Conflicting data directions is a phase error */
1693 if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
1694 common->phase_error = 1;
1695 return -EINVAL;
1696 }
1697
1698 /* Verify the length of the command itself */
1699 if (cmnd_size != common->cmnd_size) {
1700
1701 /*
1702 * Special case workaround: There are plenty of buggy SCSI
1703 * implementations. Many have issues with cbw->Length
1704 * field passing a wrong command size. For those cases we
1705 * always try to work around the problem by using the length
1706 * sent by the host side provided it is at least as large
1707 * as the correct command length.
1708 * Examples of such cases would be MS-Windows, which issues
1709 * REQUEST SENSE with cbw->Length == 12 where it should
1710 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1711 * REQUEST SENSE with cbw->Length == 10 where it should
1712 * be 6 as well.
1713 */
1714 if (cmnd_size <= common->cmnd_size) {
1715 DBG(common, "%s is buggy! Expected length %d "
1716 "but we got %d\n", name,
1717 cmnd_size, common->cmnd_size);
1718 cmnd_size = common->cmnd_size;
1719 } else {
1720 common->phase_error = 1;
1721 return -EINVAL;
1722 }
1723 }
1724
1725 /* Check that the LUN values are consistent */
1726 if (common->lun != lun)
1727 DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n",
1728 common->lun, lun);
1729
1730 /* Check the LUN */
1731 curlun = common->curlun;
1732 if (curlun) {
1733 if (common->cmnd[0] != REQUEST_SENSE) {
1734 curlun->sense_data = SS_NO_SENSE;
1735 curlun->sense_data_info = 0;
1736 curlun->info_valid = 0;
1737 }
1738 } else {
1739 common->bad_lun_okay = 0;
1740
1741 /*
1742 * INQUIRY and REQUEST SENSE commands are explicitly allowed
1743 * to use unsupported LUNs; all others may not.
1744 */
1745 if (common->cmnd[0] != INQUIRY &&
1746 common->cmnd[0] != REQUEST_SENSE) {
1747 DBG(common, "unsupported LUN %u\n", common->lun);
1748 return -EINVAL;
1749 }
1750 }
1751
1752 /*
1753 * If a unit attention condition exists, only INQUIRY and
1754 * REQUEST SENSE commands are allowed; anything else must fail.
1755 */
1756 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1757 common->cmnd[0] != INQUIRY &&
1758 common->cmnd[0] != REQUEST_SENSE) {
1759 curlun->sense_data = curlun->unit_attention_data;
1760 curlun->unit_attention_data = SS_NO_SENSE;
1761 return -EINVAL;
1762 }
1763
1764 /* Check that only command bytes listed in the mask are non-zero */
1765 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
1766 for (i = 1; i < cmnd_size; ++i) {
1767 if (common->cmnd[i] && !(mask & (1 << i))) {
1768 if (curlun)
1769 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1770 return -EINVAL;
1771 }
1772 }
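	/*
	 * Example of how the mask is read (illustrative): MODE SENSE(6)
	 * passes (1<<1) | (1<<2) | (1<<4), so only bytes 1, 2 and 4 of
	 * the CDB may be non-zero; a non-zero byte 3, say, fails here
	 * with SS_INVALID_FIELD_IN_CDB.  Byte 0 (the opcode) is never
	 * checked, and the LUN bits of byte 1 were masked off above.
	 */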
1773
1774 /* If the medium isn't mounted and the command needs to access
1775 * it, return an error. */
1776 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1777 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1778 return -EINVAL;
1779 }
1780
1781 return 0;
1782}
1783
1784/* wrapper of check_command for data size in blocks handling */
1785static int check_command_size_in_blocks(struct fsg_common *common,
1786 int cmnd_size, enum data_direction data_dir,
1787 unsigned int mask, int needs_medium, const char *name)
1788{
1789 if (common->curlun)
1790 common->data_size_from_cmnd <<= common->curlun->blkbits;
1791 return check_command(common, cmnd_size, data_dir,
1792 mask, needs_medium, name);
1793}
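
/*
 * Worked example (illustrative only): with the usual 512-byte logical
 * blocks, curlun->blkbits is 9, so a READ(10) TRANSFER LENGTH of 8
 * becomes 8 << 9 == 4096 bytes; a CD-ROM LUN using 2048-byte blocks
 * (blkbits == 11) would turn the same 8 blocks into 16384 bytes.
 */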
1794
1795static int do_scsi_command(struct fsg_common *common)
1796{
1797 struct fsg_buffhd *bh;
1798 int rc;
1799 int reply = -EINVAL;
1800 int i;
1801 static char unknown[16];
1802
1803 dump_cdb(common);
1804
1805 /* Wait for the next buffer to become available for data or status */
1806 bh = common->next_buffhd_to_fill;
1807 common->next_buffhd_to_drain = bh;
1808 rc = sleep_thread(common, false, bh);
1809 if (rc)
1810 return rc;
1811
1812 common->phase_error = 0;
1813 common->short_packet_received = 0;
1814
1815 down_read(&common->filesem); /* We're using the backing file */
1816 switch (common->cmnd[0]) {
1817
1818 case INQUIRY:
1819 common->data_size_from_cmnd = common->cmnd[4];
1820 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1821 (1<<4), 0,
1822 "INQUIRY");
1823 if (reply == 0)
1824 reply = do_inquiry(common, bh);
1825 break;
1826
1827 case MODE_SELECT:
1828 common->data_size_from_cmnd = common->cmnd[4];
1829 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1830 (1<<1) | (1<<4), 0,
1831 "MODE SELECT(6)");
1832 if (reply == 0)
1833 reply = do_mode_select(common, bh);
1834 break;
1835
1836 case MODE_SELECT_10:
1837 common->data_size_from_cmnd =
1838 get_unaligned_be16(&common->cmnd[7]);
1839 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1840 (1<<1) | (3<<7), 0,
1841 "MODE SELECT(10)");
1842 if (reply == 0)
1843 reply = do_mode_select(common, bh);
1844 break;
1845
1846 case MODE_SENSE:
1847 common->data_size_from_cmnd = common->cmnd[4];
1848 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1849 (1<<1) | (1<<2) | (1<<4), 0,
1850 "MODE SENSE(6)");
1851 if (reply == 0)
1852 reply = do_mode_sense(common, bh);
1853 break;
1854
1855 case MODE_SENSE_10:
1856 common->data_size_from_cmnd =
1857 get_unaligned_be16(&common->cmnd[7]);
1858 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1859 (1<<1) | (1<<2) | (3<<7), 0,
1860 "MODE SENSE(10)");
1861 if (reply == 0)
1862 reply = do_mode_sense(common, bh);
1863 break;
1864
1865 case ALLOW_MEDIUM_REMOVAL:
1866 common->data_size_from_cmnd = 0;
1867 reply = check_command(common, 6, DATA_DIR_NONE,
1868 (1<<4), 0,
1869 "PREVENT-ALLOW MEDIUM REMOVAL");
1870 if (reply == 0)
1871 reply = do_prevent_allow(common);
1872 break;
1873
1874 case READ_6:
1875 i = common->cmnd[4];
1876 common->data_size_from_cmnd = (i == 0) ? 256 : i;
1877 reply = check_command_size_in_blocks(common, 6,
1878 DATA_DIR_TO_HOST,
1879 (7<<1) | (1<<4), 1,
1880 "READ(6)");
1881 if (reply == 0)
1882 reply = do_read(common);
1883 break;
1884
1885 case READ_10:
1886 common->data_size_from_cmnd =
1887 get_unaligned_be16(&common->cmnd[7]);
1888 reply = check_command_size_in_blocks(common, 10,
1889 DATA_DIR_TO_HOST,
1890 (1<<1) | (0xf<<2) | (3<<7), 1,
1891 "READ(10)");
1892 if (reply == 0)
1893 reply = do_read(common);
1894 break;
1895
1896 case READ_12:
1897 common->data_size_from_cmnd =
1898 get_unaligned_be32(&common->cmnd[6]);
1899 reply = check_command_size_in_blocks(common, 12,
1900 DATA_DIR_TO_HOST,
1901 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1902 "READ(12)");
1903 if (reply == 0)
1904 reply = do_read(common);
1905 break;
1906
1907 case READ_CAPACITY:
1908 common->data_size_from_cmnd = 8;
1909 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1910 (0xf<<2) | (1<<8), 1,
1911 "READ CAPACITY");
1912 if (reply == 0)
1913 reply = do_read_capacity(common, bh);
1914 break;
1915
1916 case READ_HEADER:
1917 if (!common->curlun || !common->curlun->cdrom)
1918 goto unknown_cmnd;
1919 common->data_size_from_cmnd =
1920 get_unaligned_be16(&common->cmnd[7]);
1921 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1922 (3<<7) | (0x1f<<1), 1,
1923 "READ HEADER");
1924 if (reply == 0)
1925 reply = do_read_header(common, bh);
1926 break;
1927
1928 case READ_TOC:
1929 if (!common->curlun || !common->curlun->cdrom)
1930 goto unknown_cmnd;
1931 common->data_size_from_cmnd =
1932 get_unaligned_be16(&common->cmnd[7]);
1933 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1934 (7<<6) | (1<<1), 1,
1935 "READ TOC");
1936 if (reply == 0)
1937 reply = do_read_toc(common, bh);
1938 break;
1939
1940 case READ_FORMAT_CAPACITIES:
1941 common->data_size_from_cmnd =
1942 get_unaligned_be16(&common->cmnd[7]);
1943 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1944 (3<<7), 1,
1945 "READ FORMAT CAPACITIES");
1946 if (reply == 0)
1947 reply = do_read_format_capacities(common, bh);
1948 break;
1949
1950 case REQUEST_SENSE:
1951 common->data_size_from_cmnd = common->cmnd[4];
1952 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1953 (1<<4), 0,
1954 "REQUEST SENSE");
1955 if (reply == 0)
1956 reply = do_request_sense(common, bh);
1957 break;
1958
1959 case START_STOP:
1960 common->data_size_from_cmnd = 0;
1961 reply = check_command(common, 6, DATA_DIR_NONE,
1962 (1<<1) | (1<<4), 0,
1963 "START-STOP UNIT");
1964 if (reply == 0)
1965 reply = do_start_stop(common);
1966 break;
1967
1968 case SYNCHRONIZE_CACHE:
1969 common->data_size_from_cmnd = 0;
1970 reply = check_command(common, 10, DATA_DIR_NONE,
1971 (0xf<<2) | (3<<7), 1,
1972 "SYNCHRONIZE CACHE");
1973 if (reply == 0)
1974 reply = do_synchronize_cache(common);
1975 break;
1976
1977 case TEST_UNIT_READY:
1978 common->data_size_from_cmnd = 0;
1979 reply = check_command(common, 6, DATA_DIR_NONE,
1980 0, 1,
1981 "TEST UNIT READY");
1982 break;
1983
1984 /*
1985 * Although optional, this command is used by MS-Windows. We
1986 * support a minimal version: BytChk must be 0.
1987 */
1988 case VERIFY:
1989 common->data_size_from_cmnd = 0;
1990 reply = check_command(common, 10, DATA_DIR_NONE,
1991 (1<<1) | (0xf<<2) | (3<<7), 1,
1992 "VERIFY");
1993 if (reply == 0)
1994 reply = do_verify(common);
1995 break;
1996
1997 case WRITE_6:
1998 i = common->cmnd[4];
1999 common->data_size_from_cmnd = (i == 0) ? 256 : i;
2000 reply = check_command_size_in_blocks(common, 6,
2001 DATA_DIR_FROM_HOST,
2002 (7<<1) | (1<<4), 1,
2003 "WRITE(6)");
2004 if (reply == 0)
2005 reply = do_write(common);
2006 break;
2007
2008 case WRITE_10:
2009 common->data_size_from_cmnd =
2010 get_unaligned_be16(&common->cmnd[7]);
2011 reply = check_command_size_in_blocks(common, 10,
2012 DATA_DIR_FROM_HOST,
2013 (1<<1) | (0xf<<2) | (3<<7), 1,
2014 "WRITE(10)");
2015 if (reply == 0)
2016 reply = do_write(common);
2017 break;
2018
2019 case WRITE_12:
2020 common->data_size_from_cmnd =
2021 get_unaligned_be32(&common->cmnd[6]);
2022 reply = check_command_size_in_blocks(common, 12,
2023 DATA_DIR_FROM_HOST,
2024 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2025 "WRITE(12)");
2026 if (reply == 0)
2027 reply = do_write(common);
2028 break;
2029
2030 /*
2031 * Some mandatory commands that we recognize but don't implement.
2032 * They don't mean much in this setting. It's left as an exercise
2033 * for anyone interested to implement RESERVE and RELEASE in terms
2034	 * of POSIX locks.
2035 */
2036 case FORMAT_UNIT:
2037 case RELEASE:
2038 case RESERVE:
2039 case SEND_DIAGNOSTIC:
2040
2041 default:
2042unknown_cmnd:
2043 common->data_size_from_cmnd = 0;
2044 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2045 reply = check_command(common, common->cmnd_size,
2046 DATA_DIR_UNKNOWN, ~0, 0, unknown);
2047 if (reply == 0) {
2048 common->curlun->sense_data = SS_INVALID_COMMAND;
2049 reply = -EINVAL;
2050 }
2051 break;
2052 }
2053 up_read(&common->filesem);
2054
2055 if (reply == -EINTR || signal_pending(current))
2056 return -EINTR;
2057
2058 /* Set up the single reply buffer for finish_reply() */
2059 if (reply == -EINVAL)
2060 reply = 0; /* Error reply length */
2061 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2062 reply = min((u32)reply, common->data_size_from_cmnd);
2063 bh->inreq->length = reply;
2064 bh->state = BUF_STATE_FULL;
2065 common->residue -= reply;
2066 } /* Otherwise it's already set */
2067
2068 return 0;
2069}
2070
2071
2072/*-------------------------------------------------------------------------*/
2073
2074static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2075{
2076 struct usb_request *req = bh->outreq;
2077 struct bulk_cb_wrap *cbw = req->buf;
2078 struct fsg_common *common = fsg->common;
2079
2080 /* Was this a real packet? Should it be ignored? */
2081 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2082 return -EINVAL;
2083
2084 /* Is the CBW valid? */
2085 if (req->actual != US_BULK_CB_WRAP_LEN ||
2086 cbw->Signature != cpu_to_le32(
2087 US_BULK_CB_SIGN)) {
2088 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2089 req->actual,
2090 le32_to_cpu(cbw->Signature));
2091
2092 /*
2093 * The Bulk-only spec says we MUST stall the IN endpoint
2094 * (6.6.1), so it's unavoidable. It also says we must
2095 * retain this state until the next reset, but there's
2096 * no way to tell the controller driver it should ignore
2097 * Clear-Feature(HALT) requests.
2098 *
2099 * We aren't required to halt the OUT endpoint; instead
2100 * we can simply accept and discard any data received
2101 * until the next reset.
2102 */
2103 wedge_bulk_in_endpoint(fsg);
2104 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2105 return -EINVAL;
2106 }
2107
2108 /* Is the CBW meaningful? */
2109 if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
2110 cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
2111 cbw->Length > MAX_COMMAND_SIZE) {
2112 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2113 "cmdlen %u\n",
2114 cbw->Lun, cbw->Flags, cbw->Length);
2115
2116 /*
2117 * We can do anything we want here, so let's stall the
2118 * bulk pipes if we are allowed to.
2119 */
2120 if (common->can_stall) {
2121 fsg_set_halt(fsg, fsg->bulk_out);
2122 halt_bulk_in_endpoint(fsg);
2123 }
2124 return -EINVAL;
2125 }
2126
2127 /* Save the command for later */
2128 common->cmnd_size = cbw->Length;
2129 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2130 if (cbw->Flags & US_BULK_FLAG_IN)
2131 common->data_dir = DATA_DIR_TO_HOST;
2132 else
2133 common->data_dir = DATA_DIR_FROM_HOST;
2134 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2135 if (common->data_size == 0)
2136 common->data_dir = DATA_DIR_NONE;
2137 common->lun = cbw->Lun;
2138 if (common->lun < ARRAY_SIZE(common->luns))
2139 common->curlun = common->luns[common->lun];
2140 else
2141 common->curlun = NULL;
2142 common->tag = cbw->Tag;
2143 return 0;
2144}
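
/*
 * For reference, a sketch of a well-formed 31-byte CBW as this code
 * expects it (field names from struct bulk_cb_wrap; the concrete values
 * are only an example):
 *
 *	Signature		0x43425355 ("USBC", US_BULK_CB_SIGN)
 *	Tag			0x12345678 (echoed back in the CSW)
 *	DataTransferLength	4096
 *	Flags			0x80 (US_BULK_FLAG_IN, device-to-host)
 *	Lun			0
 *	Length			10
 *	CDB[0..9]		28 00 00 00 00 00 00 00 08 00  (READ(10), 8 blocks)
 */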
2145
2146static int get_next_command(struct fsg_common *common)
2147{
2148 struct fsg_buffhd *bh;
2149 int rc = 0;
2150
2151 /* Wait for the next buffer to become available */
2152 bh = common->next_buffhd_to_fill;
2153 rc = sleep_thread(common, true, bh);
2154 if (rc)
2155 return rc;
2156
2157 /* Queue a request to read a Bulk-only CBW */
2158 set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
2159 if (!start_out_transfer(common, bh))
2160 /* Don't know what to do if common->fsg is NULL */
2161 return -EIO;
2162
2163 /*
2164 * We will drain the buffer in software, which means we
2165 * can reuse it for the next filling. No need to advance
2166 * next_buffhd_to_fill.
2167 */
2168
2169 /* Wait for the CBW to arrive */
2170 rc = sleep_thread(common, true, bh);
2171 if (rc)
2172 return rc;
2173
2174 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2175 bh->state = BUF_STATE_EMPTY;
2176
2177 return rc;
2178}
2179
2180
2181/*-------------------------------------------------------------------------*/
2182
2183static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2184 struct usb_request **preq)
2185{
2186 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2187 if (*preq)
2188 return 0;
2189 ERROR(common, "can't allocate request for %s\n", ep->name);
2190 return -ENOMEM;
2191}
2192
2193/* Reset interface setting and re-init endpoint state (toggle etc). */
2194static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2195{
2196 struct fsg_dev *fsg;
2197 int i, rc = 0;
2198
2199 if (common->running)
2200 DBG(common, "reset interface\n");
2201
2202reset:
2203 /* Deallocate the requests */
2204 if (common->fsg) {
2205 fsg = common->fsg;
2206
2207 for (i = 0; i < common->fsg_num_buffers; ++i) {
2208 struct fsg_buffhd *bh = &common->buffhds[i];
2209
2210 if (bh->inreq) {
2211 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2212 bh->inreq = NULL;
2213 }
2214 if (bh->outreq) {
2215 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2216 bh->outreq = NULL;
2217 }
2218 }
2219
2220 /* Disable the endpoints */
2221 if (fsg->bulk_in_enabled) {
2222 usb_ep_disable(fsg->bulk_in);
2223 fsg->bulk_in_enabled = 0;
2224 }
2225 if (fsg->bulk_out_enabled) {
2226 usb_ep_disable(fsg->bulk_out);
2227 fsg->bulk_out_enabled = 0;
2228 }
2229
2230 common->fsg = NULL;
2231 wake_up(&common->fsg_wait);
2232 }
2233
2234 common->running = 0;
2235 if (!new_fsg || rc)
2236 return rc;
2237
2238 common->fsg = new_fsg;
2239 fsg = common->fsg;
2240
2241 /* Enable the endpoints */
2242 rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
2243 if (rc)
2244 goto reset;
2245 rc = usb_ep_enable(fsg->bulk_in);
2246 if (rc)
2247 goto reset;
2248 fsg->bulk_in->driver_data = common;
2249 fsg->bulk_in_enabled = 1;
2250
2251 rc = config_ep_by_speed(common->gadget, &(fsg->function),
2252 fsg->bulk_out);
2253 if (rc)
2254 goto reset;
2255 rc = usb_ep_enable(fsg->bulk_out);
2256 if (rc)
2257 goto reset;
2258 fsg->bulk_out->driver_data = common;
2259 fsg->bulk_out_enabled = 1;
2260 common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
2261 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2262
2263 /* Allocate the requests */
2264 for (i = 0; i < common->fsg_num_buffers; ++i) {
2265 struct fsg_buffhd *bh = &common->buffhds[i];
2266
2267 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2268 if (rc)
2269 goto reset;
2270 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2271 if (rc)
2272 goto reset;
2273 bh->inreq->buf = bh->outreq->buf = bh->buf;
2274 bh->inreq->context = bh->outreq->context = bh;
2275 bh->inreq->complete = bulk_in_complete;
2276 bh->outreq->complete = bulk_out_complete;
2277 }
2278
2279 common->running = 1;
2280 for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
2281 if (common->luns[i])
2282 common->luns[i]->unit_attention_data =
2283 SS_RESET_OCCURRED;
2284 return rc;
2285}
2286
2287
2288/****************************** ALT CONFIGS ******************************/
2289
2290static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2291{
2292 struct fsg_dev *fsg = fsg_from_func(f);
2293
2294 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, fsg);
2295 return USB_GADGET_DELAYED_STATUS;
2296}
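
/*
 * Sketch of the config-change sequence implied above (grounded only in
 * the code in this file): fsg_set_alt() just raises
 * FSG_STATE_CONFIG_CHANGE and returns USB_GADGET_DELAYED_STATUS; the
 * worker thread later runs handle_exception(), whose
 * FSG_STATE_CONFIG_CHANGE case calls do_set_interface(common, new_fsg)
 * and then usb_composite_setup_continue() to finish the delayed
 * status stage.
 */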
2297
2298static void fsg_disable(struct usb_function *f)
2299{
2300 struct fsg_dev *fsg = fsg_from_func(f);
2301
2302 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
2303}
2304
2305
2306/*-------------------------------------------------------------------------*/
2307
2308static void handle_exception(struct fsg_common *common)
2309{
2310 int i;
2311 struct fsg_buffhd *bh;
2312 enum fsg_state old_state;
2313 struct fsg_lun *curlun;
2314 unsigned int exception_req_tag;
2315 struct fsg_dev *new_fsg;
2316
2317 /*
2318 * Clear the existing signals. Anything but SIGUSR1 is converted
2319 * into a high-priority EXIT exception.
2320 */
2321 for (;;) {
2322 int sig = kernel_dequeue_signal();
2323 if (!sig)
2324 break;
2325 if (sig != SIGUSR1) {
2326 spin_lock_irq(&common->lock);
2327 if (common->state < FSG_STATE_EXIT)
2328 DBG(common, "Main thread exiting on signal\n");
2329 common->state = FSG_STATE_EXIT;
2330 spin_unlock_irq(&common->lock);
2331 }
2332 }
2333
2334 /* Cancel all the pending transfers */
2335 if (likely(common->fsg)) {
2336 for (i = 0; i < common->fsg_num_buffers; ++i) {
2337 bh = &common->buffhds[i];
2338 if (bh->state == BUF_STATE_SENDING)
2339 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2340 if (bh->state == BUF_STATE_RECEIVING)
2341 usb_ep_dequeue(common->fsg->bulk_out,
2342 bh->outreq);
2343
2344 /* Wait for a transfer to become idle */
2345 if (sleep_thread(common, false, bh))
2346 return;
2347 }
2348
2349 /* Clear out the controller's fifos */
2350 if (common->fsg->bulk_in_enabled)
2351 usb_ep_fifo_flush(common->fsg->bulk_in);
2352 if (common->fsg->bulk_out_enabled)
2353 usb_ep_fifo_flush(common->fsg->bulk_out);
2354 }
2355
2356 /*
2357 * Reset the I/O buffer states and pointers, the SCSI
2358 * state, and the exception. Then invoke the handler.
2359 */
2360 spin_lock_irq(&common->lock);
2361
2362 for (i = 0; i < common->fsg_num_buffers; ++i) {
2363 bh = &common->buffhds[i];
2364 bh->state = BUF_STATE_EMPTY;
2365 }
2366 common->next_buffhd_to_fill = &common->buffhds[0];
2367 common->next_buffhd_to_drain = &common->buffhds[0];
2368 exception_req_tag = common->exception_req_tag;
2369 new_fsg = common->exception_arg;
2370 old_state = common->state;
2371 common->state = FSG_STATE_NORMAL;
2372
2373 if (old_state != FSG_STATE_ABORT_BULK_OUT) {
2374 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
2375 curlun = common->luns[i];
2376 if (!curlun)
2377 continue;
2378 curlun->prevent_medium_removal = 0;
2379 curlun->sense_data = SS_NO_SENSE;
2380 curlun->unit_attention_data = SS_NO_SENSE;
2381 curlun->sense_data_info = 0;
2382 curlun->info_valid = 0;
2383 }
2384 }
2385 spin_unlock_irq(&common->lock);
2386
2387 /* Carry out any extra actions required for the exception */
2388 switch (old_state) {
2389 case FSG_STATE_NORMAL:
2390 break;
2391
2392 case FSG_STATE_ABORT_BULK_OUT:
2393 send_status(common);
2394 break;
2395
2396 case FSG_STATE_PROTOCOL_RESET:
2397 /*
2398 * In case we were forced against our will to halt a
2399 * bulk endpoint, clear the halt now. (The SuperH UDC
2400 * requires this.)
2401 */
2402 if (!fsg_is_set(common))
2403 break;
2404 if (test_and_clear_bit(IGNORE_BULK_OUT,
2405 &common->fsg->atomic_bitflags))
2406 usb_ep_clear_halt(common->fsg->bulk_in);
2407
2408 if (common->ep0_req_tag == exception_req_tag)
2409 ep0_queue(common); /* Complete the status stage */
2410
2411 /*
2412 * Technically this should go here, but it would only be
2413 * a waste of time. Ditto for the INTERFACE_CHANGE and
2414 * CONFIG_CHANGE cases.
2415 */
2416		/* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */
2417 /* if (common->luns[i]) */
2418 /* common->luns[i]->unit_attention_data = */
2419 /* SS_RESET_OCCURRED; */
2420 break;
2421
2422 case FSG_STATE_CONFIG_CHANGE:
2423 do_set_interface(common, new_fsg);
2424 if (new_fsg)
2425 usb_composite_setup_continue(common->cdev);
2426 break;
2427
2428 case FSG_STATE_EXIT:
2429 do_set_interface(common, NULL); /* Free resources */
2430 spin_lock_irq(&common->lock);
2431 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2432 spin_unlock_irq(&common->lock);
2433 break;
2434
2435 case FSG_STATE_TERMINATED:
2436 break;
2437 }
2438}
2439
2440
2441/*-------------------------------------------------------------------------*/
2442
2443static int fsg_main_thread(void *common_)
2444{
2445 struct fsg_common *common = common_;
2446 int i;
2447
2448 /*
2449 * Allow the thread to be killed by a signal, but set the signal mask
2450 * to block everything but INT, TERM, KILL, and USR1.
2451 */
2452 allow_signal(SIGINT);
2453 allow_signal(SIGTERM);
2454 allow_signal(SIGKILL);
2455 allow_signal(SIGUSR1);
2456
2457 /* Allow the thread to be frozen */
2458 set_freezable();
2459
2460 /* The main loop */
2461 while (common->state != FSG_STATE_TERMINATED) {
2462 if (exception_in_progress(common) || signal_pending(current)) {
2463 handle_exception(common);
2464 continue;
2465 }
2466
2467 if (!common->running) {
2468 sleep_thread(common, true, NULL);
2469 continue;
2470 }
2471
2472 if (get_next_command(common) || exception_in_progress(common))
2473 continue;
2474 if (do_scsi_command(common) || exception_in_progress(common))
2475 continue;
2476 if (finish_reply(common) || exception_in_progress(common))
2477 continue;
2478 send_status(common);
2479 }
2480
2481 spin_lock_irq(&common->lock);
2482 common->thread_task = NULL;
2483 spin_unlock_irq(&common->lock);
2484
2485 /* Eject media from all LUNs */
2486
2487 down_write(&common->filesem);
2488 for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
2489 struct fsg_lun *curlun = common->luns[i];
2490
2491 if (curlun && fsg_lun_is_open(curlun))
2492 fsg_lun_close(curlun);
2493 }
2494 up_write(&common->filesem);
2495
2496 /* Let fsg_unbind() know the thread has exited */
2497 complete_and_exit(&common->thread_notifier, 0);
2498}
2499
2500
2501/*************************** DEVICE ATTRIBUTES ***************************/
2502
2503static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf)
2504{
2505 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2506
2507 return fsg_show_ro(curlun, buf);
2508}
2509
2510static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
2511 char *buf)
2512{
2513 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2514
2515 return fsg_show_nofua(curlun, buf);
2516}
2517
2518static ssize_t file_show(struct device *dev, struct device_attribute *attr,
2519 char *buf)
2520{
2521 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2522 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2523
2524 return fsg_show_file(curlun, filesem, buf);
2525}
2526
2527static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
2528 const char *buf, size_t count)
2529{
2530 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2531 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2532
2533 return fsg_store_ro(curlun, filesem, buf, count);
2534}
2535
2536static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
2537 const char *buf, size_t count)
2538{
2539 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2540
2541 return fsg_store_nofua(curlun, buf, count);
2542}
2543
2544static ssize_t file_store(struct device *dev, struct device_attribute *attr,
2545 const char *buf, size_t count)
2546{
2547 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
2548 struct rw_semaphore *filesem = dev_get_drvdata(dev);
2549
2550 return fsg_store_file(curlun, filesem, buf, count);
2551}
2552
2553static DEVICE_ATTR_RW(nofua);
2554/* mode will be set in fsg_lun_dev_is_visible() */
2555static DEVICE_ATTR(ro, 0, ro_show, ro_store);
2556static DEVICE_ATTR(file, 0, file_show, file_store);
2557
2558/****************************** FSG COMMON ******************************/
2559
2560static void fsg_lun_release(struct device *dev)
2561{
2562 /* Nothing needs to be done */
2563}
2564
2565static struct fsg_common *fsg_common_setup(struct fsg_common *common)
2566{
2567 if (!common) {
2568 common = kzalloc(sizeof(*common), GFP_KERNEL);
2569 if (!common)
2570 return ERR_PTR(-ENOMEM);
2571 common->free_storage_on_release = 1;
2572 } else {
2573 common->free_storage_on_release = 0;
2574 }
2575 init_rwsem(&common->filesem);
2576 spin_lock_init(&common->lock);
2577 init_completion(&common->thread_notifier);
2578 init_waitqueue_head(&common->io_wait);
2579 init_waitqueue_head(&common->fsg_wait);
2580 common->state = FSG_STATE_TERMINATED;
2581 memset(common->luns, 0, sizeof(common->luns));
2582
2583 return common;
2584}
2585
2586void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs)
2587{
2588 common->sysfs = sysfs;
2589}
2590EXPORT_SYMBOL_GPL(fsg_common_set_sysfs);
2591
2592static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
2593{
2594 if (buffhds) {
2595 struct fsg_buffhd *bh = buffhds;
2596 while (n--) {
2597 kfree(bh->buf);
2598 ++bh;
2599 }
2600 kfree(buffhds);
2601 }
2602}
2603
2604int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
2605{
2606 struct fsg_buffhd *bh, *buffhds;
2607 int i;
2608
2609 buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
2610 if (!buffhds)
2611 return -ENOMEM;
2612
2613 /* Data buffers cyclic list */
2614 bh = buffhds;
2615 i = n;
2616 goto buffhds_first_it;
2617 do {
2618 bh->next = bh + 1;
2619 ++bh;
2620buffhds_first_it:
2621 bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
2622 if (unlikely(!bh->buf))
2623 goto error_release;
2624 } while (--i);
2625 bh->next = buffhds;
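	/*
	 * The goto above just primes the first pass; a plain-loop sketch
	 * of the same circular-list construction (illustrative only):
	 *
	 *	for (i = 0; i < n; ++i) {
	 *		buffhds[i].buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
	 *		if (!buffhds[i].buf)
	 *			goto error_release;
	 *		buffhds[i].next = &buffhds[(i + 1) % n];
	 *	}
	 */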
2626
2627 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
2628 common->fsg_num_buffers = n;
2629 common->buffhds = buffhds;
2630
2631 return 0;
2632
2633error_release:
2634 /*
2635	 * The "buf" pointers in the heads past the first n - i entries
2636	 * are still NULL, so releasing them won't hurt.
2637 */
2638 _fsg_common_free_buffers(buffhds, n);
2639
2640 return -ENOMEM;
2641}
2642EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
2643
2644void fsg_common_remove_lun(struct fsg_lun *lun)
2645{
2646 if (device_is_registered(&lun->dev))
2647 device_unregister(&lun->dev);
2648 fsg_lun_close(lun);
2649 kfree(lun);
2650}
2651EXPORT_SYMBOL_GPL(fsg_common_remove_lun);
2652
2653static void _fsg_common_remove_luns(struct fsg_common *common, int n)
2654{
2655 int i;
2656
2657 for (i = 0; i < n; ++i)
2658 if (common->luns[i]) {
2659 fsg_common_remove_lun(common->luns[i]);
2660 common->luns[i] = NULL;
2661 }
2662}
2663
2664void fsg_common_remove_luns(struct fsg_common *common)
2665{
2666 _fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
2667}
2668EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2669
2670void fsg_common_free_buffers(struct fsg_common *common)
2671{
2672 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
2673 common->buffhds = NULL;
2674}
2675EXPORT_SYMBOL_GPL(fsg_common_free_buffers);
2676
2677int fsg_common_set_cdev(struct fsg_common *common,
2678 struct usb_composite_dev *cdev, bool can_stall)
2679{
2680 struct usb_string *us;
2681
2682 common->gadget = cdev->gadget;
2683 common->ep0 = cdev->gadget->ep0;
2684 common->ep0req = cdev->req;
2685 common->cdev = cdev;
2686
2687 us = usb_gstrings_attach(cdev, fsg_strings_array,
2688 ARRAY_SIZE(fsg_strings));
2689 if (IS_ERR(us))
2690 return PTR_ERR(us);
2691
2692 fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id;
2693
2694 /*
2695 * Some peripheral controllers are known not to be able to
2696 * halt bulk endpoints correctly. If one of them is present,
2697 * disable stalls.
2698 */
2699 common->can_stall = can_stall &&
2700 gadget_is_stall_supported(common->gadget);
2701
2702 return 0;
2703}
2704EXPORT_SYMBOL_GPL(fsg_common_set_cdev);
2705
2706static struct attribute *fsg_lun_dev_attrs[] = {
2707 &dev_attr_ro.attr,
2708 &dev_attr_file.attr,
2709 &dev_attr_nofua.attr,
2710 NULL
2711};
2712
2713static umode_t fsg_lun_dev_is_visible(struct kobject *kobj,
2714 struct attribute *attr, int idx)
2715{
2716 struct device *dev = kobj_to_dev(kobj);
2717 struct fsg_lun *lun = fsg_lun_from_dev(dev);
2718
2719 if (attr == &dev_attr_ro.attr)
2720 return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO);
2721 if (attr == &dev_attr_file.attr)
2722 return lun->removable ? (S_IWUSR | S_IRUGO) : S_IRUGO;
2723 return attr->mode;
2724}
2725
2726static const struct attribute_group fsg_lun_dev_group = {
2727 .attrs = fsg_lun_dev_attrs,
2728 .is_visible = fsg_lun_dev_is_visible,
2729};
2730
2731static const struct attribute_group *fsg_lun_dev_groups[] = {
2732 &fsg_lun_dev_group,
2733 NULL
2734};
2735
2736int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
2737 unsigned int id, const char *name,
2738 const char **name_pfx)
2739{
2740 struct fsg_lun *lun;
2741 char *pathbuf, *p;
2742 int rc = -ENOMEM;
2743
2744 if (id >= ARRAY_SIZE(common->luns))
2745 return -ENODEV;
2746
2747 if (common->luns[id])
2748 return -EBUSY;
2749
2750 if (!cfg->filename && !cfg->removable) {
2751 pr_err("no file given for LUN%d\n", id);
2752 return -EINVAL;
2753 }
2754
2755 lun = kzalloc(sizeof(*lun), GFP_KERNEL);
2756 if (!lun)
2757 return -ENOMEM;
2758
2759 lun->name_pfx = name_pfx;
2760
2761 lun->cdrom = !!cfg->cdrom;
2762 lun->ro = cfg->cdrom || cfg->ro;
2763 lun->initially_ro = lun->ro;
2764 lun->removable = !!cfg->removable;
2765
2766 if (!common->sysfs) {
2767		/* we DON'T own the name! */
2768 lun->name = name;
2769 } else {
2770 lun->dev.release = fsg_lun_release;
2771 lun->dev.parent = &common->gadget->dev;
2772 lun->dev.groups = fsg_lun_dev_groups;
2773 dev_set_drvdata(&lun->dev, &common->filesem);
2774 dev_set_name(&lun->dev, "%s", name);
2775 lun->name = dev_name(&lun->dev);
2776
2777 rc = device_register(&lun->dev);
2778 if (rc) {
2779 pr_info("failed to register LUN%d: %d\n", id, rc);
2780 put_device(&lun->dev);
2781 goto error_sysfs;
2782 }
2783 }
2784
2785 common->luns[id] = lun;
2786
2787 if (cfg->filename) {
2788 rc = fsg_lun_open(lun, cfg->filename);
2789 if (rc)
2790 goto error_lun;
2791 }
2792
2793 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2794 p = "(no medium)";
2795 if (fsg_lun_is_open(lun)) {
2796 p = "(error)";
2797 if (pathbuf) {
2798 p = file_path(lun->filp, pathbuf, PATH_MAX);
2799 if (IS_ERR(p))
2800 p = "(error)";
2801 }
2802 }
2803 pr_info("LUN: %s%s%sfile: %s\n",
2804 lun->removable ? "removable " : "",
2805 lun->ro ? "read only " : "",
2806 lun->cdrom ? "CD-ROM " : "",
2807 p);
2808 kfree(pathbuf);
2809
2810 return 0;
2811
2812error_lun:
2813 if (device_is_registered(&lun->dev))
2814 device_unregister(&lun->dev);
2815 fsg_lun_close(lun);
2816 common->luns[id] = NULL;
2817error_sysfs:
2818 kfree(lun);
2819 return rc;
2820}
2821EXPORT_SYMBOL_GPL(fsg_common_create_lun);
2822
2823int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
2824{
2825	char buf[8]; /* enough for "lun" plus up to four decimal digits and a NUL */
2826 int i, rc;
2827
2828 fsg_common_remove_luns(common);
2829
2830 for (i = 0; i < cfg->nluns; ++i) {
2831 snprintf(buf, sizeof(buf), "lun%d", i);
2832 rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
2833 if (rc)
2834 goto fail;
2835 }
2836
2837 pr_info("Number of LUNs=%d\n", cfg->nluns);
2838
2839 return 0;
2840
2841fail:
2842 _fsg_common_remove_luns(common, i);
2843 return rc;
2844}
2845EXPORT_SYMBOL_GPL(fsg_common_create_luns);
2846
2847void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
2848 const char *pn)
2849{
2850 int i;
2851
2852 /* Prepare inquiryString */
2853 i = get_default_bcdDevice();
2854 snprintf(common->inquiry_string, sizeof(common->inquiry_string),
2855 "%-8s%-16s%04x", vn ?: "Linux",
2856 /* Assume product name dependent on the first LUN */
2857 pn ?: ((*common->luns)->cdrom
2858 ? "File-CD Gadget"
2859 : "File-Stor Gadget"),
2860 i);
2861}
2862EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
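
/*
 * Illustrative result of the format above: with the default strings and
 * a bcdDevice of (say) 0x0510, the inquiry string becomes
 * "Linux   File-Stor Gadget0510" -- an 8-character vendor field, a
 * 16-character product field and four hex digits of revision, as laid
 * out by the "%-8s%-16s%04x" format.
 */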
2863
2864static void fsg_common_release(struct fsg_common *common)
2865{
2866 int i;
2867
2868 /* If the thread isn't already dead, tell it to exit now */
2869 if (common->state != FSG_STATE_TERMINATED) {
2870 raise_exception(common, FSG_STATE_EXIT);
2871 wait_for_completion(&common->thread_notifier);
2872 }
2873
2874 for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
2875 struct fsg_lun *lun = common->luns[i];
2876 if (!lun)
2877 continue;
2878 fsg_lun_close(lun);
2879 if (device_is_registered(&lun->dev))
2880 device_unregister(&lun->dev);
2881 kfree(lun);
2882 }
2883
2884 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
2885 if (common->free_storage_on_release)
2886 kfree(common);
2887}
2888
2889
2890/*-------------------------------------------------------------------------*/
2891
2892static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2893{
2894 struct fsg_dev *fsg = fsg_from_func(f);
2895 struct fsg_common *common = fsg->common;
2896 struct usb_gadget *gadget = c->cdev->gadget;
2897 int i;
2898 struct usb_ep *ep;
2899 unsigned max_burst;
2900 int ret;
2901 struct fsg_opts *opts;
2902
2903	/* Don't allow binding if we don't have at least one LUN */
2904 ret = _fsg_common_get_max_lun(common);
2905 if (ret < 0) {
2906 pr_err("There should be at least one LUN.\n");
2907 return -EINVAL;
2908 }
2909
2910 opts = fsg_opts_from_func_inst(f->fi);
2911 if (!opts->no_configfs) {
2912 ret = fsg_common_set_cdev(fsg->common, c->cdev,
2913 fsg->common->can_stall);
2914 if (ret)
2915 return ret;
2916 fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
2917 }
2918
2919 if (!common->thread_task) {
2920 common->state = FSG_STATE_NORMAL;
2921 common->thread_task =
2922 kthread_create(fsg_main_thread, common, "file-storage");
2923 if (IS_ERR(common->thread_task)) {
2924 ret = PTR_ERR(common->thread_task);
2925 common->thread_task = NULL;
2926 common->state = FSG_STATE_TERMINATED;
2927 return ret;
2928 }
2929 DBG(common, "I/O thread pid: %d\n",
2930 task_pid_nr(common->thread_task));
2931 wake_up_process(common->thread_task);
2932 }
2933
2934 fsg->gadget = gadget;
2935
2936 /* New interface */
2937 i = usb_interface_id(c, f);
2938 if (i < 0)
2939 goto fail;
2940 fsg_intf_desc.bInterfaceNumber = i;
2941 fsg->interface_number = i;
2942
2943 /* Find all the endpoints we will use */
2944 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2945 if (!ep)
2946 goto autoconf_fail;
2947 fsg->bulk_in = ep;
2948
2949 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2950 if (!ep)
2951 goto autoconf_fail;
2952 fsg->bulk_out = ep;
2953
2954 /* Assume endpoint addresses are the same for both speeds */
2955 fsg_hs_bulk_in_desc.bEndpointAddress =
2956 fsg_fs_bulk_in_desc.bEndpointAddress;
2957 fsg_hs_bulk_out_desc.bEndpointAddress =
2958 fsg_fs_bulk_out_desc.bEndpointAddress;
2959
2960 /* Calculate bMaxBurst, we know packet size is 1024 */
2961 max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
2962
2963 fsg_ss_bulk_in_desc.bEndpointAddress =
2964 fsg_fs_bulk_in_desc.bEndpointAddress;
2965 fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
2966
2967 fsg_ss_bulk_out_desc.bEndpointAddress =
2968 fsg_fs_bulk_out_desc.bEndpointAddress;
2969 fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
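	/*
	 * Illustrative arithmetic: with the usual 16 KiB FSG_BUFLEN this
	 * is min(16384 / 1024, 15) == 15, and a bMaxBurst of 15 lets the
	 * host burst up to 16 packets per transfer on each bulk endpoint.
	 */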
2970
2971 ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function,
2972 fsg_ss_function, fsg_ss_function);
2973 if (ret)
2974 goto autoconf_fail;
2975
2976 return 0;
2977
2978autoconf_fail:
2979 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2980 i = -ENOTSUPP;
2981fail:
2982 /* terminate the thread */
2983 if (fsg->common->state != FSG_STATE_TERMINATED) {
2984 raise_exception(fsg->common, FSG_STATE_EXIT);
2985 wait_for_completion(&fsg->common->thread_notifier);
2986 }
2987 return i;
2988}
2989
2990/****************************** ALLOCATE FUNCTION *************************/
2991
2992static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2993{
2994 struct fsg_dev *fsg = fsg_from_func(f);
2995 struct fsg_common *common = fsg->common;
2996
2997 DBG(fsg, "unbind\n");
2998 if (fsg->common->fsg == fsg) {
2999 __raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
3000 /* FIXME: make interruptible or killable somehow? */
3001 wait_event(common->fsg_wait, common->fsg != fsg);
3002 }
3003
3004 usb_free_all_descriptors(&fsg->function);
3005}
3006
3007static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
3008{
3009 return container_of(to_config_group(item), struct fsg_lun_opts, group);
3010}
3011
3012static inline struct fsg_opts *to_fsg_opts(struct config_item *item)
3013{
3014 return container_of(to_config_group(item), struct fsg_opts,
3015 func_inst.group);
3016}
3017
3018static void fsg_lun_attr_release(struct config_item *item)
3019{
3020 struct fsg_lun_opts *lun_opts;
3021
3022 lun_opts = to_fsg_lun_opts(item);
3023 kfree(lun_opts);
3024}
3025
3026static struct configfs_item_operations fsg_lun_item_ops = {
3027 .release = fsg_lun_attr_release,
3028};
3029
3030static ssize_t fsg_lun_opts_file_show(struct config_item *item, char *page)
3031{
3032 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3033 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3034
3035 return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page);
3036}
3037
3038static ssize_t fsg_lun_opts_file_store(struct config_item *item,
3039 const char *page, size_t len)
3040{
3041 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3042 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3043
3044 return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len);
3045}
3046
3047CONFIGFS_ATTR(fsg_lun_opts_, file);
3048
3049static ssize_t fsg_lun_opts_ro_show(struct config_item *item, char *page)
3050{
3051 return fsg_show_ro(to_fsg_lun_opts(item)->lun, page);
3052}
3053
3054static ssize_t fsg_lun_opts_ro_store(struct config_item *item,
3055 const char *page, size_t len)
3056{
3057 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3058 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3059
3060 return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len);
3061}
3062
3063CONFIGFS_ATTR(fsg_lun_opts_, ro);
3064
3065static ssize_t fsg_lun_opts_removable_show(struct config_item *item,
3066 char *page)
3067{
3068 return fsg_show_removable(to_fsg_lun_opts(item)->lun, page);
3069}
3070
3071static ssize_t fsg_lun_opts_removable_store(struct config_item *item,
3072 const char *page, size_t len)
3073{
3074 return fsg_store_removable(to_fsg_lun_opts(item)->lun, page, len);
3075}
3076
3077CONFIGFS_ATTR(fsg_lun_opts_, removable);
3078
3079static ssize_t fsg_lun_opts_cdrom_show(struct config_item *item, char *page)
3080{
3081 return fsg_show_cdrom(to_fsg_lun_opts(item)->lun, page);
3082}
3083
3084static ssize_t fsg_lun_opts_cdrom_store(struct config_item *item,
3085 const char *page, size_t len)
3086{
3087 struct fsg_lun_opts *opts = to_fsg_lun_opts(item);
3088 struct fsg_opts *fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent);
3089
3090 return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page,
3091 len);
3092}
3093
3094CONFIGFS_ATTR(fsg_lun_opts_, cdrom);
3095
3096static ssize_t fsg_lun_opts_nofua_show(struct config_item *item, char *page)
3097{
3098 return fsg_show_nofua(to_fsg_lun_opts(item)->lun, page);
3099}
3100
3101static ssize_t fsg_lun_opts_nofua_store(struct config_item *item,
3102 const char *page, size_t len)
3103{
3104 return fsg_store_nofua(to_fsg_lun_opts(item)->lun, page, len);
3105}
3106
3107CONFIGFS_ATTR(fsg_lun_opts_, nofua);
3108
3109static ssize_t fsg_lun_opts_inquiry_string_show(struct config_item *item,
3110 char *page)
3111{
3112 return fsg_show_inquiry_string(to_fsg_lun_opts(item)->lun, page);
3113}
3114
3115static ssize_t fsg_lun_opts_inquiry_string_store(struct config_item *item,
3116 const char *page, size_t len)
3117{
3118 return fsg_store_inquiry_string(to_fsg_lun_opts(item)->lun, page, len);
3119}
3120
3121CONFIGFS_ATTR(fsg_lun_opts_, inquiry_string);
3122
3123static struct configfs_attribute *fsg_lun_attrs[] = {
3124 &fsg_lun_opts_attr_file,
3125 &fsg_lun_opts_attr_ro,
3126 &fsg_lun_opts_attr_removable,
3127 &fsg_lun_opts_attr_cdrom,
3128 &fsg_lun_opts_attr_nofua,
3129 &fsg_lun_opts_attr_inquiry_string,
3130 NULL,
3131};
3132
3133static const struct config_item_type fsg_lun_type = {
3134 .ct_item_ops = &fsg_lun_item_ops,
3135 .ct_attrs = fsg_lun_attrs,
3136 .ct_owner = THIS_MODULE,
3137};
3138
3139static struct config_group *fsg_lun_make(struct config_group *group,
3140 const char *name)
3141{
3142 struct fsg_lun_opts *opts;
3143 struct fsg_opts *fsg_opts;
3144 struct fsg_lun_config config;
3145 char *num_str;
3146 u8 num;
3147 int ret;
3148
3149 num_str = strchr(name, '.');
3150 if (!num_str) {
3151 pr_err("Unable to locate . in LUN.NUMBER\n");
3152 return ERR_PTR(-EINVAL);
3153 }
3154 num_str++;
3155
3156 ret = kstrtou8(num_str, 0, &num);
3157 if (ret)
3158 return ERR_PTR(ret);
3159
3160 fsg_opts = to_fsg_opts(&group->cg_item);
3161 if (num >= FSG_MAX_LUNS)
3162 return ERR_PTR(-ERANGE);
3163 num = array_index_nospec(num, FSG_MAX_LUNS);
3164
3165 mutex_lock(&fsg_opts->lock);
3166 if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
3167 ret = -EBUSY;
3168 goto out;
3169 }
3170
3171 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3172 if (!opts) {
3173 ret = -ENOMEM;
3174 goto out;
3175 }
3176
3177 memset(&config, 0, sizeof(config));
3178 config.removable = true;
3179
3180 ret = fsg_common_create_lun(fsg_opts->common, &config, num, name,
3181 (const char **)&group->cg_item.ci_name);
3182 if (ret) {
3183 kfree(opts);
3184 goto out;
3185 }
3186 opts->lun = fsg_opts->common->luns[num];
3187 opts->lun_id = num;
3188 mutex_unlock(&fsg_opts->lock);
3189
3190 config_group_init_type_name(&opts->group, name, &fsg_lun_type);
3191
3192 return &opts->group;
3193out:
3194 mutex_unlock(&fsg_opts->lock);
3195 return ERR_PTR(ret);
3196}
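
/*
 * Typical configfs usage for the function above (a sketch; the exact
 * parent path depends on how the gadget was set up):
 *
 *	mkdir functions/mass_storage.0/lun.1
 *
 * The text after the '.' is parsed by kstrtou8() into the LUN number,
 * and rmdir of the same directory tears the LUN down via fsg_lun_drop().
 */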
3197
3198static void fsg_lun_drop(struct config_group *group, struct config_item *item)
3199{
3200 struct fsg_lun_opts *lun_opts;
3201 struct fsg_opts *fsg_opts;
3202
3203 lun_opts = to_fsg_lun_opts(item);
3204 fsg_opts = to_fsg_opts(&group->cg_item);
3205
3206 mutex_lock(&fsg_opts->lock);
3207 if (fsg_opts->refcnt) {
3208 struct config_item *gadget;
3209
3210 gadget = group->cg_item.ci_parent->ci_parent;
3211 unregister_gadget_item(gadget);
3212 }
3213
3214 fsg_common_remove_lun(lun_opts->lun);
3215 fsg_opts->common->luns[lun_opts->lun_id] = NULL;
3216 lun_opts->lun_id = 0;
3217 mutex_unlock(&fsg_opts->lock);
3218
3219 config_item_put(item);
3220}
3221
3222static void fsg_attr_release(struct config_item *item)
3223{
3224 struct fsg_opts *opts = to_fsg_opts(item);
3225
3226 usb_put_function_instance(&opts->func_inst);
3227}
3228
3229static struct configfs_item_operations fsg_item_ops = {
3230 .release = fsg_attr_release,
3231};
3232
3233static ssize_t fsg_opts_stall_show(struct config_item *item, char *page)
3234{
3235 struct fsg_opts *opts = to_fsg_opts(item);
3236 int result;
3237
3238 mutex_lock(&opts->lock);
3239 result = sprintf(page, "%d", opts->common->can_stall);
3240 mutex_unlock(&opts->lock);
3241
3242 return result;
3243}
3244
3245static ssize_t fsg_opts_stall_store(struct config_item *item, const char *page,
3246 size_t len)
3247{
3248 struct fsg_opts *opts = to_fsg_opts(item);
3249 int ret;
3250 bool stall;
3251
3252 mutex_lock(&opts->lock);
3253
3254 if (opts->refcnt) {
3255 mutex_unlock(&opts->lock);
3256 return -EBUSY;
3257 }
3258
3259 ret = strtobool(page, &stall);
3260 if (!ret) {
3261 opts->common->can_stall = stall;
3262 ret = len;
3263 }
3264
3265 mutex_unlock(&opts->lock);
3266
3267 return ret;
3268}
3269
3270CONFIGFS_ATTR(fsg_opts_, stall);
3271
3272#ifdef CONFIG_USB_GADGET_DEBUG_FILES
3273static ssize_t fsg_opts_num_buffers_show(struct config_item *item, char *page)
3274{
3275 struct fsg_opts *opts = to_fsg_opts(item);
3276 int result;
3277
3278 mutex_lock(&opts->lock);
3279 result = sprintf(page, "%d", opts->common->fsg_num_buffers);
3280 mutex_unlock(&opts->lock);
3281
3282 return result;
3283}
3284
3285static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
3286 const char *page, size_t len)
3287{
3288 struct fsg_opts *opts = to_fsg_opts(item);
3289 int ret;
3290 u8 num;
3291
3292 mutex_lock(&opts->lock);
3293 if (opts->refcnt) {
3294 ret = -EBUSY;
3295 goto end;
3296 }
3297 ret = kstrtou8(page, 0, &num);
3298 if (ret)
3299 goto end;
3300
3301 ret = fsg_common_set_num_buffers(opts->common, num);
3302 if (ret)
3303 goto end;
3304 ret = len;
3305
3306end:
3307 mutex_unlock(&opts->lock);
3308 return ret;
3309}
3310
3311CONFIGFS_ATTR(fsg_opts_, num_buffers);
3312#endif
3313
3314static struct configfs_attribute *fsg_attrs[] = {
3315 &fsg_opts_attr_stall,
3316#ifdef CONFIG_USB_GADGET_DEBUG_FILES
3317 &fsg_opts_attr_num_buffers,
3318#endif
3319 NULL,
3320};
3321
3322static struct configfs_group_operations fsg_group_ops = {
3323 .make_group = fsg_lun_make,
3324 .drop_item = fsg_lun_drop,
3325};
3326
3327static const struct config_item_type fsg_func_type = {
3328 .ct_item_ops = &fsg_item_ops,
3329 .ct_group_ops = &fsg_group_ops,
3330 .ct_attrs = fsg_attrs,
3331 .ct_owner = THIS_MODULE,
3332};
3333
3334static void fsg_free_inst(struct usb_function_instance *fi)
3335{
3336 struct fsg_opts *opts;
3337
3338 opts = fsg_opts_from_func_inst(fi);
3339 fsg_common_release(opts->common);
3340 kfree(opts);
3341}
3342
3343static struct usb_function_instance *fsg_alloc_inst(void)
3344{
3345 struct fsg_opts *opts;
3346 struct fsg_lun_config config;
3347 int rc;
3348
3349 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
3350 if (!opts)
3351 return ERR_PTR(-ENOMEM);
3352 mutex_init(&opts->lock);
3353 opts->func_inst.free_func_inst = fsg_free_inst;
3354 opts->common = fsg_common_setup(opts->common);
3355 if (IS_ERR(opts->common)) {
3356 rc = PTR_ERR(opts->common);
3357 goto release_opts;
3358 }
3359
3360 rc = fsg_common_set_num_buffers(opts->common,
3361 CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
3362 if (rc)
3363 goto release_common;
3364
3365 pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
3366
3367 memset(&config, 0, sizeof(config));
3368 config.removable = true;
3369 rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
3370 (const char **)&opts->func_inst.group.cg_item.ci_name);
3371 if (rc)
3372 goto release_buffers;
3373
3374 opts->lun0.lun = opts->common->luns[0];
3375 opts->lun0.lun_id = 0;
3376
3377 config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type);
3378
3379 config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
3380 configfs_add_default_group(&opts->lun0.group, &opts->func_inst.group);
3381
3382 return &opts->func_inst;
3383
3384release_buffers:
3385 fsg_common_free_buffers(opts->common);
3386release_common:
3387 kfree(opts->common);
3388release_opts:
3389 kfree(opts);
3390 return ERR_PTR(rc);
3391}
3392
3393static void fsg_free(struct usb_function *f)
3394{
3395 struct fsg_dev *fsg;
3396 struct fsg_opts *opts;
3397
3398 fsg = container_of(f, struct fsg_dev, function);
3399 opts = container_of(f->fi, struct fsg_opts, func_inst);
3400
3401 mutex_lock(&opts->lock);
3402 opts->refcnt--;
3403 mutex_unlock(&opts->lock);
3404
3405 kfree(fsg);
3406}
3407
3408static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
3409{
3410 struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
3411 struct fsg_common *common = opts->common;
3412 struct fsg_dev *fsg;
3413
3414 fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
3415 if (unlikely(!fsg))
3416 return ERR_PTR(-ENOMEM);
3417
3418 mutex_lock(&opts->lock);
3419 opts->refcnt++;
3420 mutex_unlock(&opts->lock);
3421
3422 fsg->function.name = FSG_DRIVER_DESC;
3423 fsg->function.bind = fsg_bind;
3424 fsg->function.unbind = fsg_unbind;
3425 fsg->function.setup = fsg_setup;
3426 fsg->function.set_alt = fsg_set_alt;
3427 fsg->function.disable = fsg_disable;
3428 fsg->function.free_func = fsg_free;
3429
3430 fsg->common = common;
3431
3432 return &fsg->function;
3433}
3434
3435DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc);
3436MODULE_LICENSE("GPL");
3437MODULE_AUTHOR("Michal Nazarewicz");
3438
3439/************************* Module parameters *************************/
3440
3441
3442void fsg_config_from_params(struct fsg_config *cfg,
3443 const struct fsg_module_parameters *params,
3444 unsigned int fsg_num_buffers)
3445{
3446 struct fsg_lun_config *lun;
3447 unsigned i;
3448
3449 /* Configure LUNs */
3450 cfg->nluns =
3451 min(params->luns ?: (params->file_count ?: 1u),
3452 (unsigned)FSG_MAX_LUNS);
3453 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3454 lun->ro = !!params->ro[i];
3455 lun->cdrom = !!params->cdrom[i];
3456 lun->removable = !!params->removable[i];
3457 lun->filename =
3458 params->file_count > i && params->file[i][0]
3459 ? params->file[i]
3460 : NULL;
3461 }
3462
3463 /* Let MSF use defaults */
3464 cfg->vendor_name = NULL;
3465 cfg->product_name = NULL;
3466
3467 cfg->ops = NULL;
3468 cfg->private_data = NULL;
3469
3470 /* Finalise */
3471 cfg->can_stall = params->stall;
3472 cfg->fsg_num_buffers = fsg_num_buffers;
3473}
3474EXPORT_SYMBOL_GPL(fsg_config_from_params);
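
/*
 * Sketch of how the legacy g_mass_storage module typically feeds this
 * helper (parameter names per Documentation/usb/mass-storage.rst; the
 * concrete values are only an example):
 *
 *	modprobe g_mass_storage file=/root/backing.img removable=1 stall=0
 *
 * which ends up here as nluns == 1, luns[0].filename ==
 * "/root/backing.img", luns[0].removable == true and can_stall == false.
 */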