Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: ccg: include all sourced files

This Android gadget includes a bunch of .c files. Fixing normal gadgets
is not the real problem, but this gadget is not always fixable since the
problems here are fundamental / design-related.

*I* wanted to get this removed but other people want to keep it even
though there were reports that Android itself is not using it. Some
people think that it is better to have this instead of nothing, and others
argue that they need sdb and the mass storage gadget. The sdb function is
not provided by ccg so I don't see the point of this. I don't see any
logical reasoning behind it and I decided that it is time to retreat.

This patch brings all dependencies of ccg into staging so I can do
whatever I want in drivers/usb/gadget without breaking ccg.

Cc: devel@driverdev.osuosl.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Felipe Balbi <balbi@ti.com>

authored by

Sebastian Andrzej Siewior and committed by
Felipe Balbi
e220ff75 93952956

+15071 -14
-2
drivers/staging/ccg/Makefile
··· 1 1 g_ccg-y := ccg.o 2 - ccflags-y += -Idrivers/usb/gadget 3 - 4 2 obj-$(CONFIG_USB_G_CCG) += g_ccg.o
+12 -12
drivers/staging/ccg/ccg.c
··· 32 32 #include <linux/platform_device.h> 33 33 34 34 #include <linux/usb/ch9.h> 35 - #include <linux/usb/composite.h> 35 + #include "composite.h" 36 36 #include <linux/usb/gadget.h> 37 37 38 38 #include "gadget_chips.h" ··· 44 44 * the runtime footprint, and giving us at least some parts of what 45 45 * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 46 46 */ 47 - #include "../../usb/gadget/usbstring.c" 48 - #include "../../usb/gadget/config.c" 49 - #include "../../usb/gadget/epautoconf.c" 50 - #include "../../usb/gadget/composite.c" 47 + #include "usbstring.c" 48 + #include "config.c" 49 + #include "epautoconf.c" 50 + #include "composite.c" 51 51 52 - #include "../../usb/gadget/f_mass_storage.c" 53 - #include "../../usb/gadget/u_serial.c" 54 - #include "../../usb/gadget/f_acm.c" 52 + #include "f_mass_storage.c" 53 + #include "u_serial.c" 54 + #include "f_acm.c" 55 55 #define USB_ETH_RNDIS y 56 - #include "../../usb/gadget/f_rndis.c" 57 - #include "../../usb/gadget/rndis.c" 58 - #include "../../usb/gadget/u_ether.c" 59 - #include "../../usb/gadget/f_fs.c" 56 + #include "f_rndis.c" 57 + #include "rndis.c" 58 + #include "u_ether.c" 59 + #include "f_fs.c" 60 60 61 61 MODULE_AUTHOR("Mike Lockwood, Andrzej Pietrasiewicz"); 62 62 MODULE_DESCRIPTION("Configurable Composite USB Gadget");
+1687
drivers/staging/ccg/composite.c
··· 1 + /* 2 + * composite.c - infrastructure for Composite USB Gadgets 3 + * 4 + * Copyright (C) 2006-2008 David Brownell 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + */ 11 + 12 + /* #define VERBOSE_DEBUG */ 13 + 14 + #include <linux/kallsyms.h> 15 + #include <linux/kernel.h> 16 + #include <linux/slab.h> 17 + #include <linux/module.h> 18 + #include <linux/device.h> 19 + #include <linux/utsname.h> 20 + 21 + #include <linux/usb/composite.h> 22 + #include <asm/unaligned.h> 23 + 24 + /* 25 + * The code in this file is utility code, used to build a gadget driver 26 + * from one or more "function" drivers, one or more "configuration" 27 + * objects, and a "usb_composite_driver" by gluing them together along 28 + * with the relevant device-wide data. 29 + */ 30 + 31 + /* big enough to hold our biggest descriptor */ 32 + #define USB_BUFSIZ 1024 33 + 34 + static struct usb_composite_driver *composite; 35 + 36 + /* Some systems will need runtime overrides for the product identifiers 37 + * published in the device descriptor, either numbers or strings or both. 38 + * String parameters are in UTF-8 (superset of ASCII's 7 bit characters). 
39 + */ 40 + 41 + static ushort idVendor; 42 + module_param(idVendor, ushort, 0644); 43 + MODULE_PARM_DESC(idVendor, "USB Vendor ID"); 44 + 45 + static ushort idProduct; 46 + module_param(idProduct, ushort, 0644); 47 + MODULE_PARM_DESC(idProduct, "USB Product ID"); 48 + 49 + static ushort bcdDevice; 50 + module_param(bcdDevice, ushort, 0644); 51 + MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)"); 52 + 53 + static char *iManufacturer; 54 + module_param(iManufacturer, charp, 0644); 55 + MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string"); 56 + 57 + static char *iProduct; 58 + module_param(iProduct, charp, 0644); 59 + MODULE_PARM_DESC(iProduct, "USB Product string"); 60 + 61 + static char *iSerialNumber; 62 + module_param(iSerialNumber, charp, 0644); 63 + MODULE_PARM_DESC(iSerialNumber, "SerialNumber string"); 64 + 65 + static char composite_manufacturer[50]; 66 + 67 + /*-------------------------------------------------------------------------*/ 68 + /** 69 + * next_ep_desc() - advance to the next EP descriptor 70 + * @t: currect pointer within descriptor array 71 + * 72 + * Return: next EP descriptor or NULL 73 + * 74 + * Iterate over @t until either EP descriptor found or 75 + * NULL (that indicates end of list) encountered 76 + */ 77 + static struct usb_descriptor_header** 78 + next_ep_desc(struct usb_descriptor_header **t) 79 + { 80 + for (; *t; t++) { 81 + if ((*t)->bDescriptorType == USB_DT_ENDPOINT) 82 + return t; 83 + } 84 + return NULL; 85 + } 86 + 87 + /* 88 + * for_each_ep_desc()- iterate over endpoint descriptors in the 89 + * descriptors list 90 + * @start: pointer within descriptor array. 91 + * @ep_desc: endpoint descriptor to use as the loop cursor 92 + */ 93 + #define for_each_ep_desc(start, ep_desc) \ 94 + for (ep_desc = next_ep_desc(start); \ 95 + ep_desc; ep_desc = next_ep_desc(ep_desc+1)) 96 + 97 + /** 98 + * config_ep_by_speed() - configures the given endpoint 99 + * according to gadget speed. 
100 + * @g: pointer to the gadget 101 + * @f: usb function 102 + * @_ep: the endpoint to configure 103 + * 104 + * Return: error code, 0 on success 105 + * 106 + * This function chooses the right descriptors for a given 107 + * endpoint according to gadget speed and saves it in the 108 + * endpoint desc field. If the endpoint already has a descriptor 109 + * assigned to it - overwrites it with currently corresponding 110 + * descriptor. The endpoint maxpacket field is updated according 111 + * to the chosen descriptor. 112 + * Note: the supplied function should hold all the descriptors 113 + * for supported speeds 114 + */ 115 + int config_ep_by_speed(struct usb_gadget *g, 116 + struct usb_function *f, 117 + struct usb_ep *_ep) 118 + { 119 + struct usb_composite_dev *cdev = get_gadget_data(g); 120 + struct usb_endpoint_descriptor *chosen_desc = NULL; 121 + struct usb_descriptor_header **speed_desc = NULL; 122 + 123 + struct usb_ss_ep_comp_descriptor *comp_desc = NULL; 124 + int want_comp_desc = 0; 125 + 126 + struct usb_descriptor_header **d_spd; /* cursor for speed desc */ 127 + 128 + if (!g || !f || !_ep) 129 + return -EIO; 130 + 131 + /* select desired speed */ 132 + switch (g->speed) { 133 + case USB_SPEED_SUPER: 134 + if (gadget_is_superspeed(g)) { 135 + speed_desc = f->ss_descriptors; 136 + want_comp_desc = 1; 137 + break; 138 + } 139 + /* else: Fall trough */ 140 + case USB_SPEED_HIGH: 141 + if (gadget_is_dualspeed(g)) { 142 + speed_desc = f->hs_descriptors; 143 + break; 144 + } 145 + /* else: fall through */ 146 + default: 147 + speed_desc = f->descriptors; 148 + } 149 + /* find descriptors */ 150 + for_each_ep_desc(speed_desc, d_spd) { 151 + chosen_desc = (struct usb_endpoint_descriptor *)*d_spd; 152 + if (chosen_desc->bEndpointAddress == _ep->address) 153 + goto ep_found; 154 + } 155 + return -EIO; 156 + 157 + ep_found: 158 + /* commit results */ 159 + _ep->maxpacket = usb_endpoint_maxp(chosen_desc); 160 + _ep->desc = chosen_desc; 161 + _ep->comp_desc = 
NULL; 162 + _ep->maxburst = 0; 163 + _ep->mult = 0; 164 + if (!want_comp_desc) 165 + return 0; 166 + 167 + /* 168 + * Companion descriptor should follow EP descriptor 169 + * USB 3.0 spec, #9.6.7 170 + */ 171 + comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd); 172 + if (!comp_desc || 173 + (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP)) 174 + return -EIO; 175 + _ep->comp_desc = comp_desc; 176 + if (g->speed == USB_SPEED_SUPER) { 177 + switch (usb_endpoint_type(_ep->desc)) { 178 + case USB_ENDPOINT_XFER_ISOC: 179 + /* mult: bits 1:0 of bmAttributes */ 180 + _ep->mult = comp_desc->bmAttributes & 0x3; 181 + case USB_ENDPOINT_XFER_BULK: 182 + case USB_ENDPOINT_XFER_INT: 183 + _ep->maxburst = comp_desc->bMaxBurst + 1; 184 + break; 185 + default: 186 + if (comp_desc->bMaxBurst != 0) 187 + ERROR(cdev, "ep0 bMaxBurst must be 0\n"); 188 + _ep->maxburst = 1; 189 + break; 190 + } 191 + } 192 + return 0; 193 + } 194 + 195 + /** 196 + * usb_add_function() - add a function to a configuration 197 + * @config: the configuration 198 + * @function: the function being added 199 + * Context: single threaded during gadget setup 200 + * 201 + * After initialization, each configuration must have one or more 202 + * functions added to it. Adding a function involves calling its @bind() 203 + * method to allocate resources such as interface and string identifiers 204 + * and endpoints. 205 + * 206 + * This function returns the value of the function's bind(), which is 207 + * zero for success else a negative errno value. 
208 + */ 209 + int usb_add_function(struct usb_configuration *config, 210 + struct usb_function *function) 211 + { 212 + int value = -EINVAL; 213 + 214 + DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n", 215 + function->name, function, 216 + config->label, config); 217 + 218 + if (!function->set_alt || !function->disable) 219 + goto done; 220 + 221 + function->config = config; 222 + list_add_tail(&function->list, &config->functions); 223 + 224 + /* REVISIT *require* function->bind? */ 225 + if (function->bind) { 226 + value = function->bind(config, function); 227 + if (value < 0) { 228 + list_del(&function->list); 229 + function->config = NULL; 230 + } 231 + } else 232 + value = 0; 233 + 234 + /* We allow configurations that don't work at both speeds. 235 + * If we run into a lowspeed Linux system, treat it the same 236 + * as full speed ... it's the function drivers that will need 237 + * to avoid bulk and ISO transfers. 238 + */ 239 + if (!config->fullspeed && function->descriptors) 240 + config->fullspeed = true; 241 + if (!config->highspeed && function->hs_descriptors) 242 + config->highspeed = true; 243 + if (!config->superspeed && function->ss_descriptors) 244 + config->superspeed = true; 245 + 246 + done: 247 + if (value) 248 + DBG(config->cdev, "adding '%s'/%p --> %d\n", 249 + function->name, function, value); 250 + return value; 251 + } 252 + 253 + /** 254 + * usb_function_deactivate - prevent function and gadget enumeration 255 + * @function: the function that isn't yet ready to respond 256 + * 257 + * Blocks response of the gadget driver to host enumeration by 258 + * preventing the data line pullup from being activated. This is 259 + * normally called during @bind() processing to change from the 260 + * initial "ready to respond" state, or when a required resource 261 + * becomes available. 
262 + * 263 + * For example, drivers that serve as a passthrough to a userspace 264 + * daemon can block enumeration unless that daemon (such as an OBEX, 265 + * MTP, or print server) is ready to handle host requests. 266 + * 267 + * Not all systems support software control of their USB peripheral 268 + * data pullups. 269 + * 270 + * Returns zero on success, else negative errno. 271 + */ 272 + int usb_function_deactivate(struct usb_function *function) 273 + { 274 + struct usb_composite_dev *cdev = function->config->cdev; 275 + unsigned long flags; 276 + int status = 0; 277 + 278 + spin_lock_irqsave(&cdev->lock, flags); 279 + 280 + if (cdev->deactivations == 0) 281 + status = usb_gadget_disconnect(cdev->gadget); 282 + if (status == 0) 283 + cdev->deactivations++; 284 + 285 + spin_unlock_irqrestore(&cdev->lock, flags); 286 + return status; 287 + } 288 + 289 + /** 290 + * usb_function_activate - allow function and gadget enumeration 291 + * @function: function on which usb_function_activate() was called 292 + * 293 + * Reverses effect of usb_function_deactivate(). If no more functions 294 + * are delaying their activation, the gadget driver will respond to 295 + * host enumeration procedures. 296 + * 297 + * Returns zero on success, else negative errno. 
298 + */ 299 + int usb_function_activate(struct usb_function *function) 300 + { 301 + struct usb_composite_dev *cdev = function->config->cdev; 302 + unsigned long flags; 303 + int status = 0; 304 + 305 + spin_lock_irqsave(&cdev->lock, flags); 306 + 307 + if (WARN_ON(cdev->deactivations == 0)) 308 + status = -EINVAL; 309 + else { 310 + cdev->deactivations--; 311 + if (cdev->deactivations == 0) 312 + status = usb_gadget_connect(cdev->gadget); 313 + } 314 + 315 + spin_unlock_irqrestore(&cdev->lock, flags); 316 + return status; 317 + } 318 + 319 + /** 320 + * usb_interface_id() - allocate an unused interface ID 321 + * @config: configuration associated with the interface 322 + * @function: function handling the interface 323 + * Context: single threaded during gadget setup 324 + * 325 + * usb_interface_id() is called from usb_function.bind() callbacks to 326 + * allocate new interface IDs. The function driver will then store that 327 + * ID in interface, association, CDC union, and other descriptors. It 328 + * will also handle any control requests targeted at that interface, 329 + * particularly changing its altsetting via set_alt(). There may 330 + * also be class-specific or vendor-specific requests to handle. 331 + * 332 + * All interface identifier should be allocated using this routine, to 333 + * ensure that for example different functions don't wrongly assign 334 + * different meanings to the same identifier. Note that since interface 335 + * identifiers are configuration-specific, functions used in more than 336 + * one configuration (or more than once in a given configuration) need 337 + * multiple versions of the relevant descriptors. 338 + * 339 + * Returns the interface ID which was allocated; or -ENODEV if no 340 + * more interface IDs can be allocated. 
341 + */ 342 + int usb_interface_id(struct usb_configuration *config, 343 + struct usb_function *function) 344 + { 345 + unsigned id = config->next_interface_id; 346 + 347 + if (id < MAX_CONFIG_INTERFACES) { 348 + config->interface[id] = function; 349 + config->next_interface_id = id + 1; 350 + return id; 351 + } 352 + return -ENODEV; 353 + } 354 + 355 + static int config_buf(struct usb_configuration *config, 356 + enum usb_device_speed speed, void *buf, u8 type) 357 + { 358 + struct usb_config_descriptor *c = buf; 359 + void *next = buf + USB_DT_CONFIG_SIZE; 360 + int len = USB_BUFSIZ - USB_DT_CONFIG_SIZE; 361 + struct usb_function *f; 362 + int status; 363 + 364 + /* write the config descriptor */ 365 + c = buf; 366 + c->bLength = USB_DT_CONFIG_SIZE; 367 + c->bDescriptorType = type; 368 + /* wTotalLength is written later */ 369 + c->bNumInterfaces = config->next_interface_id; 370 + c->bConfigurationValue = config->bConfigurationValue; 371 + c->iConfiguration = config->iConfiguration; 372 + c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes; 373 + c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2); 374 + 375 + /* There may be e.g. 
OTG descriptors */ 376 + if (config->descriptors) { 377 + status = usb_descriptor_fillbuf(next, len, 378 + config->descriptors); 379 + if (status < 0) 380 + return status; 381 + len -= status; 382 + next += status; 383 + } 384 + 385 + /* add each function's descriptors */ 386 + list_for_each_entry(f, &config->functions, list) { 387 + struct usb_descriptor_header **descriptors; 388 + 389 + switch (speed) { 390 + case USB_SPEED_SUPER: 391 + descriptors = f->ss_descriptors; 392 + break; 393 + case USB_SPEED_HIGH: 394 + descriptors = f->hs_descriptors; 395 + break; 396 + default: 397 + descriptors = f->descriptors; 398 + } 399 + 400 + if (!descriptors) 401 + continue; 402 + status = usb_descriptor_fillbuf(next, len, 403 + (const struct usb_descriptor_header **) descriptors); 404 + if (status < 0) 405 + return status; 406 + len -= status; 407 + next += status; 408 + } 409 + 410 + len = next - buf; 411 + c->wTotalLength = cpu_to_le16(len); 412 + return len; 413 + } 414 + 415 + static int config_desc(struct usb_composite_dev *cdev, unsigned w_value) 416 + { 417 + struct usb_gadget *gadget = cdev->gadget; 418 + struct usb_configuration *c; 419 + u8 type = w_value >> 8; 420 + enum usb_device_speed speed = USB_SPEED_UNKNOWN; 421 + 422 + if (gadget->speed == USB_SPEED_SUPER) 423 + speed = gadget->speed; 424 + else if (gadget_is_dualspeed(gadget)) { 425 + int hs = 0; 426 + if (gadget->speed == USB_SPEED_HIGH) 427 + hs = 1; 428 + if (type == USB_DT_OTHER_SPEED_CONFIG) 429 + hs = !hs; 430 + if (hs) 431 + speed = USB_SPEED_HIGH; 432 + 433 + } 434 + 435 + /* This is a lookup by config *INDEX* */ 436 + w_value &= 0xff; 437 + list_for_each_entry(c, &cdev->configs, list) { 438 + /* ignore configs that won't work at this speed */ 439 + switch (speed) { 440 + case USB_SPEED_SUPER: 441 + if (!c->superspeed) 442 + continue; 443 + break; 444 + case USB_SPEED_HIGH: 445 + if (!c->highspeed) 446 + continue; 447 + break; 448 + default: 449 + if (!c->fullspeed) 450 + continue; 451 + } 452 + 
453 + if (w_value == 0) 454 + return config_buf(c, speed, cdev->req->buf, type); 455 + w_value--; 456 + } 457 + return -EINVAL; 458 + } 459 + 460 + static int count_configs(struct usb_composite_dev *cdev, unsigned type) 461 + { 462 + struct usb_gadget *gadget = cdev->gadget; 463 + struct usb_configuration *c; 464 + unsigned count = 0; 465 + int hs = 0; 466 + int ss = 0; 467 + 468 + if (gadget_is_dualspeed(gadget)) { 469 + if (gadget->speed == USB_SPEED_HIGH) 470 + hs = 1; 471 + if (gadget->speed == USB_SPEED_SUPER) 472 + ss = 1; 473 + if (type == USB_DT_DEVICE_QUALIFIER) 474 + hs = !hs; 475 + } 476 + list_for_each_entry(c, &cdev->configs, list) { 477 + /* ignore configs that won't work at this speed */ 478 + if (ss) { 479 + if (!c->superspeed) 480 + continue; 481 + } else if (hs) { 482 + if (!c->highspeed) 483 + continue; 484 + } else { 485 + if (!c->fullspeed) 486 + continue; 487 + } 488 + count++; 489 + } 490 + return count; 491 + } 492 + 493 + /** 494 + * bos_desc() - prepares the BOS descriptor. 495 + * @cdev: pointer to usb_composite device to generate the bos 496 + * descriptor for 497 + * 498 + * This function generates the BOS (Binary Device Object) 499 + * descriptor and its device capabilities descriptors. The BOS 500 + * descriptor should be supported by a SuperSpeed device. 501 + */ 502 + static int bos_desc(struct usb_composite_dev *cdev) 503 + { 504 + struct usb_ext_cap_descriptor *usb_ext; 505 + struct usb_ss_cap_descriptor *ss_cap; 506 + struct usb_dcd_config_params dcd_config_params; 507 + struct usb_bos_descriptor *bos = cdev->req->buf; 508 + 509 + bos->bLength = USB_DT_BOS_SIZE; 510 + bos->bDescriptorType = USB_DT_BOS; 511 + 512 + bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE); 513 + bos->bNumDeviceCaps = 0; 514 + 515 + /* 516 + * A SuperSpeed device shall include the USB2.0 extension descriptor 517 + * and shall support LPM when operating in USB2.0 HS mode. 
518 + */ 519 + usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength); 520 + bos->bNumDeviceCaps++; 521 + le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE); 522 + usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; 523 + usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 524 + usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; 525 + usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT); 526 + 527 + /* 528 + * The Superspeed USB Capability descriptor shall be implemented by all 529 + * SuperSpeed devices. 530 + */ 531 + ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength); 532 + bos->bNumDeviceCaps++; 533 + le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE); 534 + ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE; 535 + ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 536 + ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE; 537 + ss_cap->bmAttributes = 0; /* LTM is not supported yet */ 538 + ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION | 539 + USB_FULL_SPEED_OPERATION | 540 + USB_HIGH_SPEED_OPERATION | 541 + USB_5GBPS_OPERATION); 542 + ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION; 543 + 544 + /* Get Controller configuration */ 545 + if (cdev->gadget->ops->get_config_params) 546 + cdev->gadget->ops->get_config_params(&dcd_config_params); 547 + else { 548 + dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT; 549 + dcd_config_params.bU2DevExitLat = 550 + cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); 551 + } 552 + ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; 553 + ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; 554 + 555 + return le16_to_cpu(bos->wTotalLength); 556 + } 557 + 558 + static void device_qual(struct usb_composite_dev *cdev) 559 + { 560 + struct usb_qualifier_descriptor *qual = cdev->req->buf; 561 + 562 + qual->bLength = sizeof(*qual); 563 + qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER; 564 + /* POLICY: same bcdUSB and device type info at both speeds */ 565 + qual->bcdUSB = cdev->desc.bcdUSB; 566 + 
qual->bDeviceClass = cdev->desc.bDeviceClass; 567 + qual->bDeviceSubClass = cdev->desc.bDeviceSubClass; 568 + qual->bDeviceProtocol = cdev->desc.bDeviceProtocol; 569 + /* ASSUME same EP0 fifo size at both speeds */ 570 + qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket; 571 + qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER); 572 + qual->bRESERVED = 0; 573 + } 574 + 575 + /*-------------------------------------------------------------------------*/ 576 + 577 + static void reset_config(struct usb_composite_dev *cdev) 578 + { 579 + struct usb_function *f; 580 + 581 + DBG(cdev, "reset config\n"); 582 + 583 + list_for_each_entry(f, &cdev->config->functions, list) { 584 + if (f->disable) 585 + f->disable(f); 586 + 587 + bitmap_zero(f->endpoints, 32); 588 + } 589 + cdev->config = NULL; 590 + } 591 + 592 + static int set_config(struct usb_composite_dev *cdev, 593 + const struct usb_ctrlrequest *ctrl, unsigned number) 594 + { 595 + struct usb_gadget *gadget = cdev->gadget; 596 + struct usb_configuration *c = NULL; 597 + int result = -EINVAL; 598 + unsigned power = gadget_is_otg(gadget) ? 8 : 100; 599 + int tmp; 600 + 601 + if (number) { 602 + list_for_each_entry(c, &cdev->configs, list) { 603 + if (c->bConfigurationValue == number) { 604 + /* 605 + * We disable the FDs of the previous 606 + * configuration only if the new configuration 607 + * is a valid one 608 + */ 609 + if (cdev->config) 610 + reset_config(cdev); 611 + result = 0; 612 + break; 613 + } 614 + } 615 + if (result < 0) 616 + goto done; 617 + } else { /* Zero configuration value - need to reset the config */ 618 + if (cdev->config) 619 + reset_config(cdev); 620 + result = 0; 621 + } 622 + 623 + INFO(cdev, "%s config #%d: %s\n", 624 + usb_speed_string(gadget->speed), 625 + number, c ? c->label : "unconfigured"); 626 + 627 + if (!c) 628 + goto done; 629 + 630 + cdev->config = c; 631 + 632 + /* Initialize all interfaces by setting them to altsetting zero. 
*/ 633 + for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) { 634 + struct usb_function *f = c->interface[tmp]; 635 + struct usb_descriptor_header **descriptors; 636 + 637 + if (!f) 638 + break; 639 + 640 + /* 641 + * Record which endpoints are used by the function. This is used 642 + * to dispatch control requests targeted at that endpoint to the 643 + * function's setup callback instead of the current 644 + * configuration's setup callback. 645 + */ 646 + switch (gadget->speed) { 647 + case USB_SPEED_SUPER: 648 + descriptors = f->ss_descriptors; 649 + break; 650 + case USB_SPEED_HIGH: 651 + descriptors = f->hs_descriptors; 652 + break; 653 + default: 654 + descriptors = f->descriptors; 655 + } 656 + 657 + for (; *descriptors; ++descriptors) { 658 + struct usb_endpoint_descriptor *ep; 659 + int addr; 660 + 661 + if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT) 662 + continue; 663 + 664 + ep = (struct usb_endpoint_descriptor *)*descriptors; 665 + addr = ((ep->bEndpointAddress & 0x80) >> 3) 666 + | (ep->bEndpointAddress & 0x0f); 667 + set_bit(addr, f->endpoints); 668 + } 669 + 670 + result = f->set_alt(f, tmp, 0); 671 + if (result < 0) { 672 + DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n", 673 + tmp, f->name, f, result); 674 + 675 + reset_config(cdev); 676 + goto done; 677 + } 678 + 679 + if (result == USB_GADGET_DELAYED_STATUS) { 680 + DBG(cdev, 681 + "%s: interface %d (%s) requested delayed status\n", 682 + __func__, tmp, f->name); 683 + cdev->delayed_status++; 684 + DBG(cdev, "delayed_status count %d\n", 685 + cdev->delayed_status); 686 + } 687 + } 688 + 689 + /* when we return, be sure our power usage is valid */ 690 + power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW; 691 + done: 692 + usb_gadget_vbus_draw(gadget, power); 693 + if (result >= 0 && cdev->delayed_status) 694 + result = USB_GADGET_DELAYED_STATUS; 695 + return result; 696 + } 697 + 698 + /** 699 + * usb_add_config() - add a configuration to a device. 
700 + * @cdev: wraps the USB gadget 701 + * @config: the configuration, with bConfigurationValue assigned 702 + * @bind: the configuration's bind function 703 + * Context: single threaded during gadget setup 704 + * 705 + * One of the main tasks of a composite @bind() routine is to 706 + * add each of the configurations it supports, using this routine. 707 + * 708 + * This function returns the value of the configuration's @bind(), which 709 + * is zero for success else a negative errno value. Binding configurations 710 + * assigns global resources including string IDs, and per-configuration 711 + * resources such as interface IDs and endpoints. 712 + */ 713 + int usb_add_config(struct usb_composite_dev *cdev, 714 + struct usb_configuration *config, 715 + int (*bind)(struct usb_configuration *)) 716 + { 717 + int status = -EINVAL; 718 + struct usb_configuration *c; 719 + 720 + DBG(cdev, "adding config #%u '%s'/%p\n", 721 + config->bConfigurationValue, 722 + config->label, config); 723 + 724 + if (!config->bConfigurationValue || !bind) 725 + goto done; 726 + 727 + /* Prevent duplicate configuration identifiers */ 728 + list_for_each_entry(c, &cdev->configs, list) { 729 + if (c->bConfigurationValue == config->bConfigurationValue) { 730 + status = -EBUSY; 731 + goto done; 732 + } 733 + } 734 + 735 + config->cdev = cdev; 736 + list_add_tail(&config->list, &cdev->configs); 737 + 738 + INIT_LIST_HEAD(&config->functions); 739 + config->next_interface_id = 0; 740 + memset(config->interface, 0, sizeof(config->interface)); 741 + 742 + status = bind(config); 743 + if (status < 0) { 744 + while (!list_empty(&config->functions)) { 745 + struct usb_function *f; 746 + 747 + f = list_first_entry(&config->functions, 748 + struct usb_function, list); 749 + list_del(&f->list); 750 + if (f->unbind) { 751 + DBG(cdev, "unbind function '%s'/%p\n", 752 + f->name, f); 753 + f->unbind(config, f); 754 + /* may free memory for "f" */ 755 + } 756 + } 757 + list_del(&config->list); 758 + 
config->cdev = NULL; 759 + } else { 760 + unsigned i; 761 + 762 + DBG(cdev, "cfg %d/%p speeds:%s%s%s\n", 763 + config->bConfigurationValue, config, 764 + config->superspeed ? " super" : "", 765 + config->highspeed ? " high" : "", 766 + config->fullspeed 767 + ? (gadget_is_dualspeed(cdev->gadget) 768 + ? " full" 769 + : " full/low") 770 + : ""); 771 + 772 + for (i = 0; i < MAX_CONFIG_INTERFACES; i++) { 773 + struct usb_function *f = config->interface[i]; 774 + 775 + if (!f) 776 + continue; 777 + DBG(cdev, " interface %d = %s/%p\n", 778 + i, f->name, f); 779 + } 780 + } 781 + 782 + /* set_alt(), or next bind(), sets up 783 + * ep->driver_data as needed. 784 + */ 785 + usb_ep_autoconfig_reset(cdev->gadget); 786 + 787 + done: 788 + if (status) 789 + DBG(cdev, "added config '%s'/%u --> %d\n", config->label, 790 + config->bConfigurationValue, status); 791 + return status; 792 + } 793 + 794 + static void remove_config(struct usb_composite_dev *cdev, 795 + struct usb_configuration *config) 796 + { 797 + while (!list_empty(&config->functions)) { 798 + struct usb_function *f; 799 + 800 + f = list_first_entry(&config->functions, 801 + struct usb_function, list); 802 + list_del(&f->list); 803 + if (f->unbind) { 804 + DBG(cdev, "unbind function '%s'/%p\n", f->name, f); 805 + f->unbind(config, f); 806 + /* may free memory for "f" */ 807 + } 808 + } 809 + list_del(&config->list); 810 + if (config->unbind) { 811 + DBG(cdev, "unbind config '%s'/%p\n", config->label, config); 812 + config->unbind(config); 813 + /* may free memory for "c" */ 814 + } 815 + } 816 + 817 + /** 818 + * usb_remove_config() - remove a configuration from a device. 819 + * @cdev: wraps the USB gadget 820 + * @config: the configuration 821 + * 822 + * Drivers must call usb_gadget_disconnect before calling this function 823 + * to disconnect the device from the host and make sure the host will not 824 + * try to enumerate the device while we are changing the config list. 
 */
/* Remove @config from @cdev.  If it is the currently active configuration
 * it is reset (all functions disabled) under the device lock first, then
 * unbound and unlinked.
 */
void usb_remove_config(struct usb_composite_dev *cdev,
		       struct usb_configuration *config)
{
	unsigned long flags;

	spin_lock_irqsave(&cdev->lock, flags);

	if (cdev->config == config)
		reset_config(cdev);

	spin_unlock_irqrestore(&cdev->lock, flags);

	remove_config(cdev, config);
}

/*-------------------------------------------------------------------------*/

/* We support strings in multiple languages ... string descriptor zero
 * says which languages are supported.  The typical case will be that
 * only one language (probably English) is used, with I18N handled on
 * the host side.
 */

/* Walk the NULL-terminated table list @sp and append each distinct
 * language code to @buf (the wData of string descriptor zero; at most
 * 126 entries fit).  Duplicate language codes are skipped.
 */
static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
{
	const struct usb_gadget_strings	*s;
	__le16				language;
	__le16				*tmp;

	while (*sp) {
		s = *sp;
		language = cpu_to_le16(s->language);
		for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) {
			if (*tmp == language)
				goto repeat;
		}
		*tmp++ = language;
repeat:
		sp++;
	}
}

/* Search the NULL-terminated table list @sp for string @id in @language;
 * on a hit, write the UTF-16LE descriptor into @buf and return its
 * length, else return -EINVAL.
 */
static int lookup_string(
	struct usb_gadget_strings	**sp,
	void				*buf,
	u16				language,
	int				id
)
{
	struct usb_gadget_strings	*s;
	int				value;

	while (*sp) {
		s = *sp++;
		if (s->language != language)
			continue;
		value = usb_gadget_get_string(s, id, buf);
		if (value > 0)
			return value;
	}
	return -EINVAL;
}

/* Build the GET_DESCRIPTOR(string) reply for string @id in @language
 * into @buf; returns descriptor length or -EINVAL.  id 0 is the special
 * language-ID list; module-parameter/driver overrides take precedence
 * over the per-device/config/function string tables.
 */
static int get_string(struct usb_composite_dev *cdev,
		void *buf, u16 language, int id)
{
	struct usb_configuration	*c;
	struct usb_function		*f;
	int				len;
	const char			*str;

	/* Yes, not only is USB's I18N support probably more than most
	 * folk will ever care about ... also, it's all supported here.
	 * (Except for UTF8 support for Unicode's "Astral Planes".)
	 */

	/* 0 == report all available language codes */
	if (id == 0) {
		struct usb_string_descriptor	*s = buf;
		struct usb_gadget_strings	**sp;

		memset(s, 0, 256);
		s->bDescriptorType = USB_DT_STRING;

		sp = composite->strings;
		if (sp)
			collect_langs(sp, s->wData);

		list_for_each_entry(c, &cdev->configs, list) {
			sp = c->strings;
			if (sp)
				collect_langs(sp, s->wData);

			list_for_each_entry(f, &c->functions, list) {
				sp = f->strings;
				if (sp)
					collect_langs(sp, s->wData);
			}
		}

		for (len = 0; len <= 126 && s->wData[len]; len++)
			continue;
		if (!len)
			return -EINVAL;

		s->bLength = 2 * (len + 1);
		return s->bLength;
	}

	/* Otherwise, look up and return a specified string.  First
	 * check if the string has not been overridden.
	 */
	if (cdev->manufacturer_override == id)
		str = iManufacturer ?: composite->iManufacturer ?:
			composite_manufacturer;
	else if (cdev->product_override == id)
		str = iProduct ?: composite->iProduct;
	else if (cdev->serial_override == id)
		str = iSerialNumber ?: composite->iSerialNumber;
	else
		str = NULL;
	if (str) {
		/* build a throwaway one-entry table so the generic
		 * encoder can produce the descriptor for the override
		 */
		struct usb_gadget_strings strings = {
			.language = language,
			.strings  = &(struct usb_string) { 0xff, str }
		};
		return usb_gadget_get_string(&strings, 0xff, buf);
	}

	/* String IDs are device-scoped, so we look up each string
	 * table we're told about.  These lookups are infrequent;
	 * simpler-is-better here.
	 */
	if (composite->strings) {
		len = lookup_string(composite->strings, buf, language, id);
		if (len > 0)
			return len;
	}
	list_for_each_entry(c, &cdev->configs, list) {
		if (c->strings) {
			len = lookup_string(c->strings, buf, language, id);
			if (len > 0)
				return len;
		}
		list_for_each_entry(f, &c->functions, list) {
			if (!f->strings)
				continue;
			len = lookup_string(f->strings, buf, language, id);
			if (len > 0)
				return len;
		}
	}
	return -EINVAL;
}

/**
 * usb_string_id() - allocate an unused string ID
 * @cdev: the device whose string descriptor IDs are being allocated
 * Context: single threaded during gadget setup
 *
 * @usb_string_id() is called from bind() callbacks to allocate
 * string IDs.  Drivers for functions, configurations, or gadgets will
 * then store that ID in the appropriate descriptors and string table.
 *
 * All string identifier should be allocated using this,
 * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
 * that for example different functions don't wrongly assign different
 * meanings to the same identifier.
 */
int usb_string_id(struct usb_composite_dev *cdev)
{
	if (cdev->next_string_id < 254) {
		/* string id 0 is reserved by USB spec for list of
		 * supported languages */
		/* 255 reserved as well? -- mina86 */
		cdev->next_string_id++;
		return cdev->next_string_id;
	}
	return -ENODEV;
}
/**
 * usb_string_ids_tab() - allocate unused string IDs in batch
 * @cdev: the device whose string descriptor IDs are being allocated
 * @str: an array of usb_string objects to assign numbers to
 * Context: single threaded during gadget setup
 *
 * @usb_string_ids_tab() is called from bind() callbacks to allocate
 * string IDs.  Drivers for functions, configurations, or gadgets will
 * then copy IDs from the string table to the appropriate descriptors
 * and string table for other languages.
 *
 * All string identifier should be allocated using this,
 * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
 * example different functions don't wrongly assign different meanings
 * to the same identifier.
 */
int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
{
	int next = cdev->next_string_id;

	/* assign consecutive IDs until the terminating NULL entry;
	 * fail before mutating state if the pool (1..254) would overflow
	 */
	for (; str->s; ++str) {
		if (unlikely(next >= 254))
			return -ENODEV;
		str->id = ++next;
	}

	cdev->next_string_id = next;

	return 0;
}

/**
 * usb_string_ids_n() - allocate unused string IDs in batch
 * @c: the device whose string descriptor IDs are being allocated
 * @n: number of string IDs to allocate
 * Context: single threaded during gadget setup
 *
 * Returns the first requested ID.  This ID and next @n-1 IDs are now
 * valid IDs.  At least provided that @n is non-zero because if it
 * is, returns last requested ID which is now very useful information.
 *
 * @usb_string_ids_n() is called from bind() callbacks to allocate
 * string IDs.  Drivers for functions, configurations, or gadgets will
 * then store that ID in the appropriate descriptors and string table.
 *
 * All string identifier should be allocated using this,
 * @usb_string_id() or @usb_string_ids_tab() routine, to ensure that for
 * example different functions don't wrongly assign different meanings
 * to the same identifier.
 */
int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
{
	unsigned next = c->next_string_id;
	if (unlikely(n > 254 || (unsigned)next + n > 254))
		return -ENODEV;
	c->next_string_id += n;
	return next + 1;
}


/*-------------------------------------------------------------------------*/

/* ep0 completion handler: nothing to do on success; log short or
 * failed control transfers for debugging.
 */
static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status || req->actual != req->length)
		DBG((struct usb_composite_dev *) ep->driver_data,
				"setup complete --> %d, %d/%d\n",
				req->status, req->actual, req->length);
}

/*
 * The setup() callback implements all the ep0 functionality that's
 * not handled lower down, in hardware or the hardware driver(like
 * device and endpoint feature flags, and their status).  It's all
 * housekeeping for the gadget function we're implementing.  Most of
 * the work is in config and function specific setup.
 */
static int
composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	struct usb_request		*req = cdev->req;
	int				value = -EOPNOTSUPP;
	int				status = 0;
	u16				w_index = le16_to_cpu(ctrl->wIndex);
	u8				intf = w_index & 0xFF;
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);
	struct usb_function		*f = NULL;
	u8				endp;

	/* partial re-init of the response message; the function or the
	 * gadget might need to intercept e.g. a control-OUT completion
	 * when we delegate to it.
	 */
	req->zero = 0;
	req->complete = composite_setup_complete;
	req->length = 0;
	gadget->ep0->driver_data = cdev;

	switch (ctrl->bRequest) {

	/* we handle all standard USB descriptors */
	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unknown;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			cdev->desc.bNumConfigurations =
				count_configs(cdev, USB_DT_DEVICE);
			cdev->desc.bMaxPacketSize0 =
				cdev->gadget->ep0->maxpacket;
			if (gadget_is_superspeed(gadget)) {
				if (gadget->speed >= USB_SPEED_SUPER) {
					cdev->desc.bcdUSB = cpu_to_le16(0x0300);
					/* SS ep0 maxpacket is 2^9, encoded */
					cdev->desc.bMaxPacketSize0 = 9;
				} else {
					cdev->desc.bcdUSB = cpu_to_le16(0x0210);
				}
			}

			value = min(w_length, (u16) sizeof cdev->desc);
			memcpy(req->buf, &cdev->desc, value);
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!gadget_is_dualspeed(gadget) ||
			    gadget->speed >= USB_SPEED_SUPER)
				break;
			device_qual(cdev);
			value = min_t(int, w_length,
				sizeof(struct usb_qualifier_descriptor));
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
			if (!gadget_is_dualspeed(gadget) ||
			    gadget->speed >= USB_SPEED_SUPER)
				break;
			/* FALLTHROUGH */
		case USB_DT_CONFIG:
			value = config_desc(cdev, w_value);
			if (value >= 0)
				value = min(w_length, (u16) value);
			break;
		case USB_DT_STRING:
			value = get_string(cdev, req->buf,
					w_index, w_value & 0xff);
			if (value >= 0)
				value = min(w_length, (u16) value);
			break;
		case USB_DT_BOS:
			if (gadget_is_superspeed(gadget)) {
				value = bos_desc(cdev);
				value = min(w_length, (u16) value);
			}
			break;
		}
		break;

	/* any number of configs can work */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unknown;
		if (gadget_is_otg(gadget)) {
			if (gadget->a_hnp_support)
				DBG(cdev, "HNP available\n");
			else if (gadget->a_alt_hnp_support)
				DBG(cdev, "HNP on another port\n");
			else
				VDBG(cdev, "HNP inactive\n");
		}
		spin_lock(&cdev->lock);
		value = set_config(cdev, ctrl, w_value);
		spin_unlock(&cdev->lock);
		break;
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unknown;
		if (cdev->config)
			*(u8 *)req->buf = cdev->config->bConfigurationValue;
		else
			*(u8 *)req->buf = 0;
		value = min(w_length, (u16) 1);
		break;

	/* function drivers must handle get/set altsetting; if there's
	 * no get() method, we know only altsetting zero works.
	 */
	case USB_REQ_SET_INTERFACE:
		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
			goto unknown;
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		if (w_value && !f->set_alt)
			break;
		value = f->set_alt(f, w_index, w_value);
		if (value == USB_GADGET_DELAYED_STATUS) {
			DBG(cdev,
			 "%s: interface %d (%s) requested delayed status\n",
					__func__, intf, f->name);
			cdev->delayed_status++;
			DBG(cdev, "delayed_status count %d\n",
					cdev->delayed_status);
		}
		break;
	case USB_REQ_GET_INTERFACE:
		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
			goto unknown;
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		/* lots of interfaces only need altsetting zero... */
		value = f->get_alt ? f->get_alt(f, w_index) : 0;
		if (value < 0)
			break;
		*((u8 *)req->buf) = value;
		value = min(w_length, (u16) 1);
		break;

	/*
	 * USB 3.0 additions:
	 * Function driver should handle get_status request. If such cb
	 * wasn't supplied we respond with default value = 0
	 * Note: function driver should supply such cb only for the first
	 * interface of the function
	 */
	case USB_REQ_GET_STATUS:
		if (!gadget_is_superspeed(gadget))
			goto unknown;
		if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
			goto unknown;
		value = 2;	/* This is the length of the get_status reply */
		put_unaligned_le16(0, req->buf);
		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
			break;
		f = cdev->config->interface[intf];
		if (!f)
			break;
		status = f->get_status ? f->get_status(f) : 0;
		if (status < 0)
			break;
		put_unaligned_le16(status & 0x0000ffff, req->buf);
		break;
	/*
	 * Function drivers should handle SetFeature/ClearFeature
	 * (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
	 * only for the first interface of the function
	 */
	case USB_REQ_CLEAR_FEATURE:
	case USB_REQ_SET_FEATURE:
		if (!gadget_is_superspeed(gadget))
			goto unknown;
		if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
			goto unknown;
		switch (w_value) {
		case USB_INTRF_FUNC_SUSPEND:
			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
				break;
			f = cdev->config->interface[intf];
			if (!f)
				break;
			value = 0;
			if (f->func_suspend)
				/* suspend option bits live in the high byte */
				value = f->func_suspend(f, w_index >> 8);
			if (value < 0) {
				ERROR(cdev,
				      "func_suspend() returned error %d\n",
				      value);
				value = 0;
			}
			break;
		}
		break;
	default:
unknown:
		VDBG(cdev,
			"non-core control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);

		/* functions always handle their interfaces and endpoints...
		 * punt other recipients (other, WUSB, ...) to the current
		 * configuration code.
		 *
		 * REVISIT it could make sense to let the composite device
		 * take such requests too, if that's ever needed:  to work
		 * in config 0, etc.
		 */
		switch (ctrl->bRequestType & USB_RECIP_MASK) {
		case USB_RECIP_INTERFACE:
			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
				break;
			f = cdev->config->interface[intf];
			break;

		case USB_RECIP_ENDPOINT:
			/* fold direction bit + endpoint number into the
			 * index used by each function's endpoints bitmap
			 */
			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
			list_for_each_entry(f, &cdev->config->functions, list) {
				if (test_bit(endp, f->endpoints))
					break;
			}
			if (&f->list == &cdev->config->functions)
				f = NULL;
			break;
		}

		if (f && f->setup)
			value = f->setup(f, ctrl);
		else {
			struct usb_configuration	*c;

			c = cdev->config;
			if (c && c->setup)
				value = c->setup(c, ctrl);
		}

		goto done;
	}

	/* respond with data transfer before status phase? */
	if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
		req->length = value;
		req->zero = value < w_length;
		value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
		if (value < 0) {
			DBG(cdev, "ep_queue --> %d\n", value);
			req->status = 0;
			composite_setup_complete(gadget->ep0, req);
		}
	} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
		WARN(cdev,
			"%s: Delayed status not supported for w_length != 0",
			__func__);
	}

done:
	/* device either stalls (value < 0) or reports success */
	return value;
}

/* Bus disconnect: tear down the active configuration and notify the
 * composite driver, all under the device lock.
 */
static void composite_disconnect(struct usb_gadget *gadget)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	unsigned long			flags;

	/* REVISIT:  should we have config and device level
	 * disconnect callbacks?
	 */
	spin_lock_irqsave(&cdev->lock, flags);
	if (cdev->config)
		reset_config(cdev);
	if (composite->disconnect)
		composite->disconnect(cdev);
	spin_unlock_irqrestore(&cdev->lock, flags);
}

/*-------------------------------------------------------------------------*/

/* sysfs "suspended" attribute: reports 1 while the host has the bus
 * suspended, 0 otherwise.
 */
static ssize_t composite_show_suspended(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
	struct usb_composite_dev *cdev = get_gadget_data(gadget);

	return sprintf(buf, "%d\n", cdev->suspended);
}

static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);

/* Undo composite_bind(): drop all configurations, let the composite
 * driver clean up, then free the ep0 request and the device state.
 */
static void
composite_unbind(struct usb_gadget *gadget)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);

	/* composite_disconnect() must already have been called
	 * by the underlying peripheral controller driver!
	 * so there's no i/o concurrency that could affect the
	 * state protected by cdev->lock.
	 */
	WARN_ON(cdev->config);

	while (!list_empty(&cdev->configs)) {
		struct usb_configuration	*c;
		c = list_first_entry(&cdev->configs,
				struct usb_configuration, list);
		remove_config(cdev, c);
	}
	if (composite->unbind)
		composite->unbind(cdev);

	if (cdev->req) {
		kfree(cdev->req->buf);
		usb_ep_free_request(gadget->ep0, cdev->req);
	}
	device_remove_file(&gadget->dev, &dev_attr_suspended);
	kfree(cdev);
	set_gadget_data(gadget, NULL);
	composite = NULL;
}

/* If *desc (a string descriptor index) is still zero, allocate a fresh
 * string ID for it; returns the (possibly unchanged) index.
 */
static u8 override_id(struct usb_composite_dev *cdev, u8 *desc)
{
	if (!*desc) {
		int ret = usb_string_id(cdev);
		if (unlikely(ret < 0))
			WARNING(cdev, "failed to override string ID\n");
		else
			*desc = ret;
	}

	return *desc;
}

/* Gadget-driver bind(): allocate per-device state and the ep0 request,
 * call the composite driver's bind(), then apply module-parameter
 * overrides for VID/PID/bcdDevice and the manufacturer/product/serial
 * strings.  On any failure everything is torn down via
 * composite_unbind().
 */
static int composite_bind(struct usb_gadget *gadget)
{
	struct usb_composite_dev	*cdev;
	int				status = -ENOMEM;

	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
	if (!cdev)
		return status;

	spin_lock_init(&cdev->lock);
	cdev->gadget = gadget;
	set_gadget_data(gadget, cdev);
	INIT_LIST_HEAD(&cdev->configs);

	/* preallocate control response and buffer */
	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
	if (!cdev->req)
		goto fail;
	cdev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
	if (!cdev->req->buf)
		goto fail;
	cdev->req->complete = composite_setup_complete;
	gadget->ep0->driver_data = cdev;

	cdev->bufsiz = USB_BUFSIZ;
	cdev->driver = composite;

	/*
	 * As per USB compliance update, a device that is actively drawing
	 * more than 100mA from USB must report itself as bus-powered in
	 * the GetStatus(DEVICE) call.
	 */
	if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
		usb_gadget_set_selfpowered(gadget);

	/* interface and string IDs start at zero via kzalloc.
	 * we force endpoints to start unassigned; few controller
	 * drivers will zero ep->driver_data.
	 */
	usb_ep_autoconfig_reset(cdev->gadget);

	/* composite gadget needs to assign strings for whole device (like
	 * serial number), register function drivers, potentially update
	 * power state and consumption, etc
	 */
	status = composite->bind(cdev);
	if (status < 0)
		goto fail;

	cdev->desc = *composite->dev;

	/* standardized runtime overrides for device ID data */
	if (idVendor)
		cdev->desc.idVendor = cpu_to_le16(idVendor);
	else
		idVendor = le16_to_cpu(cdev->desc.idVendor);
	if (idProduct)
		cdev->desc.idProduct = cpu_to_le16(idProduct);
	else
		idProduct = le16_to_cpu(cdev->desc.idProduct);
	if (bcdDevice)
		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
	else
		bcdDevice = le16_to_cpu(cdev->desc.bcdDevice);

	/* string overrides */
	if (iManufacturer || !cdev->desc.iManufacturer) {
		if (!iManufacturer && !composite->iManufacturer &&
		    !*composite_manufacturer)
			snprintf(composite_manufacturer,
				 sizeof composite_manufacturer,
				 "%s %s with %s",
				 init_utsname()->sysname,
				 init_utsname()->release,
				 gadget->name);

		cdev->manufacturer_override =
			override_id(cdev, &cdev->desc.iManufacturer);
	}

	if (iProduct || (!cdev->desc.iProduct && composite->iProduct))
		cdev->product_override =
			override_id(cdev, &cdev->desc.iProduct);

	if (iSerialNumber ||
	    (!cdev->desc.iSerialNumber && composite->iSerialNumber))
		cdev->serial_override =
			override_id(cdev, &cdev->desc.iSerialNumber);

	/* has userspace failed to provide a serial number? */
	if (composite->needs_serial && !cdev->desc.iSerialNumber)
		WARNING(cdev, "userspace failed to provide iSerialNumber\n");

	/* finish up */
	status = device_create_file(&gadget->dev, &dev_attr_suspended);
	if (status)
		goto fail;

	INFO(cdev, "%s ready\n", composite->name);
	return 0;

fail:
	composite_unbind(gadget);
	return status;
}

/*-------------------------------------------------------------------------*/

/* Host suspended the bus: notify each function of the active config,
 * then the composite driver, and drop VBUS draw to 2 mA.
 */
static void
composite_suspend(struct usb_gadget *gadget)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	struct usb_function		*f;

	/* REVISIT:  should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "suspend\n");
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->suspend)
				f->suspend(f);
		}
	}
	if (composite->suspend)
		composite->suspend(cdev);

	cdev->suspended = 1;

	usb_gadget_vbus_draw(gadget, 2);
}

/* Host resumed the bus: notify the composite driver and the functions,
 * and restore the configured VBUS current draw.
 */
static void
composite_resume(struct usb_gadget *gadget)
{
	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
	struct usb_function		*f;
	u8				maxpower;

	/* REVISIT:  should we have config level
	 * suspend/resume callbacks?
	 */
	DBG(cdev, "resume\n");
	if (composite->resume)
		composite->resume(cdev);
	if (cdev->config) {
		list_for_each_entry(f, &cdev->config->functions, list) {
			if (f->resume)
				f->resume(f);
		}

		/* bMaxPower is in 2 mA units */
		maxpower = cdev->config->bMaxPower;

		usb_gadget_vbus_draw(gadget, maxpower ?
			(2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
	}

	cdev->suspended = 0;
}

/*-------------------------------------------------------------------------*/

/* The UDC-facing gadget driver; .function/.driver.name/.max_speed are
 * filled in from the composite driver at probe time.
 */
static struct usb_gadget_driver composite_driver = {
	.bind		= composite_bind,
	.unbind		= composite_unbind,

	.setup		= composite_setup,
	.disconnect	= composite_disconnect,

	.suspend	= composite_suspend,
	.resume		= composite_resume,

	.driver	= {
		.owner		= THIS_MODULE,
	},
};
1620 + */ 1621 + int usb_composite_probe(struct usb_composite_driver *driver) 1622 + { 1623 + if (!driver || !driver->dev || composite || !driver->bind) 1624 + return -EINVAL; 1625 + 1626 + if (!driver->name) 1627 + driver->name = "composite"; 1628 + if (!driver->iProduct) 1629 + driver->iProduct = driver->name; 1630 + composite_driver.function = (char *) driver->name; 1631 + composite_driver.driver.name = driver->name; 1632 + composite_driver.max_speed = driver->max_speed; 1633 + composite = driver; 1634 + 1635 + return usb_gadget_probe_driver(&composite_driver); 1636 + } 1637 + 1638 + /** 1639 + * usb_composite_unregister() - unregister a composite driver 1640 + * @driver: the driver to unregister 1641 + * 1642 + * This function is used to unregister drivers using the composite 1643 + * driver framework. 1644 + */ 1645 + void usb_composite_unregister(struct usb_composite_driver *driver) 1646 + { 1647 + if (composite != driver) 1648 + return; 1649 + usb_gadget_unregister_driver(&composite_driver); 1650 + } 1651 + 1652 + /** 1653 + * usb_composite_setup_continue() - Continue with the control transfer 1654 + * @cdev: the composite device who's control transfer was kept waiting 1655 + * 1656 + * This function must be called by the USB function driver to continue 1657 + * with the control transfer's data/status stage in case it had requested to 1658 + * delay the data/status stages. A USB function's setup handler (e.g. set_alt()) 1659 + * can request the composite framework to delay the setup request's data/status 1660 + * stages by returning USB_GADGET_DELAYED_STATUS. 
/**
 * usb_composite_setup_continue() - Continue with the control transfer
 * @cdev: the composite device who's control transfer was kept waiting
 *
 * This function must be called by the USB function driver to continue
 * with the control transfer's data/status stage in case it had requested to
 * delay the data/status stages. A USB function's setup handler (e.g. set_alt())
 * can request the composite framework to delay the setup request's data/status
 * stages by returning USB_GADGET_DELAYED_STATUS.
 */
void usb_composite_setup_continue(struct usb_composite_dev *cdev)
{
	int			value;
	struct usb_request	*req = cdev->req;
	unsigned long		flags;

	DBG(cdev, "%s\n", __func__);
	spin_lock_irqsave(&cdev->lock, flags);

	if (cdev->delayed_status == 0) {
		WARN(cdev, "%s: Unexpected call\n", __func__);

	} else if (--cdev->delayed_status == 0) {
		/* last outstanding delayed-status holder released:
		 * queue the zero-length status stage on ep0 now
		 */
		DBG(cdev, "%s: Completing delayed status\n", __func__);
		req->length = 0;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0) {
			DBG(cdev, "ep_queue --> %d\n", value);
			req->status = 0;
			composite_setup_complete(cdev->gadget->ep0, req);
		}
	}

	spin_unlock_irqrestore(&cdev->lock, flags);
}
+395
drivers/staging/ccg/composite.h
/*
 * composite.h -- framework for usb gadgets which are composite devices
 *
 * Copyright (C) 2006-2008 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef	__LINUX_USB_COMPOSITE_H
#define	__LINUX_USB_COMPOSITE_H

/*
 * This framework is an optional layer on top of the USB Gadget interface,
 * making it easier to build (a) Composite devices, supporting multiple
 * functions within any single configuration, and (b) Multi-configuration
 * devices, also supporting multiple functions but without necessarily
 * having more than one function per configuration.
 *
 * Example:  a device with a single configuration supporting both network
 * link and mass storage functions is a composite device.  Those functions
 * might alternatively be packaged in individual configurations, but in
 * the composite model the host can use both functions at the same time.
 */

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/*
 * USB function drivers should return USB_GADGET_DELAYED_STATUS if they
 * wish to delay the data/status stages of the control transfer till they
 * are ready. The control transfer will then be kept from completing till
 * all the function drivers that requested for USB_GADGET_DELAYED_STATUS
 * invoke usb_composite_setup_continue().
 */
#define USB_GADGET_DELAYED_STATUS       0x7fff	/* Impossibly large value */

struct usb_configuration;

/**
 * struct usb_function - describes one function of a configuration
 * @name: For diagnostics, identifies the function.
 * @strings: tables of strings, keyed by identifiers assigned during bind()
 *	and by language IDs provided in control requests
 * @descriptors: Table of full (or low) speed descriptors, using interface and
 *	string identifiers assigned during @bind().  If this pointer is null,
 *	the function will not be available at full speed (or at low speed).
 * @hs_descriptors: Table of high speed descriptors, using interface and
 *	string identifiers assigned during @bind().  If this pointer is null,
 *	the function will not be available at high speed.
 * @ss_descriptors: Table of super speed descriptors, using interface and
 *	string identifiers assigned during @bind(). If this
 *	pointer is null after initiation, the function will not
 *	be available at super speed.
 * @config: assigned when @usb_add_function() is called; this is the
 *	configuration with which this function is associated.
 * @bind: Before the gadget can register, all of its functions bind() to the
 *	available resources including string and interface identifiers used
 *	in interface or class descriptors; endpoints; I/O buffers; and so on.
 * @unbind: Reverses @bind; called as a side effect of unregistering the
 *	driver which added this function.
 * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may
 *	initialize usb_ep.driver data at this time (when it is used).
 *	Note that setting an interface to its current altsetting resets
 *	interface state, and that all interfaces have a disabled state.
 * @get_alt: Returns the active altsetting.  If this is not provided,
 *	then only altsetting zero is supported.
 * @disable: (REQUIRED) Indicates the function should be disabled.  Reasons
 *	include host resetting or reconfiguring the gadget, and disconnection.
 * @setup: Used for interface-specific control requests.
 * @suspend: Notifies functions when the host stops sending USB traffic.
 * @resume: Notifies functions when the host restarts USB traffic.
 * @get_status: Returns function status as a reply to
 *	GetStatus() request when the recipient is Interface.
 * @func_suspend: callback to be called when
 *	SetFeature(FUNCTION_SUSPEND) is received
 *
 * A single USB function uses one or more interfaces, and should in most
 * cases support operation at both full and high speeds.  Each function is
 * associated by @usb_add_function() with a one configuration; that function
 * causes @bind() to be called so resources can be allocated as part of
 * setting up a gadget driver.  Those resources include endpoints, which
 * should be allocated using @usb_ep_autoconfig().
 *
 * To support dual speed operation, a function driver provides descriptors
 * for both high and full speed operation.  Except in rare cases that don't
 * involve bulk endpoints, each speed needs different endpoint descriptors.
 *
 * Function drivers choose their own strategies for managing instance data.
 * The simplest strategy just declares it "static", which means the function
 * can only be activated once.  If the function needs to be exposed in more
 * than one configuration at a given speed, it needs to support multiple
 * usb_function structures (one for each configuration).
 *
 * A more complex strategy might encapsulate a @usb_function structure inside
 * a driver-specific instance structure to allows multiple activations.  An
 * example of multiple activations might be a CDC ACM function that supports
 * two or more distinct instances within the same configuration, providing
 * several independent logical data links to a USB host.
 */
struct usb_function {
	const char			*name;
	struct usb_gadget_strings	**strings;
	struct usb_descriptor_header	**descriptors;
	struct usb_descriptor_header	**hs_descriptors;
	struct usb_descriptor_header	**ss_descriptors;

	struct usb_configuration	*config;

	/* REVISIT:  bind() functions can be marked __init, which
	 * makes trouble for section mismatch analysis.  See if
	 * we can't restructure things to avoid mismatching.
	 * Related:  unbind() may kfree() but bind() won't...
	 */

	/* configuration management:  bind/unbind */
	int			(*bind)(struct usb_configuration *,
					struct usb_function *);
	void			(*unbind)(struct usb_configuration *,
					struct usb_function *);

	/* runtime state management */
	int			(*set_alt)(struct usb_function *,
					unsigned interface, unsigned alt);
	int			(*get_alt)(struct usb_function *,
					unsigned interface);
	void			(*disable)(struct usb_function *);
	int			(*setup)(struct usb_function *,
					const struct usb_ctrlrequest *);
	void			(*suspend)(struct usb_function *);
	void			(*resume)(struct usb_function *);

	/* USB 3.0 additions */
	int			(*get_status)(struct usb_function *);
	int			(*func_suspend)(struct usb_function *,
						u8 suspend_opt);
	/* private: */
	/* internals */
	struct list_head		list;
	DECLARE_BITMAP(endpoints, 32);
};

int usb_add_function(struct usb_configuration *, struct usb_function *);

int usb_function_deactivate(struct usb_function *);
int usb_function_activate(struct usb_function *);

int usb_interface_id(struct usb_configuration *, struct usb_function *);

int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
			struct usb_ep *_ep);

#define	MAX_CONFIG_INTERFACES		16	/* arbitrary; max 255 */
193 + * 194 + * The lifecycle of a usb_configuration includes allocation, initialization 195 + * of the fields described above, and calling @usb_add_config() to set up 196 + * internal data and bind it to a specific device. The configuration's 197 + * @bind() method is then used to initialize all the functions and then 198 + * call @usb_add_function() for them. 199 + * 200 + * Those functions would normally be independent of each other, but that's 201 + * not mandatory. CDC WMC devices are an example where functions often 202 + * depend on other functions, with some functions subsidiary to others. 203 + * Such interdependency may be managed in any way, so long as all of the 204 + * descriptors complete by the time the composite driver returns from 205 + * its bind() routine. 206 + */ 207 + struct usb_configuration { 208 + const char *label; 209 + struct usb_gadget_strings **strings; 210 + const struct usb_descriptor_header **descriptors; 211 + 212 + /* REVISIT: bind() functions can be marked __init, which 213 + * makes trouble for section mismatch analysis. See if 214 + * we can't restructure things to avoid mismatching... 
215 + */ 216 + 217 + /* configuration management: unbind/setup */ 218 + void (*unbind)(struct usb_configuration *); 219 + int (*setup)(struct usb_configuration *, 220 + const struct usb_ctrlrequest *); 221 + 222 + /* fields in the config descriptor */ 223 + u8 bConfigurationValue; 224 + u8 iConfiguration; 225 + u8 bmAttributes; 226 + u8 bMaxPower; 227 + 228 + struct usb_composite_dev *cdev; 229 + 230 + /* private: */ 231 + /* internals */ 232 + struct list_head list; 233 + struct list_head functions; 234 + u8 next_interface_id; 235 + unsigned superspeed:1; 236 + unsigned highspeed:1; 237 + unsigned fullspeed:1; 238 + struct usb_function *interface[MAX_CONFIG_INTERFACES]; 239 + }; 240 + 241 + int usb_add_config(struct usb_composite_dev *, 242 + struct usb_configuration *, 243 + int (*)(struct usb_configuration *)); 244 + 245 + void usb_remove_config(struct usb_composite_dev *, 246 + struct usb_configuration *); 247 + 248 + /** 249 + * struct usb_composite_driver - groups configurations into a gadget 250 + * @name: For diagnostics, identifies the driver. 251 + * @iProduct: Used as iProduct override if @dev->iProduct is not set. 252 + * If NULL value of @name is taken. 253 + * @iManufacturer: Used as iManufacturer override if @dev->iManufacturer is 254 + * not set. If NULL a default "<system> <release> with <udc>" value 255 + * will be used. 256 + * @iSerialNumber: Used as iSerialNumber override if @dev->iSerialNumber is 257 + * not set. 258 + * @dev: Template descriptor for the device, including default device 259 + * identifiers. 260 + * @strings: tables of strings, keyed by identifiers assigned during @bind 261 + * and language IDs provided in control requests 262 + * @max_speed: Highest speed the driver supports. 263 + * @needs_serial: set to 1 if the gadget needs userspace to provide 264 + * a serial number. If one is not provided, warning will be printed. 
265 + * @bind: (REQUIRED) Used to allocate resources that are shared across the 266 + * whole device, such as string IDs, and add its configurations using 267 + * @usb_add_config(). This may fail by returning a negative errno 268 + * value; it should return zero on successful initialization. 269 + * @unbind: Reverses @bind; called as a side effect of unregistering 270 + * this driver. 271 + * @disconnect: optional driver disconnect method 272 + * @suspend: Notifies when the host stops sending USB traffic, 273 + * after function notifications 274 + * @resume: Notifies configuration when the host restarts USB traffic, 275 + * before function notifications 276 + * 277 + * Devices default to reporting self powered operation. Devices which rely 278 + * on bus powered operation should report this in their @bind method. 279 + * 280 + * Before returning from @bind, various fields in the template descriptor 281 + * may be overridden. These include the idVendor/idProduct/bcdDevice values 282 + * normally to bind the appropriate host side driver, and the three strings 283 + * (iManufacturer, iProduct, iSerialNumber) normally used to provide user 284 + * meaningful device identifiers. (The strings will not be defined unless 285 + * they are defined in @dev and @strings.) The correct ep0 maxpacket size 286 + * is also reported, as defined by the underlying controller driver. 
287 + */ 288 + struct usb_composite_driver { 289 + const char *name; 290 + const char *iProduct; 291 + const char *iManufacturer; 292 + const char *iSerialNumber; 293 + const struct usb_device_descriptor *dev; 294 + struct usb_gadget_strings **strings; 295 + enum usb_device_speed max_speed; 296 + unsigned needs_serial:1; 297 + 298 + int (*bind)(struct usb_composite_dev *cdev); 299 + int (*unbind)(struct usb_composite_dev *); 300 + 301 + void (*disconnect)(struct usb_composite_dev *); 302 + 303 + /* global suspend hooks */ 304 + void (*suspend)(struct usb_composite_dev *); 305 + void (*resume)(struct usb_composite_dev *); 306 + }; 307 + 308 + extern int usb_composite_probe(struct usb_composite_driver *driver); 309 + extern void usb_composite_unregister(struct usb_composite_driver *driver); 310 + extern void usb_composite_setup_continue(struct usb_composite_dev *cdev); 311 + 312 + 313 + /** 314 + * struct usb_composite_dev - represents one composite usb gadget 315 + * @gadget: read-only, abstracts the gadget's usb peripheral controller 316 + * @req: used for control responses; buffer is pre-allocated 317 + * @bufsiz: size of buffer pre-allocated in @req 318 + * @config: the currently active configuration 319 + * 320 + * One of these devices is allocated and initialized before the 321 + * associated device driver's bind() is called. 322 + * 323 + * OPEN ISSUE: it appears that some WUSB devices will need to be 324 + * built by combining a normal (wired) gadget with a wireless one. 325 + * This revision of the gadget framework should probably try to make 326 + * sure doing that won't hurt too much. 
327 + * 328 + * One notion for how to handle Wireless USB devices involves: 329 + * (a) a second gadget here, discovery mechanism TBD, but likely 330 + * needing separate "register/unregister WUSB gadget" calls; 331 + * (b) updates to usb_gadget to include flags "is it wireless", 332 + * "is it wired", plus (presumably in a wrapper structure) 333 + * bandgroup and PHY info; 334 + * (c) presumably a wireless_ep wrapping a usb_ep, and reporting 335 + * wireless-specific parameters like maxburst and maxsequence; 336 + * (d) configurations that are specific to wireless links; 337 + * (e) function drivers that understand wireless configs and will 338 + * support wireless for (additional) function instances; 339 + * (f) a function to support association setup (like CBAF), not 340 + * necessarily requiring a wireless adapter; 341 + * (g) composite device setup that can create one or more wireless 342 + * configs, including appropriate association setup support; 343 + * (h) more, TBD. 344 + */ 345 + struct usb_composite_dev { 346 + struct usb_gadget *gadget; 347 + struct usb_request *req; 348 + unsigned bufsiz; 349 + 350 + struct usb_configuration *config; 351 + 352 + /* private: */ 353 + /* internals */ 354 + unsigned int suspended:1; 355 + struct usb_device_descriptor desc; 356 + struct list_head configs; 357 + struct usb_composite_driver *driver; 358 + u8 next_string_id; 359 + u8 manufacturer_override; 360 + u8 product_override; 361 + u8 serial_override; 362 + 363 + /* the gadget driver won't enable the data pullup 364 + * while the deactivation count is nonzero. 365 + */ 366 + unsigned deactivations; 367 + 368 + /* the composite driver won't complete the control transfer's 369 + * data/status stages till delayed_status is zero. 
370 + */ 371 + int delayed_status; 372 + 373 + /* protects deactivations and delayed_status counts*/ 374 + spinlock_t lock; 375 + }; 376 + 377 + extern int usb_string_id(struct usb_composite_dev *c); 378 + extern int usb_string_ids_tab(struct usb_composite_dev *c, 379 + struct usb_string *str); 380 + extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); 381 + 382 + 383 + /* messaging utils */ 384 + #define DBG(d, fmt, args...) \ 385 + dev_dbg(&(d)->gadget->dev , fmt , ## args) 386 + #define VDBG(d, fmt, args...) \ 387 + dev_vdbg(&(d)->gadget->dev , fmt , ## args) 388 + #define ERROR(d, fmt, args...) \ 389 + dev_err(&(d)->gadget->dev , fmt , ## args) 390 + #define WARNING(d, fmt, args...) \ 391 + dev_warn(&(d)->gadget->dev , fmt , ## args) 392 + #define INFO(d, fmt, args...) \ 393 + dev_info(&(d)->gadget->dev , fmt , ## args) 394 + 395 + #endif /* __LINUX_USB_COMPOSITE_H */
+158
drivers/staging/ccg/config.c
··· 1 + /* 2 + * usb/gadget/config.c -- simplify building config descriptors 3 + * 4 + * Copyright (C) 2003 David Brownell 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License as published by 8 + * the Free Software Foundation; either version 2 of the License, or 9 + * (at your option) any later version. 10 + */ 11 + 12 + #include <linux/errno.h> 13 + #include <linux/slab.h> 14 + #include <linux/kernel.h> 15 + #include <linux/list.h> 16 + #include <linux/string.h> 17 + #include <linux/device.h> 18 + 19 + #include <linux/usb/ch9.h> 20 + #include <linux/usb/gadget.h> 21 + 22 + 23 + /** 24 + * usb_descriptor_fillbuf - fill buffer with descriptors 25 + * @buf: Buffer to be filled 26 + * @buflen: Size of buf 27 + * @src: Array of descriptor pointers, terminated by null pointer. 28 + * 29 + * Copies descriptors into the buffer, returning the length or a 30 + * negative error code if they can't all be copied. Useful when 31 + * assembling descriptors for an associated set of interfaces used 32 + * as part of configuring a composite device; or in other cases where 33 + * sets of descriptors need to be marshaled. 34 + */ 35 + int 36 + usb_descriptor_fillbuf(void *buf, unsigned buflen, 37 + const struct usb_descriptor_header **src) 38 + { 39 + u8 *dest = buf; 40 + 41 + if (!src) 42 + return -EINVAL; 43 + 44 + /* fill buffer from src[] until null descriptor ptr */ 45 + for (; NULL != *src; src++) { 46 + unsigned len = (*src)->bLength; 47 + 48 + if (len > buflen) 49 + return -EINVAL; 50 + memcpy(dest, *src, len); 51 + buflen -= len; 52 + dest += len; 53 + } 54 + return dest - (u8 *)buf; 55 + } 56 + 57 + 58 + /** 59 + * usb_gadget_config_buf - builts a complete configuration descriptor 60 + * @config: Header for the descriptor, including characteristics such 61 + * as power requirements and number of interfaces. 
62 + * @desc: Null-terminated vector of pointers to the descriptors (interface, 63 + * endpoint, etc) defining all functions in this device configuration. 64 + * @buf: Buffer for the resulting configuration descriptor. 65 + * @length: Length of buffer. If this is not big enough to hold the 66 + * entire configuration descriptor, an error code will be returned. 67 + * 68 + * This copies descriptors into the response buffer, building a descriptor 69 + * for that configuration. It returns the buffer length or a negative 70 + * status code. The config.wTotalLength field is set to match the length 71 + * of the result, but other descriptor fields (including power usage and 72 + * interface count) must be set by the caller. 73 + * 74 + * Gadget drivers could use this when constructing a config descriptor 75 + * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the 76 + * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed. 77 + */ 78 + int usb_gadget_config_buf( 79 + const struct usb_config_descriptor *config, 80 + void *buf, 81 + unsigned length, 82 + const struct usb_descriptor_header **desc 83 + ) 84 + { 85 + struct usb_config_descriptor *cp = buf; 86 + int len; 87 + 88 + /* config descriptor first */ 89 + if (length < USB_DT_CONFIG_SIZE || !desc) 90 + return -EINVAL; 91 + *cp = *config; 92 + 93 + /* then interface/endpoint/class/vendor/... 
*/ 94 + len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf, 95 + length - USB_DT_CONFIG_SIZE, desc); 96 + if (len < 0) 97 + return len; 98 + len += USB_DT_CONFIG_SIZE; 99 + if (len > 0xffff) 100 + return -EINVAL; 101 + 102 + /* patch up the config descriptor */ 103 + cp->bLength = USB_DT_CONFIG_SIZE; 104 + cp->bDescriptorType = USB_DT_CONFIG; 105 + cp->wTotalLength = cpu_to_le16(len); 106 + cp->bmAttributes |= USB_CONFIG_ATT_ONE; 107 + return len; 108 + } 109 + 110 + /** 111 + * usb_copy_descriptors - copy a vector of USB descriptors 112 + * @src: null-terminated vector to copy 113 + * Context: initialization code, which may sleep 114 + * 115 + * This makes a copy of a vector of USB descriptors. Its primary use 116 + * is to support usb_function objects which can have multiple copies, 117 + * each needing different descriptors. Functions may have static 118 + * tables of descriptors, which are used as templates and customized 119 + * with identifiers (for interfaces, strings, endpoints, and more) 120 + * as needed by a given function instance. 
121 + */ 122 + struct usb_descriptor_header ** 123 + usb_copy_descriptors(struct usb_descriptor_header **src) 124 + { 125 + struct usb_descriptor_header **tmp; 126 + unsigned bytes; 127 + unsigned n_desc; 128 + void *mem; 129 + struct usb_descriptor_header **ret; 130 + 131 + /* count descriptors and their sizes; then add vector size */ 132 + for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++) 133 + bytes += (*tmp)->bLength; 134 + bytes += (n_desc + 1) * sizeof(*tmp); 135 + 136 + mem = kmalloc(bytes, GFP_KERNEL); 137 + if (!mem) 138 + return NULL; 139 + 140 + /* fill in pointers starting at "tmp", 141 + * to descriptors copied starting at "mem"; 142 + * and return "ret" 143 + */ 144 + tmp = mem; 145 + ret = mem; 146 + mem += (n_desc + 1) * sizeof(*tmp); 147 + while (*src) { 148 + memcpy(mem, *src, (*src)->bLength); 149 + *tmp = mem; 150 + tmp++; 151 + mem += (*src)->bLength; 152 + src++; 153 + } 154 + *tmp = NULL; 155 + 156 + return ret; 157 + } 158 +
+393
drivers/staging/ccg/epautoconf.c
/*
 * epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
 *
 * Copyright (C) 2004 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>

#include <linux/ctype.h>
#include <linux/string.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "gadget_chips.h"


/* we must assign addresses for configurable endpoints (like net2280);
 * this counter persists across autoconfig calls until the next
 * usb_ep_autoconfig_reset()
 */
static unsigned epnum;

// #define MANY_ENDPOINTS
#ifdef MANY_ENDPOINTS
/* more than 15 configurable endpoints */
static unsigned in_epnum;
#endif


/*
 * This should work with endpoints from controller drivers sharing the
 * same endpoint naming convention.  By example:
 *
 *	- ep1, ep2, ... address is fixed, not direction or type
 *	- ep1in, ep2out, ... address and direction are fixed, not type
 *	- ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
 *	- ep1in-bulk, ep2out-iso, ... all three are fixed
 *	- ep-* ... no functionality restrictions
 *
 * Type suffixes are "-bulk", "-iso", or "-int".  Numbers are decimal.
 * Less common restrictions are implied by gadget_is_*().
 *
 * NOTE:  each endpoint is unidirectional, as specified by its USB
 * descriptor; and isn't specific to a configuration or altsetting.
 */
/* Returns 1 (and updates @desc, claims nothing) when @ep can implement
 * @desc; 0 otherwise.  Parses ep->name suffixes to honor per-endpoint
 * type/direction restrictions.
 */
static int
ep_matches (
	struct usb_gadget		*gadget,
	struct usb_ep			*ep,
	struct usb_endpoint_descriptor	*desc,
	struct usb_ss_ep_comp_descriptor *ep_comp
)
{
	u8		type;
	const char	*tmp;
	u16		max;

	int		num_req_streams = 0;

	/* endpoint already claimed? */
	if (NULL != ep->driver_data)
		return 0;

	/* only support ep0 for portable CONTROL traffic */
	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	if (USB_ENDPOINT_XFER_CONTROL == type)
		return 0;

	/* some other naming convention */
	if ('e' != ep->name[0])
		return 0;

	/* type-restriction:  "-iso", "-bulk", or "-int".
	 * direction-restriction:  "in", "out".
	 */
	if ('-' != ep->name[2]) {
		tmp = strrchr (ep->name, '-');
		if (tmp) {
			switch (type) {
			case USB_ENDPOINT_XFER_INT:
				/* bulk endpoints handle interrupt transfers,
				 * except the toggle-quirky iso-synch kind
				 */
				if ('s' == tmp[2])	// == "-iso"
					return 0;
				/* for now, avoid PXA "interrupt-in";
				 * it's documented as never using DATA1.
				 */
				if (gadget_is_pxa (gadget)
						&& 'i' == tmp [1])
					return 0;
				break;
			case USB_ENDPOINT_XFER_BULK:
				if ('b' != tmp[1])	// != "-bulk"
					return 0;
				break;
			case USB_ENDPOINT_XFER_ISOC:
				if ('s' != tmp[2])	// != "-iso"
					return 0;
			}
		} else {
			/* no type suffix; point past the name's end so the
			 * direction check below looks at its last character
			 */
			tmp = ep->name + strlen (ep->name);
		}

		/* direction-restriction:  "..in-..", "out-.." */
		tmp--;
		if (!isdigit (*tmp)) {
			if (desc->bEndpointAddress & USB_DIR_IN) {
				if ('n' != *tmp)
					return 0;
			} else {
				if ('t' != *tmp)
					return 0;
			}
		}
	}

	/*
	 * Get the number of required streams from the EP companion
	 * descriptor and see if the EP matches it
	 */
	if (usb_endpoint_xfer_bulk(desc)) {
		if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
			num_req_streams = ep_comp->bmAttributes & 0x1f;
			if (num_req_streams > ep->max_streams)
				return 0;
		}

	}

	/*
	 * If the protocol driver hasn't yet decided on wMaxPacketSize
	 * and wants to know the maximum possible, provide the info.
	 */
	if (desc->wMaxPacketSize == 0)
		desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket);

	/* endpoint maxpacket size is an input parameter, except for bulk
	 * where it's an output parameter representing the full speed limit.
	 * the usb spec fixes high speed bulk maxpacket at 512 bytes.
	 */
	max = 0x7ff & usb_endpoint_maxp(desc);
	switch (type) {
	case USB_ENDPOINT_XFER_INT:
		/* INT:  limit 64 bytes full speed, 1024 high/super speed */
		if (!gadget_is_dualspeed(gadget) && max > 64)
			return 0;
		/* FALLTHROUGH */

	case USB_ENDPOINT_XFER_ISOC:
		/* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
		if (ep->maxpacket < max)
			return 0;
		if (!gadget_is_dualspeed(gadget) && max > 1023)
			return 0;

		/* BOTH:  "high bandwidth" works only at high speed */
		if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
			if (!gadget_is_dualspeed(gadget))
				return 0;
			/* configure your hardware with enough buffering!! */
		}
		break;
	}

	/* MATCH!! */

	/* report address */
	desc->bEndpointAddress &= USB_DIR_IN;
	if (isdigit (ep->name [2])) {
		u8 num = simple_strtoul (&ep->name [2], NULL, 10);
		desc->bEndpointAddress |= num;
#ifdef MANY_ENDPOINTS
	} else if (desc->bEndpointAddress & USB_DIR_IN) {
		if (++in_epnum > 15)
			return 0;
		desc->bEndpointAddress = USB_DIR_IN | in_epnum;
#endif
	} else {
		if (++epnum > 15)
			return 0;
		desc->bEndpointAddress |= epnum;
	}

	/* report (variable) full speed bulk maxpacket */
	if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
		int size = ep->maxpacket;

		/* min() doesn't work on bitfields with gcc-3.5 */
		if (size > 64)
			size = 64;
		desc->wMaxPacketSize = cpu_to_le16(size);
	}
	ep->address = desc->bEndpointAddress;
	return 1;
}

/* look up an endpoint on @gadget's list by its controller-assigned name */
static struct usb_ep *
find_ep (struct usb_gadget *gadget, const char *name)
{
	struct usb_ep	*ep;

	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
		if (0 == strcmp (ep->name, name))
			return ep;
	}
	return NULL;
}

/**
 * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
 * descriptor and ep companion descriptor
 * @gadget: The device to which the endpoint must belong.
 * @desc: Endpoint descriptor, with endpoint direction and transfer mode
 *    initialized.  For periodic transfers, the maximum packet
 *    size must also be initialized.  This is modified on
 *    success.
 * @ep_comp: Endpoint companion descriptor, with the required
 *    number of streams.  Will be modified when the chosen EP
 *    supports a different number of streams.
 *
 * This routine replaces usb_ep_autoconfig() when super speed
 * enhancements are needed.  If such enhancements are required,
 * the FD (function driver) should call usb_ep_autoconfig_ss()
 * directly and provide the additional ep_comp parameter.
 *
 * By choosing an endpoint to use with the specified descriptor,
 * this routine simplifies writing gadget drivers that work with
 * multiple USB device controllers.  The endpoint would be
 * passed later to usb_ep_enable(), along with some descriptor.
 *
 * That second descriptor won't always be the same as the first one.
 * For example, isochronous endpoints can be autoconfigured for high
 * bandwidth, and then used in several lower bandwidth altsettings.
 * Also, high and full speed descriptors will be different.
 *
 * Be sure to examine and test the results of autoconfiguration
 * on your hardware.  This code may not make the best choices
 * about how to use the USB controller, and it can't know all
 * the restrictions that may apply.  Some combinations of driver
 * and hardware won't be able to autoconfigure.
 *
 * On success, this returns an un-claimed usb_ep, and modifies the endpoint
 * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
 * is initialized as if the endpoint were used at full speed and
 * the bmAttribute field in the ep companion descriptor is
 * updated with the assigned number of streams if it is
 * different from the original value.  To prevent the endpoint
 * from being returned by a later autoconfig call, claim it by
 * assigning ep->driver_data to some non-null value.
 *
 * On failure, this returns a null endpoint descriptor.
 */
struct usb_ep *usb_ep_autoconfig_ss(
	struct usb_gadget		*gadget,
	struct usb_endpoint_descriptor	*desc,
	struct usb_ss_ep_comp_descriptor *ep_comp
)
{
	struct usb_ep	*ep;
	u8		type;

	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	/* First, apply chip-specific "best usage" knowledge.
	 * This might make a good usb_gadget_ops hook ...
	 */
	if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
		/* ep-e, ep-f are PIO with only 64 byte fifos */
		ep = find_ep (gadget, "ep-e");
		if (ep && ep_matches(gadget, ep, desc, ep_comp))
			goto found_ep;
		ep = find_ep (gadget, "ep-f");
		if (ep && ep_matches(gadget, ep, desc, ep_comp))
			goto found_ep;

	} else if (gadget_is_goku (gadget)) {
		if (USB_ENDPOINT_XFER_INT == type) {
			/* single buffering is enough */
			ep = find_ep(gadget, "ep3-bulk");
			if (ep && ep_matches(gadget, ep, desc, ep_comp))
				goto found_ep;
		} else if (USB_ENDPOINT_XFER_BULK == type
				&& (USB_DIR_IN & desc->bEndpointAddress)) {
			/* DMA may be available */
			ep = find_ep(gadget, "ep2-bulk");
			if (ep && ep_matches(gadget, ep, desc,
					      ep_comp))
				goto found_ep;
		}

#ifdef CONFIG_BLACKFIN
	} else if (gadget_is_musbhdrc(gadget)) {
		if ((USB_ENDPOINT_XFER_BULK == type) ||
		    (USB_ENDPOINT_XFER_ISOC == type)) {
			if (USB_DIR_IN & desc->bEndpointAddress)
				ep = find_ep (gadget, "ep5in");
			else
				ep = find_ep (gadget, "ep6out");
		} else if (USB_ENDPOINT_XFER_INT == type) {
			if (USB_DIR_IN & desc->bEndpointAddress)
				ep = find_ep(gadget, "ep1in");
			else
				ep = find_ep(gadget, "ep2out");
		} else
			ep = NULL;
		if (ep && ep_matches(gadget, ep, desc, ep_comp))
			goto found_ep;
#endif
	}

	/* Second, look at endpoints until an unclaimed one looks usable */
	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
		if (ep_matches(gadget, ep, desc, ep_comp))
			goto found_ep;
	}

	/* Fail */
	return NULL;
found_ep:
	ep->desc = NULL;
	ep->comp_desc = NULL;
	return ep;
}

/**
 * usb_ep_autoconfig() - choose an endpoint matching the
 * descriptor
 * @gadget: The device to which the endpoint must belong.
 * @desc: Endpoint descriptor, with endpoint direction and transfer mode
 *	initialized.  For periodic transfers, the maximum packet
 *	size must also be initialized.  This is modified on success.
 *
 * By choosing an endpoint to use with the specified descriptor, this
 * routine simplifies writing gadget drivers that work with multiple
 * USB device controllers.  The endpoint would be passed later to
 * usb_ep_enable(), along with some descriptor.
 *
 * That second descriptor won't always be the same as the first one.
 * For example, isochronous endpoints can be autoconfigured for high
 * bandwidth, and then used in several lower bandwidth altsettings.
 * Also, high and full speed descriptors will be different.
 *
 * Be sure to examine and test the results of autoconfiguration on your
 * hardware.  This code may not make the best choices about how to use the
 * USB controller, and it can't know all the restrictions that may apply.
 * Some combinations of driver and hardware won't be able to autoconfigure.
 *
 * On success, this returns an un-claimed usb_ep, and modifies the endpoint
 * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
 * is initialized as if the endpoint were used at full speed.  To prevent
 * the endpoint from being returned by a later autoconfig call, claim it
 * by assigning ep->driver_data to some non-null value.
 *
 * On failure, this returns a null endpoint descriptor.
 */
struct usb_ep *usb_ep_autoconfig(
	struct usb_gadget		*gadget,
	struct usb_endpoint_descriptor	*desc
)
{
	/* no companion descriptor: no super speed stream requirements */
	return usb_ep_autoconfig_ss(gadget, desc, NULL);
}


/**
 * usb_ep_autoconfig_reset - reset endpoint autoconfig state
 * @gadget: device for which autoconfig state will be reset
 *
 * Use this for devices where one configuration may need to assign
 * endpoint resources very differently from the next one.  It clears
 * state such as ep->driver_data and the record of assigned endpoints
 * used by usb_ep_autoconfig().
 */
void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
{
	struct usb_ep	*ep;

	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
		ep->driver_data = NULL;
	}
#ifdef MANY_ENDPOINTS
	in_epnum = 0;
#endif
	epnum = 0;
}
+814
drivers/staging/ccg/f_acm.c
/*
 * f_acm.c -- USB CDC serial (ACM) function driver
 *
 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
 * Copyright (C) 2008 by David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 * Copyright (C) 2009 by Samsung Electronics
 * Author: Michal Nazarewicz (mina86@mina86.com)
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */

/* #define VERBOSE_DEBUG */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/device.h>

#include "u_serial.h"
#include "gadget_chips.h"


/*
 * This CDC ACM function support just wraps control functions and
 * notifications around the generic serial-over-usb code.
 *
 * Because CDC ACM is standardized by the USB-IF, many host operating
 * systems have drivers for it.  Accordingly, ACM is the preferred
 * interop solution for serial-port type connections.  The control
 * models are often not necessary, and in any case don't do much in
 * this bare-bones implementation.
 *
 * Note that even MS-Windows has some support for ACM.  However, that
 * support is somewhat broken because when you use ACM in a composite
 * device, having multiple interfaces confuses the poor OS.  It doesn't
 * seem to understand CDC Union descriptors.  The new "association"
 * descriptors (roughly equivalent to CDC Unions) may sometimes help.
 */

/* per-instance state for one ACM function */
struct f_acm {
	struct gserial		port;
	u8			ctrl_id, data_id;
	u8			port_num;

	u8			pending;

	/* lock is mostly for pending and notify_req ... they get accessed
	 * by callbacks both from tty (open/close/break) under its spinlock,
	 * and notify_req.complete() which can't use that lock.
	 */
	spinlock_t		lock;

	struct usb_ep		*notify;
	struct usb_request	*notify_req;

	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */

	/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
	u16			port_handshake_bits;
#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */

	/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
	u16			serial_state;
#define ACM_CTRL_OVERRUN	(1 << 6)
#define ACM_CTRL_PARITY		(1 << 5)
#define ACM_CTRL_FRAMING	(1 << 4)
#define ACM_CTRL_RI		(1 << 3)
#define ACM_CTRL_BRK		(1 << 2)
#define ACM_CTRL_DSR		(1 << 1)
#define ACM_CTRL_DCD		(1 << 0)
};

/* map a usb_function back to its containing f_acm instance */
static inline struct f_acm *func_to_acm(struct usb_function *f)
{
	return container_of(f, struct f_acm, port.func);
}

/* map a gserial port back to its containing f_acm instance */
static inline struct f_acm *port_to_acm(struct gserial *p)
{
	return container_of(p, struct f_acm, port);
}

/*-------------------------------------------------------------------------*/

/* notification endpoint uses smallish and infrequent fixed-size messages */

#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */

/* interface and class descriptors: */

static struct usb_interface_assoc_descriptor
acm_iad_descriptor = {
	.bLength =		sizeof acm_iad_descriptor,
	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,

	/* .bFirstInterface =	DYNAMIC, */
	.bInterfaceCount = 	2,	// control + data
	.bFunctionClass =	USB_CLASS_COMM,
	.bFunctionSubClass =	USB_CDC_SUBCLASS_ACM,
	.bFunctionProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
	/* .iFunction =		DYNAMIC */
};


static struct usb_interface_descriptor acm_control_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM,
	.bInterfaceProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
	/* .iInterface = DYNAMIC */
};

static struct usb_interface_descriptor acm_data_interface_desc = {
	.bLength =		USB_DT_INTERFACE_SIZE,
	.bDescriptorType =	USB_DT_INTERFACE,
	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc acm_header_desc = {
	.bLength =		sizeof(acm_header_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_call_mgmt_descriptor
acm_call_mgmt_descriptor = {
	.bLength =		sizeof(acm_call_mgmt_descriptor),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
	.bmCapabilities =	0,
	/* .bDataInterface = DYNAMIC */
};

static struct usb_cdc_acm_descriptor acm_descriptor = {
	.bLength =		sizeof(acm_descriptor),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
	.bmCapabilities =	USB_CDC_CAP_LINE,
};

static struct usb_cdc_union_desc acm_union_desc = {
	.bLength =		sizeof(acm_union_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC */
	/* .bSlaveInterface0 =	DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor acm_fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =
USB_DIR_IN, 168 + .bmAttributes = USB_ENDPOINT_XFER_INT, 169 + .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET), 170 + .bInterval = 1 << GS_LOG2_NOTIFY_INTERVAL, 171 + }; 172 + 173 + static struct usb_endpoint_descriptor acm_fs_in_desc = { 174 + .bLength = USB_DT_ENDPOINT_SIZE, 175 + .bDescriptorType = USB_DT_ENDPOINT, 176 + .bEndpointAddress = USB_DIR_IN, 177 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 178 + }; 179 + 180 + static struct usb_endpoint_descriptor acm_fs_out_desc = { 181 + .bLength = USB_DT_ENDPOINT_SIZE, 182 + .bDescriptorType = USB_DT_ENDPOINT, 183 + .bEndpointAddress = USB_DIR_OUT, 184 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 185 + }; 186 + 187 + static struct usb_descriptor_header *acm_fs_function[] = { 188 + (struct usb_descriptor_header *) &acm_iad_descriptor, 189 + (struct usb_descriptor_header *) &acm_control_interface_desc, 190 + (struct usb_descriptor_header *) &acm_header_desc, 191 + (struct usb_descriptor_header *) &acm_call_mgmt_descriptor, 192 + (struct usb_descriptor_header *) &acm_descriptor, 193 + (struct usb_descriptor_header *) &acm_union_desc, 194 + (struct usb_descriptor_header *) &acm_fs_notify_desc, 195 + (struct usb_descriptor_header *) &acm_data_interface_desc, 196 + (struct usb_descriptor_header *) &acm_fs_in_desc, 197 + (struct usb_descriptor_header *) &acm_fs_out_desc, 198 + NULL, 199 + }; 200 + 201 + /* high speed support: */ 202 + 203 + static struct usb_endpoint_descriptor acm_hs_notify_desc = { 204 + .bLength = USB_DT_ENDPOINT_SIZE, 205 + .bDescriptorType = USB_DT_ENDPOINT, 206 + .bEndpointAddress = USB_DIR_IN, 207 + .bmAttributes = USB_ENDPOINT_XFER_INT, 208 + .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET), 209 + .bInterval = GS_LOG2_NOTIFY_INTERVAL+4, 210 + }; 211 + 212 + static struct usb_endpoint_descriptor acm_hs_in_desc = { 213 + .bLength = USB_DT_ENDPOINT_SIZE, 214 + .bDescriptorType = USB_DT_ENDPOINT, 215 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 216 + .wMaxPacketSize = cpu_to_le16(512), 217 + }; 218 + 219 
+ static struct usb_endpoint_descriptor acm_hs_out_desc = { 220 + .bLength = USB_DT_ENDPOINT_SIZE, 221 + .bDescriptorType = USB_DT_ENDPOINT, 222 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 223 + .wMaxPacketSize = cpu_to_le16(512), 224 + }; 225 + 226 + static struct usb_descriptor_header *acm_hs_function[] = { 227 + (struct usb_descriptor_header *) &acm_iad_descriptor, 228 + (struct usb_descriptor_header *) &acm_control_interface_desc, 229 + (struct usb_descriptor_header *) &acm_header_desc, 230 + (struct usb_descriptor_header *) &acm_call_mgmt_descriptor, 231 + (struct usb_descriptor_header *) &acm_descriptor, 232 + (struct usb_descriptor_header *) &acm_union_desc, 233 + (struct usb_descriptor_header *) &acm_hs_notify_desc, 234 + (struct usb_descriptor_header *) &acm_data_interface_desc, 235 + (struct usb_descriptor_header *) &acm_hs_in_desc, 236 + (struct usb_descriptor_header *) &acm_hs_out_desc, 237 + NULL, 238 + }; 239 + 240 + static struct usb_endpoint_descriptor acm_ss_in_desc = { 241 + .bLength = USB_DT_ENDPOINT_SIZE, 242 + .bDescriptorType = USB_DT_ENDPOINT, 243 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 244 + .wMaxPacketSize = cpu_to_le16(1024), 245 + }; 246 + 247 + static struct usb_endpoint_descriptor acm_ss_out_desc = { 248 + .bLength = USB_DT_ENDPOINT_SIZE, 249 + .bDescriptorType = USB_DT_ENDPOINT, 250 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 251 + .wMaxPacketSize = cpu_to_le16(1024), 252 + }; 253 + 254 + static struct usb_ss_ep_comp_descriptor acm_ss_bulk_comp_desc = { 255 + .bLength = sizeof acm_ss_bulk_comp_desc, 256 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 257 + }; 258 + 259 + static struct usb_descriptor_header *acm_ss_function[] = { 260 + (struct usb_descriptor_header *) &acm_iad_descriptor, 261 + (struct usb_descriptor_header *) &acm_control_interface_desc, 262 + (struct usb_descriptor_header *) &acm_header_desc, 263 + (struct usb_descriptor_header *) &acm_call_mgmt_descriptor, 264 + (struct usb_descriptor_header *) &acm_descriptor, 265 + 
(struct usb_descriptor_header *) &acm_union_desc, 266 + (struct usb_descriptor_header *) &acm_hs_notify_desc, 267 + (struct usb_descriptor_header *) &acm_ss_bulk_comp_desc, 268 + (struct usb_descriptor_header *) &acm_data_interface_desc, 269 + (struct usb_descriptor_header *) &acm_ss_in_desc, 270 + (struct usb_descriptor_header *) &acm_ss_bulk_comp_desc, 271 + (struct usb_descriptor_header *) &acm_ss_out_desc, 272 + (struct usb_descriptor_header *) &acm_ss_bulk_comp_desc, 273 + NULL, 274 + }; 275 + 276 + /* string descriptors: */ 277 + 278 + #define ACM_CTRL_IDX 0 279 + #define ACM_DATA_IDX 1 280 + #define ACM_IAD_IDX 2 281 + 282 + /* static strings, in UTF-8 */ 283 + static struct usb_string acm_string_defs[] = { 284 + [ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)", 285 + [ACM_DATA_IDX].s = "CDC ACM Data", 286 + [ACM_IAD_IDX ].s = "CDC Serial", 287 + { /* ZEROES END LIST */ }, 288 + }; 289 + 290 + static struct usb_gadget_strings acm_string_table = { 291 + .language = 0x0409, /* en-us */ 292 + .strings = acm_string_defs, 293 + }; 294 + 295 + static struct usb_gadget_strings *acm_strings[] = { 296 + &acm_string_table, 297 + NULL, 298 + }; 299 + 300 + /*-------------------------------------------------------------------------*/ 301 + 302 + /* ACM control ... data handling is delegated to tty library code. 303 + * The main task of this function is to activate and deactivate 304 + * that code based on device state; track parameters like line 305 + * speed, handshake state, and so on; and issue notifications. 
306 + */ 307 + 308 + static void acm_complete_set_line_coding(struct usb_ep *ep, 309 + struct usb_request *req) 310 + { 311 + struct f_acm *acm = ep->driver_data; 312 + struct usb_composite_dev *cdev = acm->port.func.config->cdev; 313 + 314 + if (req->status != 0) { 315 + DBG(cdev, "acm ttyGS%d completion, err %d\n", 316 + acm->port_num, req->status); 317 + return; 318 + } 319 + 320 + /* normal completion */ 321 + if (req->actual != sizeof(acm->port_line_coding)) { 322 + DBG(cdev, "acm ttyGS%d short resp, len %d\n", 323 + acm->port_num, req->actual); 324 + usb_ep_set_halt(ep); 325 + } else { 326 + struct usb_cdc_line_coding *value = req->buf; 327 + 328 + /* REVISIT: we currently just remember this data. 329 + * If we change that, (a) validate it first, then 330 + * (b) update whatever hardware needs updating, 331 + * (c) worry about locking. This is information on 332 + * the order of 9600-8-N-1 ... most of which means 333 + * nothing unless we control a real RS232 line. 334 + */ 335 + acm->port_line_coding = *value; 336 + } 337 + } 338 + 339 + static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) 340 + { 341 + struct f_acm *acm = func_to_acm(f); 342 + struct usb_composite_dev *cdev = f->config->cdev; 343 + struct usb_request *req = cdev->req; 344 + int value = -EOPNOTSUPP; 345 + u16 w_index = le16_to_cpu(ctrl->wIndex); 346 + u16 w_value = le16_to_cpu(ctrl->wValue); 347 + u16 w_length = le16_to_cpu(ctrl->wLength); 348 + 349 + /* composite driver infrastructure handles everything except 350 + * CDC class messages; interface activation uses set_alt(). 351 + * 352 + * Note CDC spec table 4 lists the ACM request profile. It requires 353 + * encapsulated command support ... we don't handle any, and respond 354 + * to them by stalling. Options include get/set/clear comm features 355 + * (not that useful) and SEND_BREAK. 356 + */ 357 + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { 358 + 359 + /* SET_LINE_CODING ... 
just read and save what the host sends */ 360 + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) 361 + | USB_CDC_REQ_SET_LINE_CODING: 362 + if (w_length != sizeof(struct usb_cdc_line_coding) 363 + || w_index != acm->ctrl_id) 364 + goto invalid; 365 + 366 + value = w_length; 367 + cdev->gadget->ep0->driver_data = acm; 368 + req->complete = acm_complete_set_line_coding; 369 + break; 370 + 371 + /* GET_LINE_CODING ... return what host sent, or initial value */ 372 + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) 373 + | USB_CDC_REQ_GET_LINE_CODING: 374 + if (w_index != acm->ctrl_id) 375 + goto invalid; 376 + 377 + value = min_t(unsigned, w_length, 378 + sizeof(struct usb_cdc_line_coding)); 379 + memcpy(req->buf, &acm->port_line_coding, value); 380 + break; 381 + 382 + /* SET_CONTROL_LINE_STATE ... save what the host sent */ 383 + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) 384 + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: 385 + if (w_index != acm->ctrl_id) 386 + goto invalid; 387 + 388 + value = 0; 389 + 390 + /* FIXME we should not allow data to flow until the 391 + * host sets the ACM_CTRL_DTR bit; and when it clears 392 + * that bit, we should return to that no-flow state. 393 + */ 394 + acm->port_handshake_bits = w_value; 395 + break; 396 + 397 + default: 398 + invalid: 399 + VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", 400 + ctrl->bRequestType, ctrl->bRequest, 401 + w_value, w_index, w_length); 402 + } 403 + 404 + /* respond with data transfer or status phase? 
*/ 405 + if (value >= 0) { 406 + DBG(cdev, "acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n", 407 + acm->port_num, ctrl->bRequestType, ctrl->bRequest, 408 + w_value, w_index, w_length); 409 + req->zero = 0; 410 + req->length = value; 411 + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); 412 + if (value < 0) 413 + ERROR(cdev, "acm response on ttyGS%d, err %d\n", 414 + acm->port_num, value); 415 + } 416 + 417 + /* device either stalls (value < 0) or reports success */ 418 + return value; 419 + } 420 + 421 + static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 422 + { 423 + struct f_acm *acm = func_to_acm(f); 424 + struct usb_composite_dev *cdev = f->config->cdev; 425 + 426 + /* we know alt == 0, so this is an activation or a reset */ 427 + 428 + if (intf == acm->ctrl_id) { 429 + if (acm->notify->driver_data) { 430 + VDBG(cdev, "reset acm control interface %d\n", intf); 431 + usb_ep_disable(acm->notify); 432 + } else { 433 + VDBG(cdev, "init acm ctrl interface %d\n", intf); 434 + if (config_ep_by_speed(cdev->gadget, f, acm->notify)) 435 + return -EINVAL; 436 + } 437 + usb_ep_enable(acm->notify); 438 + acm->notify->driver_data = acm; 439 + 440 + } else if (intf == acm->data_id) { 441 + if (acm->port.in->driver_data) { 442 + DBG(cdev, "reset acm ttyGS%d\n", acm->port_num); 443 + gserial_disconnect(&acm->port); 444 + } 445 + if (!acm->port.in->desc || !acm->port.out->desc) { 446 + DBG(cdev, "activate acm ttyGS%d\n", acm->port_num); 447 + if (config_ep_by_speed(cdev->gadget, f, 448 + acm->port.in) || 449 + config_ep_by_speed(cdev->gadget, f, 450 + acm->port.out)) { 451 + acm->port.in->desc = NULL; 452 + acm->port.out->desc = NULL; 453 + return -EINVAL; 454 + } 455 + } 456 + gserial_connect(&acm->port, acm->port_num); 457 + 458 + } else 459 + return -EINVAL; 460 + 461 + return 0; 462 + } 463 + 464 + static void acm_disable(struct usb_function *f) 465 + { 466 + struct f_acm *acm = func_to_acm(f); 467 + struct usb_composite_dev *cdev = 
f->config->cdev; 468 + 469 + DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num); 470 + gserial_disconnect(&acm->port); 471 + usb_ep_disable(acm->notify); 472 + acm->notify->driver_data = NULL; 473 + } 474 + 475 + /*-------------------------------------------------------------------------*/ 476 + 477 + /** 478 + * acm_cdc_notify - issue CDC notification to host 479 + * @acm: wraps host to be notified 480 + * @type: notification type 481 + * @value: Refer to cdc specs, wValue field. 482 + * @data: data to be sent 483 + * @length: size of data 484 + * Context: irqs blocked, acm->lock held, acm_notify_req non-null 485 + * 486 + * Returns zero on success or a negative errno. 487 + * 488 + * See section 6.3.5 of the CDC 1.1 specification for information 489 + * about the only notification we issue: SerialState change. 490 + */ 491 + static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value, 492 + void *data, unsigned length) 493 + { 494 + struct usb_ep *ep = acm->notify; 495 + struct usb_request *req; 496 + struct usb_cdc_notification *notify; 497 + const unsigned len = sizeof(*notify) + length; 498 + void *buf; 499 + int status; 500 + 501 + req = acm->notify_req; 502 + acm->notify_req = NULL; 503 + acm->pending = false; 504 + 505 + req->length = len; 506 + notify = req->buf; 507 + buf = notify + 1; 508 + 509 + notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS 510 + | USB_RECIP_INTERFACE; 511 + notify->bNotificationType = type; 512 + notify->wValue = cpu_to_le16(value); 513 + notify->wIndex = cpu_to_le16(acm->ctrl_id); 514 + notify->wLength = cpu_to_le16(length); 515 + memcpy(buf, data, length); 516 + 517 + /* ep_queue() can complete immediately if it fills the fifo... 
*/ 518 + spin_unlock(&acm->lock); 519 + status = usb_ep_queue(ep, req, GFP_ATOMIC); 520 + spin_lock(&acm->lock); 521 + 522 + if (status < 0) { 523 + ERROR(acm->port.func.config->cdev, 524 + "acm ttyGS%d can't notify serial state, %d\n", 525 + acm->port_num, status); 526 + acm->notify_req = req; 527 + } 528 + 529 + return status; 530 + } 531 + 532 + static int acm_notify_serial_state(struct f_acm *acm) 533 + { 534 + struct usb_composite_dev *cdev = acm->port.func.config->cdev; 535 + int status; 536 + 537 + spin_lock(&acm->lock); 538 + if (acm->notify_req) { 539 + DBG(cdev, "acm ttyGS%d serial state %04x\n", 540 + acm->port_num, acm->serial_state); 541 + status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, 542 + 0, &acm->serial_state, sizeof(acm->serial_state)); 543 + } else { 544 + acm->pending = true; 545 + status = 0; 546 + } 547 + spin_unlock(&acm->lock); 548 + return status; 549 + } 550 + 551 + static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) 552 + { 553 + struct f_acm *acm = req->context; 554 + u8 doit = false; 555 + 556 + /* on this call path we do NOT hold the port spinlock, 557 + * which is why ACM needs its own spinlock 558 + */ 559 + spin_lock(&acm->lock); 560 + if (req->status != -ESHUTDOWN) 561 + doit = acm->pending; 562 + acm->notify_req = req; 563 + spin_unlock(&acm->lock); 564 + 565 + if (doit) 566 + acm_notify_serial_state(acm); 567 + } 568 + 569 + /* connect == the TTY link is open */ 570 + 571 + static void acm_connect(struct gserial *port) 572 + { 573 + struct f_acm *acm = port_to_acm(port); 574 + 575 + acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD; 576 + acm_notify_serial_state(acm); 577 + } 578 + 579 + static void acm_disconnect(struct gserial *port) 580 + { 581 + struct f_acm *acm = port_to_acm(port); 582 + 583 + acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD); 584 + acm_notify_serial_state(acm); 585 + } 586 + 587 + static int acm_send_break(struct gserial *port, int duration) 588 + { 589 + struct 
f_acm *acm = port_to_acm(port); 590 + u16 state; 591 + 592 + state = acm->serial_state; 593 + state &= ~ACM_CTRL_BRK; 594 + if (duration) 595 + state |= ACM_CTRL_BRK; 596 + 597 + acm->serial_state = state; 598 + return acm_notify_serial_state(acm); 599 + } 600 + 601 + /*-------------------------------------------------------------------------*/ 602 + 603 + /* ACM function driver setup/binding */ 604 + static int 605 + acm_bind(struct usb_configuration *c, struct usb_function *f) 606 + { 607 + struct usb_composite_dev *cdev = c->cdev; 608 + struct f_acm *acm = func_to_acm(f); 609 + int status; 610 + struct usb_ep *ep; 611 + 612 + /* allocate instance-specific interface IDs, and patch descriptors */ 613 + status = usb_interface_id(c, f); 614 + if (status < 0) 615 + goto fail; 616 + acm->ctrl_id = status; 617 + acm_iad_descriptor.bFirstInterface = status; 618 + 619 + acm_control_interface_desc.bInterfaceNumber = status; 620 + acm_union_desc .bMasterInterface0 = status; 621 + 622 + status = usb_interface_id(c, f); 623 + if (status < 0) 624 + goto fail; 625 + acm->data_id = status; 626 + 627 + acm_data_interface_desc.bInterfaceNumber = status; 628 + acm_union_desc.bSlaveInterface0 = status; 629 + acm_call_mgmt_descriptor.bDataInterface = status; 630 + 631 + status = -ENODEV; 632 + 633 + /* allocate instance-specific endpoints */ 634 + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc); 635 + if (!ep) 636 + goto fail; 637 + acm->port.in = ep; 638 + ep->driver_data = cdev; /* claim */ 639 + 640 + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc); 641 + if (!ep) 642 + goto fail; 643 + acm->port.out = ep; 644 + ep->driver_data = cdev; /* claim */ 645 + 646 + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc); 647 + if (!ep) 648 + goto fail; 649 + acm->notify = ep; 650 + ep->driver_data = cdev; /* claim */ 651 + 652 + /* allocate notification */ 653 + acm->notify_req = gs_alloc_req(ep, 654 + sizeof(struct usb_cdc_notification) + 2, 655 + GFP_KERNEL); 656 + if 
(!acm->notify_req) 657 + goto fail; 658 + 659 + acm->notify_req->complete = acm_cdc_notify_complete; 660 + acm->notify_req->context = acm; 661 + 662 + /* copy descriptors */ 663 + f->descriptors = usb_copy_descriptors(acm_fs_function); 664 + if (!f->descriptors) 665 + goto fail; 666 + 667 + /* support all relevant hardware speeds... we expect that when 668 + * hardware is dual speed, all bulk-capable endpoints work at 669 + * both speeds 670 + */ 671 + if (gadget_is_dualspeed(c->cdev->gadget)) { 672 + acm_hs_in_desc.bEndpointAddress = 673 + acm_fs_in_desc.bEndpointAddress; 674 + acm_hs_out_desc.bEndpointAddress = 675 + acm_fs_out_desc.bEndpointAddress; 676 + acm_hs_notify_desc.bEndpointAddress = 677 + acm_fs_notify_desc.bEndpointAddress; 678 + 679 + /* copy descriptors */ 680 + f->hs_descriptors = usb_copy_descriptors(acm_hs_function); 681 + } 682 + if (gadget_is_superspeed(c->cdev->gadget)) { 683 + acm_ss_in_desc.bEndpointAddress = 684 + acm_fs_in_desc.bEndpointAddress; 685 + acm_ss_out_desc.bEndpointAddress = 686 + acm_fs_out_desc.bEndpointAddress; 687 + 688 + /* copy descriptors, and track endpoint copies */ 689 + f->ss_descriptors = usb_copy_descriptors(acm_ss_function); 690 + if (!f->ss_descriptors) 691 + goto fail; 692 + } 693 + 694 + DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", 695 + acm->port_num, 696 + gadget_is_superspeed(c->cdev->gadget) ? "super" : 697 + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", 698 + acm->port.in->name, acm->port.out->name, 699 + acm->notify->name); 700 + return 0; 701 + 702 + fail: 703 + if (acm->notify_req) 704 + gs_free_req(acm->notify, acm->notify_req); 705 + 706 + /* we might as well release our claims on endpoints */ 707 + if (acm->notify) 708 + acm->notify->driver_data = NULL; 709 + if (acm->port.out) 710 + acm->port.out->driver_data = NULL; 711 + if (acm->port.in) 712 + acm->port.in->driver_data = NULL; 713 + 714 + ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); 715 + 716 + return status; 717 + } 718 + 719 + static void 720 + acm_unbind(struct usb_configuration *c, struct usb_function *f) 721 + { 722 + struct f_acm *acm = func_to_acm(f); 723 + 724 + if (gadget_is_dualspeed(c->cdev->gadget)) 725 + usb_free_descriptors(f->hs_descriptors); 726 + if (gadget_is_superspeed(c->cdev->gadget)) 727 + usb_free_descriptors(f->ss_descriptors); 728 + usb_free_descriptors(f->descriptors); 729 + gs_free_req(acm->notify, acm->notify_req); 730 + kfree(acm); 731 + } 732 + 733 + /* Some controllers can't support CDC ACM ... */ 734 + static inline bool can_support_cdc(struct usb_configuration *c) 735 + { 736 + /* everything else is *probably* fine ... */ 737 + return true; 738 + } 739 + 740 + /** 741 + * acm_bind_config - add a CDC ACM function to a configuration 742 + * @c: the configuration to support the CDC ACM instance 743 + * @port_num: /dev/ttyGS* port this interface will use 744 + * Context: single threaded during gadget setup 745 + * 746 + * Returns zero on success, else negative errno. 747 + * 748 + * Caller must have called @gserial_setup() with enough ports to 749 + * handle all the ones it binds. Caller is also responsible 750 + * for calling @gserial_cleanup() before module unload. 
751 + */ 752 + int acm_bind_config(struct usb_configuration *c, u8 port_num) 753 + { 754 + struct f_acm *acm; 755 + int status; 756 + 757 + if (!can_support_cdc(c)) 758 + return -EINVAL; 759 + 760 + /* REVISIT might want instance-specific strings to help 761 + * distinguish instances ... 762 + */ 763 + 764 + /* maybe allocate device-global string IDs, and patch descriptors */ 765 + if (acm_string_defs[ACM_CTRL_IDX].id == 0) { 766 + status = usb_string_id(c->cdev); 767 + if (status < 0) 768 + return status; 769 + acm_string_defs[ACM_CTRL_IDX].id = status; 770 + 771 + acm_control_interface_desc.iInterface = status; 772 + 773 + status = usb_string_id(c->cdev); 774 + if (status < 0) 775 + return status; 776 + acm_string_defs[ACM_DATA_IDX].id = status; 777 + 778 + acm_data_interface_desc.iInterface = status; 779 + 780 + status = usb_string_id(c->cdev); 781 + if (status < 0) 782 + return status; 783 + acm_string_defs[ACM_IAD_IDX].id = status; 784 + 785 + acm_iad_descriptor.iFunction = status; 786 + } 787 + 788 + /* allocate and initialize one new instance */ 789 + acm = kzalloc(sizeof *acm, GFP_KERNEL); 790 + if (!acm) 791 + return -ENOMEM; 792 + 793 + spin_lock_init(&acm->lock); 794 + 795 + acm->port_num = port_num; 796 + 797 + acm->port.connect = acm_connect; 798 + acm->port.disconnect = acm_disconnect; 799 + acm->port.send_break = acm_send_break; 800 + 801 + acm->port.func.name = "acm"; 802 + acm->port.func.strings = acm_strings; 803 + /* descriptors are per-instance copies */ 804 + acm->port.func.bind = acm_bind; 805 + acm->port.func.unbind = acm_unbind; 806 + acm->port.func.set_alt = acm_set_alt; 807 + acm->port.func.setup = acm_setup; 808 + acm->port.func.disable = acm_disable; 809 + 810 + status = usb_add_function(c, &acm->port.func); 811 + if (status) 812 + kfree(acm); 813 + return status; 814 + }
+2455
drivers/staging/ccg/f_fs.c
/*
 * f_fs.c -- user mode file system API for USB composite function controllers
 *
 * Copyright (C) 2010 Samsung Electronics
 * Author: Michal Nazarewicz <mina86@mina86.com>
 *
 * Based on inode.c (GadgetFS) which was:
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */


/* #define DEBUG */
/* #define VERBOSE_DEBUG */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/hid.h>
#include <asm/unaligned.h>

#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>


#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */


/* Debugging ****************************************************************/

#ifdef VERBOSE_DEBUG
#  define pr_vdebug pr_debug
#  define ffs_dump_mem(prefix, ptr, len) \
	print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
#else
#  define pr_vdebug(...)                 do { } while (0)
#  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
#endif /* VERBOSE_DEBUG */

/* trace entry into a function (only with VERBOSE_DEBUG) */
#define ENTER()    pr_vdebug("%s()\n", __func__)


/* The data structure and setup file ****************************************/

/* Lifecycle of a FunctionFS instance as seen through its ep0 file. */
enum ffs_state {
	/*
	 * Waiting for descriptors and strings.
	 *
	 * In this state no open(2), read(2) or write(2) on epfiles
	 * may succeed (which should not be the problem as there
	 * should be no such files opened in the first place).
	 */
	FFS_READ_DESCRIPTORS,
	FFS_READ_STRINGS,

	/*
	 * We've got descriptors and strings.  We are or have called
	 * functionfs_ready_callback().  functionfs_bind() may have
	 * been called but we don't know.
	 *
	 * This is the only state in which operations on epfiles may
	 * succeed.
	 */
	FFS_ACTIVE,

	/*
	 * All endpoints have been closed.  This state is also set if
	 * we encounter an unrecoverable error.  The only
	 * unrecoverable error is situation when after reading strings
	 * from user space we fail to initialise epfiles or
	 * functionfs_ready_callback() returns with error (<0).
	 *
	 * In this state no open(2), read(2) or write(2) (both on ep0
	 * as well as epfile) may succeed (at this point epfiles are
	 * unlinked and all closed so this is not a problem; ep0 is
	 * also closed but ep0 file exists and so open(2) on ep0 must
	 * fail).
	 */
	FFS_CLOSING
};


/* State of the pending (or not) setup request handed to user space. */
enum ffs_setup_state {
	/* There is no setup request pending. */
	FFS_NO_SETUP,
	/*
	 * User has read events and there was a setup request event
	 * there.  The next read/write on ep0 will handle the
	 * request.
	 */
	FFS_SETUP_PENDING,
	/*
	 * There was event pending but before user space handled it
	 * some other event was introduced which canceled existing
	 * setup.  If this state is set read/write on ep0 return
	 * -EIDRM.  This state is only set when adding event.
	 */
	FFS_SETUP_CANCELED
};



struct ffs_epfile;
struct ffs_function;

/* Per-mount/per-device FunctionFS state ("P:" notes name the lock that
 * protects each field).
 */
struct ffs_data {
	struct usb_gadget		*gadget;

	/*
	 * Protect access read/write operations, only one read/write
	 * at a time.  As a consequence protects ep0req and company.
	 * While setup request is being processed (queued) this is
	 * held.
	 */
	struct mutex			mutex;

	/*
	 * Protect access to endpoint related structures (basically
	 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
	 * endpoint zero.
	 */
	spinlock_t			eps_lock;

	/*
	 * XXX REVISIT do we need our own request? Since we are not
	 * handling setup requests immediately user space may be so
	 * slow that another setup will be sent to the gadget but this
	 * time not to us but another function and then there could be
	 * a race.  Is that the case? Or maybe we can use cdev->req
	 * after all, maybe we just need some spinlock for that? */
	struct usb_request		*ep0req;		/* P: mutex */
	struct completion		ep0req_completion;	/* P: mutex */
	int				ep0req_status;		/* P: mutex */

	/* reference counter */
	atomic_t			ref;
	/* how many files are opened (EP0 and others) */
	atomic_t			opened;

	/* EP0 state */
	enum ffs_state			state;

	/*
	 * Possible transitions:
	 * + FFS_NO_SETUP       -> FFS_SETUP_PENDING  -- P: ev.waitq.lock
	 *               happens only in ep0 read which is P: mutex
	 * + FFS_SETUP_PENDING  -> FFS_NO_SETUP       -- P: ev.waitq.lock
	 *               happens only in ep0 i/o  which is P: mutex
	 * + FFS_SETUP_PENDING  -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
	 * + FFS_SETUP_CANCELED -> FFS_NO_SETUP       -- cmpxchg
	 */
	enum ffs_setup_state		setup_state;

	/* reads setup_state, atomically collapsing CANCELED -> NO_SETUP */
#define FFS_SETUP_STATE(ffs)					\
	((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state,	\
				       FFS_SETUP_CANCELED, FFS_NO_SETUP))

	/* Events & such. */
	struct {
		u8				types[4];
		unsigned short			count;
		/* XXX REVISIT need to update it in some places, or do we? */
		unsigned short			can_stall;
		struct usb_ctrlrequest		setup;

		wait_queue_head_t		waitq;
	} ev; /* the whole structure, P: ev.waitq.lock */

	/* Flags */
	unsigned long			flags;
#define FFS_FL_CALL_CLOSED_CALLBACK 0
#define FFS_FL_BOUND                1

	/* Active function */
	struct ffs_function		*func;

	/*
	 * Device name, write once when file system is mounted.
	 * Intended for user to read if she wants.
	 */
	const char			*dev_name;
	/* Private data for our user (ie. gadget).  Managed by user. */
	void				*private_data;

	/* filled by __ffs_data_got_descs() */
	/*
	 * Real descriptors are 16 bytes after raw_descs (so you need
	 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
	 * first full speed descriptor).  raw_descs_length and
	 * raw_fs_descs_length do not have those 16 bytes added.
	 */
	const void			*raw_descs;
	unsigned			raw_descs_length;
	unsigned			raw_fs_descs_length;
	unsigned			fs_descs_count;
	unsigned			hs_descs_count;

	unsigned short			strings_count;
	unsigned short			interfaces_count;
	unsigned short			eps_count;
	unsigned short			_pad1;

	/* filled by __ffs_data_got_strings() */
	/* ids in stringtabs are set in functionfs_bind() */
	const void			*raw_strings;
	struct usb_gadget_strings	**stringtabs;

	/*
	 * File system's super block, write once when file system is
	 * mounted.
	 */
	struct super_block		*sb;

	/* File permissions, written once when fs is mounted */
	struct ffs_file_perms {
		umode_t				mode;
		uid_t				uid;
		gid_t				gid;
	}				file_perms;

	/*
	 * The endpoint files, filled by ffs_epfiles_create(),
	 * destroyed by ffs_epfiles_destroy().
	 */
	struct ffs_epfile		*epfiles;
};

/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));

/* Opened counter handling. */
static void ffs_data_opened(struct ffs_data *ffs);
static void ffs_data_closed(struct ffs_data *ffs);

/* Called with ffs->mutex held; take over ownership of data. */
static int __must_check
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);


/* The function structure ***************************************************/

struct ffs_ep;

/* One bound instance of the FunctionFS usb_function. */
struct ffs_function {
	struct usb_configuration	*conf;
	struct usb_gadget		*gadget;
	struct ffs_data			*ffs;

	struct ffs_ep			*eps;
	u8				eps_revmap[16];
	short				*interfaces_nums;

	struct usb_function		function;
};


static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
{
	return container_of(f, struct ffs_function, function);
}

static void ffs_func_free(struct ffs_function *func);

static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);

static int ffs_func_bind(struct usb_configuration *,
			 struct usb_function *);
static void ffs_func_unbind(struct usb_configuration *,
			    struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
			  const struct usb_ctrlrequest *);
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);


static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);


/* The endpoints structures *************************************************/

struct ffs_ep {
	struct usb_ep			*ep;	/* P: ffs->eps_lock */
	struct usb_request		*req;	/* P: epfile->mutex */

	/* [0]: full speed, [1]: high speed */
	struct usb_endpoint_descriptor	*descs[2];

	u8				num;

	int				status;	/* P: epfile->mutex */
};

struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex			mutex;
	wait_queue_head_t		wait;

	struct ffs_data			*ffs;
	struct ffs_ep			*ep;	/* P: ffs->eps_lock */

	struct dentry			*dentry;

	char				name[5];

	unsigned char			in;	/* P: ffs->eps_lock */
	unsigned char			isoc;	/* P: ffs->eps_lock */

	unsigned char			_pad;
};

static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);

static struct inode *__must_check
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
		   const struct file_operations *fops,
		   struct dentry **dentry_p);


/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
	__attribute__((warn_unused_result, nonnull));
static char *ffs_prepare_buffer(const char * __user buf, size_t len)
	__attribute__((warn_unused_result, nonnull));


/* Control file aka ep0 *****************************************************/

/* ep0 request completion: wake whoever waits in __ffs_ep0_queue_wait() */
static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ffs_data *ffs = req->context;

	complete_all(&ffs->ep0req_completion);
}

/* Queue a buffer on ep0 and sleep until it completes (interruptibly).
 * Called with ffs->ev.waitq.lock held; drops it before queueing.
 */
static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
{
	struct usb_request *req = ffs->ep0req;
	int ret;

	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	req->buf      = data;
	req->length   = len;

	/*
	 * UDC layer requires to provide a buffer even for ZLP, but should
	 * not use it at all. Let's provide some poisoned pointer to catch
	 * possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

	INIT_COMPLETION(ffs->ep0req_completion);

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
	if (unlikely(ret)) {
		usb_ep_dequeue(ffs->gadget->ep0, req);
		return -EINTR;
	}

	ffs->setup_state = FFS_NO_SETUP;
	return ffs->ep0req_status;
}

/* Stall ep0 if the current setup allows it; -ESRCH for a bogus request */
static int __ffs_ep0_stall(struct ffs_data *ffs)
{
	if (ffs->ev.can_stall) {
		pr_vdebug("ep0 stall\n");
		usb_ep_set_halt(ffs->gadget->ep0);
		ffs->setup_state = FFS_NO_SETUP;
		return -EL2HLT;
	} else {
		pr_debug("bogus ep0 stall!\n");
		return -ESRCH;
	}
}

static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	ssize_t ret;
	char *data;

	ENTER();

	/* Fast check if setup was canceled */
	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	switch
(ffs->state) { 421 + case FFS_READ_DESCRIPTORS: 422 + case FFS_READ_STRINGS: 423 + /* Copy data */ 424 + if (unlikely(len < 16)) { 425 + ret = -EINVAL; 426 + break; 427 + } 428 + 429 + data = ffs_prepare_buffer(buf, len); 430 + if (IS_ERR(data)) { 431 + ret = PTR_ERR(data); 432 + break; 433 + } 434 + 435 + /* Handle data */ 436 + if (ffs->state == FFS_READ_DESCRIPTORS) { 437 + pr_info("read descriptors\n"); 438 + ret = __ffs_data_got_descs(ffs, data, len); 439 + if (unlikely(ret < 0)) 440 + break; 441 + 442 + ffs->state = FFS_READ_STRINGS; 443 + ret = len; 444 + } else { 445 + pr_info("read strings\n"); 446 + ret = __ffs_data_got_strings(ffs, data, len); 447 + if (unlikely(ret < 0)) 448 + break; 449 + 450 + ret = ffs_epfiles_create(ffs); 451 + if (unlikely(ret)) { 452 + ffs->state = FFS_CLOSING; 453 + break; 454 + } 455 + 456 + ffs->state = FFS_ACTIVE; 457 + mutex_unlock(&ffs->mutex); 458 + 459 + ret = functionfs_ready_callback(ffs); 460 + if (unlikely(ret < 0)) { 461 + ffs->state = FFS_CLOSING; 462 + return ret; 463 + } 464 + 465 + set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); 466 + return len; 467 + } 468 + break; 469 + 470 + case FFS_ACTIVE: 471 + data = NULL; 472 + /* 473 + * We're called from user space, we can use _irq 474 + * rather then _irqsave 475 + */ 476 + spin_lock_irq(&ffs->ev.waitq.lock); 477 + switch (FFS_SETUP_STATE(ffs)) { 478 + case FFS_SETUP_CANCELED: 479 + ret = -EIDRM; 480 + goto done_spin; 481 + 482 + case FFS_NO_SETUP: 483 + ret = -ESRCH; 484 + goto done_spin; 485 + 486 + case FFS_SETUP_PENDING: 487 + break; 488 + } 489 + 490 + /* FFS_SETUP_PENDING */ 491 + if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) { 492 + spin_unlock_irq(&ffs->ev.waitq.lock); 493 + ret = __ffs_ep0_stall(ffs); 494 + break; 495 + } 496 + 497 + /* FFS_SETUP_PENDING and not stall */ 498 + len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); 499 + 500 + spin_unlock_irq(&ffs->ev.waitq.lock); 501 + 502 + data = ffs_prepare_buffer(buf, len); 503 + if 
(IS_ERR(data)) { 504 + ret = PTR_ERR(data); 505 + break; 506 + } 507 + 508 + spin_lock_irq(&ffs->ev.waitq.lock); 509 + 510 + /* 511 + * We are guaranteed to be still in FFS_ACTIVE state 512 + * but the state of setup could have changed from 513 + * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need 514 + * to check for that. If that happened we copied data 515 + * from user space in vain but it's unlikely. 516 + * 517 + * For sure we are not in FFS_NO_SETUP since this is 518 + * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP 519 + * transition can be performed and it's protected by 520 + * mutex. 521 + */ 522 + if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) { 523 + ret = -EIDRM; 524 + done_spin: 525 + spin_unlock_irq(&ffs->ev.waitq.lock); 526 + } else { 527 + /* unlocks spinlock */ 528 + ret = __ffs_ep0_queue_wait(ffs, data, len); 529 + } 530 + kfree(data); 531 + break; 532 + 533 + default: 534 + ret = -EBADFD; 535 + break; 536 + } 537 + 538 + mutex_unlock(&ffs->mutex); 539 + return ret; 540 + } 541 + 542 + static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, 543 + size_t n) 544 + { 545 + /* 546 + * We are holding ffs->ev.waitq.lock and ffs->mutex and we need 547 + * to release them. 548 + */ 549 + struct usb_functionfs_event events[n]; 550 + unsigned i = 0; 551 + 552 + memset(events, 0, sizeof events); 553 + 554 + do { 555 + events[i].type = ffs->ev.types[i]; 556 + if (events[i].type == FUNCTIONFS_SETUP) { 557 + events[i].u.setup = ffs->ev.setup; 558 + ffs->setup_state = FFS_SETUP_PENDING; 559 + } 560 + } while (++i < n); 561 + 562 + if (n < ffs->ev.count) { 563 + ffs->ev.count -= n; 564 + memmove(ffs->ev.types, ffs->ev.types + n, 565 + ffs->ev.count * sizeof *ffs->ev.types); 566 + } else { 567 + ffs->ev.count = 0; 568 + } 569 + 570 + spin_unlock_irq(&ffs->ev.waitq.lock); 571 + mutex_unlock(&ffs->mutex); 572 + 573 + return unlikely(__copy_to_user(buf, events, sizeof events)) 574 + ? 
-EFAULT : sizeof events; 575 + } 576 + 577 + static ssize_t ffs_ep0_read(struct file *file, char __user *buf, 578 + size_t len, loff_t *ptr) 579 + { 580 + struct ffs_data *ffs = file->private_data; 581 + char *data = NULL; 582 + size_t n; 583 + int ret; 584 + 585 + ENTER(); 586 + 587 + /* Fast check if setup was canceled */ 588 + if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) 589 + return -EIDRM; 590 + 591 + /* Acquire mutex */ 592 + ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); 593 + if (unlikely(ret < 0)) 594 + return ret; 595 + 596 + /* Check state */ 597 + if (ffs->state != FFS_ACTIVE) { 598 + ret = -EBADFD; 599 + goto done_mutex; 600 + } 601 + 602 + /* 603 + * We're called from user space, we can use _irq rather then 604 + * _irqsave 605 + */ 606 + spin_lock_irq(&ffs->ev.waitq.lock); 607 + 608 + switch (FFS_SETUP_STATE(ffs)) { 609 + case FFS_SETUP_CANCELED: 610 + ret = -EIDRM; 611 + break; 612 + 613 + case FFS_NO_SETUP: 614 + n = len / sizeof(struct usb_functionfs_event); 615 + if (unlikely(!n)) { 616 + ret = -EINVAL; 617 + break; 618 + } 619 + 620 + if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) { 621 + ret = -EAGAIN; 622 + break; 623 + } 624 + 625 + if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, 626 + ffs->ev.count)) { 627 + ret = -EINTR; 628 + break; 629 + } 630 + 631 + return __ffs_ep0_read_events(ffs, buf, 632 + min(n, (size_t)ffs->ev.count)); 633 + 634 + case FFS_SETUP_PENDING: 635 + if (ffs->ev.setup.bRequestType & USB_DIR_IN) { 636 + spin_unlock_irq(&ffs->ev.waitq.lock); 637 + ret = __ffs_ep0_stall(ffs); 638 + goto done_mutex; 639 + } 640 + 641 + len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); 642 + 643 + spin_unlock_irq(&ffs->ev.waitq.lock); 644 + 645 + if (likely(len)) { 646 + data = kmalloc(len, GFP_KERNEL); 647 + if (unlikely(!data)) { 648 + ret = -ENOMEM; 649 + goto done_mutex; 650 + } 651 + } 652 + 653 + spin_lock_irq(&ffs->ev.waitq.lock); 654 + 655 + /* See ffs_ep0_write() */ 656 + if 
(FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) { 657 + ret = -EIDRM; 658 + break; 659 + } 660 + 661 + /* unlocks spinlock */ 662 + ret = __ffs_ep0_queue_wait(ffs, data, len); 663 + if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len))) 664 + ret = -EFAULT; 665 + goto done_mutex; 666 + 667 + default: 668 + ret = -EBADFD; 669 + break; 670 + } 671 + 672 + spin_unlock_irq(&ffs->ev.waitq.lock); 673 + done_mutex: 674 + mutex_unlock(&ffs->mutex); 675 + kfree(data); 676 + return ret; 677 + } 678 + 679 + static int ffs_ep0_open(struct inode *inode, struct file *file) 680 + { 681 + struct ffs_data *ffs = inode->i_private; 682 + 683 + ENTER(); 684 + 685 + if (unlikely(ffs->state == FFS_CLOSING)) 686 + return -EBUSY; 687 + 688 + file->private_data = ffs; 689 + ffs_data_opened(ffs); 690 + 691 + return 0; 692 + } 693 + 694 + static int ffs_ep0_release(struct inode *inode, struct file *file) 695 + { 696 + struct ffs_data *ffs = file->private_data; 697 + 698 + ENTER(); 699 + 700 + ffs_data_closed(ffs); 701 + 702 + return 0; 703 + } 704 + 705 + static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) 706 + { 707 + struct ffs_data *ffs = file->private_data; 708 + struct usb_gadget *gadget = ffs->gadget; 709 + long ret; 710 + 711 + ENTER(); 712 + 713 + if (code == FUNCTIONFS_INTERFACE_REVMAP) { 714 + struct ffs_function *func = ffs->func; 715 + ret = func ? 
ffs_func_revmap_intf(func, value) : -ENODEV; 716 + } else if (gadget && gadget->ops->ioctl) { 717 + ret = gadget->ops->ioctl(gadget, code, value); 718 + } else { 719 + ret = -ENOTTY; 720 + } 721 + 722 + return ret; 723 + } 724 + 725 + static const struct file_operations ffs_ep0_operations = { 726 + .owner = THIS_MODULE, 727 + .llseek = no_llseek, 728 + 729 + .open = ffs_ep0_open, 730 + .write = ffs_ep0_write, 731 + .read = ffs_ep0_read, 732 + .release = ffs_ep0_release, 733 + .unlocked_ioctl = ffs_ep0_ioctl, 734 + }; 735 + 736 + 737 + /* "Normal" endpoints operations ********************************************/ 738 + 739 + static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) 740 + { 741 + ENTER(); 742 + if (likely(req->context)) { 743 + struct ffs_ep *ep = _ep->driver_data; 744 + ep->status = req->status ? req->status : req->actual; 745 + complete(req->context); 746 + } 747 + } 748 + 749 + static ssize_t ffs_epfile_io(struct file *file, 750 + char __user *buf, size_t len, int read) 751 + { 752 + struct ffs_epfile *epfile = file->private_data; 753 + struct ffs_ep *ep; 754 + char *data = NULL; 755 + ssize_t ret; 756 + int halt; 757 + 758 + goto first_try; 759 + do { 760 + spin_unlock_irq(&epfile->ffs->eps_lock); 761 + mutex_unlock(&epfile->mutex); 762 + 763 + first_try: 764 + /* Are we still active? */ 765 + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) { 766 + ret = -ENODEV; 767 + goto error; 768 + } 769 + 770 + /* Wait for endpoint to be enabled */ 771 + ep = epfile->ep; 772 + if (!ep) { 773 + if (file->f_flags & O_NONBLOCK) { 774 + ret = -EAGAIN; 775 + goto error; 776 + } 777 + 778 + if (wait_event_interruptible(epfile->wait, 779 + (ep = epfile->ep))) { 780 + ret = -EINTR; 781 + goto error; 782 + } 783 + } 784 + 785 + /* Do we halt? 
*/ 786 + halt = !read == !epfile->in; 787 + if (halt && epfile->isoc) { 788 + ret = -EINVAL; 789 + goto error; 790 + } 791 + 792 + /* Allocate & copy */ 793 + if (!halt && !data) { 794 + data = kzalloc(len, GFP_KERNEL); 795 + if (unlikely(!data)) 796 + return -ENOMEM; 797 + 798 + if (!read && 799 + unlikely(__copy_from_user(data, buf, len))) { 800 + ret = -EFAULT; 801 + goto error; 802 + } 803 + } 804 + 805 + /* We will be using request */ 806 + ret = ffs_mutex_lock(&epfile->mutex, 807 + file->f_flags & O_NONBLOCK); 808 + if (unlikely(ret)) 809 + goto error; 810 + 811 + /* 812 + * We're called from user space, we can use _irq rather then 813 + * _irqsave 814 + */ 815 + spin_lock_irq(&epfile->ffs->eps_lock); 816 + 817 + /* 818 + * While we were acquiring mutex endpoint got disabled 819 + * or changed? 820 + */ 821 + } while (unlikely(epfile->ep != ep)); 822 + 823 + /* Halt */ 824 + if (unlikely(halt)) { 825 + if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) 826 + usb_ep_set_halt(ep->ep); 827 + spin_unlock_irq(&epfile->ffs->eps_lock); 828 + ret = -EBADMSG; 829 + } else { 830 + /* Fire the request */ 831 + DECLARE_COMPLETION_ONSTACK(done); 832 + 833 + struct usb_request *req = ep->req; 834 + req->context = &done; 835 + req->complete = ffs_epfile_io_complete; 836 + req->buf = data; 837 + req->length = len; 838 + 839 + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); 840 + 841 + spin_unlock_irq(&epfile->ffs->eps_lock); 842 + 843 + if (unlikely(ret < 0)) { 844 + /* nop */ 845 + } else if (unlikely(wait_for_completion_interruptible(&done))) { 846 + ret = -EINTR; 847 + usb_ep_dequeue(ep->ep, req); 848 + } else { 849 + ret = ep->status; 850 + if (read && ret > 0 && 851 + unlikely(copy_to_user(buf, data, ret))) 852 + ret = -EFAULT; 853 + } 854 + } 855 + 856 + mutex_unlock(&epfile->mutex); 857 + error: 858 + kfree(data); 859 + return ret; 860 + } 861 + 862 + static ssize_t 863 + ffs_epfile_write(struct file *file, const char __user *buf, size_t len, 864 + loff_t *ptr) 865 + { 
866 + ENTER(); 867 + 868 + return ffs_epfile_io(file, (char __user *)buf, len, 0); 869 + } 870 + 871 + static ssize_t 872 + ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) 873 + { 874 + ENTER(); 875 + 876 + return ffs_epfile_io(file, buf, len, 1); 877 + } 878 + 879 + static int 880 + ffs_epfile_open(struct inode *inode, struct file *file) 881 + { 882 + struct ffs_epfile *epfile = inode->i_private; 883 + 884 + ENTER(); 885 + 886 + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) 887 + return -ENODEV; 888 + 889 + file->private_data = epfile; 890 + ffs_data_opened(epfile->ffs); 891 + 892 + return 0; 893 + } 894 + 895 + static int 896 + ffs_epfile_release(struct inode *inode, struct file *file) 897 + { 898 + struct ffs_epfile *epfile = inode->i_private; 899 + 900 + ENTER(); 901 + 902 + ffs_data_closed(epfile->ffs); 903 + 904 + return 0; 905 + } 906 + 907 + static long ffs_epfile_ioctl(struct file *file, unsigned code, 908 + unsigned long value) 909 + { 910 + struct ffs_epfile *epfile = file->private_data; 911 + int ret; 912 + 913 + ENTER(); 914 + 915 + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) 916 + return -ENODEV; 917 + 918 + spin_lock_irq(&epfile->ffs->eps_lock); 919 + if (likely(epfile->ep)) { 920 + switch (code) { 921 + case FUNCTIONFS_FIFO_STATUS: 922 + ret = usb_ep_fifo_status(epfile->ep->ep); 923 + break; 924 + case FUNCTIONFS_FIFO_FLUSH: 925 + usb_ep_fifo_flush(epfile->ep->ep); 926 + ret = 0; 927 + break; 928 + case FUNCTIONFS_CLEAR_HALT: 929 + ret = usb_ep_clear_halt(epfile->ep->ep); 930 + break; 931 + case FUNCTIONFS_ENDPOINT_REVMAP: 932 + ret = epfile->ep->num; 933 + break; 934 + default: 935 + ret = -ENOTTY; 936 + } 937 + } else { 938 + ret = -ENODEV; 939 + } 940 + spin_unlock_irq(&epfile->ffs->eps_lock); 941 + 942 + return ret; 943 + } 944 + 945 + static const struct file_operations ffs_epfile_operations = { 946 + .owner = THIS_MODULE, 947 + .llseek = no_llseek, 948 + 949 + .open = ffs_epfile_open, 950 + .write = 
ffs_epfile_write, 951 + .read = ffs_epfile_read, 952 + .release = ffs_epfile_release, 953 + .unlocked_ioctl = ffs_epfile_ioctl, 954 + }; 955 + 956 + 957 + /* File system and super block operations ***********************************/ 958 + 959 + /* 960 + * Mounting the file system creates a controller file, used first for 961 + * function configuration then later for event monitoring. 962 + */ 963 + 964 + static struct inode *__must_check 965 + ffs_sb_make_inode(struct super_block *sb, void *data, 966 + const struct file_operations *fops, 967 + const struct inode_operations *iops, 968 + struct ffs_file_perms *perms) 969 + { 970 + struct inode *inode; 971 + 972 + ENTER(); 973 + 974 + inode = new_inode(sb); 975 + 976 + if (likely(inode)) { 977 + struct timespec current_time = CURRENT_TIME; 978 + 979 + inode->i_ino = get_next_ino(); 980 + inode->i_mode = perms->mode; 981 + inode->i_uid = perms->uid; 982 + inode->i_gid = perms->gid; 983 + inode->i_atime = current_time; 984 + inode->i_mtime = current_time; 985 + inode->i_ctime = current_time; 986 + inode->i_private = data; 987 + if (fops) 988 + inode->i_fop = fops; 989 + if (iops) 990 + inode->i_op = iops; 991 + } 992 + 993 + return inode; 994 + } 995 + 996 + /* Create "regular" file */ 997 + static struct inode *ffs_sb_create_file(struct super_block *sb, 998 + const char *name, void *data, 999 + const struct file_operations *fops, 1000 + struct dentry **dentry_p) 1001 + { 1002 + struct ffs_data *ffs = sb->s_fs_info; 1003 + struct dentry *dentry; 1004 + struct inode *inode; 1005 + 1006 + ENTER(); 1007 + 1008 + dentry = d_alloc_name(sb->s_root, name); 1009 + if (unlikely(!dentry)) 1010 + return NULL; 1011 + 1012 + inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms); 1013 + if (unlikely(!inode)) { 1014 + dput(dentry); 1015 + return NULL; 1016 + } 1017 + 1018 + d_add(dentry, inode); 1019 + if (dentry_p) 1020 + *dentry_p = dentry; 1021 + 1022 + return inode; 1023 + } 1024 + 1025 + /* Super block */ 1026 + 
static const struct super_operations ffs_sb_operations = { 1027 + .statfs = simple_statfs, 1028 + .drop_inode = generic_delete_inode, 1029 + }; 1030 + 1031 + struct ffs_sb_fill_data { 1032 + struct ffs_file_perms perms; 1033 + umode_t root_mode; 1034 + const char *dev_name; 1035 + union { 1036 + /* set by ffs_fs_mount(), read by ffs_sb_fill() */ 1037 + void *private_data; 1038 + /* set by ffs_sb_fill(), read by ffs_fs_mount */ 1039 + struct ffs_data *ffs_data; 1040 + }; 1041 + }; 1042 + 1043 + static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) 1044 + { 1045 + struct ffs_sb_fill_data *data = _data; 1046 + struct inode *inode; 1047 + struct ffs_data *ffs; 1048 + 1049 + ENTER(); 1050 + 1051 + /* Initialise data */ 1052 + ffs = ffs_data_new(); 1053 + if (unlikely(!ffs)) 1054 + goto Enomem; 1055 + 1056 + ffs->sb = sb; 1057 + ffs->dev_name = kstrdup(data->dev_name, GFP_KERNEL); 1058 + if (unlikely(!ffs->dev_name)) 1059 + goto Enomem; 1060 + ffs->file_perms = data->perms; 1061 + ffs->private_data = data->private_data; 1062 + 1063 + /* used by the caller of this function */ 1064 + data->ffs_data = ffs; 1065 + 1066 + sb->s_fs_info = ffs; 1067 + sb->s_blocksize = PAGE_CACHE_SIZE; 1068 + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 1069 + sb->s_magic = FUNCTIONFS_MAGIC; 1070 + sb->s_op = &ffs_sb_operations; 1071 + sb->s_time_gran = 1; 1072 + 1073 + /* Root inode */ 1074 + data->perms.mode = data->root_mode; 1075 + inode = ffs_sb_make_inode(sb, NULL, 1076 + &simple_dir_operations, 1077 + &simple_dir_inode_operations, 1078 + &data->perms); 1079 + sb->s_root = d_make_root(inode); 1080 + if (unlikely(!sb->s_root)) 1081 + goto Enomem; 1082 + 1083 + /* EP0 file */ 1084 + if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, 1085 + &ffs_ep0_operations, NULL))) 1086 + goto Enomem; 1087 + 1088 + return 0; 1089 + 1090 + Enomem: 1091 + return -ENOMEM; 1092 + } 1093 + 1094 + static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) 1095 + { 1096 + ENTER(); 1097 
+ 1098 + if (!opts || !*opts) 1099 + return 0; 1100 + 1101 + for (;;) { 1102 + char *end, *eq, *comma; 1103 + unsigned long value; 1104 + 1105 + /* Option limit */ 1106 + comma = strchr(opts, ','); 1107 + if (comma) 1108 + *comma = 0; 1109 + 1110 + /* Value limit */ 1111 + eq = strchr(opts, '='); 1112 + if (unlikely(!eq)) { 1113 + pr_err("'=' missing in %s\n", opts); 1114 + return -EINVAL; 1115 + } 1116 + *eq = 0; 1117 + 1118 + /* Parse value */ 1119 + value = simple_strtoul(eq + 1, &end, 0); 1120 + if (unlikely(*end != ',' && *end != 0)) { 1121 + pr_err("%s: invalid value: %s\n", opts, eq + 1); 1122 + return -EINVAL; 1123 + } 1124 + 1125 + /* Interpret option */ 1126 + switch (eq - opts) { 1127 + case 5: 1128 + if (!memcmp(opts, "rmode", 5)) 1129 + data->root_mode = (value & 0555) | S_IFDIR; 1130 + else if (!memcmp(opts, "fmode", 5)) 1131 + data->perms.mode = (value & 0666) | S_IFREG; 1132 + else 1133 + goto invalid; 1134 + break; 1135 + 1136 + case 4: 1137 + if (!memcmp(opts, "mode", 4)) { 1138 + data->root_mode = (value & 0555) | S_IFDIR; 1139 + data->perms.mode = (value & 0666) | S_IFREG; 1140 + } else { 1141 + goto invalid; 1142 + } 1143 + break; 1144 + 1145 + case 3: 1146 + if (!memcmp(opts, "uid", 3)) 1147 + data->perms.uid = value; 1148 + else if (!memcmp(opts, "gid", 3)) 1149 + data->perms.gid = value; 1150 + else 1151 + goto invalid; 1152 + break; 1153 + 1154 + default: 1155 + invalid: 1156 + pr_err("%s: invalid option\n", opts); 1157 + return -EINVAL; 1158 + } 1159 + 1160 + /* Next iteration */ 1161 + if (!comma) 1162 + break; 1163 + opts = comma + 1; 1164 + } 1165 + 1166 + return 0; 1167 + } 1168 + 1169 + /* "mount -t functionfs dev_name /dev/function" ends up here */ 1170 + 1171 + static struct dentry * 1172 + ffs_fs_mount(struct file_system_type *t, int flags, 1173 + const char *dev_name, void *opts) 1174 + { 1175 + struct ffs_sb_fill_data data = { 1176 + .perms = { 1177 + .mode = S_IFREG | 0600, 1178 + .uid = 0, 1179 + .gid = 0 1180 + }, 1181 + 
.root_mode = S_IFDIR | 0500, 1182 + }; 1183 + struct dentry *rv; 1184 + int ret; 1185 + void *ffs_dev; 1186 + 1187 + ENTER(); 1188 + 1189 + ret = ffs_fs_parse_opts(&data, opts); 1190 + if (unlikely(ret < 0)) 1191 + return ERR_PTR(ret); 1192 + 1193 + ffs_dev = functionfs_acquire_dev_callback(dev_name); 1194 + if (IS_ERR(ffs_dev)) 1195 + return ffs_dev; 1196 + 1197 + data.dev_name = dev_name; 1198 + data.private_data = ffs_dev; 1199 + rv = mount_nodev(t, flags, &data, ffs_sb_fill); 1200 + 1201 + /* data.ffs_data is set by ffs_sb_fill */ 1202 + if (IS_ERR(rv)) 1203 + functionfs_release_dev_callback(data.ffs_data); 1204 + 1205 + return rv; 1206 + } 1207 + 1208 + static void 1209 + ffs_fs_kill_sb(struct super_block *sb) 1210 + { 1211 + ENTER(); 1212 + 1213 + kill_litter_super(sb); 1214 + if (sb->s_fs_info) { 1215 + functionfs_release_dev_callback(sb->s_fs_info); 1216 + ffs_data_put(sb->s_fs_info); 1217 + } 1218 + } 1219 + 1220 + static struct file_system_type ffs_fs_type = { 1221 + .owner = THIS_MODULE, 1222 + .name = "functionfs", 1223 + .mount = ffs_fs_mount, 1224 + .kill_sb = ffs_fs_kill_sb, 1225 + }; 1226 + 1227 + 1228 + /* Driver's main init/cleanup functions *************************************/ 1229 + 1230 + static int functionfs_init(void) 1231 + { 1232 + int ret; 1233 + 1234 + ENTER(); 1235 + 1236 + ret = register_filesystem(&ffs_fs_type); 1237 + if (likely(!ret)) 1238 + pr_info("file system registered\n"); 1239 + else 1240 + pr_err("failed registering file system (%d)\n", ret); 1241 + 1242 + return ret; 1243 + } 1244 + 1245 + static void functionfs_cleanup(void) 1246 + { 1247 + ENTER(); 1248 + 1249 + pr_info("unloading\n"); 1250 + unregister_filesystem(&ffs_fs_type); 1251 + } 1252 + 1253 + 1254 + /* ffs_data and ffs_function construction and destruction code **************/ 1255 + 1256 + static void ffs_data_clear(struct ffs_data *ffs); 1257 + static void ffs_data_reset(struct ffs_data *ffs); 1258 + 1259 + static void ffs_data_get(struct ffs_data *ffs) 1260 + 
{ 1261 + ENTER(); 1262 + 1263 + atomic_inc(&ffs->ref); 1264 + } 1265 + 1266 + static void ffs_data_opened(struct ffs_data *ffs) 1267 + { 1268 + ENTER(); 1269 + 1270 + atomic_inc(&ffs->ref); 1271 + atomic_inc(&ffs->opened); 1272 + } 1273 + 1274 + static void ffs_data_put(struct ffs_data *ffs) 1275 + { 1276 + ENTER(); 1277 + 1278 + if (unlikely(atomic_dec_and_test(&ffs->ref))) { 1279 + pr_info("%s(): freeing\n", __func__); 1280 + ffs_data_clear(ffs); 1281 + BUG_ON(waitqueue_active(&ffs->ev.waitq) || 1282 + waitqueue_active(&ffs->ep0req_completion.wait)); 1283 + kfree(ffs->dev_name); 1284 + kfree(ffs); 1285 + } 1286 + } 1287 + 1288 + static void ffs_data_closed(struct ffs_data *ffs) 1289 + { 1290 + ENTER(); 1291 + 1292 + if (atomic_dec_and_test(&ffs->opened)) { 1293 + ffs->state = FFS_CLOSING; 1294 + ffs_data_reset(ffs); 1295 + } 1296 + 1297 + ffs_data_put(ffs); 1298 + } 1299 + 1300 + static struct ffs_data *ffs_data_new(void) 1301 + { 1302 + struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); 1303 + if (unlikely(!ffs)) 1304 + return 0; 1305 + 1306 + ENTER(); 1307 + 1308 + atomic_set(&ffs->ref, 1); 1309 + atomic_set(&ffs->opened, 0); 1310 + ffs->state = FFS_READ_DESCRIPTORS; 1311 + mutex_init(&ffs->mutex); 1312 + spin_lock_init(&ffs->eps_lock); 1313 + init_waitqueue_head(&ffs->ev.waitq); 1314 + init_completion(&ffs->ep0req_completion); 1315 + 1316 + /* XXX REVISIT need to update it in some places, or do we? 
*/ 1317 + ffs->ev.can_stall = 1; 1318 + 1319 + return ffs; 1320 + } 1321 + 1322 + static void ffs_data_clear(struct ffs_data *ffs) 1323 + { 1324 + ENTER(); 1325 + 1326 + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags)) 1327 + functionfs_closed_callback(ffs); 1328 + 1329 + BUG_ON(ffs->gadget); 1330 + 1331 + if (ffs->epfiles) 1332 + ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); 1333 + 1334 + kfree(ffs->raw_descs); 1335 + kfree(ffs->raw_strings); 1336 + kfree(ffs->stringtabs); 1337 + } 1338 + 1339 + static void ffs_data_reset(struct ffs_data *ffs) 1340 + { 1341 + ENTER(); 1342 + 1343 + ffs_data_clear(ffs); 1344 + 1345 + ffs->epfiles = NULL; 1346 + ffs->raw_descs = NULL; 1347 + ffs->raw_strings = NULL; 1348 + ffs->stringtabs = NULL; 1349 + 1350 + ffs->raw_descs_length = 0; 1351 + ffs->raw_fs_descs_length = 0; 1352 + ffs->fs_descs_count = 0; 1353 + ffs->hs_descs_count = 0; 1354 + 1355 + ffs->strings_count = 0; 1356 + ffs->interfaces_count = 0; 1357 + ffs->eps_count = 0; 1358 + 1359 + ffs->ev.count = 0; 1360 + 1361 + ffs->state = FFS_READ_DESCRIPTORS; 1362 + ffs->setup_state = FFS_NO_SETUP; 1363 + ffs->flags = 0; 1364 + } 1365 + 1366 + 1367 + static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) 1368 + { 1369 + struct usb_gadget_strings **lang; 1370 + int first_id; 1371 + 1372 + ENTER(); 1373 + 1374 + if (WARN_ON(ffs->state != FFS_ACTIVE 1375 + || test_and_set_bit(FFS_FL_BOUND, &ffs->flags))) 1376 + return -EBADFD; 1377 + 1378 + first_id = usb_string_ids_n(cdev, ffs->strings_count); 1379 + if (unlikely(first_id < 0)) 1380 + return first_id; 1381 + 1382 + ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); 1383 + if (unlikely(!ffs->ep0req)) 1384 + return -ENOMEM; 1385 + ffs->ep0req->complete = ffs_ep0_complete; 1386 + ffs->ep0req->context = ffs; 1387 + 1388 + lang = ffs->stringtabs; 1389 + for (lang = ffs->stringtabs; *lang; ++lang) { 1390 + struct usb_string *str = (*lang)->strings; 1391 + int id = first_id; 
1392 + for (; str->s; ++id, ++str) 1393 + str->id = id; 1394 + } 1395 + 1396 + ffs->gadget = cdev->gadget; 1397 + ffs_data_get(ffs); 1398 + return 0; 1399 + } 1400 + 1401 + static void functionfs_unbind(struct ffs_data *ffs) 1402 + { 1403 + ENTER(); 1404 + 1405 + if (!WARN_ON(!ffs->gadget)) { 1406 + usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); 1407 + ffs->ep0req = NULL; 1408 + ffs->gadget = NULL; 1409 + ffs_data_put(ffs); 1410 + clear_bit(FFS_FL_BOUND, &ffs->flags); 1411 + } 1412 + } 1413 + 1414 + static int ffs_epfiles_create(struct ffs_data *ffs) 1415 + { 1416 + struct ffs_epfile *epfile, *epfiles; 1417 + unsigned i, count; 1418 + 1419 + ENTER(); 1420 + 1421 + count = ffs->eps_count; 1422 + epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL); 1423 + if (!epfiles) 1424 + return -ENOMEM; 1425 + 1426 + epfile = epfiles; 1427 + for (i = 1; i <= count; ++i, ++epfile) { 1428 + epfile->ffs = ffs; 1429 + mutex_init(&epfile->mutex); 1430 + init_waitqueue_head(&epfile->wait); 1431 + sprintf(epfiles->name, "ep%u", i); 1432 + if (!unlikely(ffs_sb_create_file(ffs->sb, epfiles->name, epfile, 1433 + &ffs_epfile_operations, 1434 + &epfile->dentry))) { 1435 + ffs_epfiles_destroy(epfiles, i - 1); 1436 + return -ENOMEM; 1437 + } 1438 + } 1439 + 1440 + ffs->epfiles = epfiles; 1441 + return 0; 1442 + } 1443 + 1444 + static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) 1445 + { 1446 + struct ffs_epfile *epfile = epfiles; 1447 + 1448 + ENTER(); 1449 + 1450 + for (; count; --count, ++epfile) { 1451 + BUG_ON(mutex_is_locked(&epfile->mutex) || 1452 + waitqueue_active(&epfile->wait)); 1453 + if (epfile->dentry) { 1454 + d_delete(epfile->dentry); 1455 + dput(epfile->dentry); 1456 + epfile->dentry = NULL; 1457 + } 1458 + } 1459 + 1460 + kfree(epfiles); 1461 + } 1462 + 1463 + static int functionfs_bind_config(struct usb_composite_dev *cdev, 1464 + struct usb_configuration *c, 1465 + struct ffs_data *ffs) 1466 + { 1467 + struct ffs_function *func; 1468 + int 
ret; 1469 + 1470 + ENTER(); 1471 + 1472 + func = kzalloc(sizeof *func, GFP_KERNEL); 1473 + if (unlikely(!func)) 1474 + return -ENOMEM; 1475 + 1476 + func->function.name = "Function FS Gadget"; 1477 + func->function.strings = ffs->stringtabs; 1478 + 1479 + func->function.bind = ffs_func_bind; 1480 + func->function.unbind = ffs_func_unbind; 1481 + func->function.set_alt = ffs_func_set_alt; 1482 + func->function.disable = ffs_func_disable; 1483 + func->function.setup = ffs_func_setup; 1484 + func->function.suspend = ffs_func_suspend; 1485 + func->function.resume = ffs_func_resume; 1486 + 1487 + func->conf = c; 1488 + func->gadget = cdev->gadget; 1489 + func->ffs = ffs; 1490 + ffs_data_get(ffs); 1491 + 1492 + ret = usb_add_function(c, &func->function); 1493 + if (unlikely(ret)) 1494 + ffs_func_free(func); 1495 + 1496 + return ret; 1497 + } 1498 + 1499 + static void ffs_func_free(struct ffs_function *func) 1500 + { 1501 + struct ffs_ep *ep = func->eps; 1502 + unsigned count = func->ffs->eps_count; 1503 + unsigned long flags; 1504 + 1505 + ENTER(); 1506 + 1507 + /* cleanup after autoconfig */ 1508 + spin_lock_irqsave(&func->ffs->eps_lock, flags); 1509 + do { 1510 + if (ep->ep && ep->req) 1511 + usb_ep_free_request(ep->ep, ep->req); 1512 + ep->req = NULL; 1513 + ++ep; 1514 + } while (--count); 1515 + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1516 + 1517 + ffs_data_put(func->ffs); 1518 + 1519 + kfree(func->eps); 1520 + /* 1521 + * eps and interfaces_nums are allocated in the same chunk so 1522 + * only one free is required. Descriptors are also allocated 1523 + * in the same chunk. 
1524 + */ 1525 + 1526 + kfree(func); 1527 + } 1528 + 1529 + static void ffs_func_eps_disable(struct ffs_function *func) 1530 + { 1531 + struct ffs_ep *ep = func->eps; 1532 + struct ffs_epfile *epfile = func->ffs->epfiles; 1533 + unsigned count = func->ffs->eps_count; 1534 + unsigned long flags; 1535 + 1536 + spin_lock_irqsave(&func->ffs->eps_lock, flags); 1537 + do { 1538 + /* pending requests get nuked */ 1539 + if (likely(ep->ep)) 1540 + usb_ep_disable(ep->ep); 1541 + epfile->ep = NULL; 1542 + 1543 + ++ep; 1544 + ++epfile; 1545 + } while (--count); 1546 + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1547 + } 1548 + 1549 + static int ffs_func_eps_enable(struct ffs_function *func) 1550 + { 1551 + struct ffs_data *ffs = func->ffs; 1552 + struct ffs_ep *ep = func->eps; 1553 + struct ffs_epfile *epfile = ffs->epfiles; 1554 + unsigned count = ffs->eps_count; 1555 + unsigned long flags; 1556 + int ret = 0; 1557 + 1558 + spin_lock_irqsave(&func->ffs->eps_lock, flags); 1559 + do { 1560 + struct usb_endpoint_descriptor *ds; 1561 + ds = ep->descs[ep->descs[1] ? 1 : 0]; 1562 + 1563 + ep->ep->driver_data = ep; 1564 + ep->ep->desc = ds; 1565 + ret = usb_ep_enable(ep->ep); 1566 + if (likely(!ret)) { 1567 + epfile->ep = ep; 1568 + epfile->in = usb_endpoint_dir_in(ds); 1569 + epfile->isoc = usb_endpoint_xfer_isoc(ds); 1570 + } else { 1571 + break; 1572 + } 1573 + 1574 + wake_up(&epfile->wait); 1575 + 1576 + ++ep; 1577 + ++epfile; 1578 + } while (--count); 1579 + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); 1580 + 1581 + return ret; 1582 + } 1583 + 1584 + 1585 + /* Parsing and building descriptors and strings *****************************/ 1586 + 1587 + /* 1588 + * This validates if data pointed by data is a valid USB descriptor as 1589 + * well as record how many interfaces, endpoints and strings are 1590 + * required by given configuration. Returns address after the 1591 + * descriptor or NULL if data is invalid. 
1592 + */ 1593 + 1594 + enum ffs_entity_type { 1595 + FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT 1596 + }; 1597 + 1598 + typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, 1599 + u8 *valuep, 1600 + struct usb_descriptor_header *desc, 1601 + void *priv); 1602 + 1603 + static int __must_check ffs_do_desc(char *data, unsigned len, 1604 + ffs_entity_callback entity, void *priv) 1605 + { 1606 + struct usb_descriptor_header *_ds = (void *)data; 1607 + u8 length; 1608 + int ret; 1609 + 1610 + ENTER(); 1611 + 1612 + /* At least two bytes are required: length and type */ 1613 + if (len < 2) { 1614 + pr_vdebug("descriptor too short\n"); 1615 + return -EINVAL; 1616 + } 1617 + 1618 + /* If we have at least as many bytes as the descriptor takes? */ 1619 + length = _ds->bLength; 1620 + if (len < length) { 1621 + pr_vdebug("descriptor longer then available data\n"); 1622 + return -EINVAL; 1623 + } 1624 + 1625 + #define __entity_check_INTERFACE(val) 1 1626 + #define __entity_check_STRING(val) (val) 1627 + #define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK) 1628 + #define __entity(type, val) do { \ 1629 + pr_vdebug("entity " #type "(%02x)\n", (val)); \ 1630 + if (unlikely(!__entity_check_ ##type(val))) { \ 1631 + pr_vdebug("invalid entity's value\n"); \ 1632 + return -EINVAL; \ 1633 + } \ 1634 + ret = entity(FFS_ ##type, &val, _ds, priv); \ 1635 + if (unlikely(ret < 0)) { \ 1636 + pr_debug("entity " #type "(%02x); ret = %d\n", \ 1637 + (val), ret); \ 1638 + return ret; \ 1639 + } \ 1640 + } while (0) 1641 + 1642 + /* Parse descriptor depending on type. 
*/ 1643 + switch (_ds->bDescriptorType) { 1644 + case USB_DT_DEVICE: 1645 + case USB_DT_CONFIG: 1646 + case USB_DT_STRING: 1647 + case USB_DT_DEVICE_QUALIFIER: 1648 + /* function can't have any of those */ 1649 + pr_vdebug("descriptor reserved for gadget: %d\n", 1650 + _ds->bDescriptorType); 1651 + return -EINVAL; 1652 + 1653 + case USB_DT_INTERFACE: { 1654 + struct usb_interface_descriptor *ds = (void *)_ds; 1655 + pr_vdebug("interface descriptor\n"); 1656 + if (length != sizeof *ds) 1657 + goto inv_length; 1658 + 1659 + __entity(INTERFACE, ds->bInterfaceNumber); 1660 + if (ds->iInterface) 1661 + __entity(STRING, ds->iInterface); 1662 + } 1663 + break; 1664 + 1665 + case USB_DT_ENDPOINT: { 1666 + struct usb_endpoint_descriptor *ds = (void *)_ds; 1667 + pr_vdebug("endpoint descriptor\n"); 1668 + if (length != USB_DT_ENDPOINT_SIZE && 1669 + length != USB_DT_ENDPOINT_AUDIO_SIZE) 1670 + goto inv_length; 1671 + __entity(ENDPOINT, ds->bEndpointAddress); 1672 + } 1673 + break; 1674 + 1675 + case HID_DT_HID: 1676 + pr_vdebug("hid descriptor\n"); 1677 + if (length != sizeof(struct hid_descriptor)) 1678 + goto inv_length; 1679 + break; 1680 + 1681 + case USB_DT_OTG: 1682 + if (length != sizeof(struct usb_otg_descriptor)) 1683 + goto inv_length; 1684 + break; 1685 + 1686 + case USB_DT_INTERFACE_ASSOCIATION: { 1687 + struct usb_interface_assoc_descriptor *ds = (void *)_ds; 1688 + pr_vdebug("interface association descriptor\n"); 1689 + if (length != sizeof *ds) 1690 + goto inv_length; 1691 + if (ds->iFunction) 1692 + __entity(STRING, ds->iFunction); 1693 + } 1694 + break; 1695 + 1696 + case USB_DT_OTHER_SPEED_CONFIG: 1697 + case USB_DT_INTERFACE_POWER: 1698 + case USB_DT_DEBUG: 1699 + case USB_DT_SECURITY: 1700 + case USB_DT_CS_RADIO_CONTROL: 1701 + /* TODO */ 1702 + pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType); 1703 + return -EINVAL; 1704 + 1705 + default: 1706 + /* We should never be here */ 1707 + pr_vdebug("unknown descriptor: %d\n", 
_ds->bDescriptorType); 1708 + return -EINVAL; 1709 + 1710 + inv_length: 1711 + pr_vdebug("invalid length: %d (descriptor %d)\n", 1712 + _ds->bLength, _ds->bDescriptorType); 1713 + return -EINVAL; 1714 + } 1715 + 1716 + #undef __entity 1717 + #undef __entity_check_DESCRIPTOR 1718 + #undef __entity_check_INTERFACE 1719 + #undef __entity_check_STRING 1720 + #undef __entity_check_ENDPOINT 1721 + 1722 + return length; 1723 + } 1724 + 1725 + static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, 1726 + ffs_entity_callback entity, void *priv) 1727 + { 1728 + const unsigned _len = len; 1729 + unsigned long num = 0; 1730 + 1731 + ENTER(); 1732 + 1733 + for (;;) { 1734 + int ret; 1735 + 1736 + if (num == count) 1737 + data = NULL; 1738 + 1739 + /* Record "descriptor" entity */ 1740 + ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv); 1741 + if (unlikely(ret < 0)) { 1742 + pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n", 1743 + num, ret); 1744 + return ret; 1745 + } 1746 + 1747 + if (!data) 1748 + return _len - len; 1749 + 1750 + ret = ffs_do_desc(data, len, entity, priv); 1751 + if (unlikely(ret < 0)) { 1752 + pr_debug("%s returns %d\n", __func__, ret); 1753 + return ret; 1754 + } 1755 + 1756 + len -= ret; 1757 + data += ret; 1758 + ++num; 1759 + } 1760 + } 1761 + 1762 + static int __ffs_data_do_entity(enum ffs_entity_type type, 1763 + u8 *valuep, struct usb_descriptor_header *desc, 1764 + void *priv) 1765 + { 1766 + struct ffs_data *ffs = priv; 1767 + 1768 + ENTER(); 1769 + 1770 + switch (type) { 1771 + case FFS_DESCRIPTOR: 1772 + break; 1773 + 1774 + case FFS_INTERFACE: 1775 + /* 1776 + * Interfaces are indexed from zero so if we 1777 + * encountered interface "n" then there are at least 1778 + * "n+1" interfaces. 
1779 + */ 1780 + if (*valuep >= ffs->interfaces_count) 1781 + ffs->interfaces_count = *valuep + 1; 1782 + break; 1783 + 1784 + case FFS_STRING: 1785 + /* 1786 + * Strings are indexed from 1 (0 is magic ;) reserved 1787 + * for languages list or some such) 1788 + */ 1789 + if (*valuep > ffs->strings_count) 1790 + ffs->strings_count = *valuep; 1791 + break; 1792 + 1793 + case FFS_ENDPOINT: 1794 + /* Endpoints are indexed from 1 as well. */ 1795 + if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count) 1796 + ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK); 1797 + break; 1798 + } 1799 + 1800 + return 0; 1801 + } 1802 + 1803 + static int __ffs_data_got_descs(struct ffs_data *ffs, 1804 + char *const _data, size_t len) 1805 + { 1806 + unsigned fs_count, hs_count; 1807 + int fs_len, ret = -EINVAL; 1808 + char *data = _data; 1809 + 1810 + ENTER(); 1811 + 1812 + if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC || 1813 + get_unaligned_le32(data + 4) != len)) 1814 + goto error; 1815 + fs_count = get_unaligned_le32(data + 8); 1816 + hs_count = get_unaligned_le32(data + 12); 1817 + 1818 + if (!fs_count && !hs_count) 1819 + goto einval; 1820 + 1821 + data += 16; 1822 + len -= 16; 1823 + 1824 + if (likely(fs_count)) { 1825 + fs_len = ffs_do_descs(fs_count, data, len, 1826 + __ffs_data_do_entity, ffs); 1827 + if (unlikely(fs_len < 0)) { 1828 + ret = fs_len; 1829 + goto error; 1830 + } 1831 + 1832 + data += fs_len; 1833 + len -= fs_len; 1834 + } else { 1835 + fs_len = 0; 1836 + } 1837 + 1838 + if (likely(hs_count)) { 1839 + ret = ffs_do_descs(hs_count, data, len, 1840 + __ffs_data_do_entity, ffs); 1841 + if (unlikely(ret < 0)) 1842 + goto error; 1843 + } else { 1844 + ret = 0; 1845 + } 1846 + 1847 + if (unlikely(len != ret)) 1848 + goto einval; 1849 + 1850 + ffs->raw_fs_descs_length = fs_len; 1851 + ffs->raw_descs_length = fs_len + ret; 1852 + ffs->raw_descs = _data; 1853 + ffs->fs_descs_count = fs_count; 1854 + ffs->hs_descs_count = hs_count; 1855 + 
1856 + return 0; 1857 + 1858 + einval: 1859 + ret = -EINVAL; 1860 + error: 1861 + kfree(_data); 1862 + return ret; 1863 + } 1864 + 1865 + static int __ffs_data_got_strings(struct ffs_data *ffs, 1866 + char *const _data, size_t len) 1867 + { 1868 + u32 str_count, needed_count, lang_count; 1869 + struct usb_gadget_strings **stringtabs, *t; 1870 + struct usb_string *strings, *s; 1871 + const char *data = _data; 1872 + 1873 + ENTER(); 1874 + 1875 + if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || 1876 + get_unaligned_le32(data + 4) != len)) 1877 + goto error; 1878 + str_count = get_unaligned_le32(data + 8); 1879 + lang_count = get_unaligned_le32(data + 12); 1880 + 1881 + /* if one is zero the other must be zero */ 1882 + if (unlikely(!str_count != !lang_count)) 1883 + goto error; 1884 + 1885 + /* Do we have at least as many strings as descriptors need? */ 1886 + needed_count = ffs->strings_count; 1887 + if (unlikely(str_count < needed_count)) 1888 + goto error; 1889 + 1890 + /* 1891 + * If we don't need any strings just return and free all 1892 + * memory. 1893 + */ 1894 + if (!needed_count) { 1895 + kfree(_data); 1896 + return 0; 1897 + } 1898 + 1899 + /* Allocate everything in one chunk so there's less maintenance. 
*/ 1900 + { 1901 + struct { 1902 + struct usb_gadget_strings *stringtabs[lang_count + 1]; 1903 + struct usb_gadget_strings stringtab[lang_count]; 1904 + struct usb_string strings[lang_count*(needed_count+1)]; 1905 + } *d; 1906 + unsigned i = 0; 1907 + 1908 + d = kmalloc(sizeof *d, GFP_KERNEL); 1909 + if (unlikely(!d)) { 1910 + kfree(_data); 1911 + return -ENOMEM; 1912 + } 1913 + 1914 + stringtabs = d->stringtabs; 1915 + t = d->stringtab; 1916 + i = lang_count; 1917 + do { 1918 + *stringtabs++ = t++; 1919 + } while (--i); 1920 + *stringtabs = NULL; 1921 + 1922 + stringtabs = d->stringtabs; 1923 + t = d->stringtab; 1924 + s = d->strings; 1925 + strings = s; 1926 + } 1927 + 1928 + /* For each language */ 1929 + data += 16; 1930 + len -= 16; 1931 + 1932 + do { /* lang_count > 0 so we can use do-while */ 1933 + unsigned needed = needed_count; 1934 + 1935 + if (unlikely(len < 3)) 1936 + goto error_free; 1937 + t->language = get_unaligned_le16(data); 1938 + t->strings = s; 1939 + ++t; 1940 + 1941 + data += 2; 1942 + len -= 2; 1943 + 1944 + /* For each string */ 1945 + do { /* str_count > 0 so we can use do-while */ 1946 + size_t length = strnlen(data, len); 1947 + 1948 + if (unlikely(length == len)) 1949 + goto error_free; 1950 + 1951 + /* 1952 + * User may provide more strings then we need, 1953 + * if that's the case we simply ignore the 1954 + * rest 1955 + */ 1956 + if (likely(needed)) { 1957 + /* 1958 + * s->id will be set while adding 1959 + * function to configuration so for 1960 + * now just leave garbage here. 1961 + */ 1962 + s->s = data; 1963 + --needed; 1964 + ++s; 1965 + } 1966 + 1967 + data += length + 1; 1968 + len -= length + 1; 1969 + } while (--str_count); 1970 + 1971 + s->id = 0; /* terminator */ 1972 + s->s = NULL; 1973 + ++s; 1974 + 1975 + } while (--lang_count); 1976 + 1977 + /* Some garbage left? */ 1978 + if (unlikely(len)) 1979 + goto error_free; 1980 + 1981 + /* Done! 
*/ 1982 + ffs->stringtabs = stringtabs; 1983 + ffs->raw_strings = _data; 1984 + 1985 + return 0; 1986 + 1987 + error_free: 1988 + kfree(stringtabs); 1989 + error: 1990 + kfree(_data); 1991 + return -EINVAL; 1992 + } 1993 + 1994 + 1995 + /* Events handling and management *******************************************/ 1996 + 1997 + static void __ffs_event_add(struct ffs_data *ffs, 1998 + enum usb_functionfs_event_type type) 1999 + { 2000 + enum usb_functionfs_event_type rem_type1, rem_type2 = type; 2001 + int neg = 0; 2002 + 2003 + /* 2004 + * Abort any unhandled setup 2005 + * 2006 + * We do not need to worry about some cmpxchg() changing value 2007 + * of ffs->setup_state without holding the lock because when 2008 + * state is FFS_SETUP_PENDING cmpxchg() in several places in 2009 + * the source does nothing. 2010 + */ 2011 + if (ffs->setup_state == FFS_SETUP_PENDING) 2012 + ffs->setup_state = FFS_SETUP_CANCELED; 2013 + 2014 + switch (type) { 2015 + case FUNCTIONFS_RESUME: 2016 + rem_type2 = FUNCTIONFS_SUSPEND; 2017 + /* FALL THROUGH */ 2018 + case FUNCTIONFS_SUSPEND: 2019 + case FUNCTIONFS_SETUP: 2020 + rem_type1 = type; 2021 + /* Discard all similar events */ 2022 + break; 2023 + 2024 + case FUNCTIONFS_BIND: 2025 + case FUNCTIONFS_UNBIND: 2026 + case FUNCTIONFS_DISABLE: 2027 + case FUNCTIONFS_ENABLE: 2028 + /* Discard everything other then power management. 
*/ 2029 + rem_type1 = FUNCTIONFS_SUSPEND; 2030 + rem_type2 = FUNCTIONFS_RESUME; 2031 + neg = 1; 2032 + break; 2033 + 2034 + default: 2035 + BUG(); 2036 + } 2037 + 2038 + { 2039 + u8 *ev = ffs->ev.types, *out = ev; 2040 + unsigned n = ffs->ev.count; 2041 + for (; n; --n, ++ev) 2042 + if ((*ev == rem_type1 || *ev == rem_type2) == neg) 2043 + *out++ = *ev; 2044 + else 2045 + pr_vdebug("purging event %d\n", *ev); 2046 + ffs->ev.count = out - ffs->ev.types; 2047 + } 2048 + 2049 + pr_vdebug("adding event %d\n", type); 2050 + ffs->ev.types[ffs->ev.count++] = type; 2051 + wake_up_locked(&ffs->ev.waitq); 2052 + } 2053 + 2054 + static void ffs_event_add(struct ffs_data *ffs, 2055 + enum usb_functionfs_event_type type) 2056 + { 2057 + unsigned long flags; 2058 + spin_lock_irqsave(&ffs->ev.waitq.lock, flags); 2059 + __ffs_event_add(ffs, type); 2060 + spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 2061 + } 2062 + 2063 + 2064 + /* Bind/unbind USB function hooks *******************************************/ 2065 + 2066 + static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, 2067 + struct usb_descriptor_header *desc, 2068 + void *priv) 2069 + { 2070 + struct usb_endpoint_descriptor *ds = (void *)desc; 2071 + struct ffs_function *func = priv; 2072 + struct ffs_ep *ffs_ep; 2073 + 2074 + /* 2075 + * If hs_descriptors is not NULL then we are reading hs 2076 + * descriptors now 2077 + */ 2078 + const int isHS = func->function.hs_descriptors != NULL; 2079 + unsigned idx; 2080 + 2081 + if (type != FFS_DESCRIPTOR) 2082 + return 0; 2083 + 2084 + if (isHS) 2085 + func->function.hs_descriptors[(long)valuep] = desc; 2086 + else 2087 + func->function.descriptors[(long)valuep] = desc; 2088 + 2089 + if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) 2090 + return 0; 2091 + 2092 + idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1; 2093 + ffs_ep = func->eps + idx; 2094 + 2095 + if (unlikely(ffs_ep->descs[isHS])) { 2096 + pr_vdebug("two %sspeed descriptors for 
EP %d\n", 2097 + isHS ? "high" : "full", 2098 + ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); 2099 + return -EINVAL; 2100 + } 2101 + ffs_ep->descs[isHS] = ds; 2102 + 2103 + ffs_dump_mem(": Original ep desc", ds, ds->bLength); 2104 + if (ffs_ep->ep) { 2105 + ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress; 2106 + if (!ds->wMaxPacketSize) 2107 + ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize; 2108 + } else { 2109 + struct usb_request *req; 2110 + struct usb_ep *ep; 2111 + 2112 + pr_vdebug("autoconfig\n"); 2113 + ep = usb_ep_autoconfig(func->gadget, ds); 2114 + if (unlikely(!ep)) 2115 + return -ENOTSUPP; 2116 + ep->driver_data = func->eps + idx; 2117 + 2118 + req = usb_ep_alloc_request(ep, GFP_KERNEL); 2119 + if (unlikely(!req)) 2120 + return -ENOMEM; 2121 + 2122 + ffs_ep->ep = ep; 2123 + ffs_ep->req = req; 2124 + func->eps_revmap[ds->bEndpointAddress & 2125 + USB_ENDPOINT_NUMBER_MASK] = idx + 1; 2126 + } 2127 + ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); 2128 + 2129 + return 0; 2130 + } 2131 + 2132 + static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep, 2133 + struct usb_descriptor_header *desc, 2134 + void *priv) 2135 + { 2136 + struct ffs_function *func = priv; 2137 + unsigned idx; 2138 + u8 newValue; 2139 + 2140 + switch (type) { 2141 + default: 2142 + case FFS_DESCRIPTOR: 2143 + /* Handled in previous pass by __ffs_func_bind_do_descs() */ 2144 + return 0; 2145 + 2146 + case FFS_INTERFACE: 2147 + idx = *valuep; 2148 + if (func->interfaces_nums[idx] < 0) { 2149 + int id = usb_interface_id(func->conf, &func->function); 2150 + if (unlikely(id < 0)) 2151 + return id; 2152 + func->interfaces_nums[idx] = id; 2153 + } 2154 + newValue = func->interfaces_nums[idx]; 2155 + break; 2156 + 2157 + case FFS_STRING: 2158 + /* String' IDs are allocated when fsf_data is bound to cdev */ 2159 + newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id; 2160 + break; 2161 + 2162 + case FFS_ENDPOINT: 2163 + /* 2164 + * 
USB_DT_ENDPOINT are handled in 2165 + * __ffs_func_bind_do_descs(). 2166 + */ 2167 + if (desc->bDescriptorType == USB_DT_ENDPOINT) 2168 + return 0; 2169 + 2170 + idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1; 2171 + if (unlikely(!func->eps[idx].ep)) 2172 + return -EINVAL; 2173 + 2174 + { 2175 + struct usb_endpoint_descriptor **descs; 2176 + descs = func->eps[idx].descs; 2177 + newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress; 2178 + } 2179 + break; 2180 + } 2181 + 2182 + pr_vdebug("%02x -> %02x\n", *valuep, newValue); 2183 + *valuep = newValue; 2184 + return 0; 2185 + } 2186 + 2187 + static int ffs_func_bind(struct usb_configuration *c, 2188 + struct usb_function *f) 2189 + { 2190 + struct ffs_function *func = ffs_func_from_usb(f); 2191 + struct ffs_data *ffs = func->ffs; 2192 + 2193 + const int full = !!func->ffs->fs_descs_count; 2194 + const int high = gadget_is_dualspeed(func->gadget) && 2195 + func->ffs->hs_descs_count; 2196 + 2197 + int ret; 2198 + 2199 + /* Make it a single chunk, less management later on */ 2200 + struct { 2201 + struct ffs_ep eps[ffs->eps_count]; 2202 + struct usb_descriptor_header 2203 + *fs_descs[full ? ffs->fs_descs_count + 1 : 0]; 2204 + struct usb_descriptor_header 2205 + *hs_descs[high ? ffs->hs_descs_count + 1 : 0]; 2206 + short inums[ffs->interfaces_count]; 2207 + char raw_descs[high ? ffs->raw_descs_length 2208 + : ffs->raw_fs_descs_length]; 2209 + } *data; 2210 + 2211 + ENTER(); 2212 + 2213 + /* Only high speed but not supported by gadget? 
*/ 2214 + if (unlikely(!(full | high))) 2215 + return -ENOTSUPP; 2216 + 2217 + /* Allocate */ 2218 + data = kmalloc(sizeof *data, GFP_KERNEL); 2219 + if (unlikely(!data)) 2220 + return -ENOMEM; 2221 + 2222 + /* Zero */ 2223 + memset(data->eps, 0, sizeof data->eps); 2224 + memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs); 2225 + memset(data->inums, 0xff, sizeof data->inums); 2226 + for (ret = ffs->eps_count; ret; --ret) 2227 + data->eps[ret].num = -1; 2228 + 2229 + /* Save pointers */ 2230 + func->eps = data->eps; 2231 + func->interfaces_nums = data->inums; 2232 + 2233 + /* 2234 + * Go through all the endpoint descriptors and allocate 2235 + * endpoints first, so that later we can rewrite the endpoint 2236 + * numbers without worrying that it may be described later on. 2237 + */ 2238 + if (likely(full)) { 2239 + func->function.descriptors = data->fs_descs; 2240 + ret = ffs_do_descs(ffs->fs_descs_count, 2241 + data->raw_descs, 2242 + sizeof data->raw_descs, 2243 + __ffs_func_bind_do_descs, func); 2244 + if (unlikely(ret < 0)) 2245 + goto error; 2246 + } else { 2247 + ret = 0; 2248 + } 2249 + 2250 + if (likely(high)) { 2251 + func->function.hs_descriptors = data->hs_descs; 2252 + ret = ffs_do_descs(ffs->hs_descs_count, 2253 + data->raw_descs + ret, 2254 + (sizeof data->raw_descs) - ret, 2255 + __ffs_func_bind_do_descs, func); 2256 + } 2257 + 2258 + /* 2259 + * Now handle interface numbers allocation and interface and 2260 + * endpoint numbers rewriting. We can do that in one go 2261 + * now. 2262 + */ 2263 + ret = ffs_do_descs(ffs->fs_descs_count + 2264 + (high ? ffs->hs_descs_count : 0), 2265 + data->raw_descs, sizeof data->raw_descs, 2266 + __ffs_func_bind_do_nums, func); 2267 + if (unlikely(ret < 0)) 2268 + goto error; 2269 + 2270 + /* And we're done */ 2271 + ffs_event_add(ffs, FUNCTIONFS_BIND); 2272 + return 0; 2273 + 2274 + error: 2275 + /* XXX Do we need to release all claimed endpoints here? 
*/ 2276 + return ret; 2277 + } 2278 + 2279 + 2280 + /* Other USB function hooks *************************************************/ 2281 + 2282 + static void ffs_func_unbind(struct usb_configuration *c, 2283 + struct usb_function *f) 2284 + { 2285 + struct ffs_function *func = ffs_func_from_usb(f); 2286 + struct ffs_data *ffs = func->ffs; 2287 + 2288 + ENTER(); 2289 + 2290 + if (ffs->func == func) { 2291 + ffs_func_eps_disable(func); 2292 + ffs->func = NULL; 2293 + } 2294 + 2295 + ffs_event_add(ffs, FUNCTIONFS_UNBIND); 2296 + 2297 + ffs_func_free(func); 2298 + } 2299 + 2300 + static int ffs_func_set_alt(struct usb_function *f, 2301 + unsigned interface, unsigned alt) 2302 + { 2303 + struct ffs_function *func = ffs_func_from_usb(f); 2304 + struct ffs_data *ffs = func->ffs; 2305 + int ret = 0, intf; 2306 + 2307 + if (alt != (unsigned)-1) { 2308 + intf = ffs_func_revmap_intf(func, interface); 2309 + if (unlikely(intf < 0)) 2310 + return intf; 2311 + } 2312 + 2313 + if (ffs->func) 2314 + ffs_func_eps_disable(ffs->func); 2315 + 2316 + if (ffs->state != FFS_ACTIVE) 2317 + return -ENODEV; 2318 + 2319 + if (alt == (unsigned)-1) { 2320 + ffs->func = NULL; 2321 + ffs_event_add(ffs, FUNCTIONFS_DISABLE); 2322 + return 0; 2323 + } 2324 + 2325 + ffs->func = func; 2326 + ret = ffs_func_eps_enable(func); 2327 + if (likely(ret >= 0)) 2328 + ffs_event_add(ffs, FUNCTIONFS_ENABLE); 2329 + return ret; 2330 + } 2331 + 2332 + static void ffs_func_disable(struct usb_function *f) 2333 + { 2334 + ffs_func_set_alt(f, 0, (unsigned)-1); 2335 + } 2336 + 2337 + static int ffs_func_setup(struct usb_function *f, 2338 + const struct usb_ctrlrequest *creq) 2339 + { 2340 + struct ffs_function *func = ffs_func_from_usb(f); 2341 + struct ffs_data *ffs = func->ffs; 2342 + unsigned long flags; 2343 + int ret; 2344 + 2345 + ENTER(); 2346 + 2347 + pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType); 2348 + pr_vdebug("creq->bRequest = %02x\n", creq->bRequest); 2349 + pr_vdebug("creq->wValue = 
%04x\n", le16_to_cpu(creq->wValue)); 2350 + pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex)); 2351 + pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength)); 2352 + 2353 + /* 2354 + * Most requests directed to interface go through here 2355 + * (notable exceptions are set/get interface) so we need to 2356 + * handle them. All other either handled by composite or 2357 + * passed to usb_configuration->setup() (if one is set). No 2358 + * matter, we will handle requests directed to endpoint here 2359 + * as well (as it's straightforward) but what to do with any 2360 + * other request? 2361 + */ 2362 + if (ffs->state != FFS_ACTIVE) 2363 + return -ENODEV; 2364 + 2365 + switch (creq->bRequestType & USB_RECIP_MASK) { 2366 + case USB_RECIP_INTERFACE: 2367 + ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex)); 2368 + if (unlikely(ret < 0)) 2369 + return ret; 2370 + break; 2371 + 2372 + case USB_RECIP_ENDPOINT: 2373 + ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex)); 2374 + if (unlikely(ret < 0)) 2375 + return ret; 2376 + break; 2377 + 2378 + default: 2379 + return -EOPNOTSUPP; 2380 + } 2381 + 2382 + spin_lock_irqsave(&ffs->ev.waitq.lock, flags); 2383 + ffs->ev.setup = *creq; 2384 + ffs->ev.setup.wIndex = cpu_to_le16(ret); 2385 + __ffs_event_add(ffs, FUNCTIONFS_SETUP); 2386 + spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 2387 + 2388 + return 0; 2389 + } 2390 + 2391 + static void ffs_func_suspend(struct usb_function *f) 2392 + { 2393 + ENTER(); 2394 + ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND); 2395 + } 2396 + 2397 + static void ffs_func_resume(struct usb_function *f) 2398 + { 2399 + ENTER(); 2400 + ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME); 2401 + } 2402 + 2403 + 2404 + /* Endpoint and interface numbers reverse mapping ***************************/ 2405 + 2406 + static int ffs_func_revmap_ep(struct ffs_function *func, u8 num) 2407 + { 2408 + num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK]; 
2409 + return num ? num : -EDOM; 2410 + } 2411 + 2412 + static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf) 2413 + { 2414 + short *nums = func->interfaces_nums; 2415 + unsigned count = func->ffs->interfaces_count; 2416 + 2417 + for (; count; --count, ++nums) { 2418 + if (*nums >= 0 && *nums == intf) 2419 + return nums - func->interfaces_nums; 2420 + } 2421 + 2422 + return -EDOM; 2423 + } 2424 + 2425 + 2426 + /* Misc helper functions ****************************************************/ 2427 + 2428 + static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) 2429 + { 2430 + return nonblock 2431 + ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN 2432 + : mutex_lock_interruptible(mutex); 2433 + } 2434 + 2435 + static char *ffs_prepare_buffer(const char * __user buf, size_t len) 2436 + { 2437 + char *data; 2438 + 2439 + if (unlikely(!len)) 2440 + return NULL; 2441 + 2442 + data = kmalloc(len, GFP_KERNEL); 2443 + if (unlikely(!data)) 2444 + return ERR_PTR(-ENOMEM); 2445 + 2446 + if (unlikely(__copy_from_user(data, buf, len))) { 2447 + kfree(data); 2448 + return ERR_PTR(-EFAULT); 2449 + } 2450 + 2451 + pr_vdebug("Buffer from user space:\n"); 2452 + ffs_dump_mem("", data, len); 2453 + 2454 + return data; 2455 + }
+3135
drivers/staging/ccg/f_mass_storage.c
··· 1 + /* 2 + * f_mass_storage.c -- Mass Storage USB Composite Function 3 + * 4 + * Copyright (C) 2003-2008 Alan Stern 5 + * Copyright (C) 2009 Samsung Electronics 6 + * Author: Michal Nazarewicz <mina86@mina86.com> 7 + * All rights reserved. 8 + * 9 + * Redistribution and use in source and binary forms, with or without 10 + * modification, are permitted provided that the following conditions 11 + * are met: 12 + * 1. Redistributions of source code must retain the above copyright 13 + * notice, this list of conditions, and the following disclaimer, 14 + * without modification. 15 + * 2. Redistributions in binary form must reproduce the above copyright 16 + * notice, this list of conditions and the following disclaimer in the 17 + * documentation and/or other materials provided with the distribution. 18 + * 3. The names of the above-listed copyright holders may not be used 19 + * to endorse or promote products derived from this software without 20 + * specific prior written permission. 21 + * 22 + * ALTERNATIVELY, this software may be distributed under the terms of the 23 + * GNU General Public License ("GPL") as published by the Free Software 24 + * Foundation, either version 2 of that License or (at your option) any 25 + * later version. 26 + * 27 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 28 + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 29 + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 30 + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR 31 + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 32 + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 33 + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 34 + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 35 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 36 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 37 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 38 + */ 39 + 40 + /* 41 + * The Mass Storage Function acts as a USB Mass Storage device, 42 + * appearing to the host as a disk drive or as a CD-ROM drive. In 43 + * addition to providing an example of a genuinely useful composite 44 + * function for a USB device, it also illustrates a technique of 45 + * double-buffering for increased throughput. 46 + * 47 + * For more information about MSF and in particular its module 48 + * parameters and sysfs interface read the 49 + * <Documentation/usb/mass-storage.txt> file. 50 + */ 51 + 52 + /* 53 + * MSF is configured by specifying a fsg_config structure. It has the 54 + * following fields: 55 + * 56 + * nluns Number of LUNs function have (anywhere from 1 57 + * to FSG_MAX_LUNS which is 8). 58 + * luns An array of LUN configuration values. This 59 + * should be filled for each LUN that 60 + * function will include (ie. for "nluns" 61 + * LUNs). Each element of the array has 62 + * the following fields: 63 + * ->filename The path to the backing file for the LUN. 64 + * Required if LUN is not marked as 65 + * removable. 66 + * ->ro Flag specifying access to the LUN shall be 67 + * read-only. This is implied if CD-ROM 68 + * emulation is enabled as well as when 69 + * it was impossible to open "filename" 70 + * in R/W mode. 71 + * ->removable Flag specifying that LUN shall be indicated as 72 + * being removable. 
73 + * ->cdrom Flag specifying that LUN shall be reported as 74 + * being a CD-ROM. 75 + * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12) 76 + * commands for this LUN shall be ignored. 77 + * 78 + * vendor_name 79 + * product_name 80 + * release Information used as a reply to INQUIRY 81 + * request. To use default set to NULL, 82 + * NULL, 0xffff respectively. The first 83 + * field should be 8 and the second 16 84 + * characters or less. 85 + * 86 + * can_stall Set to permit function to halt bulk endpoints. 87 + * Disabled on some USB devices known not 88 + * to work correctly. You should set it 89 + * to true. 90 + * 91 + * If "removable" is not set for a LUN then a backing file must be 92 + * specified. If it is set, then NULL filename means the LUN's medium 93 + * is not loaded (an empty string as "filename" in the fsg_config 94 + * structure causes error). The CD-ROM emulation includes a single 95 + * data track and no audio tracks; hence there need be only one 96 + * backing file per LUN. 97 + * 98 + * This function is heavily based on "File-backed Storage Gadget" by 99 + * Alan Stern which in turn is heavily based on "Gadget Zero" by David 100 + * Brownell. The driver's SCSI command interface was based on the 101 + * "Information technology - Small Computer System Interface - 2" 102 + * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93, 103 + * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. 104 + * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which 105 + * was based on the "Universal Serial Bus Mass Storage Class UFI 106 + * Command Specification" document, Revision 1.0, December 14, 1998, 107 + * available at 108 + * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>. 109 + */ 110 + 111 + /* 112 + * Driver Design 113 + * 114 + * The MSF is fairly straightforward. There is a main kernel 115 + * thread that handles most of the work. 
Interrupt routines field 116 + * callbacks from the controller driver: bulk- and interrupt-request 117 + * completion notifications, endpoint-0 events, and disconnect events. 118 + * Completion events are passed to the main thread by wakeup calls. Many 119 + * ep0 requests are handled at interrupt time, but SetInterface, 120 + * SetConfiguration, and device reset requests are forwarded to the 121 + * thread in the form of "exceptions" using SIGUSR1 signals (since they 122 + * should interrupt any ongoing file I/O operations). 123 + * 124 + * The thread's main routine implements the standard command/data/status 125 + * parts of a SCSI interaction. It and its subroutines are full of tests 126 + * for pending signals/exceptions -- all this polling is necessary since 127 + * the kernel has no setjmp/longjmp equivalents. (Maybe this is an 128 + * indication that the driver really wants to be running in userspace.) 129 + * An important point is that so long as the thread is alive it keeps an 130 + * open reference to the backing file. This will prevent unmounting 131 + * the backing file's underlying filesystem and could cause problems 132 + * during system shutdown, for example. To prevent such problems, the 133 + * thread catches INT, TERM, and KILL signals and converts them into 134 + * an EXIT exception. 135 + * 136 + * In normal operation the main thread is started during the gadget's 137 + * fsg_bind() callback and stopped during fsg_unbind(). But it can 138 + * also exit when it receives a signal, and there's no point leaving 139 + * the gadget running when the thread is dead. As of this moment, MSF 140 + * provides no way to deregister the gadget when thread dies -- maybe 141 + * a callback functions is needed. 142 + * 143 + * To provide maximum throughput, the driver uses a circular pipeline of 144 + * buffer heads (struct fsg_buffhd). 
In principle the pipeline can be 145 + * arbitrarily long; in practice the benefits don't justify having more 146 + * than 2 stages (i.e., double buffering). But it helps to think of the 147 + * pipeline as being a long one. Each buffer head contains a bulk-in and 148 + * a bulk-out request pointer (since the buffer can be used for both 149 + * output and input -- directions always are given from the host's 150 + * point of view) as well as a pointer to the buffer and various state 151 + * variables. 152 + * 153 + * Use of the pipeline follows a simple protocol. There is a variable 154 + * (fsg->next_buffhd_to_fill) that points to the next buffer head to use. 155 + * At any time that buffer head may still be in use from an earlier 156 + * request, so each buffer head has a state variable indicating whether 157 + * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the 158 + * buffer head to be EMPTY, filling the buffer either by file I/O or by 159 + * USB I/O (during which the buffer head is BUSY), and marking the buffer 160 + * head FULL when the I/O is complete. Then the buffer will be emptied 161 + * (again possibly by USB I/O, during which it is marked BUSY) and 162 + * finally marked EMPTY again (possibly by a completion routine). 163 + * 164 + * A module parameter tells the driver to avoid stalling the bulk 165 + * endpoints wherever the transport specification allows. This is 166 + * necessary for some UDCs like the SuperH, which cannot reliably clear a 167 + * halt on a bulk endpoint. However, under certain circumstances the 168 + * Bulk-only specification requires a stall. In such cases the driver 169 + * will halt the endpoint and set a flag indicating that it should clear 170 + * the halt in software during the next device reset. Hopefully this 171 + * will permit everything to work correctly. 
Furthermore, although the 172 + * specification allows the bulk-out endpoint to halt when the host sends 173 + * too much data, implementing this would cause an unavoidable race. 174 + * The driver will always use the "no-stall" approach for OUT transfers. 175 + * 176 + * One subtle point concerns sending status-stage responses for ep0 177 + * requests. Some of these requests, such as device reset, can involve 178 + * interrupting an ongoing file I/O operation, which might take an 179 + * arbitrarily long time. During that delay the host might give up on 180 + * the original ep0 request and issue a new one. When that happens the 181 + * driver should not notify the host about completion of the original 182 + * request, as the host will no longer be waiting for it. So the driver 183 + * assigns to each ep0 request a unique tag, and it keeps track of the 184 + * tag value of the request associated with a long-running exception 185 + * (device-reset, interface-change, or configuration-change). When the 186 + * exception handler is finished, the status-stage response is submitted 187 + * only if the current ep0 request tag is equal to the exception request 188 + * tag. Thus only the most recently received ep0 request will get a 189 + * status-stage response. 190 + * 191 + * Warning: This driver source file is too long. It ought to be split up 192 + * into a header file plus about 3 separate .c files, to handle the details 193 + * of the Gadget, USB Mass Storage, and SCSI protocols. 
194 + */ 195 + 196 + 197 + /* #define VERBOSE_DEBUG */ 198 + /* #define DUMP_MSGS */ 199 + 200 + #include <linux/blkdev.h> 201 + #include <linux/completion.h> 202 + #include <linux/dcache.h> 203 + #include <linux/delay.h> 204 + #include <linux/device.h> 205 + #include <linux/fcntl.h> 206 + #include <linux/file.h> 207 + #include <linux/fs.h> 208 + #include <linux/kref.h> 209 + #include <linux/kthread.h> 210 + #include <linux/limits.h> 211 + #include <linux/rwsem.h> 212 + #include <linux/slab.h> 213 + #include <linux/spinlock.h> 214 + #include <linux/string.h> 215 + #include <linux/freezer.h> 216 + #include <linux/utsname.h> 217 + 218 + #include <linux/usb/ch9.h> 219 + #include <linux/usb/gadget.h> 220 + #include <linux/usb/composite.h> 221 + 222 + #include "gadget_chips.h" 223 + 224 + 225 + /*------------------------------------------------------------------------*/ 226 + 227 + #define FSG_DRIVER_DESC "Mass Storage Function" 228 + #define FSG_DRIVER_VERSION "2009/09/11" 229 + 230 + static const char fsg_string_interface[] = "Mass Storage"; 231 + 232 + #define FSG_NO_DEVICE_STRINGS 1 233 + #define FSG_NO_OTG 1 234 + #define FSG_NO_INTR_EP 1 235 + 236 + #include "storage_common.c" 237 + 238 + 239 + /*-------------------------------------------------------------------------*/ 240 + 241 + struct fsg_dev; 242 + struct fsg_common; 243 + 244 + /* FSF callback functions */ 245 + struct fsg_operations { 246 + /* 247 + * Callback function to call when thread exits. If no 248 + * callback is set or it returns value lower then zero MSF 249 + * will force eject all LUNs it operates on (including those 250 + * marked as non-removable or with prevent_medium_removal flag 251 + * set). 252 + */ 253 + int (*thread_exits)(struct fsg_common *common); 254 + 255 + /* 256 + * Called prior to ejection. Negative return means error, 257 + * zero means to continue with ejection, positive means not to 258 + * eject. 
259 + */ 260 + int (*pre_eject)(struct fsg_common *common, 261 + struct fsg_lun *lun, int num); 262 + /* 263 + * Called after ejection. Negative return means error, zero 264 + * or positive is just a success. 265 + */ 266 + int (*post_eject)(struct fsg_common *common, 267 + struct fsg_lun *lun, int num); 268 + }; 269 + 270 + /* Data shared by all the FSG instances. */ 271 + struct fsg_common { 272 + struct usb_gadget *gadget; 273 + struct usb_composite_dev *cdev; 274 + struct fsg_dev *fsg, *new_fsg; 275 + wait_queue_head_t fsg_wait; 276 + 277 + /* filesem protects: backing files in use */ 278 + struct rw_semaphore filesem; 279 + 280 + /* lock protects: state, all the req_busy's */ 281 + spinlock_t lock; 282 + 283 + struct usb_ep *ep0; /* Copy of gadget->ep0 */ 284 + struct usb_request *ep0req; /* Copy of cdev->req */ 285 + unsigned int ep0_req_tag; 286 + 287 + struct fsg_buffhd *next_buffhd_to_fill; 288 + struct fsg_buffhd *next_buffhd_to_drain; 289 + struct fsg_buffhd *buffhds; 290 + 291 + int cmnd_size; 292 + u8 cmnd[MAX_COMMAND_SIZE]; 293 + 294 + unsigned int nluns; 295 + unsigned int lun; 296 + struct fsg_lun *luns; 297 + struct fsg_lun *curlun; 298 + 299 + unsigned int bulk_out_maxpacket; 300 + enum fsg_state state; /* For exception handling */ 301 + unsigned int exception_req_tag; 302 + 303 + enum data_direction data_dir; 304 + u32 data_size; 305 + u32 data_size_from_cmnd; 306 + u32 tag; 307 + u32 residue; 308 + u32 usb_amount_left; 309 + 310 + unsigned int can_stall:1; 311 + unsigned int free_storage_on_release:1; 312 + unsigned int phase_error:1; 313 + unsigned int short_packet_received:1; 314 + unsigned int bad_lun_okay:1; 315 + unsigned int running:1; 316 + 317 + int thread_wakeup_needed; 318 + struct completion thread_notifier; 319 + struct task_struct *thread_task; 320 + 321 + /* Callback functions. */ 322 + const struct fsg_operations *ops; 323 + /* Gadget's private data. 
*/ 324 + void *private_data; 325 + 326 + /* 327 + * Vendor (8 chars), product (16 chars), release (4 328 + * hexadecimal digits) and NUL byte 329 + */ 330 + char inquiry_string[8 + 16 + 4 + 1]; 331 + 332 + struct kref ref; 333 + }; 334 + 335 + struct fsg_config { 336 + unsigned nluns; 337 + struct fsg_lun_config { 338 + const char *filename; 339 + char ro; 340 + char removable; 341 + char cdrom; 342 + char nofua; 343 + } luns[FSG_MAX_LUNS]; 344 + 345 + /* Callback functions. */ 346 + const struct fsg_operations *ops; 347 + /* Gadget's private data. */ 348 + void *private_data; 349 + 350 + const char *vendor_name; /* 8 characters or less */ 351 + const char *product_name; /* 16 characters or less */ 352 + u16 release; 353 + 354 + char can_stall; 355 + }; 356 + 357 + struct fsg_dev { 358 + struct usb_function function; 359 + struct usb_gadget *gadget; /* Copy of cdev->gadget */ 360 + struct fsg_common *common; 361 + 362 + u16 interface_number; 363 + 364 + unsigned int bulk_in_enabled:1; 365 + unsigned int bulk_out_enabled:1; 366 + 367 + unsigned long atomic_bitflags; 368 + #define IGNORE_BULK_OUT 0 369 + 370 + struct usb_ep *bulk_in; 371 + struct usb_ep *bulk_out; 372 + }; 373 + 374 + static inline int __fsg_is_set(struct fsg_common *common, 375 + const char *func, unsigned line) 376 + { 377 + if (common->fsg) 378 + return 1; 379 + ERROR(common, "common->fsg is NULL in %s at %u\n", func, line); 380 + WARN_ON(1); 381 + return 0; 382 + } 383 + 384 + #define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__)) 385 + 386 + static inline struct fsg_dev *fsg_from_func(struct usb_function *f) 387 + { 388 + return container_of(f, struct fsg_dev, function); 389 + } 390 + 391 + typedef void (*fsg_routine_t)(struct fsg_dev *); 392 + 393 + static int exception_in_progress(struct fsg_common *common) 394 + { 395 + return common->state > FSG_STATE_IDLE; 396 + } 397 + 398 + /* Make bulk-out requests be divisible by the maxpacket size */ 399 + static void 
set_bulk_out_req_length(struct fsg_common *common, 400 + struct fsg_buffhd *bh, unsigned int length) 401 + { 402 + unsigned int rem; 403 + 404 + bh->bulk_out_intended_length = length; 405 + rem = length % common->bulk_out_maxpacket; 406 + if (rem > 0) 407 + length += common->bulk_out_maxpacket - rem; 408 + bh->outreq->length = length; 409 + } 410 + 411 + 412 + /*-------------------------------------------------------------------------*/ 413 + 414 + static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 415 + { 416 + const char *name; 417 + 418 + if (ep == fsg->bulk_in) 419 + name = "bulk-in"; 420 + else if (ep == fsg->bulk_out) 421 + name = "bulk-out"; 422 + else 423 + name = ep->name; 424 + DBG(fsg, "%s set halt\n", name); 425 + return usb_ep_set_halt(ep); 426 + } 427 + 428 + 429 + /*-------------------------------------------------------------------------*/ 430 + 431 + /* These routines may be called in process context or in_irq */ 432 + 433 + /* Caller must hold fsg->lock */ 434 + static void wakeup_thread(struct fsg_common *common) 435 + { 436 + /* Tell the main thread that something has happened */ 437 + common->thread_wakeup_needed = 1; 438 + if (common->thread_task) 439 + wake_up_process(common->thread_task); 440 + } 441 + 442 + static void raise_exception(struct fsg_common *common, enum fsg_state new_state) 443 + { 444 + unsigned long flags; 445 + 446 + /* 447 + * Do nothing if a higher-priority exception is already in progress. 448 + * If a lower-or-equal priority exception is in progress, preempt it 449 + * and notify the main thread by sending it a signal. 
450 + */ 451 + spin_lock_irqsave(&common->lock, flags); 452 + if (common->state <= new_state) { 453 + common->exception_req_tag = common->ep0_req_tag; 454 + common->state = new_state; 455 + if (common->thread_task) 456 + send_sig_info(SIGUSR1, SEND_SIG_FORCED, 457 + common->thread_task); 458 + } 459 + spin_unlock_irqrestore(&common->lock, flags); 460 + } 461 + 462 + 463 + /*-------------------------------------------------------------------------*/ 464 + 465 + static int ep0_queue(struct fsg_common *common) 466 + { 467 + int rc; 468 + 469 + rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC); 470 + common->ep0->driver_data = common; 471 + if (rc != 0 && rc != -ESHUTDOWN) { 472 + /* We can't do much more than wait for a reset */ 473 + WARNING(common, "error in submission: %s --> %d\n", 474 + common->ep0->name, rc); 475 + } 476 + return rc; 477 + } 478 + 479 + 480 + /*-------------------------------------------------------------------------*/ 481 + 482 + /* Completion handlers. These always run in_irq. 
*/ 483 + 484 + static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) 485 + { 486 + struct fsg_common *common = ep->driver_data; 487 + struct fsg_buffhd *bh = req->context; 488 + 489 + if (req->status || req->actual != req->length) 490 + DBG(common, "%s --> %d, %u/%u\n", __func__, 491 + req->status, req->actual, req->length); 492 + if (req->status == -ECONNRESET) /* Request was cancelled */ 493 + usb_ep_fifo_flush(ep); 494 + 495 + /* Hold the lock while we update the request and buffer states */ 496 + smp_wmb(); 497 + spin_lock(&common->lock); 498 + bh->inreq_busy = 0; 499 + bh->state = BUF_STATE_EMPTY; 500 + wakeup_thread(common); 501 + spin_unlock(&common->lock); 502 + } 503 + 504 + static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) 505 + { 506 + struct fsg_common *common = ep->driver_data; 507 + struct fsg_buffhd *bh = req->context; 508 + 509 + dump_msg(common, "bulk-out", req->buf, req->actual); 510 + if (req->status || req->actual != bh->bulk_out_intended_length) 511 + DBG(common, "%s --> %d, %u/%u\n", __func__, 512 + req->status, req->actual, bh->bulk_out_intended_length); 513 + if (req->status == -ECONNRESET) /* Request was cancelled */ 514 + usb_ep_fifo_flush(ep); 515 + 516 + /* Hold the lock while we update the request and buffer states */ 517 + smp_wmb(); 518 + spin_lock(&common->lock); 519 + bh->outreq_busy = 0; 520 + bh->state = BUF_STATE_FULL; 521 + wakeup_thread(common); 522 + spin_unlock(&common->lock); 523 + } 524 + 525 + static int fsg_setup(struct usb_function *f, 526 + const struct usb_ctrlrequest *ctrl) 527 + { 528 + struct fsg_dev *fsg = fsg_from_func(f); 529 + struct usb_request *req = fsg->common->ep0req; 530 + u16 w_index = le16_to_cpu(ctrl->wIndex); 531 + u16 w_value = le16_to_cpu(ctrl->wValue); 532 + u16 w_length = le16_to_cpu(ctrl->wLength); 533 + 534 + if (!fsg_is_set(fsg->common)) 535 + return -EOPNOTSUPP; 536 + 537 + ++fsg->common->ep0_req_tag; /* Record arrival of a new request */ 538 + 
req->context = NULL; 539 + req->length = 0; 540 + dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); 541 + 542 + switch (ctrl->bRequest) { 543 + 544 + case US_BULK_RESET_REQUEST: 545 + if (ctrl->bRequestType != 546 + (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 547 + break; 548 + if (w_index != fsg->interface_number || w_value != 0 || 549 + w_length != 0) 550 + return -EDOM; 551 + 552 + /* 553 + * Raise an exception to stop the current operation 554 + * and reinitialize our state. 555 + */ 556 + DBG(fsg, "bulk reset request\n"); 557 + raise_exception(fsg->common, FSG_STATE_RESET); 558 + return DELAYED_STATUS; 559 + 560 + case US_BULK_GET_MAX_LUN: 561 + if (ctrl->bRequestType != 562 + (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) 563 + break; 564 + if (w_index != fsg->interface_number || w_value != 0 || 565 + w_length != 1) 566 + return -EDOM; 567 + VDBG(fsg, "get max LUN\n"); 568 + *(u8 *)req->buf = fsg->common->nluns - 1; 569 + 570 + /* Respond with data/status */ 571 + req->length = min((u16)1, w_length); 572 + return ep0_queue(fsg->common); 573 + } 574 + 575 + VDBG(fsg, 576 + "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n", 577 + ctrl->bRequestType, ctrl->bRequest, 578 + le16_to_cpu(ctrl->wValue), w_index, w_length); 579 + return -EOPNOTSUPP; 580 + } 581 + 582 + 583 + /*-------------------------------------------------------------------------*/ 584 + 585 + /* All the following routines run in process context */ 586 + 587 + /* Use this for bulk or interrupt transfers, not ep0 */ 588 + static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, 589 + struct usb_request *req, int *pbusy, 590 + enum fsg_buffer_state *state) 591 + { 592 + int rc; 593 + 594 + if (ep == fsg->bulk_in) 595 + dump_msg(fsg, "bulk-in", req->buf, req->length); 596 + 597 + spin_lock_irq(&fsg->common->lock); 598 + *pbusy = 1; 599 + *state = BUF_STATE_BUSY; 600 + spin_unlock_irq(&fsg->common->lock); 601 + rc = usb_ep_queue(ep, req, GFP_KERNEL); 602 + 
if (rc != 0) { 603 + *pbusy = 0; 604 + *state = BUF_STATE_EMPTY; 605 + 606 + /* We can't do much more than wait for a reset */ 607 + 608 + /* 609 + * Note: currently the net2280 driver fails zero-length 610 + * submissions if DMA is enabled. 611 + */ 612 + if (rc != -ESHUTDOWN && 613 + !(rc == -EOPNOTSUPP && req->length == 0)) 614 + WARNING(fsg, "error in submission: %s --> %d\n", 615 + ep->name, rc); 616 + } 617 + } 618 + 619 + static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 620 + { 621 + if (!fsg_is_set(common)) 622 + return false; 623 + start_transfer(common->fsg, common->fsg->bulk_in, 624 + bh->inreq, &bh->inreq_busy, &bh->state); 625 + return true; 626 + } 627 + 628 + static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) 629 + { 630 + if (!fsg_is_set(common)) 631 + return false; 632 + start_transfer(common->fsg, common->fsg->bulk_out, 633 + bh->outreq, &bh->outreq_busy, &bh->state); 634 + return true; 635 + } 636 + 637 + static int sleep_thread(struct fsg_common *common) 638 + { 639 + int rc = 0; 640 + 641 + /* Wait until a signal arrives or we are woken up */ 642 + for (;;) { 643 + try_to_freeze(); 644 + set_current_state(TASK_INTERRUPTIBLE); 645 + if (signal_pending(current)) { 646 + rc = -EINTR; 647 + break; 648 + } 649 + if (common->thread_wakeup_needed) 650 + break; 651 + schedule(); 652 + } 653 + __set_current_state(TASK_RUNNING); 654 + common->thread_wakeup_needed = 0; 655 + return rc; 656 + } 657 + 658 + 659 + /*-------------------------------------------------------------------------*/ 660 + 661 + static int do_read(struct fsg_common *common) 662 + { 663 + struct fsg_lun *curlun = common->curlun; 664 + u32 lba; 665 + struct fsg_buffhd *bh; 666 + int rc; 667 + u32 amount_left; 668 + loff_t file_offset, file_offset_tmp; 669 + unsigned int amount; 670 + ssize_t nread; 671 + 672 + /* 673 + * Get the starting Logical Block Address and check that it's 674 + * not too big. 
675 + */ 676 + if (common->cmnd[0] == READ_6) 677 + lba = get_unaligned_be24(&common->cmnd[1]); 678 + else { 679 + lba = get_unaligned_be32(&common->cmnd[2]); 680 + 681 + /* 682 + * We allow DPO (Disable Page Out = don't save data in the 683 + * cache) and FUA (Force Unit Access = don't read from the 684 + * cache), but we don't implement them. 685 + */ 686 + if ((common->cmnd[1] & ~0x18) != 0) { 687 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 688 + return -EINVAL; 689 + } 690 + } 691 + if (lba >= curlun->num_sectors) { 692 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 693 + return -EINVAL; 694 + } 695 + file_offset = ((loff_t) lba) << curlun->blkbits; 696 + 697 + /* Carry out the file reads */ 698 + amount_left = common->data_size_from_cmnd; 699 + if (unlikely(amount_left == 0)) 700 + return -EIO; /* No default reply */ 701 + 702 + for (;;) { 703 + /* 704 + * Figure out how much we need to read: 705 + * Try to read the remaining amount. 706 + * But don't read more than the buffer size. 707 + * And don't try to read past the end of the file. 708 + */ 709 + amount = min(amount_left, FSG_BUFLEN); 710 + amount = min((loff_t)amount, 711 + curlun->file_length - file_offset); 712 + 713 + /* Wait for the next buffer to become available */ 714 + bh = common->next_buffhd_to_fill; 715 + while (bh->state != BUF_STATE_EMPTY) { 716 + rc = sleep_thread(common); 717 + if (rc) 718 + return rc; 719 + } 720 + 721 + /* 722 + * If we were asked to read past the end of file, 723 + * end with an empty buffer. 
724 + */ 725 + if (amount == 0) { 726 + curlun->sense_data = 727 + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 728 + curlun->sense_data_info = 729 + file_offset >> curlun->blkbits; 730 + curlun->info_valid = 1; 731 + bh->inreq->length = 0; 732 + bh->state = BUF_STATE_FULL; 733 + break; 734 + } 735 + 736 + /* Perform the read */ 737 + file_offset_tmp = file_offset; 738 + nread = vfs_read(curlun->filp, 739 + (char __user *)bh->buf, 740 + amount, &file_offset_tmp); 741 + VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 742 + (unsigned long long)file_offset, (int)nread); 743 + if (signal_pending(current)) 744 + return -EINTR; 745 + 746 + if (nread < 0) { 747 + LDBG(curlun, "error in file read: %d\n", (int)nread); 748 + nread = 0; 749 + } else if (nread < amount) { 750 + LDBG(curlun, "partial file read: %d/%u\n", 751 + (int)nread, amount); 752 + nread = round_down(nread, curlun->blksize); 753 + } 754 + file_offset += nread; 755 + amount_left -= nread; 756 + common->residue -= nread; 757 + 758 + /* 759 + * Except at the end of the transfer, nread will be 760 + * equal to the buffer size, which is divisible by the 761 + * bulk-in maxpacket size. 
762 + */ 763 + bh->inreq->length = nread; 764 + bh->state = BUF_STATE_FULL; 765 + 766 + /* If an error occurred, report it and its position */ 767 + if (nread < amount) { 768 + curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 769 + curlun->sense_data_info = 770 + file_offset >> curlun->blkbits; 771 + curlun->info_valid = 1; 772 + break; 773 + } 774 + 775 + if (amount_left == 0) 776 + break; /* No more left to read */ 777 + 778 + /* Send this buffer and go read some more */ 779 + bh->inreq->zero = 0; 780 + if (!start_in_transfer(common, bh)) 781 + /* Don't know what to do if common->fsg is NULL */ 782 + return -EIO; 783 + common->next_buffhd_to_fill = bh->next; 784 + } 785 + 786 + return -EIO; /* No default reply */ 787 + } 788 + 789 + 790 + /*-------------------------------------------------------------------------*/ 791 + 792 + static int do_write(struct fsg_common *common) 793 + { 794 + struct fsg_lun *curlun = common->curlun; 795 + u32 lba; 796 + struct fsg_buffhd *bh; 797 + int get_some_more; 798 + u32 amount_left_to_req, amount_left_to_write; 799 + loff_t usb_offset, file_offset, file_offset_tmp; 800 + unsigned int amount; 801 + ssize_t nwritten; 802 + int rc; 803 + 804 + if (curlun->ro) { 805 + curlun->sense_data = SS_WRITE_PROTECTED; 806 + return -EINVAL; 807 + } 808 + spin_lock(&curlun->filp->f_lock); 809 + curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */ 810 + spin_unlock(&curlun->filp->f_lock); 811 + 812 + /* 813 + * Get the starting Logical Block Address and check that it's 814 + * not too big 815 + */ 816 + if (common->cmnd[0] == WRITE_6) 817 + lba = get_unaligned_be24(&common->cmnd[1]); 818 + else { 819 + lba = get_unaligned_be32(&common->cmnd[2]); 820 + 821 + /* 822 + * We allow DPO (Disable Page Out = don't save data in the 823 + * cache) and FUA (Force Unit Access = write directly to the 824 + * medium). We don't implement DPO; we implement FUA by 825 + * performing synchronous output. 
826 + */ 827 + if (common->cmnd[1] & ~0x18) { 828 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 829 + return -EINVAL; 830 + } 831 + if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */ 832 + spin_lock(&curlun->filp->f_lock); 833 + curlun->filp->f_flags |= O_SYNC; 834 + spin_unlock(&curlun->filp->f_lock); 835 + } 836 + } 837 + if (lba >= curlun->num_sectors) { 838 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 839 + return -EINVAL; 840 + } 841 + 842 + /* Carry out the file writes */ 843 + get_some_more = 1; 844 + file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits; 845 + amount_left_to_req = common->data_size_from_cmnd; 846 + amount_left_to_write = common->data_size_from_cmnd; 847 + 848 + while (amount_left_to_write > 0) { 849 + 850 + /* Queue a request for more data from the host */ 851 + bh = common->next_buffhd_to_fill; 852 + if (bh->state == BUF_STATE_EMPTY && get_some_more) { 853 + 854 + /* 855 + * Figure out how much we want to get: 856 + * Try to get the remaining amount, 857 + * but not more than the buffer size. 858 + */ 859 + amount = min(amount_left_to_req, FSG_BUFLEN); 860 + 861 + /* Beyond the end of the backing file? */ 862 + if (usb_offset >= curlun->file_length) { 863 + get_some_more = 0; 864 + curlun->sense_data = 865 + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 866 + curlun->sense_data_info = 867 + usb_offset >> curlun->blkbits; 868 + curlun->info_valid = 1; 869 + continue; 870 + } 871 + 872 + /* Get the next buffer */ 873 + usb_offset += amount; 874 + common->usb_amount_left -= amount; 875 + amount_left_to_req -= amount; 876 + if (amount_left_to_req == 0) 877 + get_some_more = 0; 878 + 879 + /* 880 + * Except at the end of the transfer, amount will be 881 + * equal to the buffer size, which is divisible by 882 + * the bulk-out maxpacket size. 
883 + */ 884 + set_bulk_out_req_length(common, bh, amount); 885 + if (!start_out_transfer(common, bh)) 886 + /* Dunno what to do if common->fsg is NULL */ 887 + return -EIO; 888 + common->next_buffhd_to_fill = bh->next; 889 + continue; 890 + } 891 + 892 + /* Write the received data to the backing file */ 893 + bh = common->next_buffhd_to_drain; 894 + if (bh->state == BUF_STATE_EMPTY && !get_some_more) 895 + break; /* We stopped early */ 896 + if (bh->state == BUF_STATE_FULL) { 897 + smp_rmb(); 898 + common->next_buffhd_to_drain = bh->next; 899 + bh->state = BUF_STATE_EMPTY; 900 + 901 + /* Did something go wrong with the transfer? */ 902 + if (bh->outreq->status != 0) { 903 + curlun->sense_data = SS_COMMUNICATION_FAILURE; 904 + curlun->sense_data_info = 905 + file_offset >> curlun->blkbits; 906 + curlun->info_valid = 1; 907 + break; 908 + } 909 + 910 + amount = bh->outreq->actual; 911 + if (curlun->file_length - file_offset < amount) { 912 + LERROR(curlun, 913 + "write %u @ %llu beyond end %llu\n", 914 + amount, (unsigned long long)file_offset, 915 + (unsigned long long)curlun->file_length); 916 + amount = curlun->file_length - file_offset; 917 + } 918 + 919 + /* Don't accept excess data. The spec doesn't say 920 + * what to do in this case. We'll ignore the error. 921 + */ 922 + amount = min(amount, bh->bulk_out_intended_length); 923 + 924 + /* Don't write a partial block */ 925 + amount = round_down(amount, curlun->blksize); 926 + if (amount == 0) 927 + goto empty_write; 928 + 929 + /* Perform the write */ 930 + file_offset_tmp = file_offset; 931 + nwritten = vfs_write(curlun->filp, 932 + (char __user *)bh->buf, 933 + amount, &file_offset_tmp); 934 + VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, 935 + (unsigned long long)file_offset, (int)nwritten); 936 + if (signal_pending(current)) 937 + return -EINTR; /* Interrupted! 
*/ 938 + 939 + if (nwritten < 0) { 940 + LDBG(curlun, "error in file write: %d\n", 941 + (int)nwritten); 942 + nwritten = 0; 943 + } else if (nwritten < amount) { 944 + LDBG(curlun, "partial file write: %d/%u\n", 945 + (int)nwritten, amount); 946 + nwritten = round_down(nwritten, curlun->blksize); 947 + } 948 + file_offset += nwritten; 949 + amount_left_to_write -= nwritten; 950 + common->residue -= nwritten; 951 + 952 + /* If an error occurred, report it and its position */ 953 + if (nwritten < amount) { 954 + curlun->sense_data = SS_WRITE_ERROR; 955 + curlun->sense_data_info = 956 + file_offset >> curlun->blkbits; 957 + curlun->info_valid = 1; 958 + break; 959 + } 960 + 961 + empty_write: 962 + /* Did the host decide to stop early? */ 963 + if (bh->outreq->actual < bh->bulk_out_intended_length) { 964 + common->short_packet_received = 1; 965 + break; 966 + } 967 + continue; 968 + } 969 + 970 + /* Wait for something to happen */ 971 + rc = sleep_thread(common); 972 + if (rc) 973 + return rc; 974 + } 975 + 976 + return -EIO; /* No default reply */ 977 + } 978 + 979 + 980 + /*-------------------------------------------------------------------------*/ 981 + 982 + static int do_synchronize_cache(struct fsg_common *common) 983 + { 984 + struct fsg_lun *curlun = common->curlun; 985 + int rc; 986 + 987 + /* We ignore the requested LBA and write out all file's 988 + * dirty data buffers. 
*/ 989 + rc = fsg_lun_fsync_sub(curlun); 990 + if (rc) 991 + curlun->sense_data = SS_WRITE_ERROR; 992 + return 0; 993 + } 994 + 995 + 996 + /*-------------------------------------------------------------------------*/ 997 + 998 + static void invalidate_sub(struct fsg_lun *curlun) 999 + { 1000 + struct file *filp = curlun->filp; 1001 + struct inode *inode = filp->f_path.dentry->d_inode; 1002 + unsigned long rc; 1003 + 1004 + rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); 1005 + VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); 1006 + } 1007 + 1008 + static int do_verify(struct fsg_common *common) 1009 + { 1010 + struct fsg_lun *curlun = common->curlun; 1011 + u32 lba; 1012 + u32 verification_length; 1013 + struct fsg_buffhd *bh = common->next_buffhd_to_fill; 1014 + loff_t file_offset, file_offset_tmp; 1015 + u32 amount_left; 1016 + unsigned int amount; 1017 + ssize_t nread; 1018 + 1019 + /* 1020 + * Get the starting Logical Block Address and check that it's 1021 + * not too big. 1022 + */ 1023 + lba = get_unaligned_be32(&common->cmnd[2]); 1024 + if (lba >= curlun->num_sectors) { 1025 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1026 + return -EINVAL; 1027 + } 1028 + 1029 + /* 1030 + * We allow DPO (Disable Page Out = don't save data in the 1031 + * cache) but we don't implement it. 
1032 + */ 1033 + if (common->cmnd[1] & ~0x10) { 1034 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1035 + return -EINVAL; 1036 + } 1037 + 1038 + verification_length = get_unaligned_be16(&common->cmnd[7]); 1039 + if (unlikely(verification_length == 0)) 1040 + return -EIO; /* No default reply */ 1041 + 1042 + /* Prepare to carry out the file verify */ 1043 + amount_left = verification_length << curlun->blkbits; 1044 + file_offset = ((loff_t) lba) << curlun->blkbits; 1045 + 1046 + /* Write out all the dirty buffers before invalidating them */ 1047 + fsg_lun_fsync_sub(curlun); 1048 + if (signal_pending(current)) 1049 + return -EINTR; 1050 + 1051 + invalidate_sub(curlun); 1052 + if (signal_pending(current)) 1053 + return -EINTR; 1054 + 1055 + /* Just try to read the requested blocks */ 1056 + while (amount_left > 0) { 1057 + /* 1058 + * Figure out how much we need to read: 1059 + * Try to read the remaining amount, but not more than 1060 + * the buffer size. 1061 + * And don't try to read past the end of the file. 
1062 + */ 1063 + amount = min(amount_left, FSG_BUFLEN); 1064 + amount = min((loff_t)amount, 1065 + curlun->file_length - file_offset); 1066 + if (amount == 0) { 1067 + curlun->sense_data = 1068 + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1069 + curlun->sense_data_info = 1070 + file_offset >> curlun->blkbits; 1071 + curlun->info_valid = 1; 1072 + break; 1073 + } 1074 + 1075 + /* Perform the read */ 1076 + file_offset_tmp = file_offset; 1077 + nread = vfs_read(curlun->filp, 1078 + (char __user *) bh->buf, 1079 + amount, &file_offset_tmp); 1080 + VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, 1081 + (unsigned long long) file_offset, 1082 + (int) nread); 1083 + if (signal_pending(current)) 1084 + return -EINTR; 1085 + 1086 + if (nread < 0) { 1087 + LDBG(curlun, "error in file verify: %d\n", (int)nread); 1088 + nread = 0; 1089 + } else if (nread < amount) { 1090 + LDBG(curlun, "partial file verify: %d/%u\n", 1091 + (int)nread, amount); 1092 + nread = round_down(nread, curlun->blksize); 1093 + } 1094 + if (nread == 0) { 1095 + curlun->sense_data = SS_UNRECOVERED_READ_ERROR; 1096 + curlun->sense_data_info = 1097 + file_offset >> curlun->blkbits; 1098 + curlun->info_valid = 1; 1099 + break; 1100 + } 1101 + file_offset += nread; 1102 + amount_left -= nread; 1103 + } 1104 + return 0; 1105 + } 1106 + 1107 + 1108 + /*-------------------------------------------------------------------------*/ 1109 + 1110 + static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh) 1111 + { 1112 + struct fsg_lun *curlun = common->curlun; 1113 + u8 *buf = (u8 *) bh->buf; 1114 + 1115 + if (!curlun) { /* Unsupported LUNs are okay */ 1116 + common->bad_lun_okay = 1; 1117 + memset(buf, 0, 36); 1118 + buf[0] = 0x7f; /* Unsupported, no device-type */ 1119 + buf[4] = 31; /* Additional length */ 1120 + return 36; 1121 + } 1122 + 1123 + buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK; 1124 + buf[1] = curlun->removable ? 
0x80 : 0; 1125 + buf[2] = 2; /* ANSI SCSI level 2 */ 1126 + buf[3] = 2; /* SCSI-2 INQUIRY data format */ 1127 + buf[4] = 31; /* Additional length */ 1128 + buf[5] = 0; /* No special options */ 1129 + buf[6] = 0; 1130 + buf[7] = 0; 1131 + memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string); 1132 + return 36; 1133 + } 1134 + 1135 + static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1136 + { 1137 + struct fsg_lun *curlun = common->curlun; 1138 + u8 *buf = (u8 *) bh->buf; 1139 + u32 sd, sdinfo; 1140 + int valid; 1141 + 1142 + /* 1143 + * From the SCSI-2 spec., section 7.9 (Unit attention condition): 1144 + * 1145 + * If a REQUEST SENSE command is received from an initiator 1146 + * with a pending unit attention condition (before the target 1147 + * generates the contingent allegiance condition), then the 1148 + * target shall either: 1149 + * a) report any pending sense data and preserve the unit 1150 + * attention condition on the logical unit, or, 1151 + * b) report the unit attention condition, may discard any 1152 + * pending sense data, and clear the unit attention 1153 + * condition on the logical unit for that initiator. 1154 + * 1155 + * FSG normally uses option a); enable this code to use option b). 
1156 + */ 1157 + #if 0 1158 + if (curlun && curlun->unit_attention_data != SS_NO_SENSE) { 1159 + curlun->sense_data = curlun->unit_attention_data; 1160 + curlun->unit_attention_data = SS_NO_SENSE; 1161 + } 1162 + #endif 1163 + 1164 + if (!curlun) { /* Unsupported LUNs are okay */ 1165 + common->bad_lun_okay = 1; 1166 + sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1167 + sdinfo = 0; 1168 + valid = 0; 1169 + } else { 1170 + sd = curlun->sense_data; 1171 + sdinfo = curlun->sense_data_info; 1172 + valid = curlun->info_valid << 7; 1173 + curlun->sense_data = SS_NO_SENSE; 1174 + curlun->sense_data_info = 0; 1175 + curlun->info_valid = 0; 1176 + } 1177 + 1178 + memset(buf, 0, 18); 1179 + buf[0] = valid | 0x70; /* Valid, current error */ 1180 + buf[2] = SK(sd); 1181 + put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */ 1182 + buf[7] = 18 - 8; /* Additional sense length */ 1183 + buf[12] = ASC(sd); 1184 + buf[13] = ASCQ(sd); 1185 + return 18; 1186 + } 1187 + 1188 + static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh) 1189 + { 1190 + struct fsg_lun *curlun = common->curlun; 1191 + u32 lba = get_unaligned_be32(&common->cmnd[2]); 1192 + int pmi = common->cmnd[8]; 1193 + u8 *buf = (u8 *)bh->buf; 1194 + 1195 + /* Check the PMI and LBA fields */ 1196 + if (pmi > 1 || (pmi == 0 && lba != 0)) { 1197 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1198 + return -EINVAL; 1199 + } 1200 + 1201 + put_unaligned_be32(curlun->num_sectors - 1, &buf[0]); 1202 + /* Max logical block */ 1203 + put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ 1204 + return 8; 1205 + } 1206 + 1207 + static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh) 1208 + { 1209 + struct fsg_lun *curlun = common->curlun; 1210 + int msf = common->cmnd[1] & 0x02; 1211 + u32 lba = get_unaligned_be32(&common->cmnd[2]); 1212 + u8 *buf = (u8 *)bh->buf; 1213 + 1214 + if (common->cmnd[1] & ~0x02) { /* Mask away MSF */ 1215 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 
1216 + return -EINVAL; 1217 + } 1218 + if (lba >= curlun->num_sectors) { 1219 + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; 1220 + return -EINVAL; 1221 + } 1222 + 1223 + memset(buf, 0, 8); 1224 + buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */ 1225 + store_cdrom_address(&buf[4], msf, lba); 1226 + return 8; 1227 + } 1228 + 1229 + static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) 1230 + { 1231 + struct fsg_lun *curlun = common->curlun; 1232 + int msf = common->cmnd[1] & 0x02; 1233 + int start_track = common->cmnd[6]; 1234 + u8 *buf = (u8 *)bh->buf; 1235 + 1236 + if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ 1237 + start_track > 1) { 1238 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1239 + return -EINVAL; 1240 + } 1241 + 1242 + memset(buf, 0, 20); 1243 + buf[1] = (20-2); /* TOC data length */ 1244 + buf[2] = 1; /* First track number */ 1245 + buf[3] = 1; /* Last track number */ 1246 + buf[5] = 0x16; /* Data track, copying allowed */ 1247 + buf[6] = 0x01; /* Only track is number 1 */ 1248 + store_cdrom_address(&buf[8], msf, 0); 1249 + 1250 + buf[13] = 0x16; /* Lead-out track is data */ 1251 + buf[14] = 0xAA; /* Lead-out track number */ 1252 + store_cdrom_address(&buf[16], msf, curlun->num_sectors); 1253 + return 20; 1254 + } 1255 + 1256 + static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) 1257 + { 1258 + struct fsg_lun *curlun = common->curlun; 1259 + int mscmnd = common->cmnd[0]; 1260 + u8 *buf = (u8 *) bh->buf; 1261 + u8 *buf0 = buf; 1262 + int pc, page_code; 1263 + int changeable_values, all_pages; 1264 + int valid_page = 0; 1265 + int len, limit; 1266 + 1267 + if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */ 1268 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1269 + return -EINVAL; 1270 + } 1271 + pc = common->cmnd[2] >> 6; 1272 + page_code = common->cmnd[2] & 0x3f; 1273 + if (pc == 3) { 1274 + curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; 1275 + return 
-EINVAL; 1276 + } 1277 + changeable_values = (pc == 1); 1278 + all_pages = (page_code == 0x3f); 1279 + 1280 + /* 1281 + * Write the mode parameter header. Fixed values are: default 1282 + * medium type, no cache control (DPOFUA), and no block descriptors. 1283 + * The only variable value is the WriteProtect bit. We will fill in 1284 + * the mode data length later. 1285 + */ 1286 + memset(buf, 0, 8); 1287 + if (mscmnd == MODE_SENSE) { 1288 + buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1289 + buf += 4; 1290 + limit = 255; 1291 + } else { /* MODE_SENSE_10 */ 1292 + buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ 1293 + buf += 8; 1294 + limit = 65535; /* Should really be FSG_BUFLEN */ 1295 + } 1296 + 1297 + /* No block descriptors */ 1298 + 1299 + /* 1300 + * The mode pages, in numerical order. The only page we support 1301 + * is the Caching page. 1302 + */ 1303 + if (page_code == 0x08 || all_pages) { 1304 + valid_page = 1; 1305 + buf[0] = 0x08; /* Page code */ 1306 + buf[1] = 10; /* Page length */ 1307 + memset(buf+2, 0, 10); /* None of the fields are changeable */ 1308 + 1309 + if (!changeable_values) { 1310 + buf[2] = 0x04; /* Write cache enable, */ 1311 + /* Read cache not disabled */ 1312 + /* No cache retention priorities */ 1313 + put_unaligned_be16(0xffff, &buf[4]); 1314 + /* Don't disable prefetch */ 1315 + /* Minimum prefetch = 0 */ 1316 + put_unaligned_be16(0xffff, &buf[8]); 1317 + /* Maximum prefetch */ 1318 + put_unaligned_be16(0xffff, &buf[10]); 1319 + /* Maximum prefetch ceiling */ 1320 + } 1321 + buf += 12; 1322 + } 1323 + 1324 + /* 1325 + * Check that a valid page was requested and the mode data length 1326 + * isn't too long. 
1327 + */ 1328 + len = buf - buf0; 1329 + if (!valid_page || len > limit) { 1330 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1331 + return -EINVAL; 1332 + } 1333 + 1334 + /* Store the mode data length */ 1335 + if (mscmnd == MODE_SENSE) 1336 + buf0[0] = len - 1; 1337 + else 1338 + put_unaligned_be16(len - 2, buf0); 1339 + return len; 1340 + } 1341 + 1342 + static int do_start_stop(struct fsg_common *common) 1343 + { 1344 + struct fsg_lun *curlun = common->curlun; 1345 + int loej, start; 1346 + 1347 + if (!curlun) { 1348 + return -EINVAL; 1349 + } else if (!curlun->removable) { 1350 + curlun->sense_data = SS_INVALID_COMMAND; 1351 + return -EINVAL; 1352 + } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */ 1353 + (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */ 1354 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1355 + return -EINVAL; 1356 + } 1357 + 1358 + loej = common->cmnd[4] & 0x02; 1359 + start = common->cmnd[4] & 0x01; 1360 + 1361 + /* 1362 + * Our emulation doesn't support mounting; the medium is 1363 + * available for use as soon as it is loaded. 1364 + */ 1365 + if (start) { 1366 + if (!fsg_lun_is_open(curlun)) { 1367 + curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1368 + return -EINVAL; 1369 + } 1370 + return 0; 1371 + } 1372 + 1373 + /* Are we allowed to unload the media? 
*/ 1374 + if (curlun->prevent_medium_removal) { 1375 + LDBG(curlun, "unload attempt prevented\n"); 1376 + curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; 1377 + return -EINVAL; 1378 + } 1379 + 1380 + if (!loej) 1381 + return 0; 1382 + 1383 + /* Simulate an unload/eject */ 1384 + if (common->ops && common->ops->pre_eject) { 1385 + int r = common->ops->pre_eject(common, curlun, 1386 + curlun - common->luns); 1387 + if (unlikely(r < 0)) 1388 + return r; 1389 + else if (r) 1390 + return 0; 1391 + } 1392 + 1393 + up_read(&common->filesem); 1394 + down_write(&common->filesem); 1395 + fsg_lun_close(curlun); 1396 + up_write(&common->filesem); 1397 + down_read(&common->filesem); 1398 + 1399 + return common->ops && common->ops->post_eject 1400 + ? min(0, common->ops->post_eject(common, curlun, 1401 + curlun - common->luns)) 1402 + : 0; 1403 + } 1404 + 1405 + static int do_prevent_allow(struct fsg_common *common) 1406 + { 1407 + struct fsg_lun *curlun = common->curlun; 1408 + int prevent; 1409 + 1410 + if (!common->curlun) { 1411 + return -EINVAL; 1412 + } else if (!common->curlun->removable) { 1413 + common->curlun->sense_data = SS_INVALID_COMMAND; 1414 + return -EINVAL; 1415 + } 1416 + 1417 + prevent = common->cmnd[4] & 0x01; 1418 + if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */ 1419 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1420 + return -EINVAL; 1421 + } 1422 + 1423 + if (curlun->prevent_medium_removal && !prevent) 1424 + fsg_lun_fsync_sub(curlun); 1425 + curlun->prevent_medium_removal = prevent; 1426 + return 0; 1427 + } 1428 + 1429 + static int do_read_format_capacities(struct fsg_common *common, 1430 + struct fsg_buffhd *bh) 1431 + { 1432 + struct fsg_lun *curlun = common->curlun; 1433 + u8 *buf = (u8 *) bh->buf; 1434 + 1435 + buf[0] = buf[1] = buf[2] = 0; 1436 + buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */ 1437 + buf += 4; 1438 + 1439 + put_unaligned_be32(curlun->num_sectors, &buf[0]); 1440 + /* Number of blocks */ 1441 + 
put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ 1442 + buf[4] = 0x02; /* Current capacity */ 1443 + return 12; 1444 + } 1445 + 1446 + static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) 1447 + { 1448 + struct fsg_lun *curlun = common->curlun; 1449 + 1450 + /* We don't support MODE SELECT */ 1451 + if (curlun) 1452 + curlun->sense_data = SS_INVALID_COMMAND; 1453 + return -EINVAL; 1454 + } 1455 + 1456 + 1457 + /*-------------------------------------------------------------------------*/ 1458 + 1459 + static int halt_bulk_in_endpoint(struct fsg_dev *fsg) 1460 + { 1461 + int rc; 1462 + 1463 + rc = fsg_set_halt(fsg, fsg->bulk_in); 1464 + if (rc == -EAGAIN) 1465 + VDBG(fsg, "delayed bulk-in endpoint halt\n"); 1466 + while (rc != 0) { 1467 + if (rc != -EAGAIN) { 1468 + WARNING(fsg, "usb_ep_set_halt -> %d\n", rc); 1469 + rc = 0; 1470 + break; 1471 + } 1472 + 1473 + /* Wait for a short time and then try again */ 1474 + if (msleep_interruptible(100) != 0) 1475 + return -EINTR; 1476 + rc = usb_ep_set_halt(fsg->bulk_in); 1477 + } 1478 + return rc; 1479 + } 1480 + 1481 + static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) 1482 + { 1483 + int rc; 1484 + 1485 + DBG(fsg, "bulk-in set wedge\n"); 1486 + rc = usb_ep_set_wedge(fsg->bulk_in); 1487 + if (rc == -EAGAIN) 1488 + VDBG(fsg, "delayed bulk-in endpoint wedge\n"); 1489 + while (rc != 0) { 1490 + if (rc != -EAGAIN) { 1491 + WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc); 1492 + rc = 0; 1493 + break; 1494 + } 1495 + 1496 + /* Wait for a short time and then try again */ 1497 + if (msleep_interruptible(100) != 0) 1498 + return -EINTR; 1499 + rc = usb_ep_set_wedge(fsg->bulk_in); 1500 + } 1501 + return rc; 1502 + } 1503 + 1504 + static int throw_away_data(struct fsg_common *common) 1505 + { 1506 + struct fsg_buffhd *bh; 1507 + u32 amount; 1508 + int rc; 1509 + 1510 + for (bh = common->next_buffhd_to_drain; 1511 + bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0; 1512 + bh = 
common->next_buffhd_to_drain) { 1513 + 1514 + /* Throw away the data in a filled buffer */ 1515 + if (bh->state == BUF_STATE_FULL) { 1516 + smp_rmb(); 1517 + bh->state = BUF_STATE_EMPTY; 1518 + common->next_buffhd_to_drain = bh->next; 1519 + 1520 + /* A short packet or an error ends everything */ 1521 + if (bh->outreq->actual < bh->bulk_out_intended_length || 1522 + bh->outreq->status != 0) { 1523 + raise_exception(common, 1524 + FSG_STATE_ABORT_BULK_OUT); 1525 + return -EINTR; 1526 + } 1527 + continue; 1528 + } 1529 + 1530 + /* Try to submit another request if we need one */ 1531 + bh = common->next_buffhd_to_fill; 1532 + if (bh->state == BUF_STATE_EMPTY 1533 + && common->usb_amount_left > 0) { 1534 + amount = min(common->usb_amount_left, FSG_BUFLEN); 1535 + 1536 + /* 1537 + * Except at the end of the transfer, amount will be 1538 + * equal to the buffer size, which is divisible by 1539 + * the bulk-out maxpacket size. 1540 + */ 1541 + set_bulk_out_req_length(common, bh, amount); 1542 + if (!start_out_transfer(common, bh)) 1543 + /* Dunno what to do if common->fsg is NULL */ 1544 + return -EIO; 1545 + common->next_buffhd_to_fill = bh->next; 1546 + common->usb_amount_left -= amount; 1547 + continue; 1548 + } 1549 + 1550 + /* Otherwise wait for something to happen */ 1551 + rc = sleep_thread(common); 1552 + if (rc) 1553 + return rc; 1554 + } 1555 + return 0; 1556 + } 1557 + 1558 + static int finish_reply(struct fsg_common *common) 1559 + { 1560 + struct fsg_buffhd *bh = common->next_buffhd_to_fill; 1561 + int rc = 0; 1562 + 1563 + switch (common->data_dir) { 1564 + case DATA_DIR_NONE: 1565 + break; /* Nothing to send */ 1566 + 1567 + /* 1568 + * If we don't know whether the host wants to read or write, 1569 + * this must be CB or CBI with an unknown command. We mustn't 1570 + * try to send or receive any data. So stall both bulk pipes 1571 + * if we can and wait for a reset. 
1572 + */ 1573 + case DATA_DIR_UNKNOWN: 1574 + if (!common->can_stall) { 1575 + /* Nothing */ 1576 + } else if (fsg_is_set(common)) { 1577 + fsg_set_halt(common->fsg, common->fsg->bulk_out); 1578 + rc = halt_bulk_in_endpoint(common->fsg); 1579 + } else { 1580 + /* Don't know what to do if common->fsg is NULL */ 1581 + rc = -EIO; 1582 + } 1583 + break; 1584 + 1585 + /* All but the last buffer of data must have already been sent */ 1586 + case DATA_DIR_TO_HOST: 1587 + if (common->data_size == 0) { 1588 + /* Nothing to send */ 1589 + 1590 + /* Don't know what to do if common->fsg is NULL */ 1591 + } else if (!fsg_is_set(common)) { 1592 + rc = -EIO; 1593 + 1594 + /* If there's no residue, simply send the last buffer */ 1595 + } else if (common->residue == 0) { 1596 + bh->inreq->zero = 0; 1597 + if (!start_in_transfer(common, bh)) 1598 + return -EIO; 1599 + common->next_buffhd_to_fill = bh->next; 1600 + 1601 + /* 1602 + * For Bulk-only, mark the end of the data with a short 1603 + * packet. If we are allowed to stall, halt the bulk-in 1604 + * endpoint. (Note: This violates the Bulk-Only Transport 1605 + * specification, which requires us to pad the data if we 1606 + * don't halt the endpoint. Presumably nobody will mind.) 1607 + */ 1608 + } else { 1609 + bh->inreq->zero = 1; 1610 + if (!start_in_transfer(common, bh)) 1611 + rc = -EIO; 1612 + common->next_buffhd_to_fill = bh->next; 1613 + if (common->can_stall) 1614 + rc = halt_bulk_in_endpoint(common->fsg); 1615 + } 1616 + break; 1617 + 1618 + /* 1619 + * We have processed all we want from the data the host has sent. 1620 + * There may still be outstanding bulk-out requests. 1621 + */ 1622 + case DATA_DIR_FROM_HOST: 1623 + if (common->residue == 0) { 1624 + /* Nothing to receive */ 1625 + 1626 + /* Did the host stop sending unexpectedly early? 
*/ 1627 + } else if (common->short_packet_received) { 1628 + raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1629 + rc = -EINTR; 1630 + 1631 + /* 1632 + * We haven't processed all the incoming data. Even though 1633 + * we may be allowed to stall, doing so would cause a race. 1634 + * The controller may already have ACK'ed all the remaining 1635 + * bulk-out packets, in which case the host wouldn't see a 1636 + * STALL. Not realizing the endpoint was halted, it wouldn't 1637 + * clear the halt -- leading to problems later on. 1638 + */ 1639 + #if 0 1640 + } else if (common->can_stall) { 1641 + if (fsg_is_set(common)) 1642 + fsg_set_halt(common->fsg, 1643 + common->fsg->bulk_out); 1644 + raise_exception(common, FSG_STATE_ABORT_BULK_OUT); 1645 + rc = -EINTR; 1646 + #endif 1647 + 1648 + /* 1649 + * We can't stall. Read in the excess data and throw it 1650 + * all away. 1651 + */ 1652 + } else { 1653 + rc = throw_away_data(common); 1654 + } 1655 + break; 1656 + } 1657 + return rc; 1658 + } 1659 + 1660 + static int send_status(struct fsg_common *common) 1661 + { 1662 + struct fsg_lun *curlun = common->curlun; 1663 + struct fsg_buffhd *bh; 1664 + struct bulk_cs_wrap *csw; 1665 + int rc; 1666 + u8 status = US_BULK_STAT_OK; 1667 + u32 sd, sdinfo = 0; 1668 + 1669 + /* Wait for the next buffer to become available */ 1670 + bh = common->next_buffhd_to_fill; 1671 + while (bh->state != BUF_STATE_EMPTY) { 1672 + rc = sleep_thread(common); 1673 + if (rc) 1674 + return rc; 1675 + } 1676 + 1677 + if (curlun) { 1678 + sd = curlun->sense_data; 1679 + sdinfo = curlun->sense_data_info; 1680 + } else if (common->bad_lun_okay) 1681 + sd = SS_NO_SENSE; 1682 + else 1683 + sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; 1684 + 1685 + if (common->phase_error) { 1686 + DBG(common, "sending phase-error status\n"); 1687 + status = US_BULK_STAT_PHASE; 1688 + sd = SS_INVALID_COMMAND; 1689 + } else if (sd != SS_NO_SENSE) { 1690 + DBG(common, "sending command-failure status\n"); 1691 + status = 
US_BULK_STAT_FAIL; 1692 + VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;" 1693 + " info x%x\n", 1694 + SK(sd), ASC(sd), ASCQ(sd), sdinfo); 1695 + } 1696 + 1697 + /* Store and send the Bulk-only CSW */ 1698 + csw = (void *)bh->buf; 1699 + 1700 + csw->Signature = cpu_to_le32(US_BULK_CS_SIGN); 1701 + csw->Tag = common->tag; 1702 + csw->Residue = cpu_to_le32(common->residue); 1703 + csw->Status = status; 1704 + 1705 + bh->inreq->length = US_BULK_CS_WRAP_LEN; 1706 + bh->inreq->zero = 0; 1707 + if (!start_in_transfer(common, bh)) 1708 + /* Don't know what to do if common->fsg is NULL */ 1709 + return -EIO; 1710 + 1711 + common->next_buffhd_to_fill = bh->next; 1712 + return 0; 1713 + } 1714 + 1715 + 1716 + /*-------------------------------------------------------------------------*/ 1717 + 1718 + /* 1719 + * Check whether the command is properly formed and whether its data size 1720 + * and direction agree with the values we already have. 1721 + */ 1722 + static int check_command(struct fsg_common *common, int cmnd_size, 1723 + enum data_direction data_dir, unsigned int mask, 1724 + int needs_medium, const char *name) 1725 + { 1726 + int i; 1727 + int lun = common->cmnd[1] >> 5; 1728 + static const char dirletter[4] = {'u', 'o', 'i', 'n'}; 1729 + char hdlen[20]; 1730 + struct fsg_lun *curlun; 1731 + 1732 + hdlen[0] = 0; 1733 + if (common->data_dir != DATA_DIR_UNKNOWN) 1734 + sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir], 1735 + common->data_size); 1736 + VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", 1737 + name, cmnd_size, dirletter[(int) data_dir], 1738 + common->data_size_from_cmnd, common->cmnd_size, hdlen); 1739 + 1740 + /* 1741 + * We can't reply at all until we know the correct data direction 1742 + * and size. 
1743 + */ 1744 + if (common->data_size_from_cmnd == 0) 1745 + data_dir = DATA_DIR_NONE; 1746 + if (common->data_size < common->data_size_from_cmnd) { 1747 + /* 1748 + * Host data size < Device data size is a phase error. 1749 + * Carry out the command, but only transfer as much as 1750 + * we are allowed. 1751 + */ 1752 + common->data_size_from_cmnd = common->data_size; 1753 + common->phase_error = 1; 1754 + } 1755 + common->residue = common->data_size; 1756 + common->usb_amount_left = common->data_size; 1757 + 1758 + /* Conflicting data directions is a phase error */ 1759 + if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) { 1760 + common->phase_error = 1; 1761 + return -EINVAL; 1762 + } 1763 + 1764 + /* Verify the length of the command itself */ 1765 + if (cmnd_size != common->cmnd_size) { 1766 + 1767 + /* 1768 + * Special case workaround: There are plenty of buggy SCSI 1769 + * implementations. Many have issues with cbw->Length 1770 + * field passing a wrong command size. For those cases we 1771 + * always try to work around the problem by using the length 1772 + * sent by the host side provided it is at least as large 1773 + * as the correct command length. 1774 + * Examples of such cases would be MS-Windows, which issues 1775 + * REQUEST SENSE with cbw->Length == 12 where it should 1776 + * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and 1777 + * REQUEST SENSE with cbw->Length == 10 where it should 1778 + * be 6 as well. 1779 + */ 1780 + if (cmnd_size <= common->cmnd_size) { 1781 + DBG(common, "%s is buggy! 
Expected length %d " 1782 + "but we got %d\n", name, 1783 + cmnd_size, common->cmnd_size); 1784 + cmnd_size = common->cmnd_size; 1785 + } else { 1786 + common->phase_error = 1; 1787 + return -EINVAL; 1788 + } 1789 + } 1790 + 1791 + /* Check that the LUN values are consistent */ 1792 + if (common->lun != lun) 1793 + DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n", 1794 + common->lun, lun); 1795 + 1796 + /* Check the LUN */ 1797 + curlun = common->curlun; 1798 + if (curlun) { 1799 + if (common->cmnd[0] != REQUEST_SENSE) { 1800 + curlun->sense_data = SS_NO_SENSE; 1801 + curlun->sense_data_info = 0; 1802 + curlun->info_valid = 0; 1803 + } 1804 + } else { 1805 + common->bad_lun_okay = 0; 1806 + 1807 + /* 1808 + * INQUIRY and REQUEST SENSE commands are explicitly allowed 1809 + * to use unsupported LUNs; all others may not. 1810 + */ 1811 + if (common->cmnd[0] != INQUIRY && 1812 + common->cmnd[0] != REQUEST_SENSE) { 1813 + DBG(common, "unsupported LUN %d\n", common->lun); 1814 + return -EINVAL; 1815 + } 1816 + } 1817 + 1818 + /* 1819 + * If a unit attention condition exists, only INQUIRY and 1820 + * REQUEST SENSE commands are allowed; anything else must fail. 1821 + */ 1822 + if (curlun && curlun->unit_attention_data != SS_NO_SENSE && 1823 + common->cmnd[0] != INQUIRY && 1824 + common->cmnd[0] != REQUEST_SENSE) { 1825 + curlun->sense_data = curlun->unit_attention_data; 1826 + curlun->unit_attention_data = SS_NO_SENSE; 1827 + return -EINVAL; 1828 + } 1829 + 1830 + /* Check that only command bytes listed in the mask are non-zero */ 1831 + common->cmnd[1] &= 0x1f; /* Mask away the LUN */ 1832 + for (i = 1; i < cmnd_size; ++i) { 1833 + if (common->cmnd[i] && !(mask & (1 << i))) { 1834 + if (curlun) 1835 + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; 1836 + return -EINVAL; 1837 + } 1838 + } 1839 + 1840 + /* If the medium isn't mounted and the command needs to access 1841 + * it, return an error. 
*/ 1842 + if (curlun && !fsg_lun_is_open(curlun) && needs_medium) { 1843 + curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1844 + return -EINVAL; 1845 + } 1846 + 1847 + return 0; 1848 + } 1849 + 1850 + /* wrapper of check_command for data size in blocks handling */ 1851 + static int check_command_size_in_blocks(struct fsg_common *common, 1852 + int cmnd_size, enum data_direction data_dir, 1853 + unsigned int mask, int needs_medium, const char *name) 1854 + { 1855 + if (common->curlun) 1856 + common->data_size_from_cmnd <<= common->curlun->blkbits; 1857 + return check_command(common, cmnd_size, data_dir, 1858 + mask, needs_medium, name); 1859 + } 1860 + 1861 + static int do_scsi_command(struct fsg_common *common) 1862 + { 1863 + struct fsg_buffhd *bh; 1864 + int rc; 1865 + int reply = -EINVAL; 1866 + int i; 1867 + static char unknown[16]; 1868 + 1869 + dump_cdb(common); 1870 + 1871 + /* Wait for the next buffer to become available for data or status */ 1872 + bh = common->next_buffhd_to_fill; 1873 + common->next_buffhd_to_drain = bh; 1874 + while (bh->state != BUF_STATE_EMPTY) { 1875 + rc = sleep_thread(common); 1876 + if (rc) 1877 + return rc; 1878 + } 1879 + common->phase_error = 0; 1880 + common->short_packet_received = 0; 1881 + 1882 + down_read(&common->filesem); /* We're using the backing file */ 1883 + switch (common->cmnd[0]) { 1884 + 1885 + case INQUIRY: 1886 + common->data_size_from_cmnd = common->cmnd[4]; 1887 + reply = check_command(common, 6, DATA_DIR_TO_HOST, 1888 + (1<<4), 0, 1889 + "INQUIRY"); 1890 + if (reply == 0) 1891 + reply = do_inquiry(common, bh); 1892 + break; 1893 + 1894 + case MODE_SELECT: 1895 + common->data_size_from_cmnd = common->cmnd[4]; 1896 + reply = check_command(common, 6, DATA_DIR_FROM_HOST, 1897 + (1<<1) | (1<<4), 0, 1898 + "MODE SELECT(6)"); 1899 + if (reply == 0) 1900 + reply = do_mode_select(common, bh); 1901 + break; 1902 + 1903 + case MODE_SELECT_10: 1904 + common->data_size_from_cmnd = 1905 + 
get_unaligned_be16(&common->cmnd[7]); 1906 + reply = check_command(common, 10, DATA_DIR_FROM_HOST, 1907 + (1<<1) | (3<<7), 0, 1908 + "MODE SELECT(10)"); 1909 + if (reply == 0) 1910 + reply = do_mode_select(common, bh); 1911 + break; 1912 + 1913 + case MODE_SENSE: 1914 + common->data_size_from_cmnd = common->cmnd[4]; 1915 + reply = check_command(common, 6, DATA_DIR_TO_HOST, 1916 + (1<<1) | (1<<2) | (1<<4), 0, 1917 + "MODE SENSE(6)"); 1918 + if (reply == 0) 1919 + reply = do_mode_sense(common, bh); 1920 + break; 1921 + 1922 + case MODE_SENSE_10: 1923 + common->data_size_from_cmnd = 1924 + get_unaligned_be16(&common->cmnd[7]); 1925 + reply = check_command(common, 10, DATA_DIR_TO_HOST, 1926 + (1<<1) | (1<<2) | (3<<7), 0, 1927 + "MODE SENSE(10)"); 1928 + if (reply == 0) 1929 + reply = do_mode_sense(common, bh); 1930 + break; 1931 + 1932 + case ALLOW_MEDIUM_REMOVAL: 1933 + common->data_size_from_cmnd = 0; 1934 + reply = check_command(common, 6, DATA_DIR_NONE, 1935 + (1<<4), 0, 1936 + "PREVENT-ALLOW MEDIUM REMOVAL"); 1937 + if (reply == 0) 1938 + reply = do_prevent_allow(common); 1939 + break; 1940 + 1941 + case READ_6: 1942 + i = common->cmnd[4]; 1943 + common->data_size_from_cmnd = (i == 0) ? 
256 : i; 1944 + reply = check_command_size_in_blocks(common, 6, 1945 + DATA_DIR_TO_HOST, 1946 + (7<<1) | (1<<4), 1, 1947 + "READ(6)"); 1948 + if (reply == 0) 1949 + reply = do_read(common); 1950 + break; 1951 + 1952 + case READ_10: 1953 + common->data_size_from_cmnd = 1954 + get_unaligned_be16(&common->cmnd[7]); 1955 + reply = check_command_size_in_blocks(common, 10, 1956 + DATA_DIR_TO_HOST, 1957 + (1<<1) | (0xf<<2) | (3<<7), 1, 1958 + "READ(10)"); 1959 + if (reply == 0) 1960 + reply = do_read(common); 1961 + break; 1962 + 1963 + case READ_12: 1964 + common->data_size_from_cmnd = 1965 + get_unaligned_be32(&common->cmnd[6]); 1966 + reply = check_command_size_in_blocks(common, 12, 1967 + DATA_DIR_TO_HOST, 1968 + (1<<1) | (0xf<<2) | (0xf<<6), 1, 1969 + "READ(12)"); 1970 + if (reply == 0) 1971 + reply = do_read(common); 1972 + break; 1973 + 1974 + case READ_CAPACITY: 1975 + common->data_size_from_cmnd = 8; 1976 + reply = check_command(common, 10, DATA_DIR_TO_HOST, 1977 + (0xf<<2) | (1<<8), 1, 1978 + "READ CAPACITY"); 1979 + if (reply == 0) 1980 + reply = do_read_capacity(common, bh); 1981 + break; 1982 + 1983 + case READ_HEADER: 1984 + if (!common->curlun || !common->curlun->cdrom) 1985 + goto unknown_cmnd; 1986 + common->data_size_from_cmnd = 1987 + get_unaligned_be16(&common->cmnd[7]); 1988 + reply = check_command(common, 10, DATA_DIR_TO_HOST, 1989 + (3<<7) | (0x1f<<1), 1, 1990 + "READ HEADER"); 1991 + if (reply == 0) 1992 + reply = do_read_header(common, bh); 1993 + break; 1994 + 1995 + case READ_TOC: 1996 + if (!common->curlun || !common->curlun->cdrom) 1997 + goto unknown_cmnd; 1998 + common->data_size_from_cmnd = 1999 + get_unaligned_be16(&common->cmnd[7]); 2000 + reply = check_command(common, 10, DATA_DIR_TO_HOST, 2001 + (7<<6) | (1<<1), 1, 2002 + "READ TOC"); 2003 + if (reply == 0) 2004 + reply = do_read_toc(common, bh); 2005 + break; 2006 + 2007 + case READ_FORMAT_CAPACITIES: 2008 + common->data_size_from_cmnd = 2009 + get_unaligned_be16(&common->cmnd[7]); 
2010 + reply = check_command(common, 10, DATA_DIR_TO_HOST, 2011 + (3<<7), 1, 2012 + "READ FORMAT CAPACITIES"); 2013 + if (reply == 0) 2014 + reply = do_read_format_capacities(common, bh); 2015 + break; 2016 + 2017 + case REQUEST_SENSE: 2018 + common->data_size_from_cmnd = common->cmnd[4]; 2019 + reply = check_command(common, 6, DATA_DIR_TO_HOST, 2020 + (1<<4), 0, 2021 + "REQUEST SENSE"); 2022 + if (reply == 0) 2023 + reply = do_request_sense(common, bh); 2024 + break; 2025 + 2026 + case START_STOP: 2027 + common->data_size_from_cmnd = 0; 2028 + reply = check_command(common, 6, DATA_DIR_NONE, 2029 + (1<<1) | (1<<4), 0, 2030 + "START-STOP UNIT"); 2031 + if (reply == 0) 2032 + reply = do_start_stop(common); 2033 + break; 2034 + 2035 + case SYNCHRONIZE_CACHE: 2036 + common->data_size_from_cmnd = 0; 2037 + reply = check_command(common, 10, DATA_DIR_NONE, 2038 + (0xf<<2) | (3<<7), 1, 2039 + "SYNCHRONIZE CACHE"); 2040 + if (reply == 0) 2041 + reply = do_synchronize_cache(common); 2042 + break; 2043 + 2044 + case TEST_UNIT_READY: 2045 + common->data_size_from_cmnd = 0; 2046 + reply = check_command(common, 6, DATA_DIR_NONE, 2047 + 0, 1, 2048 + "TEST UNIT READY"); 2049 + break; 2050 + 2051 + /* 2052 + * Although optional, this command is used by MS-Windows. We 2053 + * support a minimal version: BytChk must be 0. 2054 + */ 2055 + case VERIFY: 2056 + common->data_size_from_cmnd = 0; 2057 + reply = check_command(common, 10, DATA_DIR_NONE, 2058 + (1<<1) | (0xf<<2) | (3<<7), 1, 2059 + "VERIFY"); 2060 + if (reply == 0) 2061 + reply = do_verify(common); 2062 + break; 2063 + 2064 + case WRITE_6: 2065 + i = common->cmnd[4]; 2066 + common->data_size_from_cmnd = (i == 0) ? 
256 : i; 2067 + reply = check_command_size_in_blocks(common, 6, 2068 + DATA_DIR_FROM_HOST, 2069 + (7<<1) | (1<<4), 1, 2070 + "WRITE(6)"); 2071 + if (reply == 0) 2072 + reply = do_write(common); 2073 + break; 2074 + 2075 + case WRITE_10: 2076 + common->data_size_from_cmnd = 2077 + get_unaligned_be16(&common->cmnd[7]); 2078 + reply = check_command_size_in_blocks(common, 10, 2079 + DATA_DIR_FROM_HOST, 2080 + (1<<1) | (0xf<<2) | (3<<7), 1, 2081 + "WRITE(10)"); 2082 + if (reply == 0) 2083 + reply = do_write(common); 2084 + break; 2085 + 2086 + case WRITE_12: 2087 + common->data_size_from_cmnd = 2088 + get_unaligned_be32(&common->cmnd[6]); 2089 + reply = check_command_size_in_blocks(common, 12, 2090 + DATA_DIR_FROM_HOST, 2091 + (1<<1) | (0xf<<2) | (0xf<<6), 1, 2092 + "WRITE(12)"); 2093 + if (reply == 0) 2094 + reply = do_write(common); 2095 + break; 2096 + 2097 + /* 2098 + * Some mandatory commands that we recognize but don't implement. 2099 + * They don't mean much in this setting. It's left as an exercise 2100 + * for anyone interested to implement RESERVE and RELEASE in terms 2101 + * of Posix locks. 
2102 + */ 2103 + case FORMAT_UNIT: 2104 + case RELEASE: 2105 + case RESERVE: 2106 + case SEND_DIAGNOSTIC: 2107 + /* Fall through */ 2108 + 2109 + default: 2110 + unknown_cmnd: 2111 + common->data_size_from_cmnd = 0; 2112 + sprintf(unknown, "Unknown x%02x", common->cmnd[0]); 2113 + reply = check_command(common, common->cmnd_size, 2114 + DATA_DIR_UNKNOWN, ~0, 0, unknown); 2115 + if (reply == 0) { 2116 + common->curlun->sense_data = SS_INVALID_COMMAND; 2117 + reply = -EINVAL; 2118 + } 2119 + break; 2120 + } 2121 + up_read(&common->filesem); 2122 + 2123 + if (reply == -EINTR || signal_pending(current)) 2124 + return -EINTR; 2125 + 2126 + /* Set up the single reply buffer for finish_reply() */ 2127 + if (reply == -EINVAL) 2128 + reply = 0; /* Error reply length */ 2129 + if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { 2130 + reply = min((u32)reply, common->data_size_from_cmnd); 2131 + bh->inreq->length = reply; 2132 + bh->state = BUF_STATE_FULL; 2133 + common->residue -= reply; 2134 + } /* Otherwise it's already set */ 2135 + 2136 + return 0; 2137 + } 2138 + 2139 + 2140 + /*-------------------------------------------------------------------------*/ 2141 + 2142 + static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2143 + { 2144 + struct usb_request *req = bh->outreq; 2145 + struct bulk_cb_wrap *cbw = req->buf; 2146 + struct fsg_common *common = fsg->common; 2147 + 2148 + /* Was this a real packet? Should it be ignored? */ 2149 + if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2150 + return -EINVAL; 2151 + 2152 + /* Is the CBW valid? */ 2153 + if (req->actual != US_BULK_CB_WRAP_LEN || 2154 + cbw->Signature != cpu_to_le32( 2155 + US_BULK_CB_SIGN)) { 2156 + DBG(fsg, "invalid CBW: len %u sig 0x%x\n", 2157 + req->actual, 2158 + le32_to_cpu(cbw->Signature)); 2159 + 2160 + /* 2161 + * The Bulk-only spec says we MUST stall the IN endpoint 2162 + * (6.6.1), so it's unavoidable. 
It also says we must 2163 + * retain this state until the next reset, but there's 2164 + * no way to tell the controller driver it should ignore 2165 + * Clear-Feature(HALT) requests. 2166 + * 2167 + * We aren't required to halt the OUT endpoint; instead 2168 + * we can simply accept and discard any data received 2169 + * until the next reset. 2170 + */ 2171 + wedge_bulk_in_endpoint(fsg); 2172 + set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2173 + return -EINVAL; 2174 + } 2175 + 2176 + /* Is the CBW meaningful? */ 2177 + if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN || 2178 + cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) { 2179 + DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2180 + "cmdlen %u\n", 2181 + cbw->Lun, cbw->Flags, cbw->Length); 2182 + 2183 + /* 2184 + * We can do anything we want here, so let's stall the 2185 + * bulk pipes if we are allowed to. 2186 + */ 2187 + if (common->can_stall) { 2188 + fsg_set_halt(fsg, fsg->bulk_out); 2189 + halt_bulk_in_endpoint(fsg); 2190 + } 2191 + return -EINVAL; 2192 + } 2193 + 2194 + /* Save the command for later */ 2195 + common->cmnd_size = cbw->Length; 2196 + memcpy(common->cmnd, cbw->CDB, common->cmnd_size); 2197 + if (cbw->Flags & US_BULK_FLAG_IN) 2198 + common->data_dir = DATA_DIR_TO_HOST; 2199 + else 2200 + common->data_dir = DATA_DIR_FROM_HOST; 2201 + common->data_size = le32_to_cpu(cbw->DataTransferLength); 2202 + if (common->data_size == 0) 2203 + common->data_dir = DATA_DIR_NONE; 2204 + common->lun = cbw->Lun; 2205 + if (common->lun >= 0 && common->lun < common->nluns) 2206 + common->curlun = &common->luns[common->lun]; 2207 + else 2208 + common->curlun = NULL; 2209 + common->tag = cbw->Tag; 2210 + return 0; 2211 + } 2212 + 2213 + static int get_next_command(struct fsg_common *common) 2214 + { 2215 + struct fsg_buffhd *bh; 2216 + int rc = 0; 2217 + 2218 + /* Wait for the next buffer to become available */ 2219 + bh = common->next_buffhd_to_fill; 2220 + while (bh->state != 
BUF_STATE_EMPTY) { 2221 + rc = sleep_thread(common); 2222 + if (rc) 2223 + return rc; 2224 + } 2225 + 2226 + /* Queue a request to read a Bulk-only CBW */ 2227 + set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN); 2228 + if (!start_out_transfer(common, bh)) 2229 + /* Don't know what to do if common->fsg is NULL */ 2230 + return -EIO; 2231 + 2232 + /* 2233 + * We will drain the buffer in software, which means we 2234 + * can reuse it for the next filling. No need to advance 2235 + * next_buffhd_to_fill. 2236 + */ 2237 + 2238 + /* Wait for the CBW to arrive */ 2239 + while (bh->state != BUF_STATE_FULL) { 2240 + rc = sleep_thread(common); 2241 + if (rc) 2242 + return rc; 2243 + } 2244 + smp_rmb(); 2245 + rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO; 2246 + bh->state = BUF_STATE_EMPTY; 2247 + 2248 + return rc; 2249 + } 2250 + 2251 + 2252 + /*-------------------------------------------------------------------------*/ 2253 + 2254 + static int alloc_request(struct fsg_common *common, struct usb_ep *ep, 2255 + struct usb_request **preq) 2256 + { 2257 + *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); 2258 + if (*preq) 2259 + return 0; 2260 + ERROR(common, "can't allocate request for %s\n", ep->name); 2261 + return -ENOMEM; 2262 + } 2263 + 2264 + /* Reset interface setting and re-init endpoint state (toggle etc). 
*/ 2265 + static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) 2266 + { 2267 + struct fsg_dev *fsg; 2268 + int i, rc = 0; 2269 + 2270 + if (common->running) 2271 + DBG(common, "reset interface\n"); 2272 + 2273 + reset: 2274 + /* Deallocate the requests */ 2275 + if (common->fsg) { 2276 + fsg = common->fsg; 2277 + 2278 + for (i = 0; i < fsg_num_buffers; ++i) { 2279 + struct fsg_buffhd *bh = &common->buffhds[i]; 2280 + 2281 + if (bh->inreq) { 2282 + usb_ep_free_request(fsg->bulk_in, bh->inreq); 2283 + bh->inreq = NULL; 2284 + } 2285 + if (bh->outreq) { 2286 + usb_ep_free_request(fsg->bulk_out, bh->outreq); 2287 + bh->outreq = NULL; 2288 + } 2289 + } 2290 + 2291 + /* Disable the endpoints */ 2292 + if (fsg->bulk_in_enabled) { 2293 + usb_ep_disable(fsg->bulk_in); 2294 + fsg->bulk_in_enabled = 0; 2295 + } 2296 + if (fsg->bulk_out_enabled) { 2297 + usb_ep_disable(fsg->bulk_out); 2298 + fsg->bulk_out_enabled = 0; 2299 + } 2300 + 2301 + common->fsg = NULL; 2302 + wake_up(&common->fsg_wait); 2303 + } 2304 + 2305 + common->running = 0; 2306 + if (!new_fsg || rc) 2307 + return rc; 2308 + 2309 + common->fsg = new_fsg; 2310 + fsg = common->fsg; 2311 + 2312 + /* Enable the endpoints */ 2313 + rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in); 2314 + if (rc) 2315 + goto reset; 2316 + rc = usb_ep_enable(fsg->bulk_in); 2317 + if (rc) 2318 + goto reset; 2319 + fsg->bulk_in->driver_data = common; 2320 + fsg->bulk_in_enabled = 1; 2321 + 2322 + rc = config_ep_by_speed(common->gadget, &(fsg->function), 2323 + fsg->bulk_out); 2324 + if (rc) 2325 + goto reset; 2326 + rc = usb_ep_enable(fsg->bulk_out); 2327 + if (rc) 2328 + goto reset; 2329 + fsg->bulk_out->driver_data = common; 2330 + fsg->bulk_out_enabled = 1; 2331 + common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc); 2332 + clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2333 + 2334 + /* Allocate the requests */ 2335 + for (i = 0; i < fsg_num_buffers; ++i) { 2336 + 
struct fsg_buffhd *bh = &common->buffhds[i]; 2337 + 2338 + rc = alloc_request(common, fsg->bulk_in, &bh->inreq); 2339 + if (rc) 2340 + goto reset; 2341 + rc = alloc_request(common, fsg->bulk_out, &bh->outreq); 2342 + if (rc) 2343 + goto reset; 2344 + bh->inreq->buf = bh->outreq->buf = bh->buf; 2345 + bh->inreq->context = bh->outreq->context = bh; 2346 + bh->inreq->complete = bulk_in_complete; 2347 + bh->outreq->complete = bulk_out_complete; 2348 + } 2349 + 2350 + common->running = 1; 2351 + for (i = 0; i < common->nluns; ++i) 2352 + common->luns[i].unit_attention_data = SS_RESET_OCCURRED; 2353 + return rc; 2354 + } 2355 + 2356 + 2357 + /****************************** ALT CONFIGS ******************************/ 2358 + 2359 + static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 2360 + { 2361 + struct fsg_dev *fsg = fsg_from_func(f); 2362 + fsg->common->new_fsg = fsg; 2363 + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2364 + return USB_GADGET_DELAYED_STATUS; 2365 + } 2366 + 2367 + static void fsg_disable(struct usb_function *f) 2368 + { 2369 + struct fsg_dev *fsg = fsg_from_func(f); 2370 + fsg->common->new_fsg = NULL; 2371 + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2372 + } 2373 + 2374 + 2375 + /*-------------------------------------------------------------------------*/ 2376 + 2377 + static void handle_exception(struct fsg_common *common) 2378 + { 2379 + siginfo_t info; 2380 + int i; 2381 + struct fsg_buffhd *bh; 2382 + enum fsg_state old_state; 2383 + struct fsg_lun *curlun; 2384 + unsigned int exception_req_tag; 2385 + 2386 + /* 2387 + * Clear the existing signals. Anything but SIGUSR1 is converted 2388 + * into a high-priority EXIT exception. 
2389 + */ 2390 + for (;;) { 2391 + int sig = 2392 + dequeue_signal_lock(current, &current->blocked, &info); 2393 + if (!sig) 2394 + break; 2395 + if (sig != SIGUSR1) { 2396 + if (common->state < FSG_STATE_EXIT) 2397 + DBG(common, "Main thread exiting on signal\n"); 2398 + raise_exception(common, FSG_STATE_EXIT); 2399 + } 2400 + } 2401 + 2402 + /* Cancel all the pending transfers */ 2403 + if (likely(common->fsg)) { 2404 + for (i = 0; i < fsg_num_buffers; ++i) { 2405 + bh = &common->buffhds[i]; 2406 + if (bh->inreq_busy) 2407 + usb_ep_dequeue(common->fsg->bulk_in, bh->inreq); 2408 + if (bh->outreq_busy) 2409 + usb_ep_dequeue(common->fsg->bulk_out, 2410 + bh->outreq); 2411 + } 2412 + 2413 + /* Wait until everything is idle */ 2414 + for (;;) { 2415 + int num_active = 0; 2416 + for (i = 0; i < fsg_num_buffers; ++i) { 2417 + bh = &common->buffhds[i]; 2418 + num_active += bh->inreq_busy + bh->outreq_busy; 2419 + } 2420 + if (num_active == 0) 2421 + break; 2422 + if (sleep_thread(common)) 2423 + return; 2424 + } 2425 + 2426 + /* Clear out the controller's fifos */ 2427 + if (common->fsg->bulk_in_enabled) 2428 + usb_ep_fifo_flush(common->fsg->bulk_in); 2429 + if (common->fsg->bulk_out_enabled) 2430 + usb_ep_fifo_flush(common->fsg->bulk_out); 2431 + } 2432 + 2433 + /* 2434 + * Reset the I/O buffer states and pointers, the SCSI 2435 + * state, and the exception. Then invoke the handler. 
2436 + */ 2437 + spin_lock_irq(&common->lock); 2438 + 2439 + for (i = 0; i < fsg_num_buffers; ++i) { 2440 + bh = &common->buffhds[i]; 2441 + bh->state = BUF_STATE_EMPTY; 2442 + } 2443 + common->next_buffhd_to_fill = &common->buffhds[0]; 2444 + common->next_buffhd_to_drain = &common->buffhds[0]; 2445 + exception_req_tag = common->exception_req_tag; 2446 + old_state = common->state; 2447 + 2448 + if (old_state == FSG_STATE_ABORT_BULK_OUT) 2449 + common->state = FSG_STATE_STATUS_PHASE; 2450 + else { 2451 + for (i = 0; i < common->nluns; ++i) { 2452 + curlun = &common->luns[i]; 2453 + curlun->prevent_medium_removal = 0; 2454 + curlun->sense_data = SS_NO_SENSE; 2455 + curlun->unit_attention_data = SS_NO_SENSE; 2456 + curlun->sense_data_info = 0; 2457 + curlun->info_valid = 0; 2458 + } 2459 + common->state = FSG_STATE_IDLE; 2460 + } 2461 + spin_unlock_irq(&common->lock); 2462 + 2463 + /* Carry out any extra actions required for the exception */ 2464 + switch (old_state) { 2465 + case FSG_STATE_ABORT_BULK_OUT: 2466 + send_status(common); 2467 + spin_lock_irq(&common->lock); 2468 + if (common->state == FSG_STATE_STATUS_PHASE) 2469 + common->state = FSG_STATE_IDLE; 2470 + spin_unlock_irq(&common->lock); 2471 + break; 2472 + 2473 + case FSG_STATE_RESET: 2474 + /* 2475 + * In case we were forced against our will to halt a 2476 + * bulk endpoint, clear the halt now. (The SuperH UDC 2477 + * requires this.) 2478 + */ 2479 + if (!fsg_is_set(common)) 2480 + break; 2481 + if (test_and_clear_bit(IGNORE_BULK_OUT, 2482 + &common->fsg->atomic_bitflags)) 2483 + usb_ep_clear_halt(common->fsg->bulk_in); 2484 + 2485 + if (common->ep0_req_tag == exception_req_tag) 2486 + ep0_queue(common); /* Complete the status stage */ 2487 + 2488 + /* 2489 + * Technically this should go here, but it would only be 2490 + * a waste of time. Ditto for the INTERFACE_CHANGE and 2491 + * CONFIG_CHANGE cases. 
2492 + */ 2493 + /* for (i = 0; i < common->nluns; ++i) */ 2494 + /* common->luns[i].unit_attention_data = */ 2495 + /* SS_RESET_OCCURRED; */ 2496 + break; 2497 + 2498 + case FSG_STATE_CONFIG_CHANGE: 2499 + do_set_interface(common, common->new_fsg); 2500 + if (common->new_fsg) 2501 + usb_composite_setup_continue(common->cdev); 2502 + break; 2503 + 2504 + case FSG_STATE_EXIT: 2505 + case FSG_STATE_TERMINATED: 2506 + do_set_interface(common, NULL); /* Free resources */ 2507 + spin_lock_irq(&common->lock); 2508 + common->state = FSG_STATE_TERMINATED; /* Stop the thread */ 2509 + spin_unlock_irq(&common->lock); 2510 + break; 2511 + 2512 + case FSG_STATE_INTERFACE_CHANGE: 2513 + case FSG_STATE_DISCONNECT: 2514 + case FSG_STATE_COMMAND_PHASE: 2515 + case FSG_STATE_DATA_PHASE: 2516 + case FSG_STATE_STATUS_PHASE: 2517 + case FSG_STATE_IDLE: 2518 + break; 2519 + } 2520 + } 2521 + 2522 + 2523 + /*-------------------------------------------------------------------------*/ 2524 + 2525 + static int fsg_main_thread(void *common_) 2526 + { 2527 + struct fsg_common *common = common_; 2528 + 2529 + /* 2530 + * Allow the thread to be killed by a signal, but set the signal mask 2531 + * to block everything but INT, TERM, KILL, and USR1. 2532 + */ 2533 + allow_signal(SIGINT); 2534 + allow_signal(SIGTERM); 2535 + allow_signal(SIGKILL); 2536 + allow_signal(SIGUSR1); 2537 + 2538 + /* Allow the thread to be frozen */ 2539 + set_freezable(); 2540 + 2541 + /* 2542 + * Arrange for userspace references to be interpreted as kernel 2543 + * pointers. That way we can pass a kernel pointer to a routine 2544 + * that expects a __user pointer and it will work okay. 
2545 + */ 2546 + set_fs(get_ds()); 2547 + 2548 + /* The main loop */ 2549 + while (common->state != FSG_STATE_TERMINATED) { 2550 + if (exception_in_progress(common) || signal_pending(current)) { 2551 + handle_exception(common); 2552 + continue; 2553 + } 2554 + 2555 + if (!common->running) { 2556 + sleep_thread(common); 2557 + continue; 2558 + } 2559 + 2560 + if (get_next_command(common)) 2561 + continue; 2562 + 2563 + spin_lock_irq(&common->lock); 2564 + if (!exception_in_progress(common)) 2565 + common->state = FSG_STATE_DATA_PHASE; 2566 + spin_unlock_irq(&common->lock); 2567 + 2568 + if (do_scsi_command(common) || finish_reply(common)) 2569 + continue; 2570 + 2571 + spin_lock_irq(&common->lock); 2572 + if (!exception_in_progress(common)) 2573 + common->state = FSG_STATE_STATUS_PHASE; 2574 + spin_unlock_irq(&common->lock); 2575 + 2576 + if (send_status(common)) 2577 + continue; 2578 + 2579 + spin_lock_irq(&common->lock); 2580 + if (!exception_in_progress(common)) 2581 + common->state = FSG_STATE_IDLE; 2582 + spin_unlock_irq(&common->lock); 2583 + } 2584 + 2585 + spin_lock_irq(&common->lock); 2586 + common->thread_task = NULL; 2587 + spin_unlock_irq(&common->lock); 2588 + 2589 + if (!common->ops || !common->ops->thread_exits 2590 + || common->ops->thread_exits(common) < 0) { 2591 + struct fsg_lun *curlun = common->luns; 2592 + unsigned i = common->nluns; 2593 + 2594 + down_write(&common->filesem); 2595 + for (; i--; ++curlun) { 2596 + if (!fsg_lun_is_open(curlun)) 2597 + continue; 2598 + 2599 + fsg_lun_close(curlun); 2600 + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; 2601 + } 2602 + up_write(&common->filesem); 2603 + } 2604 + 2605 + /* Let fsg_unbind() know the thread has exited */ 2606 + complete_and_exit(&common->thread_notifier, 0); 2607 + } 2608 + 2609 + 2610 + /*************************** DEVICE ATTRIBUTES ***************************/ 2611 + 2612 + static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro); 2613 + static DEVICE_ATTR(nofua, 0644, 
fsg_show_nofua, fsg_store_nofua); 2614 + static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file); 2615 + 2616 + static struct device_attribute dev_attr_ro_cdrom = 2617 + __ATTR(ro, 0444, fsg_show_ro, NULL); 2618 + static struct device_attribute dev_attr_file_nonremovable = 2619 + __ATTR(file, 0444, fsg_show_file, NULL); 2620 + 2621 + 2622 + /****************************** FSG COMMON ******************************/ 2623 + 2624 + static void fsg_common_release(struct kref *ref); 2625 + 2626 + static void fsg_lun_release(struct device *dev) 2627 + { 2628 + /* Nothing needs to be done */ 2629 + } 2630 + 2631 + static inline void fsg_common_get(struct fsg_common *common) 2632 + { 2633 + kref_get(&common->ref); 2634 + } 2635 + 2636 + static inline void fsg_common_put(struct fsg_common *common) 2637 + { 2638 + kref_put(&common->ref, fsg_common_release); 2639 + } 2640 + 2641 + static struct fsg_common *fsg_common_init(struct fsg_common *common, 2642 + struct usb_composite_dev *cdev, 2643 + struct fsg_config *cfg) 2644 + { 2645 + struct usb_gadget *gadget = cdev->gadget; 2646 + struct fsg_buffhd *bh; 2647 + struct fsg_lun *curlun; 2648 + struct fsg_lun_config *lcfg; 2649 + int nluns, i, rc; 2650 + char *pathbuf; 2651 + 2652 + rc = fsg_num_buffers_validate(); 2653 + if (rc != 0) 2654 + return ERR_PTR(rc); 2655 + 2656 + /* Find out how many LUNs there should be */ 2657 + nluns = cfg->nluns; 2658 + if (nluns < 1 || nluns > FSG_MAX_LUNS) { 2659 + dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns); 2660 + return ERR_PTR(-EINVAL); 2661 + } 2662 + 2663 + /* Allocate? 
*/ 2664 + if (!common) { 2665 + common = kzalloc(sizeof *common, GFP_KERNEL); 2666 + if (!common) 2667 + return ERR_PTR(-ENOMEM); 2668 + common->free_storage_on_release = 1; 2669 + } else { 2670 + memset(common, 0, sizeof *common); 2671 + common->free_storage_on_release = 0; 2672 + } 2673 + 2674 + common->buffhds = kcalloc(fsg_num_buffers, 2675 + sizeof *(common->buffhds), GFP_KERNEL); 2676 + if (!common->buffhds) { 2677 + if (common->free_storage_on_release) 2678 + kfree(common); 2679 + return ERR_PTR(-ENOMEM); 2680 + } 2681 + 2682 + common->ops = cfg->ops; 2683 + common->private_data = cfg->private_data; 2684 + 2685 + common->gadget = gadget; 2686 + common->ep0 = gadget->ep0; 2687 + common->ep0req = cdev->req; 2688 + common->cdev = cdev; 2689 + 2690 + /* Maybe allocate device-global string IDs, and patch descriptors */ 2691 + if (fsg_strings[FSG_STRING_INTERFACE].id == 0) { 2692 + rc = usb_string_id(cdev); 2693 + if (unlikely(rc < 0)) 2694 + goto error_release; 2695 + fsg_strings[FSG_STRING_INTERFACE].id = rc; 2696 + fsg_intf_desc.iInterface = rc; 2697 + } 2698 + 2699 + /* 2700 + * Create the LUNs, open their backing files, and register the 2701 + * LUN devices in sysfs. 
2702 + */ 2703 + curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL); 2704 + if (unlikely(!curlun)) { 2705 + rc = -ENOMEM; 2706 + goto error_release; 2707 + } 2708 + common->luns = curlun; 2709 + 2710 + init_rwsem(&common->filesem); 2711 + 2712 + for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) { 2713 + curlun->cdrom = !!lcfg->cdrom; 2714 + curlun->ro = lcfg->cdrom || lcfg->ro; 2715 + curlun->initially_ro = curlun->ro; 2716 + curlun->removable = lcfg->removable; 2717 + curlun->dev.release = fsg_lun_release; 2718 + curlun->dev.parent = &gadget->dev; 2719 + /* curlun->dev.driver = &fsg_driver.driver; XXX */ 2720 + dev_set_drvdata(&curlun->dev, &common->filesem); 2721 + dev_set_name(&curlun->dev, "lun%d", i); 2722 + 2723 + rc = device_register(&curlun->dev); 2724 + if (rc) { 2725 + INFO(common, "failed to register LUN%d: %d\n", i, rc); 2726 + common->nluns = i; 2727 + put_device(&curlun->dev); 2728 + goto error_release; 2729 + } 2730 + 2731 + rc = device_create_file(&curlun->dev, 2732 + curlun->cdrom 2733 + ? &dev_attr_ro_cdrom 2734 + : &dev_attr_ro); 2735 + if (rc) 2736 + goto error_luns; 2737 + rc = device_create_file(&curlun->dev, 2738 + curlun->removable 2739 + ? 
&dev_attr_file 2740 + : &dev_attr_file_nonremovable); 2741 + if (rc) 2742 + goto error_luns; 2743 + rc = device_create_file(&curlun->dev, &dev_attr_nofua); 2744 + if (rc) 2745 + goto error_luns; 2746 + 2747 + if (lcfg->filename) { 2748 + rc = fsg_lun_open(curlun, lcfg->filename); 2749 + if (rc) 2750 + goto error_luns; 2751 + } else if (!curlun->removable) { 2752 + ERROR(common, "no file given for LUN%d\n", i); 2753 + rc = -EINVAL; 2754 + goto error_luns; 2755 + } 2756 + } 2757 + common->nluns = nluns; 2758 + 2759 + /* Data buffers cyclic list */ 2760 + bh = common->buffhds; 2761 + i = fsg_num_buffers; 2762 + goto buffhds_first_it; 2763 + do { 2764 + bh->next = bh + 1; 2765 + ++bh; 2766 + buffhds_first_it: 2767 + bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL); 2768 + if (unlikely(!bh->buf)) { 2769 + rc = -ENOMEM; 2770 + goto error_release; 2771 + } 2772 + } while (--i); 2773 + bh->next = common->buffhds; 2774 + 2775 + /* Prepare inquiryString */ 2776 + if (cfg->release != 0xffff) { 2777 + i = cfg->release; 2778 + } else { 2779 + i = usb_gadget_controller_number(gadget); 2780 + if (i >= 0) { 2781 + i = 0x0300 + i; 2782 + } else { 2783 + WARNING(common, "controller '%s' not recognized\n", 2784 + gadget->name); 2785 + i = 0x0399; 2786 + } 2787 + } 2788 + snprintf(common->inquiry_string, sizeof common->inquiry_string, 2789 + "%-8s%-16s%04x", cfg->vendor_name ?: "Linux", 2790 + /* Assume product name dependent on the first LUN */ 2791 + cfg->product_name ?: (common->luns->cdrom 2792 + ? "File-Stor Gadget" 2793 + : "File-CD Gadget"), 2794 + i); 2795 + 2796 + /* 2797 + * Some peripheral controllers are known not to be able to 2798 + * halt bulk endpoints correctly. If one of them is present, 2799 + * disable stalls. 
2800 + */ 2801 + common->can_stall = cfg->can_stall && 2802 + !(gadget_is_at91(common->gadget)); 2803 + 2804 + spin_lock_init(&common->lock); 2805 + kref_init(&common->ref); 2806 + 2807 + /* Tell the thread to start working */ 2808 + common->thread_task = 2809 + kthread_create(fsg_main_thread, common, "file-storage"); 2810 + if (IS_ERR(common->thread_task)) { 2811 + rc = PTR_ERR(common->thread_task); 2812 + goto error_release; 2813 + } 2814 + init_completion(&common->thread_notifier); 2815 + init_waitqueue_head(&common->fsg_wait); 2816 + 2817 + /* Information */ 2818 + INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n"); 2819 + INFO(common, "Number of LUNs=%d\n", common->nluns); 2820 + 2821 + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 2822 + for (i = 0, nluns = common->nluns, curlun = common->luns; 2823 + i < nluns; 2824 + ++curlun, ++i) { 2825 + char *p = "(no medium)"; 2826 + if (fsg_lun_is_open(curlun)) { 2827 + p = "(error)"; 2828 + if (pathbuf) { 2829 + p = d_path(&curlun->filp->f_path, 2830 + pathbuf, PATH_MAX); 2831 + if (IS_ERR(p)) 2832 + p = "(error)"; 2833 + } 2834 + } 2835 + LINFO(curlun, "LUN: %s%s%sfile: %s\n", 2836 + curlun->removable ? "removable " : "", 2837 + curlun->ro ? "read only " : "", 2838 + curlun->cdrom ? "CD-ROM " : "", 2839 + p); 2840 + } 2841 + kfree(pathbuf); 2842 + 2843 + DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task)); 2844 + 2845 + wake_up_process(common->thread_task); 2846 + 2847 + return common; 2848 + 2849 + error_luns: 2850 + common->nluns = i + 1; 2851 + error_release: 2852 + common->state = FSG_STATE_TERMINATED; /* The thread is dead */ 2853 + /* Call fsg_common_release() directly, ref might be not initialised. 
*/ 2854 + fsg_common_release(&common->ref); 2855 + return ERR_PTR(rc); 2856 + } 2857 + 2858 + static void fsg_common_release(struct kref *ref) 2859 + { 2860 + struct fsg_common *common = container_of(ref, struct fsg_common, ref); 2861 + 2862 + /* If the thread isn't already dead, tell it to exit now */ 2863 + if (common->state != FSG_STATE_TERMINATED) { 2864 + raise_exception(common, FSG_STATE_EXIT); 2865 + wait_for_completion(&common->thread_notifier); 2866 + } 2867 + 2868 + if (likely(common->luns)) { 2869 + struct fsg_lun *lun = common->luns; 2870 + unsigned i = common->nluns; 2871 + 2872 + /* In error recovery common->nluns may be zero. */ 2873 + for (; i; --i, ++lun) { 2874 + device_remove_file(&lun->dev, &dev_attr_nofua); 2875 + device_remove_file(&lun->dev, 2876 + lun->cdrom 2877 + ? &dev_attr_ro_cdrom 2878 + : &dev_attr_ro); 2879 + device_remove_file(&lun->dev, 2880 + lun->removable 2881 + ? &dev_attr_file 2882 + : &dev_attr_file_nonremovable); 2883 + fsg_lun_close(lun); 2884 + device_unregister(&lun->dev); 2885 + } 2886 + 2887 + kfree(common->luns); 2888 + } 2889 + 2890 + { 2891 + struct fsg_buffhd *bh = common->buffhds; 2892 + unsigned i = fsg_num_buffers; 2893 + do { 2894 + kfree(bh->buf); 2895 + } while (++bh, --i); 2896 + } 2897 + 2898 + kfree(common->buffhds); 2899 + if (common->free_storage_on_release) 2900 + kfree(common); 2901 + } 2902 + 2903 + 2904 + /*-------------------------------------------------------------------------*/ 2905 + 2906 + static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) 2907 + { 2908 + struct fsg_dev *fsg = fsg_from_func(f); 2909 + struct fsg_common *common = fsg->common; 2910 + 2911 + DBG(fsg, "unbind\n"); 2912 + if (fsg->common->fsg == fsg) { 2913 + fsg->common->new_fsg = NULL; 2914 + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); 2915 + /* FIXME: make interruptible or killable somehow? 
*/ 2916 + wait_event(common->fsg_wait, common->fsg != fsg); 2917 + } 2918 + 2919 + fsg_common_put(common); 2920 + usb_free_descriptors(fsg->function.descriptors); 2921 + usb_free_descriptors(fsg->function.hs_descriptors); 2922 + usb_free_descriptors(fsg->function.ss_descriptors); 2923 + kfree(fsg); 2924 + } 2925 + 2926 + static int fsg_bind(struct usb_configuration *c, struct usb_function *f) 2927 + { 2928 + struct fsg_dev *fsg = fsg_from_func(f); 2929 + struct usb_gadget *gadget = c->cdev->gadget; 2930 + int i; 2931 + struct usb_ep *ep; 2932 + 2933 + fsg->gadget = gadget; 2934 + 2935 + /* New interface */ 2936 + i = usb_interface_id(c, f); 2937 + if (i < 0) 2938 + return i; 2939 + fsg_intf_desc.bInterfaceNumber = i; 2940 + fsg->interface_number = i; 2941 + 2942 + /* Find all the endpoints we will use */ 2943 + ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); 2944 + if (!ep) 2945 + goto autoconf_fail; 2946 + ep->driver_data = fsg->common; /* claim the endpoint */ 2947 + fsg->bulk_in = ep; 2948 + 2949 + ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); 2950 + if (!ep) 2951 + goto autoconf_fail; 2952 + ep->driver_data = fsg->common; /* claim the endpoint */ 2953 + fsg->bulk_out = ep; 2954 + 2955 + /* Copy descriptors */ 2956 + f->descriptors = usb_copy_descriptors(fsg_fs_function); 2957 + if (unlikely(!f->descriptors)) 2958 + return -ENOMEM; 2959 + 2960 + if (gadget_is_dualspeed(gadget)) { 2961 + /* Assume endpoint addresses are the same for both speeds */ 2962 + fsg_hs_bulk_in_desc.bEndpointAddress = 2963 + fsg_fs_bulk_in_desc.bEndpointAddress; 2964 + fsg_hs_bulk_out_desc.bEndpointAddress = 2965 + fsg_fs_bulk_out_desc.bEndpointAddress; 2966 + f->hs_descriptors = usb_copy_descriptors(fsg_hs_function); 2967 + if (unlikely(!f->hs_descriptors)) { 2968 + usb_free_descriptors(f->descriptors); 2969 + return -ENOMEM; 2970 + } 2971 + } 2972 + 2973 + if (gadget_is_superspeed(gadget)) { 2974 + unsigned max_burst; 2975 + 2976 + /* Calculate bMaxBurst, we know packet 
size is 1024 */ 2977 + max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15); 2978 + 2979 + fsg_ss_bulk_in_desc.bEndpointAddress = 2980 + fsg_fs_bulk_in_desc.bEndpointAddress; 2981 + fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst; 2982 + 2983 + fsg_ss_bulk_out_desc.bEndpointAddress = 2984 + fsg_fs_bulk_out_desc.bEndpointAddress; 2985 + fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; 2986 + 2987 + f->ss_descriptors = usb_copy_descriptors(fsg_ss_function); 2988 + if (unlikely(!f->ss_descriptors)) { 2989 + usb_free_descriptors(f->hs_descriptors); 2990 + usb_free_descriptors(f->descriptors); 2991 + return -ENOMEM; 2992 + } 2993 + } 2994 + 2995 + return 0; 2996 + 2997 + autoconf_fail: 2998 + ERROR(fsg, "unable to autoconfigure all endpoints\n"); 2999 + return -ENOTSUPP; 3000 + } 3001 + 3002 + 3003 + /****************************** ADD FUNCTION ******************************/ 3004 + 3005 + static struct usb_gadget_strings *fsg_strings_array[] = { 3006 + &fsg_stringtab, 3007 + NULL, 3008 + }; 3009 + 3010 + static int fsg_bind_config(struct usb_composite_dev *cdev, 3011 + struct usb_configuration *c, 3012 + struct fsg_common *common) 3013 + { 3014 + struct fsg_dev *fsg; 3015 + int rc; 3016 + 3017 + fsg = kzalloc(sizeof *fsg, GFP_KERNEL); 3018 + if (unlikely(!fsg)) 3019 + return -ENOMEM; 3020 + 3021 + fsg->function.name = FSG_DRIVER_DESC; 3022 + fsg->function.strings = fsg_strings_array; 3023 + fsg->function.bind = fsg_bind; 3024 + fsg->function.unbind = fsg_unbind; 3025 + fsg->function.setup = fsg_setup; 3026 + fsg->function.set_alt = fsg_set_alt; 3027 + fsg->function.disable = fsg_disable; 3028 + 3029 + fsg->common = common; 3030 + /* 3031 + * Our caller holds a reference to common structure so we 3032 + * don't have to be worry about it being freed until we return 3033 + * from this function. So instead of incrementing counter now 3034 + * and decrement in error recovery we increment it only when 3035 + * call to usb_add_function() was successful. 
3036 + */ 3037 + 3038 + rc = usb_add_function(c, &fsg->function); 3039 + if (unlikely(rc)) 3040 + kfree(fsg); 3041 + else 3042 + fsg_common_get(fsg->common); 3043 + return rc; 3044 + } 3045 + 3046 + 3047 + /************************* Module parameters *************************/ 3048 + 3049 + struct fsg_module_parameters { 3050 + char *file[FSG_MAX_LUNS]; 3051 + bool ro[FSG_MAX_LUNS]; 3052 + bool removable[FSG_MAX_LUNS]; 3053 + bool cdrom[FSG_MAX_LUNS]; 3054 + bool nofua[FSG_MAX_LUNS]; 3055 + 3056 + unsigned int file_count, ro_count, removable_count, cdrom_count; 3057 + unsigned int nofua_count; 3058 + unsigned int luns; /* nluns */ 3059 + bool stall; /* can_stall */ 3060 + }; 3061 + 3062 + #define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \ 3063 + module_param_array_named(prefix ## name, params.name, type, \ 3064 + &prefix ## params.name ## _count, \ 3065 + S_IRUGO); \ 3066 + MODULE_PARM_DESC(prefix ## name, desc) 3067 + 3068 + #define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \ 3069 + module_param_named(prefix ## name, params.name, type, \ 3070 + S_IRUGO); \ 3071 + MODULE_PARM_DESC(prefix ## name, desc) 3072 + 3073 + #define FSG_MODULE_PARAMETERS(prefix, params) \ 3074 + _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \ 3075 + "names of backing files or devices"); \ 3076 + _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \ 3077 + "true to force read-only"); \ 3078 + _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \ 3079 + "true to simulate removable media"); \ 3080 + _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \ 3081 + "true to simulate CD-ROM instead of disk"); \ 3082 + _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \ 3083 + "true to ignore SCSI WRITE(10,12) FUA bit"); \ 3084 + _FSG_MODULE_PARAM(prefix, params, luns, uint, \ 3085 + "number of LUNs"); \ 3086 + _FSG_MODULE_PARAM(prefix, params, stall, bool, \ 3087 + "false to prevent bulk stalls") 3088 + 3089 + static void 3090 + fsg_config_from_params(struct 
fsg_config *cfg, 3091 + const struct fsg_module_parameters *params) 3092 + { 3093 + struct fsg_lun_config *lun; 3094 + unsigned i; 3095 + 3096 + /* Configure LUNs */ 3097 + cfg->nluns = 3098 + min(params->luns ?: (params->file_count ?: 1u), 3099 + (unsigned)FSG_MAX_LUNS); 3100 + for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) { 3101 + lun->ro = !!params->ro[i]; 3102 + lun->cdrom = !!params->cdrom[i]; 3103 + lun->removable = !!params->removable[i]; 3104 + lun->filename = 3105 + params->file_count > i && params->file[i][0] 3106 + ? params->file[i] 3107 + : 0; 3108 + } 3109 + 3110 + /* Let MSF use defaults */ 3111 + cfg->vendor_name = 0; 3112 + cfg->product_name = 0; 3113 + cfg->release = 0xffff; 3114 + 3115 + cfg->ops = NULL; 3116 + cfg->private_data = NULL; 3117 + 3118 + /* Finalise */ 3119 + cfg->can_stall = params->stall; 3120 + } 3121 + 3122 + static inline struct fsg_common * 3123 + fsg_common_from_params(struct fsg_common *common, 3124 + struct usb_composite_dev *cdev, 3125 + const struct fsg_module_parameters *params) 3126 + __attribute__((unused)); 3127 + static inline struct fsg_common * 3128 + fsg_common_from_params(struct fsg_common *common, 3129 + struct usb_composite_dev *cdev, 3130 + const struct fsg_module_parameters *params) 3131 + { 3132 + struct fsg_config cfg; 3133 + fsg_config_from_params(&cfg, params); 3134 + return fsg_common_init(common, cdev, &cfg); 3135 + }
+918
drivers/staging/ccg/f_rndis.c
··· 1 + /* 2 + * f_rndis.c -- RNDIS link function driver 3 + * 4 + * Copyright (C) 2003-2005,2008 David Brownell 5 + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger 6 + * Copyright (C) 2008 Nokia Corporation 7 + * Copyright (C) 2009 Samsung Electronics 8 + * Author: Michal Nazarewicz (mina86@mina86.com) 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License as published by 12 + * the Free Software Foundation; either version 2 of the License, or 13 + * (at your option) any later version. 14 + */ 15 + 16 + /* #define VERBOSE_DEBUG */ 17 + 18 + #include <linux/slab.h> 19 + #include <linux/kernel.h> 20 + #include <linux/device.h> 21 + #include <linux/etherdevice.h> 22 + 23 + #include <linux/atomic.h> 24 + 25 + #include "u_ether.h" 26 + #include "rndis.h" 27 + 28 + 29 + /* 30 + * This function is an RNDIS Ethernet port -- a Microsoft protocol that's 31 + * been promoted instead of the standard CDC Ethernet. The published RNDIS 32 + * spec is ambiguous, incomplete, and needlessly complex. Variants such as 33 + * ActiveSync have even worse status in terms of specification. 34 + * 35 + * In short: it's a protocol controlled by (and for) Microsoft, not for an 36 + * Open ecosystem or markets. Linux supports it *only* because Microsoft 37 + * doesn't support the CDC Ethernet standard. 38 + * 39 + * The RNDIS data transfer model is complex, with multiple Ethernet packets 40 + * per USB message, and out of band data. The control model is built around 41 + * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM 42 + * (modem, not Ethernet) veneer, with those ACM descriptors being entirely 43 + * useless (they're ignored). RNDIS expects to be the only function in its 44 + * configuration, so it's no real help if you need composite devices; and 45 + * it expects to be the first configuration too. 
46 + * 47 + * There is a single technical advantage of RNDIS over CDC Ethernet, if you 48 + * discount the fluff that its RPC can be made to deliver: it doesn't need 49 + * a NOP altsetting for the data interface. That lets it work on some of the 50 + * "so smart it's stupid" hardware which takes over configuration changes 51 + * from the software, and adds restrictions like "no altsettings". 52 + * 53 + * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and 54 + * have all sorts of contrary-to-specification oddities that can prevent 55 + * them from working sanely. Since bugfixes (or accurate specs, letting 56 + * Linux work around those bugs) are unlikely to ever come from MSFT, you 57 + * may want to avoid using RNDIS on purely operational grounds. 58 + * 59 + * Omissions from the RNDIS 1.0 specification include: 60 + * 61 + * - Power management ... references data that's scattered around lots 62 + * of other documentation, which is incorrect/incomplete there too. 63 + * 64 + * - There are various undocumented protocol requirements, like the need 65 + * to send garbage in some control-OUT messages. 66 + * 67 + * - MS-Windows drivers sometimes emit undocumented requests. 
 */

/* Per-instance state for one bound RNDIS function. */
struct f_rndis {
	struct gether			port;		/* ethernet link (u_ether) */
	u8				ctrl_id, data_id; /* interface ids from usb_interface_id() */
	u8				ethaddr[ETH_ALEN]; /* host-visible MAC, pushed via rndis_set_host_mac() */
	u32				vendorID;
	const char			*manufacturer;
	int				config;		/* handle returned by rndis_register() */

	struct usb_ep			*notify;	/* interrupt-IN status endpoint */
	struct usb_request		*notify_req;	/* single preallocated notification request */
	atomic_t			notify_count;	/* pending RESPONSE_AVAILABLE notifications */
};

static inline struct f_rndis *func_to_rndis(struct usb_function *f)
{
	return container_of(f, struct f_rndis, port.func);
}

/* peak (theoretical) bulk transfer rate in bits-per-second */
static unsigned int bitrate(struct usb_gadget *g)
{
	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
		/* SS: 13 bursts * 1024 bytes * 8 uframes/ms * 1000 ms * 8 bits */
		return 13 * 1024 * 8 * 1000 * 8;
	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
		/* HS: 13 packets/uframe * 512 bytes * 8 uframes/ms * 1000 ms * 8 bits */
		return 13 * 512 * 8 * 1000 * 8;
	else
		/* FS: 19 packets/frame * 64 bytes * 1000 frames/s * 8 bits */
		return 19 * 64 * 1 * 1000 * 8;
}

/*-------------------------------------------------------------------------*/

/*
 * Descriptors.  The control interface masquerades as a vendor-flavored
 * CDC ACM interface (a Microsoft convention); the data interface is a
 * plain CDC Data interface.  Interface numbers and string ids marked
 * DYNAMIC are patched in at bind time.
 */

#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
#define STATUS_BYTECOUNT		8	/* 8 bytes data */


/* interface descriptor: */

static struct usb_interface_descriptor rndis_control_intf = {
	.bLength =		sizeof rndis_control_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	/* status endpoint is optional; this could be patched later */
	.bNumEndpoints =	1,
	.bInterfaceClass =	USB_CLASS_COMM,
	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM,
	.bInterfaceProtocol =	USB_CDC_ACM_PROTO_VENDOR,
	/* .iInterface = DYNAMIC */
};

static struct usb_cdc_header_desc header_desc = {
	.bLength =		sizeof header_desc,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,

	.bcdCDC =		cpu_to_le16(0x0110),
};

static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
	.bLength =		sizeof call_mgmt_descriptor,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,

	.bmCapabilities =	0x00,
	.bDataInterface =	0x01,
};

static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
	.bLength =		sizeof rndis_acm_descriptor,
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_ACM_TYPE,

	.bmCapabilities =	0x00,
};

static struct usb_cdc_union_desc rndis_union_desc = {
	.bLength =		sizeof(rndis_union_desc),
	.bDescriptorType =	USB_DT_CS_INTERFACE,
	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
	/* .bMasterInterface0 =	DYNAMIC */
	/* .bSlaveInterface0 =	DYNAMIC */
};

/* the data interface has two bulk endpoints */

static struct usb_interface_descriptor rndis_data_intf = {
	.bLength =		sizeof rndis_data_intf,
	.bDescriptorType =	USB_DT_INTERFACE,

	/* .bInterfaceNumber = DYNAMIC */
	.bNumEndpoints =	2,
	.bInterfaceClass =	USB_CLASS_CDC_DATA,
	.bInterfaceSubClass =	0,
	.bInterfaceProtocol =	0,
	/* .iInterface = DYNAMIC */
};


static struct usb_interface_assoc_descriptor
rndis_iad_descriptor = {
	.bLength =		sizeof rndis_iad_descriptor,
	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,

	.bFirstInterface =	0, /* XXX, hardcoded */
	.bInterfaceCount =	2,	// control + data
	.bFunctionClass =	USB_CLASS_COMM,
	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
	/* .iFunction = DYNAMIC */
};

/* full speed support: */

static struct usb_endpoint_descriptor fs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
};

static struct usb_endpoint_descriptor fs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor fs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *eth_fs_function[] = {
	(struct usb_descriptor_header *) &rndis_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_control_intf,
	(struct usb_descriptor_header *) &header_desc,
	(struct usb_descriptor_header *) &call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_union_desc,
	(struct usb_descriptor_header *) &fs_notify_desc,

	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_data_intf,
	(struct usb_descriptor_header *) &fs_in_desc,
	(struct usb_descriptor_header *) &fs_out_desc,
	NULL,
};

/* high speed support: */

static struct usb_endpoint_descriptor hs_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
};

static struct usb_endpoint_descriptor hs_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_endpoint_descriptor hs_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(512),
};

static struct usb_descriptor_header *eth_hs_function[] = {
	(struct usb_descriptor_header *) &rndis_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_control_intf,
	(struct usb_descriptor_header *) &header_desc,
	(struct usb_descriptor_header *) &call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_union_desc,
	(struct usb_descriptor_header *) &hs_notify_desc,

	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_data_intf,
	(struct usb_descriptor_header *) &hs_in_desc,
	(struct usb_descriptor_header *) &hs_out_desc,
	NULL,
};

/* super speed support: */

static struct usb_endpoint_descriptor ss_notify_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_INT,
	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
};

static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
	.bLength =		sizeof ss_intr_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 3 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
	.wBytesPerInterval =	cpu_to_le16(STATUS_BYTECOUNT),
};

static struct usb_endpoint_descriptor ss_in_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_IN,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor ss_out_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,

	.bEndpointAddress =	USB_DIR_OUT,
	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
	.wMaxPacketSize =	cpu_to_le16(1024),
};

static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
	.bLength =		sizeof ss_bulk_comp_desc,
	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,

	/* the following 2 values can be tweaked if necessary */
	/* .bMaxBurst =		0, */
	/* .bmAttributes =	0, */
};

static struct usb_descriptor_header *eth_ss_function[] = {
	(struct usb_descriptor_header *) &rndis_iad_descriptor,

	/* control interface matches ACM, not Ethernet */
	(struct usb_descriptor_header *) &rndis_control_intf,
	(struct usb_descriptor_header *) &header_desc,
	(struct usb_descriptor_header *) &call_mgmt_descriptor,
	(struct usb_descriptor_header *) &rndis_acm_descriptor,
	(struct usb_descriptor_header *) &rndis_union_desc,
	(struct usb_descriptor_header *) &ss_notify_desc,
	(struct usb_descriptor_header *) &ss_intr_comp_desc,

	/* data interface has no altsetting */
	(struct usb_descriptor_header *) &rndis_data_intf,
	(struct usb_descriptor_header *) &ss_in_desc,
	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
	(struct usb_descriptor_header *) &ss_out_desc,
	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
	NULL,
};

/* string descriptors: */

static struct usb_string rndis_string_defs[] = {
	[0].s = "RNDIS Communications Control",
	[1].s = "RNDIS Ethernet Data",
	[2].s = "RNDIS",
	{  } /* end of list */
};

static struct usb_gadget_strings rndis_string_table = {
	.language =		0x0409,	/* en-us */
	.strings =		rndis_string_defs,
};

static struct usb_gadget_strings *rndis_strings[] = {
	&rndis_string_table,
	NULL,
};

/*-------------------------------------------------------------------------*/

/* TX wrapper: prepend the RNDIS packet header to each outgoing skb.
 * Returns a new skb with enough headroom (the original is always
 * consumed), or NULL if skb_realloc_headroom() failed.
 */
static struct sk_buff *rndis_add_header(struct gether *port,
					struct sk_buff *skb)
{
	struct sk_buff *skb2;

	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
	if (skb2)
		rndis_add_hdr(skb2);

	dev_kfree_skb_any(skb);
	return skb2;
}

/* Callback from the RNDIS core when a response is queued; uses
 * GFP_ATOMIC, so presumably it may run in atomic context -- confirm.
 * Only the first caller queues the notification; further responses
 * just bump notify_count and are drained by rndis_response_complete().
 */
static void rndis_response_available(void *_rndis)
{
	struct f_rndis			*rndis = _rndis;
	struct usb_request		*req = rndis->notify_req;
	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
	__le32				*data = req->buf;
	int				status;

	if (atomic_inc_return(&rndis->notify_count) != 1)
		return;

	/* Send RNDIS RESPONSE_AVAILABLE notification; a
	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
	 *
	 * This is the only notification defined by RNDIS.
	 */
	data[0] = cpu_to_le32(1);
	data[1] = cpu_to_le32(0);

	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
	if (status) {
		atomic_dec(&rndis->notify_count);
		DBG(cdev, "notify/0 --> %d\n", status);
	}
}

/* Completion handler shared by the ep0 response and the interrupt
 * notification; on the notify endpoint it keeps re-queueing the single
 * notification request until notify_count drains to zero.
 */
static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rndis			*rndis = req->context;
	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
	int				status = req->status;

	/* after TX:
	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
	 */
	switch (status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
		/* connection gone */
		atomic_set(&rndis->notify_count, 0);
		break;
	default:
		DBG(cdev, "RNDIS %s response error %d, %d/%d\n",
			ep->name, status,
			req->actual, req->length);
		/* FALLTHROUGH */
	case 0:
		if (ep != rndis->notify)
			break;

		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
		 * notifications by resending until we're done
		 */
		if (atomic_dec_and_test(&rndis->notify_count))
			break;
		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
		if (status) {
			atomic_dec(&rndis->notify_count);
			DBG(cdev, "notify/1 --> %d\n", status);
		}
		break;
	}
}

/* ep0 OUT data-stage completion: hand the encapsulated RNDIS command
 * to the message parser.  NOTE(review): the parser runs without the
 * (commented-out) lock here -- presumably it relies on ep0 traffic
 * being serialized; confirm.
 */
static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct f_rndis			*rndis = req->context;
	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
	int				status;

	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
//	spin_lock(&dev->lock);
	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
	if (status < 0)
		ERROR(cdev, "RNDIS command error %d, %d/%d\n",
			status, req->actual, req->length);
//	spin_unlock(&dev->lock);
}

/* Handle the two RNDIS class-specific ep0 requests (SEND/GET
 * encapsulated command/response); everything else is handled by the
 * composite core.  Returns the queued data-stage length, or a
 * negative value which makes the core stall ep0.
 */
static int
rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct f_rndis		*rndis = func_to_rndis(f);
	struct usb_composite_dev *cdev = f->config->cdev;
	struct usb_request	*req = cdev->req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* composite driver infrastructure handles everything except
	 * CDC class messages; interface activation uses set_alt().
	 */
	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {

	/* RNDIS uses the CDC command encapsulation mechanism to implement
	 * an RPC scheme, with much getting/setting of attributes by OID.
	 */
	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
		if (w_value || w_index != rndis->ctrl_id)
			goto invalid;
		/* read the request; process it later */
		value = w_length;
		req->complete = rndis_command_complete;
		req->context = rndis;
		/* later, rndis_response_available() sends a notification */
		break;

	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
		if (w_value || w_index != rndis->ctrl_id)
			goto invalid;
		else {
			u8 *buf;
			u32 n;

			/* return the result */
			buf = rndis_get_next_response(rndis->config, &n);
			if (buf) {
				/* NOTE(review): n is copied without being
				 * checked against w_length or the ep0
				 * buffer size -- assumes responses always
				 * fit; confirm
				 */
				memcpy(req->buf, buf, n);
				req->complete = rndis_response_complete;
				req->context = rndis;
				rndis_free_response(rndis->config, buf);
				value = n;
			}
			/* else stalls ... spec says to avoid that */
		}
		break;

	default:
invalid:
		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
	}

	/* respond with data transfer or status phase? */
	if (value >= 0) {
		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, w_length);
		req->zero = (value < w_length);
		req->length = value;
		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
		if (value < 0)
			ERROR(cdev, "rndis response on err %d\n", value);
	}

	/* device either stalls (value < 0) or reports success */
	return value;
}

/* Activate either the control (notification) interface or the data
 * interface; RNDIS interfaces have no altsettings, so alt is always 0.
 */
static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct f_rndis		*rndis = func_to_rndis(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	/* we know alt == 0 */

	if (intf == rndis->ctrl_id) {
		if (rndis->notify->driver_data) {
			VDBG(cdev, "reset rndis control %d\n", intf);
			usb_ep_disable(rndis->notify);
		}
		if (!rndis->notify->desc) {
			VDBG(cdev, "init rndis ctrl %d\n", intf);
			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
				goto fail;
		}
		usb_ep_enable(rndis->notify);
		rndis->notify->driver_data = rndis;

	} else if (intf == rndis->data_id) {
		struct net_device	*net;

		if (rndis->port.in_ep->driver_data) {
			DBG(cdev, "reset rndis\n");
			gether_disconnect(&rndis->port);
		}

		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
			DBG(cdev, "init rndis\n");
			if (config_ep_by_speed(cdev->gadget, f,
					       rndis->port.in_ep) ||
			    config_ep_by_speed(cdev->gadget, f,
					       rndis->port.out_ep)) {
				rndis->port.in_ep->desc = NULL;
				rndis->port.out_ep->desc = NULL;
				goto fail;
			}
		}

		/* Avoid ZLPs; they can be troublesome. */
		rndis->port.is_zlp_ok = false;

		/* RNDIS should be in the "RNDIS uninitialized" state,
		 * either never activated or after rndis_uninit().
		 *
		 * We don't want data to flow here until a nonzero packet
		 * filter is set, at which point it enters "RNDIS data
		 * initialized" state ... but we do want the endpoints
		 * to be activated.  It's a strange little state.
		 *
		 * REVISIT the RNDIS gadget code has done this wrong for a
		 * very long time.  We need another call to the link layer
		 * code -- gether_updown(...bool) maybe -- to do it right.
		 */
		rndis->port.cdc_filter = 0;

		DBG(cdev, "RNDIS RX/TX early activation ... \n");
		net = gether_connect(&rndis->port);
		if (IS_ERR(net))
			return PTR_ERR(net);

		rndis_set_param_dev(rndis->config, net,
				&rndis->port.cdc_filter);
	} else
		goto fail;

	return 0;
fail:
	return -EINVAL;
}

/* Deactivate the function: reset RNDIS protocol state, tear down the
 * ethernet link, and disable the notification endpoint.  A no-op if
 * the notify endpoint was never enabled.
 */
static void rndis_disable(struct usb_function *f)
{
	struct f_rndis		*rndis = func_to_rndis(f);
	struct usb_composite_dev *cdev = f->config->cdev;

	if (!rndis->notify->driver_data)
		return;

	DBG(cdev, "rndis deactivated\n");

	rndis_uninit(rndis->config);
	gether_disconnect(&rndis->port);

	usb_ep_disable(rndis->notify);
	rndis->notify->driver_data = NULL;
}

/*-------------------------------------------------------------------------*/

/*
 * This isn't quite the same mechanism as CDC Ethernet, since the
 * notification scheme passes less data, but the same set of link
 * states must be tested.  A key difference is that altsettings are
 * not used to tell whether the link should send packets or not.
 */

/* u_ether link-up callback: report the link speed to the RNDIS core
 * and signal "connected" to the host.
 */
static void rndis_open(struct gether *geth)
{
	struct f_rndis		*rndis = func_to_rndis(&geth->func);
	struct usb_composite_dev *cdev = geth->func.config->cdev;

	DBG(cdev, "%s\n", __func__);

	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
				bitrate(cdev->gadget) / 100);
	rndis_signal_connect(rndis->config);
}

/* u_ether link-down callback: zero the reported speed and signal
 * "disconnected" to the host.
 */
static void rndis_close(struct gether *geth)
{
	struct f_rndis		*rndis = func_to_rndis(&geth->func);

	DBG(geth->func.config->cdev, "%s\n", __func__);

	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
	rndis_signal_disconnect(rndis->config);
}

/*-------------------------------------------------------------------------*/

/* ethernet function driver setup/binding */

/* Bind one RNDIS function instance: allocate the two interface ids,
 * autoconfigure the three endpoints, preallocate the notification
 * request, copy per-speed descriptors, and register with the RNDIS
 * core.  Returns 0 or a negative errno; the fail path releases
 * whatever was acquired.
 */
static int
rndis_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_rndis		*rndis = func_to_rndis(f);
	int			status;
	struct usb_ep		*ep;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	rndis->ctrl_id = status;
	rndis_iad_descriptor.bFirstInterface = status;

	rndis_control_intf.bInterfaceNumber = status;
	rndis_union_desc.bMasterInterface0 = status;

	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	rndis->data_id = status;

	rndis_data_intf.bInterfaceNumber = status;
	rndis_union_desc.bSlaveInterface0 = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
	if (!ep)
		goto fail;
	rndis->port.in_ep = ep;
	ep->driver_data = cdev;	/* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
	if (!ep)
		goto fail;
	rndis->port.out_ep = ep;
	ep->driver_data = cdev;	/* claim */

	/* NOTE:  a status/notification endpoint is, strictly speaking,
	 * optional.  We don't treat it that way though!  It's simpler,
	 * and some newer profiles don't treat it as optional.
	 */
	ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
	if (!ep)
		goto fail;
	rndis->notify = ep;
	ep->driver_data = cdev;	/* claim */

	status = -ENOMEM;

	/* allocate notification request and buffer */
	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
	if (!rndis->notify_req)
		goto fail;
	rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
	if (!rndis->notify_req->buf)
		goto fail;
	rndis->notify_req->length = STATUS_BYTECOUNT;
	rndis->notify_req->context = rndis;
	rndis->notify_req->complete = rndis_response_complete;

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(eth_fs_function);
	if (!f->descriptors)
		goto fail;

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		hs_in_desc.bEndpointAddress =
				fs_in_desc.bEndpointAddress;
		hs_out_desc.bEndpointAddress =
				fs_out_desc.bEndpointAddress;
		hs_notify_desc.bEndpointAddress =
				fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
		if (!f->hs_descriptors)
			goto fail;
	}

	if (gadget_is_superspeed(c->cdev->gadget)) {
		ss_in_desc.bEndpointAddress =
				fs_in_desc.bEndpointAddress;
		ss_out_desc.bEndpointAddress =
				fs_out_desc.bEndpointAddress;
		ss_notify_desc.bEndpointAddress =
				fs_notify_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(eth_ss_function);
		if (!f->ss_descriptors)
			goto fail;
	}

	rndis->port.open = rndis_open;
	rndis->port.close = rndis_close;

	status = rndis_register(rndis_response_available, rndis);
	if (status < 0)
		goto fail;
	rndis->config = status;

	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
	rndis_set_host_mac(rndis->config, rndis->ethaddr);

	if (rndis->manufacturer && rndis->vendorID &&
			rndis_set_param_vendor(rndis->config, rndis->vendorID,
					       rndis->manufacturer))
		goto fail;

	/* NOTE:  all that is done without knowing or caring about
	 * the network link ... which is unavailable to this code
	 * until we're activated via set_alt().
	 */

	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
			gadget_is_superspeed(c->cdev->gadget) ? "super" :
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			rndis->port.in_ep->name, rndis->port.out_ep->name,
			rndis->notify->name);
	return 0;

fail:
	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);

	if (rndis->notify_req) {
		kfree(rndis->notify_req->buf);
		usb_ep_free_request(rndis->notify, rndis->notify_req);
	}

	/* we might as well release our claims on endpoints */
	if (rndis->notify)
		rndis->notify->driver_data = NULL;
	if (rndis->port.out_ep->desc)
		rndis->port.out_ep->driver_data = NULL;
	if (rndis->port.in_ep->desc)
		rndis->port.in_ep->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}

/* Undo rndis_bind(): deregister from the RNDIS core, free descriptor
 * copies and the notification request, then free the instance itself.
 */
static void
rndis_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct f_rndis		*rndis = func_to_rndis(f);

	rndis_deregister(rndis->config);
	rndis_exit();
	rndis_string_defs[0].id = 0;

	if (gadget_is_superspeed(c->cdev->gadget))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(c->cdev->gadget))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);

	kfree(rndis->notify_req->buf);
	usb_ep_free_request(rndis->notify, rndis->notify_req);

	kfree(rndis);
}

/* Some controllers can't support RNDIS ... */
static inline bool can_support_rndis(struct usb_configuration *c)
{
	/* everything else is *presumably* fine */
	return true;
}

/**
 * rndis_bind_config_vendor - add an RNDIS network function to a configuration
 * @c: the configuration to support the network link
 * @ethaddr: MAC address the host will see (ETH_ALEN bytes)
 * @vendorID: RNDIS vendor id reported to the host (0 to skip)
 * @manufacturer: manufacturer string for the vendor id (NULL to skip)
 *
 * On the first call this also initialises the RNDIS core and allocates
 * the device-global string ids.  Returns zero on success, else a
 * negative errno.
 */
int
rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
		u32 vendorID, const char *manufacturer)
{
	struct f_rndis	*rndis;
	int		status;

	if (!can_support_rndis(c) || !ethaddr)
		return -EINVAL;

	/* maybe allocate device-global string IDs */
	if (rndis_string_defs[0].id == 0) {

		/* ... and setup RNDIS itself */
		status = rndis_init();
		if (status < 0)
			return status;

		/* control interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		rndis_string_defs[0].id = status;
		rndis_control_intf.iInterface = status;

		/* data interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		rndis_string_defs[1].id = status;
		rndis_data_intf.iInterface = status;

		/* IAD iFunction label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		rndis_string_defs[2].id = status;
		rndis_iad_descriptor.iFunction = status;
	}

	/* allocate and initialize one new instance */
	status = -ENOMEM;
	rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
	if (!rndis)
		goto fail;

	memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
	rndis->vendorID = vendorID;
	rndis->manufacturer = manufacturer;

	/* RNDIS activates when the host changes this filter */
	rndis->port.cdc_filter = 0;

	/* RNDIS has special (and complex) framing */
	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
	rndis->port.wrap = rndis_add_header;
	rndis->port.unwrap = rndis_rm_hdr;

	rndis->port.func.name = "rndis";
	rndis->port.func.strings = rndis_strings;
	/* descriptors are per-instance copies */
	rndis->port.func.bind = rndis_bind;
	rndis->port.func.unbind = rndis_unbind;
	rndis->port.func.set_alt = rndis_set_alt;
	rndis->port.func.setup = rndis_setup;
	rndis->port.func.disable = rndis_disable;

	status = usb_add_function(c, &rndis->port.func);
	if (status) {
		kfree(rndis);
		/* note: kzalloc failure above also lands here, after the
		 * kfree, to undo rndis_init()
		 */
fail:
		rndis_exit();
	}
	return status;
}
+150
drivers/staging/ccg/gadget_chips.h
/*
 * USB device controllers have lots of quirks.  Use these macros in
 * gadget drivers or other code that needs to deal with them, and which
 * autoconfigures instead of using early binding to the hardware.
 *
 * This SHOULD eventually work like the ARM mach_is_*() stuff, driven by
 * some config file that gets updated as new hardware is supported.
 * (And avoiding all runtime comparisons in typical one-choice configs!)
 *
 * NOTE:  some of these controller drivers may not be available yet.
 * Some are available on 2.4 kernels; several are available, but not
 * yet pushed in the 2.6 mainline tree.
 */

#ifndef __GADGET_CHIPS_H
#define __GADGET_CHIPS_H

/*
 * NOTICE: the entries below are alphabetical and should be kept
 * that way.
 *
 * Always be sure to add new entries to the correct position or
 * accept the bashing later.
 *
 * If you have forgotten the alphabetical order let VIM/EMACS
 * do that for you.
 */
/* Each predicate matches on the UDC driver's registered name string. */
#define gadget_is_amd5536udc(g)		(!strcmp("amd5536udc", (g)->name))
#define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
#define gadget_is_atmel_usba(g)		(!strcmp("atmel_usba_udc", (g)->name))
#define gadget_is_bcm63xx(g)		(!strcmp("bcm63xx_udc", (g)->name))
#define gadget_is_ci13xxx_msm(g)	(!strcmp("ci13xxx_msm", (g)->name))
#define gadget_is_ci13xxx_pci(g)	(!strcmp("ci13xxx_pci", (g)->name))
#define gadget_is_dummy(g)		(!strcmp("dummy_udc", (g)->name))
#define gadget_is_dwc3(g)		(!strcmp("dwc3-gadget", (g)->name))
#define gadget_is_fsl_qe(g)		(!strcmp("fsl_qe_udc", (g)->name))
#define gadget_is_fsl_usb2(g)		(!strcmp("fsl-usb2-udc", (g)->name))
#define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
#define gadget_is_imx(g)		(!strcmp("imx_udc", (g)->name))
#define gadget_is_langwell(g)		(!strcmp("langwell_udc", (g)->name))
#define gadget_is_lpc32xx(g)		(!strcmp("lpc32xx_udc", (g)->name))
#define gadget_is_m66592(g)		(!strcmp("m66592_udc", (g)->name))
#define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
#define gadget_is_net2272(g)		(!strcmp("net2272", (g)->name))
#define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
#define gadget_is_omap(g)		(!strcmp("omap_udc", (g)->name))
#define gadget_is_pch(g)		(!strcmp("pch_udc", (g)->name))
#define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
#define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
#define gadget_is_r8a66597(g)		(!strcmp("r8a66597_udc", (g)->name))
#define gadget_is_renesas_usbhs(g)	(!strcmp("renesas_usbhs_udc", (g)->name))
#define gadget_is_s3c2410(g)		(!strcmp("s3c2410_udc", (g)->name))
#define gadget_is_s3c_hsotg(g)		(!strcmp("s3c-hsotg", (g)->name))
#define gadget_is_s3c_hsudc(g)		(!strcmp("s3c-hsudc", (g)->name))

/**
 * usb_gadget_controller_number - support bcdDevice id convention
 * @gadget: the controller being driven
 *
 * Return a 2-digit BCD value associated with the peripheral controller,
 * suitable for use as part of a bcdDevice value, or a negative error code.
 *
 * NOTE:  this convention is purely optional, and has no meaning in terms of
 * any USB specification.  If you want to use a different convention in your
 * gadget driver firmware -- maybe a more formal revision ID -- feel free.
 *
 * Hosts see these bcdDevice numbers, and are allowed (but not encouraged!)
 * to change their behavior accordingly.  For example it might help avoiding
 * some chip bug.
 *
 * Keep this mapping in sync with the gadget_is_*() macros above; the
 * assigned values are historical and must not be renumbered.
 */
static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
{
	if (gadget_is_net2280(gadget))
		return 0x01;
	else if (gadget_is_dummy(gadget))
		return 0x02;
	else if (gadget_is_pxa(gadget))
		return 0x03;
	else if (gadget_is_goku(gadget))
		return 0x06;
	else if (gadget_is_omap(gadget))
		return 0x08;
	else if (gadget_is_pxa27x(gadget))
		return 0x11;
	else if (gadget_is_s3c2410(gadget))
		return 0x12;
	else if (gadget_is_at91(gadget))
		return 0x13;
	else if (gadget_is_imx(gadget))
		return 0x14;
	else if (gadget_is_musbhdrc(gadget))
		return 0x16;
	else if (gadget_is_atmel_usba(gadget))
		return 0x18;
	else if (gadget_is_fsl_usb2(gadget))
		return 0x19;
	else if (gadget_is_amd5536udc(gadget))
		return 0x20;
	else if (gadget_is_m66592(gadget))
		return 0x21;
	else if (gadget_is_fsl_qe(gadget))
		return 0x22;
	else if (gadget_is_ci13xxx_pci(gadget))
		return 0x23;
	else if (gadget_is_langwell(gadget))
		return 0x24;
	else if (gadget_is_r8a66597(gadget))
		return 0x25;
	else if (gadget_is_s3c_hsotg(gadget))
		return 0x26;
	else if (gadget_is_pch(gadget))
		return 0x27;
	else if (gadget_is_ci13xxx_msm(gadget))
		return 0x28;
	else if (gadget_is_renesas_usbhs(gadget))
		return 0x29;
	else if (gadget_is_s3c_hsudc(gadget))
		return 0x30;
	else if (gadget_is_net2272(gadget))
		return 0x31;
	else if (gadget_is_dwc3(gadget))
		return 0x32;
	else if (gadget_is_lpc32xx(gadget))
		return 0x33;
	else if (gadget_is_bcm63xx(gadget))
		return 0x34;

	return -ENOENT;
}


/**
 * gadget_supports_altsettings - return true if altsettings work
 * @gadget: the gadget in question
 */
static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
{
	/* PXA 21x/25x/26x has no altsettings at all */
	if (gadget_is_pxa(gadget))
		return false;

	/* PXA 27x and 3xx have *broken* altsetting support */
	if (gadget_is_pxa27x(gadget))
		return false;

	/* Everything else is *presumably* fine ... */
	return true;
}

#endif /* __GADGET_CHIPS_H */
+47
drivers/staging/ccg/ndis.h
/*
 * ndis.h
 *
 * ntddndis.h modified by Benedikt Spranger <b.spranger@pengutronix.de>
 *
 * Thanks to the cygwin development team,
 * espacially to Casper S. Hornstrup <chorns@users.sourceforge.net>
 *
 * THIS SOFTWARE IS NOT COPYRIGHTED
 *
 * This source code is offered for use in the public domain. You may
 * use, modify or distribute it freely.
 */

#ifndef _LINUX_NDIS_H
#define _LINUX_NDIS_H

/* Device power states used by the OID_PNP_* power-management queries;
 * mirrors the Windows NDIS_DEVICE_POWER_STATE enumeration (D0 = fully
 * powered ... D3 = off).
 */
enum NDIS_DEVICE_POWER_STATE {
	NdisDeviceStateUnspecified = 0,
	NdisDeviceStateD0,
	NdisDeviceStateD1,
	NdisDeviceStateD2,
	NdisDeviceStateD3,
	NdisDeviceStateMaximum
};

/* Minimum device power state from which each wake-up method can wake
 * the host (magic packet / pattern match / link change).
 */
struct NDIS_PM_WAKE_UP_CAPABILITIES {
	enum NDIS_DEVICE_POWER_STATE MinMagicPacketWakeUp;
	enum NDIS_DEVICE_POWER_STATE MinPatternWakeUp;
	enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp;
};

/* Reply body for OID_PNP_CAPABILITIES; fields are little-endian on the
 * wire, hence __le32.
 */
struct NDIS_PNP_CAPABILITIES {
	__le32 Flags;
	struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities;
};

/* Wake-up packet pattern descriptor used with OID_PNP_ADD_WAKE_UP_PATTERN;
 * offsets/sizes locate the mask and pattern data that follow this header.
 */
struct NDIS_PM_PACKET_PATTERN {
	__le32 Priority;
	__le32 Reserved;
	__le32 MaskSize;
	__le32 PatternOffset;
	__le32 PatternSize;
	__le32 PatternFlags;
};

#endif /* _LINUX_NDIS_H */
+1175
drivers/staging/ccg/rndis.c
··· 1 + /* 2 + * RNDIS MSG parser 3 + * 4 + * Authors: Benedikt Spranger, Pengutronix 5 + * Robert Schwebel, Pengutronix 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This software was originally developed in conformance with 12 + * Microsoft's Remote NDIS Specification License Agreement. 13 + * 14 + * 03/12/2004 Kai-Uwe Bloem <linux-development@auerswald.de> 15 + * Fixed message length bug in init_response 16 + * 17 + * 03/25/2004 Kai-Uwe Bloem <linux-development@auerswald.de> 18 + * Fixed rndis_rm_hdr length bug. 19 + * 20 + * Copyright (C) 2004 by David Brownell 21 + * updates to merge with Linux 2.6, better match RNDIS spec 22 + */ 23 + 24 + #include <linux/module.h> 25 + #include <linux/moduleparam.h> 26 + #include <linux/kernel.h> 27 + #include <linux/errno.h> 28 + #include <linux/init.h> 29 + #include <linux/list.h> 30 + #include <linux/proc_fs.h> 31 + #include <linux/slab.h> 32 + #include <linux/seq_file.h> 33 + #include <linux/netdevice.h> 34 + 35 + #include <asm/io.h> 36 + #include <asm/byteorder.h> 37 + #include <asm/unaligned.h> 38 + 39 + 40 + #undef VERBOSE_DEBUG 41 + 42 + #include "rndis.h" 43 + 44 + 45 + /* The driver for your USB chip needs to support ep0 OUT to work with 46 + * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional). 47 + * 48 + * Windows hosts need an INF file like Documentation/usb/linux.inf 49 + * and will be happier if you provide the host_addr module parameter. 
50 + */ 51 + 52 + #if 0 53 + static int rndis_debug = 0; 54 + module_param (rndis_debug, int, 0); 55 + MODULE_PARM_DESC (rndis_debug, "enable debugging"); 56 + #else 57 + #define rndis_debug 0 58 + #endif 59 + 60 + #define RNDIS_MAX_CONFIGS 1 61 + 62 + 63 + static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS]; 64 + 65 + /* Driver Version */ 66 + static const __le32 rndis_driver_version = cpu_to_le32(1); 67 + 68 + /* Function Prototypes */ 69 + static rndis_resp_t *rndis_add_response(int configNr, u32 length); 70 + 71 + 72 + /* supported OIDs */ 73 + static const u32 oid_supported_list[] = 74 + { 75 + /* the general stuff */ 76 + RNDIS_OID_GEN_SUPPORTED_LIST, 77 + RNDIS_OID_GEN_HARDWARE_STATUS, 78 + RNDIS_OID_GEN_MEDIA_SUPPORTED, 79 + RNDIS_OID_GEN_MEDIA_IN_USE, 80 + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, 81 + RNDIS_OID_GEN_LINK_SPEED, 82 + RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE, 83 + RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE, 84 + RNDIS_OID_GEN_VENDOR_ID, 85 + RNDIS_OID_GEN_VENDOR_DESCRIPTION, 86 + RNDIS_OID_GEN_VENDOR_DRIVER_VERSION, 87 + RNDIS_OID_GEN_CURRENT_PACKET_FILTER, 88 + RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE, 89 + RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, 90 + RNDIS_OID_GEN_PHYSICAL_MEDIUM, 91 + 92 + /* the statistical stuff */ 93 + RNDIS_OID_GEN_XMIT_OK, 94 + RNDIS_OID_GEN_RCV_OK, 95 + RNDIS_OID_GEN_XMIT_ERROR, 96 + RNDIS_OID_GEN_RCV_ERROR, 97 + RNDIS_OID_GEN_RCV_NO_BUFFER, 98 + #ifdef RNDIS_OPTIONAL_STATS 99 + RNDIS_OID_GEN_DIRECTED_BYTES_XMIT, 100 + RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT, 101 + RNDIS_OID_GEN_MULTICAST_BYTES_XMIT, 102 + RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT, 103 + RNDIS_OID_GEN_BROADCAST_BYTES_XMIT, 104 + RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT, 105 + RNDIS_OID_GEN_DIRECTED_BYTES_RCV, 106 + RNDIS_OID_GEN_DIRECTED_FRAMES_RCV, 107 + RNDIS_OID_GEN_MULTICAST_BYTES_RCV, 108 + RNDIS_OID_GEN_MULTICAST_FRAMES_RCV, 109 + RNDIS_OID_GEN_BROADCAST_BYTES_RCV, 110 + RNDIS_OID_GEN_BROADCAST_FRAMES_RCV, 111 + RNDIS_OID_GEN_RCV_CRC_ERROR, 112 + RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH, 113 
+ #endif /* RNDIS_OPTIONAL_STATS */ 114 + 115 + /* mandatory 802.3 */ 116 + /* the general stuff */ 117 + RNDIS_OID_802_3_PERMANENT_ADDRESS, 118 + RNDIS_OID_802_3_CURRENT_ADDRESS, 119 + RNDIS_OID_802_3_MULTICAST_LIST, 120 + RNDIS_OID_802_3_MAC_OPTIONS, 121 + RNDIS_OID_802_3_MAXIMUM_LIST_SIZE, 122 + 123 + /* the statistical stuff */ 124 + RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT, 125 + RNDIS_OID_802_3_XMIT_ONE_COLLISION, 126 + RNDIS_OID_802_3_XMIT_MORE_COLLISIONS, 127 + #ifdef RNDIS_OPTIONAL_STATS 128 + RNDIS_OID_802_3_XMIT_DEFERRED, 129 + RNDIS_OID_802_3_XMIT_MAX_COLLISIONS, 130 + RNDIS_OID_802_3_RCV_OVERRUN, 131 + RNDIS_OID_802_3_XMIT_UNDERRUN, 132 + RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE, 133 + RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST, 134 + RNDIS_OID_802_3_XMIT_LATE_COLLISIONS, 135 + #endif /* RNDIS_OPTIONAL_STATS */ 136 + 137 + #ifdef RNDIS_PM 138 + /* PM and wakeup are "mandatory" for USB, but the RNDIS specs 139 + * don't say what they mean ... and the NDIS specs are often 140 + * confusing and/or ambiguous in this context. (That is, more 141 + * so than their specs for the other OIDs.) 142 + * 143 + * FIXME someone who knows what these should do, please 144 + * implement them! 
/* NDIS Functions */

/*
 * gen_ndis_query_resp - build the reply body for an RNDIS QUERY (OID read)
 * @configNr: index into rndis_per_dev_params[]
 * @OID:      the object identifier being queried
 * @buf:      host-supplied information buffer (may be empty)
 * @buf_len:  length of @buf in bytes
 * @r:        response slot whose ->buf already holds a rndis_query_cmplt_type
 *
 * Writes the OID's value right after the completion header in r->buf and
 * fills in the header's length fields.  Returns 0 on success or -ENOTSUPP
 * for OIDs not handled here (the caller then reports
 * RNDIS_STATUS_NOT_SUPPORTED to the host).
 */
static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
		unsigned buf_len, rndis_resp_t *r)
{
	int retval = -ENOTSUPP;
	u32 length = 4;	/* usually */
	__le32 *outbuf;
	int i, count;
	rndis_query_cmplt_type *resp;
	struct net_device *net;
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats;

	if (!r) return -ENOMEM;
	resp = (rndis_query_cmplt_type *)r->buf;

	if (!resp) return -ENOMEM;

	if (buf_len && rndis_debug > 1) {
		pr_debug("query OID %08x value, len %d:\n", OID, buf_len);
		for (i = 0; i < buf_len; i += 16) {
			pr_debug("%03d: %08x %08x %08x %08x\n", i,
				get_unaligned_le32(&buf[i]),
				get_unaligned_le32(&buf[i + 4]),
				get_unaligned_le32(&buf[i + 8]),
				get_unaligned_le32(&buf[i + 12]));
		}
	}

	/* response goes here, right after the header */
	outbuf = (__le32 *)&resp[1];
	resp->InformationBufferOffset = cpu_to_le32(16);

	net = rndis_per_dev_params[configNr].dev;
	stats = dev_get_stats(net, &temp);

	switch (OID) {

	/* general oids (table 4-1) */

	/* mandatory */
	case RNDIS_OID_GEN_SUPPORTED_LIST:
		pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__);
		length = sizeof(oid_supported_list);
		count  = length / sizeof(u32);
		for (i = 0; i < count; i++)
			outbuf[i] = cpu_to_le32(oid_supported_list[i]);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_HARDWARE_STATUS:
		pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__);
		/* Bogus question!
		 * Hardware must be ready to receive high level protocols.
		 * BTW:
		 * reddite ergo quae sunt Caesaris Caesari
		 * et quae sunt Dei Deo!
		 */
		*outbuf = cpu_to_le32(0);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MEDIA_SUPPORTED:
		pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__);
		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MEDIA_IN_USE:
		pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__);
		/* one medium, one transport... (maybe you do it better) */
		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			*outbuf = cpu_to_le32(
				rndis_per_dev_params[configNr].dev->mtu);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_LINK_SPEED:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__);
		if (rndis_per_dev_params[configNr].media_state
				== RNDIS_MEDIA_STATE_DISCONNECTED)
			*outbuf = cpu_to_le32(0);
		else
			*outbuf = cpu_to_le32(
				rndis_per_dev_params[configNr].speed);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			*outbuf = cpu_to_le32(
				rndis_per_dev_params[configNr].dev->mtu);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			*outbuf = cpu_to_le32(
				rndis_per_dev_params[configNr].dev->mtu);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_VENDOR_ID:
		pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__);
		*outbuf = cpu_to_le32(
			rndis_per_dev_params[configNr].vendorID);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_VENDOR_DESCRIPTION:
		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__);
		if (rndis_per_dev_params[configNr].vendorDescr) {
			length = strlen(rndis_per_dev_params[configNr].
					vendorDescr);
			memcpy(outbuf,
				rndis_per_dev_params[configNr].vendorDescr,
				length);
		} else {
			outbuf[0] = 0;
		}
		retval = 0;
		break;

	case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION:
		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
		/* Created as LE */
		*outbuf = rndis_driver_version;
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
		pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
		*outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE:
		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
		*outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
						.media_state);
		retval = 0;
		break;

	case RNDIS_OID_GEN_PHYSICAL_MEDIUM:
		pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__);
		*outbuf = cpu_to_le32(0);
		retval = 0;
		break;

	/* The RNDIS specification is incomplete/wrong.   Some versions
	 * of MS-Windows expect OIDs that aren't specified there.  Other
	 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
	 */
	case RNDIS_OID_GEN_MAC_OPTIONS:		/* from WinME */
		pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__);
		*outbuf = cpu_to_le32(
			  RNDIS_MAC_OPTION_RECEIVE_SERIALIZED
			| RNDIS_MAC_OPTION_FULL_DUPLEX);
		retval = 0;
		break;

	/* statistics OIDs (table 4-2) */

	/* mandatory */
	case RNDIS_OID_GEN_XMIT_OK:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__);
		if (stats) {
			*outbuf = cpu_to_le32(stats->tx_packets
				- stats->tx_errors - stats->tx_dropped);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_RCV_OK:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__);
		if (stats) {
			*outbuf = cpu_to_le32(stats->rx_packets
				- stats->rx_errors - stats->rx_dropped);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_XMIT_ERROR:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
		if (stats) {
			*outbuf = cpu_to_le32(stats->tx_errors);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_RCV_ERROR:
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
		if (stats) {
			*outbuf = cpu_to_le32(stats->rx_errors);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_GEN_RCV_NO_BUFFER:
		pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
		if (stats) {
			*outbuf = cpu_to_le32(stats->rx_dropped);
			retval = 0;
		}
		break;

	/* ieee802.3 OIDs (table 4-3) */

	/* mandatory */
	case RNDIS_OID_802_3_PERMANENT_ADDRESS:
		pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			length = ETH_ALEN;
			memcpy(outbuf,
				rndis_per_dev_params[configNr].host_mac,
				length);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_802_3_CURRENT_ADDRESS:
		pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__);
		if (rndis_per_dev_params[configNr].dev) {
			length = ETH_ALEN;
			memcpy(outbuf,
				rndis_per_dev_params [configNr].host_mac,
				length);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_802_3_MULTICAST_LIST:
		pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
		/* Multicast base address only */
		*outbuf = cpu_to_le32(0xE0000000);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE:
		pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
		/* Multicast base address only */
		*outbuf = cpu_to_le32(1);
		retval = 0;
		break;

	case RNDIS_OID_802_3_MAC_OPTIONS:
		pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__);
		*outbuf = cpu_to_le32(0);
		retval = 0;
		break;

	/* ieee802.3 statistics OIDs (table 4-4) */

	/* mandatory */
	case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
		pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
		if (stats) {
			*outbuf = cpu_to_le32(stats->rx_frame_errors);
			retval = 0;
		}
		break;

	/* mandatory */
	case RNDIS_OID_802_3_XMIT_ONE_COLLISION:
		pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__);
		*outbuf = cpu_to_le32(0);
		retval = 0;
		break;

	/* mandatory */
	case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS:
		pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
		*outbuf = cpu_to_le32(0);
		retval = 0;
		break;

	default:
		pr_warning("%s: query unknown OID 0x%08X\n",
			 __func__, OID);
	}
	if (retval < 0)
		length = 0;

	/* finalize the completion header: payload length, then total
	 * message length (header + payload).
	 */
	resp->InformationBufferLength = cpu_to_le32(length);
	r->length = length + sizeof(*resp);
	resp->MessageLength = cpu_to_le32(r->length);
	return retval;
}

/*
 * gen_ndis_set_resp - act on an RNDIS SET (OID write) from the host
 * @configNr: index into rndis_per_dev_params[]
 * @OID:      the object identifier being written
 * @buf:      host-supplied value
 * @buf_len:  length of @buf in bytes
 * @r:        response slot (only validated here; the caller fills it in)
 *
 * Only the packet filter and the multicast list are accepted; everything
 * else returns -ENOTSUPP.
 */
static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
			rndis_resp_t *r)
{
	rndis_set_cmplt_type *resp;
	int i, retval = -ENOTSUPP;
	struct rndis_params *params;

	if (!r)
		return -ENOMEM;
	resp = (rndis_set_cmplt_type *)r->buf;
	if (!resp)
		return -ENOMEM;

	if (buf_len && rndis_debug > 1) {
		pr_debug("set OID %08x value, len %d:\n", OID, buf_len);
		for (i = 0; i < buf_len; i += 16) {
			pr_debug("%03d: %08x %08x %08x %08x\n", i,
				get_unaligned_le32(&buf[i]),
				get_unaligned_le32(&buf[i + 4]),
				get_unaligned_le32(&buf[i + 8]),
				get_unaligned_le32(&buf[i + 12]));
		}
	}

	params = &rndis_per_dev_params[configNr];
	switch (OID) {
	case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:

		/* these NDIS_PACKET_TYPE_* bitflags are shared with
		 * cdc_filter; it's not RNDIS-specific
		 * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
		 *	PROMISCUOUS, DIRECTED,
		 *	MULTICAST, ALL_MULTICAST, BROADCAST
		 */
		*params->filter = (u16)get_unaligned_le32(buf);
		pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n",
			__func__, *params->filter);

		/* this call has a significant side effect:  it's
		 * what makes the packet flow start and stop, like
		 * activating the CDC Ethernet altsetting.
		 */
		retval = 0;
		if (*params->filter) {
			params->state = RNDIS_DATA_INITIALIZED;
			netif_carrier_on(params->dev);
			if (netif_running(params->dev))
				netif_wake_queue(params->dev);
		} else {
			params->state = RNDIS_INITIALIZED;
			netif_carrier_off(params->dev);
			netif_stop_queue(params->dev);
		}
		break;

	case RNDIS_OID_802_3_MULTICAST_LIST:
		/* I think we can ignore this */
		pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
		retval = 0;
		break;

	default:
		pr_warning("%s: set unknown OID 0x%08X, size %d\n",
			 __func__, OID, buf_len);
	}

	return retval;
}
/*
 * Response Functions
 */

/* Build the REMOTE_NDIS_INITIALIZE_CMPLT reply: protocol version, medium,
 * and the largest single USB transfer we accept (MTU + ethernet header +
 * RNDIS packet header + slack).  Returns 0 or a negative errno.
 */
static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
{
	rndis_init_cmplt_type *resp;
	rndis_resp_t *r;
	struct rndis_params *params = rndis_per_dev_params + configNr;

	if (!params->dev)
		return -ENOTSUPP;

	r = rndis_add_response(configNr, sizeof(rndis_init_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_init_cmplt_type *)r->buf;

	resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C);
	resp->MessageLength = cpu_to_le32(52);
	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
	resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION);
	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
	resp->MaxPacketsPerTransfer = cpu_to_le32(1);
	resp->MaxTransferSize = cpu_to_le32(
		  params->dev->mtu
		+ sizeof(struct ethhdr)
		+ sizeof(struct rndis_packet_msg_type)
		+ 22);
	resp->PacketAlignmentFactor = cpu_to_le32(0);
	resp->AFListOffset = cpu_to_le32(0);
	resp->AFListSize = cpu_to_le32(0);

	/* tell the gadget function a response is ready to be sent */
	params->resp_avail(params->v);
	return 0;
}

/* Build the REMOTE_NDIS_QUERY_CMPLT reply by delegating the per-OID work
 * to gen_ndis_query_resp().  Returns 0 or a negative errno.
 */
static int rndis_query_response(int configNr, rndis_query_msg_type *buf)
{
	rndis_query_cmplt_type *resp;
	rndis_resp_t *r;
	struct rndis_params *params = rndis_per_dev_params + configNr;

	/* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */
	if (!params->dev)
		return -ENOTSUPP;

	/*
	 * we need more memory:
	 * gen_ndis_query_resp expects enough space for
	 * rndis_query_cmplt_type followed by data.
	 * oid_supported_list is the largest data reply
	 */
	r = rndis_add_response(configNr,
		sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_query_cmplt_type *)r->buf;

	resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C);
	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */

	/* the information buffer starts 8 bytes into the message body,
	 * at buf->InformationBufferOffset from there (RNDIS wire layout)
	 */
	if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID),
			le32_to_cpu(buf->InformationBufferOffset)
					+ 8 + (u8 *)buf,
			le32_to_cpu(buf->InformationBufferLength),
			r)) {
		/* OID not supported */
		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
		resp->MessageLength = cpu_to_le32(sizeof *resp);
		resp->InformationBufferLength = cpu_to_le32(0);
		resp->InformationBufferOffset = cpu_to_le32(0);
	} else
		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);

	params->resp_avail(params->v);
	return 0;
}

/* Build the REMOTE_NDIS_SET_CMPLT reply by delegating the per-OID work
 * to gen_ndis_set_resp().  Returns 0 or a negative errno.
 */
static int rndis_set_response(int configNr, rndis_set_msg_type *buf)
{
	u32 BufLength, BufOffset;
	rndis_set_cmplt_type *resp;
	rndis_resp_t *r;
	struct rndis_params *params = rndis_per_dev_params + configNr;

	r = rndis_add_response(configNr, sizeof(rndis_set_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_set_cmplt_type *)r->buf;

	BufLength = le32_to_cpu(buf->InformationBufferLength);
	BufOffset = le32_to_cpu(buf->InformationBufferOffset);

#ifdef	VERBOSE_DEBUG
	/* NOTE(review): 'i' is not declared in this function, so this
	 * debug block looks like it would not compile when VERBOSE_DEBUG
	 * is defined -- verify before enabling.
	 */
	pr_debug("%s: Length: %d\n", __func__, BufLength);
	pr_debug("%s: Offset: %d\n", __func__, BufOffset);
	pr_debug("%s: InfoBuffer: ", __func__);

	for (i = 0; i < BufLength; i++) {
		pr_debug("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
	}

	pr_debug("\n");
#endif

	resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C);
	resp->MessageLength = cpu_to_le32(16);
	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
	if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID),
			((u8 *)buf) + 8 + BufOffset, BufLength, r))
		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
	else
		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);

	params->resp_avail(params->v);
	return 0;
}

/* Build the REMOTE_NDIS_RESET_CMPLT reply; AddressingReset tells the host
 * to resend its addressing (filter, multicast) information.
 */
static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
{
	rndis_reset_cmplt_type *resp;
	rndis_resp_t *r;
	struct rndis_params *params = rndis_per_dev_params + configNr;

	r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_reset_cmplt_type *)r->buf;

	resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C);
	resp->MessageLength = cpu_to_le32(16);
	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
	/* resent information */
	resp->AddressingReset = cpu_to_le32(1);

	params->resp_avail(params->v);
	return 0;
}

/* Echo a REMOTE_NDIS_KEEPALIVE_CMPLT back to the host. */
static int rndis_keepalive_response(int configNr,
				rndis_keepalive_msg_type *buf)
{
	rndis_keepalive_cmplt_type *resp;
	rndis_resp_t *r;
	struct rndis_params *params = rndis_per_dev_params + configNr;

	/* host "should" check only in RNDIS_DATA_INITIALIZED state */

	r = rndis_add_response(configNr, sizeof(rndis_keepalive_cmplt_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_keepalive_cmplt_type *)r->buf;

	resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
	resp->MessageLength = cpu_to_le32(16);
	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);

	params->resp_avail(params->v);
	return 0;
}
/*
 * Device to Host Comunication
 */

/* Queue an unsolicited REMOTE_NDIS_INDICATE_STATUS_MSG carrying @status
 * (e.g. media connect/disconnect).  Only valid once the link is
 * initialized.
 */
static int rndis_indicate_status_msg(int configNr, u32 status)
{
	rndis_indicate_status_msg_type *resp;
	rndis_resp_t *r;
	struct rndis_params *params = rndis_per_dev_params + configNr;

	if (params->state == RNDIS_UNINITIALIZED)
		return -ENOTSUPP;

	r = rndis_add_response(configNr,
				sizeof(rndis_indicate_status_msg_type));
	if (!r)
		return -ENOMEM;
	resp = (rndis_indicate_status_msg_type *)r->buf;

	resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE);
	resp->MessageLength = cpu_to_le32(20);
	resp->Status = cpu_to_le32(status);
	resp->StatusBufferLength = cpu_to_le32(0);
	resp->StatusBufferOffset = cpu_to_le32(0);

	params->resp_avail(params->v);
	return 0;
}

/* Mark the link up and tell the host the media is connected. */
int rndis_signal_connect(int configNr)
{
	rndis_per_dev_params[configNr].media_state
			= RNDIS_MEDIA_STATE_CONNECTED;
	return rndis_indicate_status_msg(configNr,
					RNDIS_STATUS_MEDIA_CONNECT);
}

/* Mark the link down and tell the host the media is disconnected. */
int rndis_signal_disconnect(int configNr)
{
	rndis_per_dev_params[configNr].media_state
			= RNDIS_MEDIA_STATE_DISCONNECTED;
	return rndis_indicate_status_msg(configNr,
					RNDIS_STATUS_MEDIA_DISCONNECT);
}

/* Reset the instance to the uninitialized state and discard any replies
 * that are still queued for the host.
 */
void rndis_uninit(int configNr)
{
	u8 *buf;
	u32 length;

	if (configNr >= RNDIS_MAX_CONFIGS)
		return;
	rndis_per_dev_params[configNr].state = RNDIS_UNINITIALIZED;

	/* drain the response queue */
	while ((buf = rndis_get_next_response(configNr, &length)))
		rndis_free_response(configNr, buf);
}

/* Record the MAC address reported to the host for 802.3 address OIDs.
 * The pointed-to storage must outlive this RNDIS instance.
 */
void rndis_set_host_mac(int configNr, const u8 *addr)
{
	rndis_per_dev_params[configNr].host_mac = addr;
}

/*
 * Message Parser
 */

/* Dispatch one RNDIS control message from the host to the matching
 * response builder.  Returns 0 on success, -ENOTSUPP for unknown
 * message types, or a negative errno.
 */
int rndis_msg_parser(u8 configNr, u8 *buf)
{
	u32 MsgType, MsgLength;
	__le32 *tmp;
	struct rndis_params *params;

	if (!buf)
		return -ENOMEM;

	/* every RNDIS message starts with LE32 MessageType, MessageLength */
	tmp = (__le32 *)buf;
	MsgType   = get_unaligned_le32(tmp++);
	MsgLength = get_unaligned_le32(tmp++);

	if (configNr >= RNDIS_MAX_CONFIGS)
		return -ENOTSUPP;
	params = &rndis_per_dev_params[configNr];

	/* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for
	 * rx/tx statistics and link status, in addition to KEEPALIVE traffic
	 * and normal HC level polling to see if there's any IN traffic.
	 */

	/* For USB: responses may take up to 10 seconds */
	switch (MsgType) {
	case RNDIS_MSG_INIT:
		pr_debug("%s: RNDIS_MSG_INIT\n",
			__func__);
		params->state = RNDIS_INITIALIZED;
		return rndis_init_response(configNr,
					(rndis_init_msg_type *)buf);

	case RNDIS_MSG_HALT:
		pr_debug("%s: RNDIS_MSG_HALT\n",
			__func__);
		params->state = RNDIS_UNINITIALIZED;
		if (params->dev) {
			netif_carrier_off(params->dev);
			netif_stop_queue(params->dev);
		}
		return 0;

	case RNDIS_MSG_QUERY:
		return rndis_query_response(configNr,
					(rndis_query_msg_type *)buf);

	case RNDIS_MSG_SET:
		return rndis_set_response(configNr,
					(rndis_set_msg_type *)buf);

	case RNDIS_MSG_RESET:
		pr_debug("%s: RNDIS_MSG_RESET\n",
			__func__);
		return rndis_reset_response(configNr,
					(rndis_reset_msg_type *)buf);

	case RNDIS_MSG_KEEPALIVE:
		/* For USB: host does this every 5 seconds */
		if (rndis_debug > 1)
			pr_debug("%s: RNDIS_MSG_KEEPALIVE\n",
				__func__);
		return rndis_keepalive_response(configNr,
						(rndis_keepalive_msg_type *)
						buf);

	default:
		/* At least Windows XP emits some undefined RNDIS messages.
		 * In one case those messages seemed to relate to the host
		 * suspending itself.
		 */
		pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
			__func__, MsgType, MsgLength);
		print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
				     buf, MsgLength);
		break;
	}

	return -ENOTSUPP;
}

/* Claim a free RNDIS instance and register the "response available"
 * callback.  Returns the instance number (>= 0) or a negative errno.
 */
int rndis_register(void (*resp_avail)(void *v), void *v)
{
	u8 i;

	if (!resp_avail)
		return -EINVAL;

	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
		if (!rndis_per_dev_params[i].used) {
			rndis_per_dev_params[i].used = 1;
			rndis_per_dev_params[i].resp_avail = resp_avail;
			rndis_per_dev_params[i].v = v;
			pr_debug("%s: configNr = %d\n", __func__, i);
			return i;
		}
	}
	pr_debug("failed\n");

	return -ENODEV;
}

/* Release an instance claimed by rndis_register(). */
void rndis_deregister(int configNr)
{
	pr_debug("%s:\n", __func__);

	if (configNr >= RNDIS_MAX_CONFIGS) return;
	rndis_per_dev_params[configNr].used = 0;
}

/* Bind the network device and the shared CDC packet-filter word to an
 * instance.  Returns 0, -EINVAL for a NULL device, or -1 for a bad index.
 */
int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
{
	pr_debug("%s:\n", __func__);
	if (!dev)
		return -EINVAL;
	if (configNr >= RNDIS_MAX_CONFIGS) return -1;

	rndis_per_dev_params[configNr].dev = dev;
	rndis_per_dev_params[configNr].filter = cdc_filter;

	return 0;
}

/* Set the vendor ID/description reported via the vendor OIDs.  The
 * description string must outlive this instance.  Returns 0 or -1.
 */
int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr)
{
	pr_debug("%s:\n", __func__);
	if (!vendorDescr) return -1;
	if (configNr >= RNDIS_MAX_CONFIGS) return -1;

	rndis_per_dev_params[configNr].vendorID = vendorID;
	rndis_per_dev_params[configNr].vendorDescr = vendorDescr;

	return 0;
}
+ rndis_per_dev_params[configNr].vendorDescr = vendorDescr; 924 + 925 + return 0; 926 + } 927 + 928 + int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed) 929 + { 930 + pr_debug("%s: %u %u\n", __func__, medium, speed); 931 + if (configNr >= RNDIS_MAX_CONFIGS) return -1; 932 + 933 + rndis_per_dev_params[configNr].medium = medium; 934 + rndis_per_dev_params[configNr].speed = speed; 935 + 936 + return 0; 937 + } 938 + 939 + void rndis_add_hdr(struct sk_buff *skb) 940 + { 941 + struct rndis_packet_msg_type *header; 942 + 943 + if (!skb) 944 + return; 945 + header = (void *)skb_push(skb, sizeof(*header)); 946 + memset(header, 0, sizeof *header); 947 + header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET); 948 + header->MessageLength = cpu_to_le32(skb->len); 949 + header->DataOffset = cpu_to_le32(36); 950 + header->DataLength = cpu_to_le32(skb->len - sizeof(*header)); 951 + } 952 + 953 + void rndis_free_response(int configNr, u8 *buf) 954 + { 955 + rndis_resp_t *r; 956 + struct list_head *act, *tmp; 957 + 958 + list_for_each_safe(act, tmp, 959 + &(rndis_per_dev_params[configNr].resp_queue)) 960 + { 961 + r = list_entry(act, rndis_resp_t, list); 962 + if (r && r->buf == buf) { 963 + list_del(&r->list); 964 + kfree(r); 965 + } 966 + } 967 + } 968 + 969 + u8 *rndis_get_next_response(int configNr, u32 *length) 970 + { 971 + rndis_resp_t *r; 972 + struct list_head *act, *tmp; 973 + 974 + if (!length) return NULL; 975 + 976 + list_for_each_safe(act, tmp, 977 + &(rndis_per_dev_params[configNr].resp_queue)) 978 + { 979 + r = list_entry(act, rndis_resp_t, list); 980 + if (!r->send) { 981 + r->send = 1; 982 + *length = r->length; 983 + return r->buf; 984 + } 985 + } 986 + 987 + return NULL; 988 + } 989 + 990 + static rndis_resp_t *rndis_add_response(int configNr, u32 length) 991 + { 992 + rndis_resp_t *r; 993 + 994 + /* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... 
*/ 995 + r = kmalloc(sizeof(rndis_resp_t) + length, GFP_ATOMIC); 996 + if (!r) return NULL; 997 + 998 + r->buf = (u8 *)(r + 1); 999 + r->length = length; 1000 + r->send = 0; 1001 + 1002 + list_add_tail(&r->list, 1003 + &(rndis_per_dev_params[configNr].resp_queue)); 1004 + return r; 1005 + } 1006 + 1007 + int rndis_rm_hdr(struct gether *port, 1008 + struct sk_buff *skb, 1009 + struct sk_buff_head *list) 1010 + { 1011 + /* tmp points to a struct rndis_packet_msg_type */ 1012 + __le32 *tmp = (void *)skb->data; 1013 + 1014 + /* MessageType, MessageLength */ 1015 + if (cpu_to_le32(RNDIS_MSG_PACKET) 1016 + != get_unaligned(tmp++)) { 1017 + dev_kfree_skb_any(skb); 1018 + return -EINVAL; 1019 + } 1020 + tmp++; 1021 + 1022 + /* DataOffset, DataLength */ 1023 + if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) { 1024 + dev_kfree_skb_any(skb); 1025 + return -EOVERFLOW; 1026 + } 1027 + skb_trim(skb, get_unaligned_le32(tmp++)); 1028 + 1029 + skb_queue_tail(list, skb); 1030 + return 0; 1031 + } 1032 + 1033 + #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1034 + 1035 + static int rndis_proc_show(struct seq_file *m, void *v) 1036 + { 1037 + rndis_params *param = m->private; 1038 + 1039 + seq_printf(m, 1040 + "Config Nr. %d\n" 1041 + "used : %s\n" 1042 + "state : %s\n" 1043 + "medium : 0x%08X\n" 1044 + "speed : %d\n" 1045 + "cable : %s\n" 1046 + "vendor ID : 0x%08X\n" 1047 + "vendor : %s\n", 1048 + param->confignr, (param->used) ? "y" : "n", 1049 + ({ char *s = "?"; 1050 + switch (param->state) { 1051 + case RNDIS_UNINITIALIZED: 1052 + s = "RNDIS_UNINITIALIZED"; break; 1053 + case RNDIS_INITIALIZED: 1054 + s = "RNDIS_INITIALIZED"; break; 1055 + case RNDIS_DATA_INITIALIZED: 1056 + s = "RNDIS_DATA_INITIALIZED"; break; 1057 + }; s; }), 1058 + param->medium, 1059 + (param->media_state) ? 0 : param->speed*100, 1060 + (param->media_state) ? 
"disconnected" : "connected", 1061 + param->vendorID, param->vendorDescr); 1062 + return 0; 1063 + } 1064 + 1065 + static ssize_t rndis_proc_write(struct file *file, const char __user *buffer, 1066 + size_t count, loff_t *ppos) 1067 + { 1068 + rndis_params *p = PDE(file->f_path.dentry->d_inode)->data; 1069 + u32 speed = 0; 1070 + int i, fl_speed = 0; 1071 + 1072 + for (i = 0; i < count; i++) { 1073 + char c; 1074 + if (get_user(c, buffer)) 1075 + return -EFAULT; 1076 + switch (c) { 1077 + case '0': 1078 + case '1': 1079 + case '2': 1080 + case '3': 1081 + case '4': 1082 + case '5': 1083 + case '6': 1084 + case '7': 1085 + case '8': 1086 + case '9': 1087 + fl_speed = 1; 1088 + speed = speed * 10 + c - '0'; 1089 + break; 1090 + case 'C': 1091 + case 'c': 1092 + rndis_signal_connect(p->confignr); 1093 + break; 1094 + case 'D': 1095 + case 'd': 1096 + rndis_signal_disconnect(p->confignr); 1097 + break; 1098 + default: 1099 + if (fl_speed) p->speed = speed; 1100 + else pr_debug("%c is not valid\n", c); 1101 + break; 1102 + } 1103 + 1104 + buffer++; 1105 + } 1106 + 1107 + return count; 1108 + } 1109 + 1110 + static int rndis_proc_open(struct inode *inode, struct file *file) 1111 + { 1112 + return single_open(file, rndis_proc_show, PDE(inode)->data); 1113 + } 1114 + 1115 + static const struct file_operations rndis_proc_fops = { 1116 + .owner = THIS_MODULE, 1117 + .open = rndis_proc_open, 1118 + .read = seq_read, 1119 + .llseek = seq_lseek, 1120 + .release = single_release, 1121 + .write = rndis_proc_write, 1122 + }; 1123 + 1124 + #define NAME_TEMPLATE "driver/rndis-%03d" 1125 + 1126 + static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; 1127 + 1128 + #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1129 + 1130 + 1131 + int rndis_init(void) 1132 + { 1133 + u8 i; 1134 + 1135 + for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { 1136 + #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1137 + char name [20]; 1138 + 1139 + sprintf(name, NAME_TEMPLATE, i); 1140 + rndis_connect_state[i] = 
proc_create_data(name, 0660, NULL, 1141 + &rndis_proc_fops, 1142 + (void *)(rndis_per_dev_params + i)); 1143 + if (!rndis_connect_state[i]) { 1144 + pr_debug("%s: remove entries", __func__); 1145 + while (i) { 1146 + sprintf(name, NAME_TEMPLATE, --i); 1147 + remove_proc_entry(name, NULL); 1148 + } 1149 + pr_debug("\n"); 1150 + return -EIO; 1151 + } 1152 + #endif 1153 + rndis_per_dev_params[i].confignr = i; 1154 + rndis_per_dev_params[i].used = 0; 1155 + rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED; 1156 + rndis_per_dev_params[i].media_state 1157 + = RNDIS_MEDIA_STATE_DISCONNECTED; 1158 + INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue)); 1159 + } 1160 + 1161 + return 0; 1162 + } 1163 + 1164 + void rndis_exit(void) 1165 + { 1166 + #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1167 + u8 i; 1168 + char name[20]; 1169 + 1170 + for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { 1171 + sprintf(name, NAME_TEMPLATE, i); 1172 + remove_proc_entry(name, NULL); 1173 + } 1174 + #endif 1175 + }
+222
drivers/staging/ccg/rndis.h
··· 1 + /* 2 + * RNDIS Definitions for Remote NDIS 3 + * 4 + * Authors: Benedikt Spranger, Pengutronix 5 + * Robert Schwebel, Pengutronix 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This software was originally developed in conformance with 12 + * Microsoft's Remote NDIS Specification License Agreement. 13 + */ 14 + 15 + #ifndef _LINUX_RNDIS_H 16 + #define _LINUX_RNDIS_H 17 + 18 + #include <linux/rndis.h> 19 + #include "ndis.h" 20 + 21 + #define RNDIS_MAXIMUM_FRAME_SIZE 1518 22 + #define RNDIS_MAX_TOTAL_SIZE 1558 23 + 24 + typedef struct rndis_init_msg_type 25 + { 26 + __le32 MessageType; 27 + __le32 MessageLength; 28 + __le32 RequestID; 29 + __le32 MajorVersion; 30 + __le32 MinorVersion; 31 + __le32 MaxTransferSize; 32 + } rndis_init_msg_type; 33 + 34 + typedef struct rndis_init_cmplt_type 35 + { 36 + __le32 MessageType; 37 + __le32 MessageLength; 38 + __le32 RequestID; 39 + __le32 Status; 40 + __le32 MajorVersion; 41 + __le32 MinorVersion; 42 + __le32 DeviceFlags; 43 + __le32 Medium; 44 + __le32 MaxPacketsPerTransfer; 45 + __le32 MaxTransferSize; 46 + __le32 PacketAlignmentFactor; 47 + __le32 AFListOffset; 48 + __le32 AFListSize; 49 + } rndis_init_cmplt_type; 50 + 51 + typedef struct rndis_halt_msg_type 52 + { 53 + __le32 MessageType; 54 + __le32 MessageLength; 55 + __le32 RequestID; 56 + } rndis_halt_msg_type; 57 + 58 + typedef struct rndis_query_msg_type 59 + { 60 + __le32 MessageType; 61 + __le32 MessageLength; 62 + __le32 RequestID; 63 + __le32 OID; 64 + __le32 InformationBufferLength; 65 + __le32 InformationBufferOffset; 66 + __le32 DeviceVcHandle; 67 + } rndis_query_msg_type; 68 + 69 + typedef struct rndis_query_cmplt_type 70 + { 71 + __le32 MessageType; 72 + __le32 MessageLength; 73 + __le32 RequestID; 74 + __le32 Status; 75 + __le32 InformationBufferLength; 76 + __le32 
InformationBufferOffset; 77 + } rndis_query_cmplt_type; 78 + 79 + typedef struct rndis_set_msg_type 80 + { 81 + __le32 MessageType; 82 + __le32 MessageLength; 83 + __le32 RequestID; 84 + __le32 OID; 85 + __le32 InformationBufferLength; 86 + __le32 InformationBufferOffset; 87 + __le32 DeviceVcHandle; 88 + } rndis_set_msg_type; 89 + 90 + typedef struct rndis_set_cmplt_type 91 + { 92 + __le32 MessageType; 93 + __le32 MessageLength; 94 + __le32 RequestID; 95 + __le32 Status; 96 + } rndis_set_cmplt_type; 97 + 98 + typedef struct rndis_reset_msg_type 99 + { 100 + __le32 MessageType; 101 + __le32 MessageLength; 102 + __le32 Reserved; 103 + } rndis_reset_msg_type; 104 + 105 + typedef struct rndis_reset_cmplt_type 106 + { 107 + __le32 MessageType; 108 + __le32 MessageLength; 109 + __le32 Status; 110 + __le32 AddressingReset; 111 + } rndis_reset_cmplt_type; 112 + 113 + typedef struct rndis_indicate_status_msg_type 114 + { 115 + __le32 MessageType; 116 + __le32 MessageLength; 117 + __le32 Status; 118 + __le32 StatusBufferLength; 119 + __le32 StatusBufferOffset; 120 + } rndis_indicate_status_msg_type; 121 + 122 + typedef struct rndis_keepalive_msg_type 123 + { 124 + __le32 MessageType; 125 + __le32 MessageLength; 126 + __le32 RequestID; 127 + } rndis_keepalive_msg_type; 128 + 129 + typedef struct rndis_keepalive_cmplt_type 130 + { 131 + __le32 MessageType; 132 + __le32 MessageLength; 133 + __le32 RequestID; 134 + __le32 Status; 135 + } rndis_keepalive_cmplt_type; 136 + 137 + struct rndis_packet_msg_type 138 + { 139 + __le32 MessageType; 140 + __le32 MessageLength; 141 + __le32 DataOffset; 142 + __le32 DataLength; 143 + __le32 OOBDataOffset; 144 + __le32 OOBDataLength; 145 + __le32 NumOOBDataElements; 146 + __le32 PerPacketInfoOffset; 147 + __le32 PerPacketInfoLength; 148 + __le32 VcHandle; 149 + __le32 Reserved; 150 + } __attribute__ ((packed)); 151 + 152 + struct rndis_config_parameter 153 + { 154 + __le32 ParameterNameOffset; 155 + __le32 ParameterNameLength; 156 + __le32 
ParameterType; 157 + __le32 ParameterValueOffset; 158 + __le32 ParameterValueLength; 159 + }; 160 + 161 + /* implementation specific */ 162 + enum rndis_state 163 + { 164 + RNDIS_UNINITIALIZED, 165 + RNDIS_INITIALIZED, 166 + RNDIS_DATA_INITIALIZED, 167 + }; 168 + 169 + typedef struct rndis_resp_t 170 + { 171 + struct list_head list; 172 + u8 *buf; 173 + u32 length; 174 + int send; 175 + } rndis_resp_t; 176 + 177 + typedef struct rndis_params 178 + { 179 + u8 confignr; 180 + u8 used; 181 + u16 saved_filter; 182 + enum rndis_state state; 183 + u32 medium; 184 + u32 speed; 185 + u32 media_state; 186 + 187 + const u8 *host_mac; 188 + u16 *filter; 189 + struct net_device *dev; 190 + 191 + u32 vendorID; 192 + const char *vendorDescr; 193 + void (*resp_avail)(void *v); 194 + void *v; 195 + struct list_head resp_queue; 196 + } rndis_params; 197 + 198 + /* RNDIS Message parser and other useless functions */ 199 + int rndis_msg_parser (u8 configNr, u8 *buf); 200 + int rndis_register(void (*resp_avail)(void *v), void *v); 201 + void rndis_deregister (int configNr); 202 + int rndis_set_param_dev (u8 configNr, struct net_device *dev, 203 + u16 *cdc_filter); 204 + int rndis_set_param_vendor (u8 configNr, u32 vendorID, 205 + const char *vendorDescr); 206 + int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed); 207 + void rndis_add_hdr (struct sk_buff *skb); 208 + int rndis_rm_hdr(struct gether *port, struct sk_buff *skb, 209 + struct sk_buff_head *list); 210 + u8 *rndis_get_next_response (int configNr, u32 *length); 211 + void rndis_free_response (int configNr, u8 *buf); 212 + 213 + void rndis_uninit (int configNr); 214 + int rndis_signal_connect (int configNr); 215 + int rndis_signal_disconnect (int configNr); 216 + int rndis_state (int configNr); 217 + extern void rndis_set_host_mac (int configNr, const u8 *addr); 218 + 219 + int rndis_init(void); 220 + void rndis_exit (void); 221 + 222 + #endif /* _LINUX_RNDIS_H */
+893
drivers/staging/ccg/storage_common.c
··· 1 + /* 2 + * storage_common.c -- Common definitions for mass storage functionality 3 + * 4 + * Copyright (C) 2003-2008 Alan Stern 5 + * Copyeight (C) 2009 Samsung Electronics 6 + * Author: Michal Nazarewicz (mina86@mina86.com) 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + */ 13 + 14 + 15 + /* 16 + * This file requires the following identifiers used in USB strings to 17 + * be defined (each of type pointer to char): 18 + * - fsg_string_manufacturer -- name of the manufacturer 19 + * - fsg_string_product -- name of the product 20 + * - fsg_string_config -- name of the configuration 21 + * - fsg_string_interface -- name of the interface 22 + * The first four are only needed when FSG_DESCRIPTORS_DEVICE_STRINGS 23 + * macro is defined prior to including this file. 24 + */ 25 + 26 + /* 27 + * When FSG_NO_INTR_EP is defined fsg_fs_intr_in_desc and 28 + * fsg_hs_intr_in_desc objects as well as 29 + * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES 30 + * macros are not defined. 31 + * 32 + * When FSG_NO_DEVICE_STRINGS is defined FSG_STRING_MANUFACTURER, 33 + * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not 34 + * defined (as well as corresponding entries in string tables are 35 + * missing) and FSG_STRING_INTERFACE has value of zero. 36 + * 37 + * When FSG_NO_OTG is defined fsg_otg_desc won't be defined. 38 + */ 39 + 40 + /* 41 + * When USB_GADGET_DEBUG_FILES is defined the module param num_buffers 42 + * sets the number of pipeline buffers (length of the fsg_buffhd array). 43 + * The valid range of num_buffers is: num >= 2 && num <= 4. 
44 + */ 45 + 46 + 47 + #include <linux/usb/storage.h> 48 + #include <scsi/scsi.h> 49 + #include <asm/unaligned.h> 50 + 51 + 52 + /* 53 + * Thanks to NetChip Technologies for donating this product ID. 54 + * 55 + * DO NOT REUSE THESE IDs with any other driver!! Ever!! 56 + * Instead: allocate your own, using normal USB-IF procedures. 57 + */ 58 + #define FSG_VENDOR_ID 0x0525 /* NetChip */ 59 + #define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */ 60 + 61 + 62 + /*-------------------------------------------------------------------------*/ 63 + 64 + 65 + #ifndef DEBUG 66 + #undef VERBOSE_DEBUG 67 + #undef DUMP_MSGS 68 + #endif /* !DEBUG */ 69 + 70 + #ifdef VERBOSE_DEBUG 71 + #define VLDBG LDBG 72 + #else 73 + #define VLDBG(lun, fmt, args...) do { } while (0) 74 + #endif /* VERBOSE_DEBUG */ 75 + 76 + #define LDBG(lun, fmt, args...) dev_dbg (&(lun)->dev, fmt, ## args) 77 + #define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args) 78 + #define LWARN(lun, fmt, args...) dev_warn(&(lun)->dev, fmt, ## args) 79 + #define LINFO(lun, fmt, args...) dev_info(&(lun)->dev, fmt, ## args) 80 + 81 + /* 82 + * Keep those macros in sync with those in 83 + * include/linux/usb/composite.h or else GCC will complain. If they 84 + * are identical (the same names of arguments, white spaces in the 85 + * same places) GCC will allow redefinition otherwise (even if some 86 + * white space is removed or added) warning will be issued. 87 + * 88 + * Those macros are needed here because File Storage Gadget does not 89 + * include the composite.h header. For composite gadgets those macros 90 + * are redundant since composite.h is included any way. 91 + * 92 + * One could check whether those macros are already defined (which 93 + * would indicate composite.h had been included) or not (which would 94 + * indicate we were in FSG) but this is not done because a warning is 95 + * desired if definitions here differ from the ones in composite.h. 
96 + * 97 + * We want the definitions to match and be the same in File Storage 98 + * Gadget as well as Mass Storage Function (and so composite gadgets 99 + * using MSF). If someone changes them in composite.h it will produce 100 + * a warning in this file when building MSF. 101 + */ 102 + #define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args) 103 + #define VDBG(d, fmt, args...) dev_vdbg(&(d)->gadget->dev , fmt , ## args) 104 + #define ERROR(d, fmt, args...) dev_err(&(d)->gadget->dev , fmt , ## args) 105 + #define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args) 106 + #define INFO(d, fmt, args...) dev_info(&(d)->gadget->dev , fmt , ## args) 107 + 108 + 109 + 110 + #ifdef DUMP_MSGS 111 + 112 + # define dump_msg(fsg, /* const char * */ label, \ 113 + /* const u8 * */ buf, /* unsigned */ length) do { \ 114 + if (length < 512) { \ 115 + DBG(fsg, "%s, length %u:\n", label, length); \ 116 + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \ 117 + 16, 1, buf, length, 0); \ 118 + } \ 119 + } while (0) 120 + 121 + # define dump_cdb(fsg) do { } while (0) 122 + 123 + #else 124 + 125 + # define dump_msg(fsg, /* const char * */ label, \ 126 + /* const u8 * */ buf, /* unsigned */ length) do { } while (0) 127 + 128 + # ifdef VERBOSE_DEBUG 129 + 130 + # define dump_cdb(fsg) \ 131 + print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \ 132 + 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \ 133 + 134 + # else 135 + 136 + # define dump_cdb(fsg) do { } while (0) 137 + 138 + # endif /* VERBOSE_DEBUG */ 139 + 140 + #endif /* DUMP_MSGS */ 141 + 142 + /*-------------------------------------------------------------------------*/ 143 + 144 + /* CBI Interrupt data structure */ 145 + struct interrupt_data { 146 + u8 bType; 147 + u8 bValue; 148 + }; 149 + 150 + #define CBI_INTERRUPT_DATA_LEN 2 151 + 152 + /* CBI Accept Device-Specific Command request */ 153 + #define USB_CBI_ADSC_REQUEST 0x00 154 + 155 + 156 + /* Length of a SCSI Command Data Block */ 
157 + #define MAX_COMMAND_SIZE 16 158 + 159 + /* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */ 160 + #define SS_NO_SENSE 0 161 + #define SS_COMMUNICATION_FAILURE 0x040800 162 + #define SS_INVALID_COMMAND 0x052000 163 + #define SS_INVALID_FIELD_IN_CDB 0x052400 164 + #define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100 165 + #define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500 166 + #define SS_MEDIUM_NOT_PRESENT 0x023a00 167 + #define SS_MEDIUM_REMOVAL_PREVENTED 0x055302 168 + #define SS_NOT_READY_TO_READY_TRANSITION 0x062800 169 + #define SS_RESET_OCCURRED 0x062900 170 + #define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900 171 + #define SS_UNRECOVERED_READ_ERROR 0x031100 172 + #define SS_WRITE_ERROR 0x030c02 173 + #define SS_WRITE_PROTECTED 0x072700 174 + 175 + #define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */ 176 + #define ASC(x) ((u8) ((x) >> 8)) 177 + #define ASCQ(x) ((u8) (x)) 178 + 179 + 180 + /*-------------------------------------------------------------------------*/ 181 + 182 + 183 + struct fsg_lun { 184 + struct file *filp; 185 + loff_t file_length; 186 + loff_t num_sectors; 187 + 188 + unsigned int initially_ro:1; 189 + unsigned int ro:1; 190 + unsigned int removable:1; 191 + unsigned int cdrom:1; 192 + unsigned int prevent_medium_removal:1; 193 + unsigned int registered:1; 194 + unsigned int info_valid:1; 195 + unsigned int nofua:1; 196 + 197 + u32 sense_data; 198 + u32 sense_data_info; 199 + u32 unit_attention_data; 200 + 201 + unsigned int blkbits; /* Bits of logical block size of bound block device */ 202 + unsigned int blksize; /* logical block size of bound block device */ 203 + struct device dev; 204 + }; 205 + 206 + #define fsg_lun_is_open(curlun) ((curlun)->filp != NULL) 207 + 208 + static struct fsg_lun *fsg_lun_from_dev(struct device *dev) 209 + { 210 + return container_of(dev, struct fsg_lun, dev); 211 + } 212 + 213 + 214 + /* Big enough to hold our biggest descriptor */ 215 + #define EP0_BUFSIZE 256 216 + #define 
DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */ 217 + 218 + #ifdef CONFIG_USB_GADGET_DEBUG_FILES 219 + 220 + static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS; 221 + module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO); 222 + MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers"); 223 + 224 + #else 225 + 226 + /* 227 + * Number of buffers we will use. 228 + * 2 is usually enough for good buffering pipeline 229 + */ 230 + #define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS 231 + 232 + #endif /* CONFIG_USB_DEBUG */ 233 + 234 + /* check if fsg_num_buffers is within a valid range */ 235 + static inline int fsg_num_buffers_validate(void) 236 + { 237 + if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4) 238 + return 0; 239 + pr_err("fsg_num_buffers %u is out of range (%d to %d)\n", 240 + fsg_num_buffers, 2 ,4); 241 + return -EINVAL; 242 + } 243 + 244 + /* Default size of buffer length. */ 245 + #define FSG_BUFLEN ((u32)16384) 246 + 247 + /* Maximal number of LUNs supported in mass storage function */ 248 + #define FSG_MAX_LUNS 8 249 + 250 + enum fsg_buffer_state { 251 + BUF_STATE_EMPTY = 0, 252 + BUF_STATE_FULL, 253 + BUF_STATE_BUSY 254 + }; 255 + 256 + struct fsg_buffhd { 257 + void *buf; 258 + enum fsg_buffer_state state; 259 + struct fsg_buffhd *next; 260 + 261 + /* 262 + * The NetChip 2280 is faster, and handles some protocol faults 263 + * better, if we don't submit any short bulk-out read requests. 264 + * So we will record the intended request length here. 
265 + */ 266 + unsigned int bulk_out_intended_length; 267 + 268 + struct usb_request *inreq; 269 + int inreq_busy; 270 + struct usb_request *outreq; 271 + int outreq_busy; 272 + }; 273 + 274 + enum fsg_state { 275 + /* This one isn't used anywhere */ 276 + FSG_STATE_COMMAND_PHASE = -10, 277 + FSG_STATE_DATA_PHASE, 278 + FSG_STATE_STATUS_PHASE, 279 + 280 + FSG_STATE_IDLE = 0, 281 + FSG_STATE_ABORT_BULK_OUT, 282 + FSG_STATE_RESET, 283 + FSG_STATE_INTERFACE_CHANGE, 284 + FSG_STATE_CONFIG_CHANGE, 285 + FSG_STATE_DISCONNECT, 286 + FSG_STATE_EXIT, 287 + FSG_STATE_TERMINATED 288 + }; 289 + 290 + enum data_direction { 291 + DATA_DIR_UNKNOWN = 0, 292 + DATA_DIR_FROM_HOST, 293 + DATA_DIR_TO_HOST, 294 + DATA_DIR_NONE 295 + }; 296 + 297 + 298 + /*-------------------------------------------------------------------------*/ 299 + 300 + 301 + static inline u32 get_unaligned_be24(u8 *buf) 302 + { 303 + return 0xffffff & (u32) get_unaligned_be32(buf - 1); 304 + } 305 + 306 + 307 + /*-------------------------------------------------------------------------*/ 308 + 309 + 310 + enum { 311 + #ifndef FSG_NO_DEVICE_STRINGS 312 + FSG_STRING_MANUFACTURER = 1, 313 + FSG_STRING_PRODUCT, 314 + FSG_STRING_SERIAL, 315 + FSG_STRING_CONFIG, 316 + #endif 317 + FSG_STRING_INTERFACE 318 + }; 319 + 320 + 321 + #ifndef FSG_NO_OTG 322 + static struct usb_otg_descriptor 323 + fsg_otg_desc = { 324 + .bLength = sizeof fsg_otg_desc, 325 + .bDescriptorType = USB_DT_OTG, 326 + 327 + .bmAttributes = USB_OTG_SRP, 328 + }; 329 + #endif 330 + 331 + /* There is only one interface. 
*/ 332 + 333 + static struct usb_interface_descriptor 334 + fsg_intf_desc = { 335 + .bLength = sizeof fsg_intf_desc, 336 + .bDescriptorType = USB_DT_INTERFACE, 337 + 338 + .bNumEndpoints = 2, /* Adjusted during fsg_bind() */ 339 + .bInterfaceClass = USB_CLASS_MASS_STORAGE, 340 + .bInterfaceSubClass = USB_SC_SCSI, /* Adjusted during fsg_bind() */ 341 + .bInterfaceProtocol = USB_PR_BULK, /* Adjusted during fsg_bind() */ 342 + .iInterface = FSG_STRING_INTERFACE, 343 + }; 344 + 345 + /* 346 + * Three full-speed endpoint descriptors: bulk-in, bulk-out, and 347 + * interrupt-in. 348 + */ 349 + 350 + static struct usb_endpoint_descriptor 351 + fsg_fs_bulk_in_desc = { 352 + .bLength = USB_DT_ENDPOINT_SIZE, 353 + .bDescriptorType = USB_DT_ENDPOINT, 354 + 355 + .bEndpointAddress = USB_DIR_IN, 356 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 357 + /* wMaxPacketSize set by autoconfiguration */ 358 + }; 359 + 360 + static struct usb_endpoint_descriptor 361 + fsg_fs_bulk_out_desc = { 362 + .bLength = USB_DT_ENDPOINT_SIZE, 363 + .bDescriptorType = USB_DT_ENDPOINT, 364 + 365 + .bEndpointAddress = USB_DIR_OUT, 366 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 367 + /* wMaxPacketSize set by autoconfiguration */ 368 + }; 369 + 370 + #ifndef FSG_NO_INTR_EP 371 + 372 + static struct usb_endpoint_descriptor 373 + fsg_fs_intr_in_desc = { 374 + .bLength = USB_DT_ENDPOINT_SIZE, 375 + .bDescriptorType = USB_DT_ENDPOINT, 376 + 377 + .bEndpointAddress = USB_DIR_IN, 378 + .bmAttributes = USB_ENDPOINT_XFER_INT, 379 + .wMaxPacketSize = cpu_to_le16(2), 380 + .bInterval = 32, /* frames -> 32 ms */ 381 + }; 382 + 383 + #ifndef FSG_NO_OTG 384 + # define FSG_FS_FUNCTION_PRE_EP_ENTRIES 2 385 + #else 386 + # define FSG_FS_FUNCTION_PRE_EP_ENTRIES 1 387 + #endif 388 + 389 + #endif 390 + 391 + static struct usb_descriptor_header *fsg_fs_function[] = { 392 + #ifndef FSG_NO_OTG 393 + (struct usb_descriptor_header *) &fsg_otg_desc, 394 + #endif 395 + (struct usb_descriptor_header *) &fsg_intf_desc, 396 + (struct 
usb_descriptor_header *) &fsg_fs_bulk_in_desc, 397 + (struct usb_descriptor_header *) &fsg_fs_bulk_out_desc, 398 + #ifndef FSG_NO_INTR_EP 399 + (struct usb_descriptor_header *) &fsg_fs_intr_in_desc, 400 + #endif 401 + NULL, 402 + }; 403 + 404 + 405 + /* 406 + * USB 2.0 devices need to expose both high speed and full speed 407 + * descriptors, unless they only run at full speed. 408 + * 409 + * That means alternate endpoint descriptors (bigger packets) 410 + * and a "device qualifier" ... plus more construction options 411 + * for the configuration descriptor. 412 + */ 413 + static struct usb_endpoint_descriptor 414 + fsg_hs_bulk_in_desc = { 415 + .bLength = USB_DT_ENDPOINT_SIZE, 416 + .bDescriptorType = USB_DT_ENDPOINT, 417 + 418 + /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ 419 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 420 + .wMaxPacketSize = cpu_to_le16(512), 421 + }; 422 + 423 + static struct usb_endpoint_descriptor 424 + fsg_hs_bulk_out_desc = { 425 + .bLength = USB_DT_ENDPOINT_SIZE, 426 + .bDescriptorType = USB_DT_ENDPOINT, 427 + 428 + /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ 429 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 430 + .wMaxPacketSize = cpu_to_le16(512), 431 + .bInterval = 1, /* NAK every 1 uframe */ 432 + }; 433 + 434 + #ifndef FSG_NO_INTR_EP 435 + 436 + static struct usb_endpoint_descriptor 437 + fsg_hs_intr_in_desc = { 438 + .bLength = USB_DT_ENDPOINT_SIZE, 439 + .bDescriptorType = USB_DT_ENDPOINT, 440 + 441 + /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */ 442 + .bmAttributes = USB_ENDPOINT_XFER_INT, 443 + .wMaxPacketSize = cpu_to_le16(2), 444 + .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */ 445 + }; 446 + 447 + #ifndef FSG_NO_OTG 448 + # define FSG_HS_FUNCTION_PRE_EP_ENTRIES 2 449 + #else 450 + # define FSG_HS_FUNCTION_PRE_EP_ENTRIES 1 451 + #endif 452 + 453 + #endif 454 + 455 + static struct usb_descriptor_header *fsg_hs_function[] = { 456 + #ifndef FSG_NO_OTG 457 + 
(struct usb_descriptor_header *) &fsg_otg_desc, 458 + #endif 459 + (struct usb_descriptor_header *) &fsg_intf_desc, 460 + (struct usb_descriptor_header *) &fsg_hs_bulk_in_desc, 461 + (struct usb_descriptor_header *) &fsg_hs_bulk_out_desc, 462 + #ifndef FSG_NO_INTR_EP 463 + (struct usb_descriptor_header *) &fsg_hs_intr_in_desc, 464 + #endif 465 + NULL, 466 + }; 467 + 468 + static struct usb_endpoint_descriptor 469 + fsg_ss_bulk_in_desc = { 470 + .bLength = USB_DT_ENDPOINT_SIZE, 471 + .bDescriptorType = USB_DT_ENDPOINT, 472 + 473 + /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ 474 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 475 + .wMaxPacketSize = cpu_to_le16(1024), 476 + }; 477 + 478 + static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = { 479 + .bLength = sizeof(fsg_ss_bulk_in_comp_desc), 480 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 481 + 482 + /*.bMaxBurst = DYNAMIC, */ 483 + }; 484 + 485 + static struct usb_endpoint_descriptor 486 + fsg_ss_bulk_out_desc = { 487 + .bLength = USB_DT_ENDPOINT_SIZE, 488 + .bDescriptorType = USB_DT_ENDPOINT, 489 + 490 + /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ 491 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 492 + .wMaxPacketSize = cpu_to_le16(1024), 493 + }; 494 + 495 + static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = { 496 + .bLength = sizeof(fsg_ss_bulk_in_comp_desc), 497 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 498 + 499 + /*.bMaxBurst = DYNAMIC, */ 500 + }; 501 + 502 + #ifndef FSG_NO_INTR_EP 503 + 504 + static struct usb_endpoint_descriptor 505 + fsg_ss_intr_in_desc = { 506 + .bLength = USB_DT_ENDPOINT_SIZE, 507 + .bDescriptorType = USB_DT_ENDPOINT, 508 + 509 + /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */ 510 + .bmAttributes = USB_ENDPOINT_XFER_INT, 511 + .wMaxPacketSize = cpu_to_le16(2), 512 + .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */ 513 + }; 514 + 515 + static struct usb_ss_ep_comp_descriptor 
fsg_ss_intr_in_comp_desc = { 516 + .bLength = sizeof(fsg_ss_bulk_in_comp_desc), 517 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 518 + 519 + .wBytesPerInterval = cpu_to_le16(2), 520 + }; 521 + 522 + #ifndef FSG_NO_OTG 523 + # define FSG_SS_FUNCTION_PRE_EP_ENTRIES 2 524 + #else 525 + # define FSG_SS_FUNCTION_PRE_EP_ENTRIES 1 526 + #endif 527 + 528 + #endif 529 + 530 + static __maybe_unused struct usb_ext_cap_descriptor fsg_ext_cap_desc = { 531 + .bLength = USB_DT_USB_EXT_CAP_SIZE, 532 + .bDescriptorType = USB_DT_DEVICE_CAPABILITY, 533 + .bDevCapabilityType = USB_CAP_TYPE_EXT, 534 + 535 + .bmAttributes = cpu_to_le32(USB_LPM_SUPPORT), 536 + }; 537 + 538 + static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = { 539 + .bLength = USB_DT_USB_SS_CAP_SIZE, 540 + .bDescriptorType = USB_DT_DEVICE_CAPABILITY, 541 + .bDevCapabilityType = USB_SS_CAP_TYPE, 542 + 543 + /* .bmAttributes = LTM is not supported yet */ 544 + 545 + .wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION 546 + | USB_FULL_SPEED_OPERATION 547 + | USB_HIGH_SPEED_OPERATION 548 + | USB_5GBPS_OPERATION), 549 + .bFunctionalitySupport = USB_LOW_SPEED_OPERATION, 550 + .bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT, 551 + .bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT), 552 + }; 553 + 554 + static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = { 555 + .bLength = USB_DT_BOS_SIZE, 556 + .bDescriptorType = USB_DT_BOS, 557 + 558 + .wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE 559 + + USB_DT_USB_EXT_CAP_SIZE 560 + + USB_DT_USB_SS_CAP_SIZE), 561 + 562 + .bNumDeviceCaps = 2, 563 + }; 564 + 565 + static struct usb_descriptor_header *fsg_ss_function[] = { 566 + #ifndef FSG_NO_OTG 567 + (struct usb_descriptor_header *) &fsg_otg_desc, 568 + #endif 569 + (struct usb_descriptor_header *) &fsg_intf_desc, 570 + (struct usb_descriptor_header *) &fsg_ss_bulk_in_desc, 571 + (struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc, 572 + (struct usb_descriptor_header *) &fsg_ss_bulk_out_desc, 573 + 
(struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc, 574 + #ifndef FSG_NO_INTR_EP 575 + (struct usb_descriptor_header *) &fsg_ss_intr_in_desc, 576 + (struct usb_descriptor_header *) &fsg_ss_intr_in_comp_desc, 577 + #endif 578 + NULL, 579 + }; 580 + 581 + /* Maxpacket and other transfer characteristics vary by speed. */ 582 + static __maybe_unused struct usb_endpoint_descriptor * 583 + fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs, 584 + struct usb_endpoint_descriptor *hs, 585 + struct usb_endpoint_descriptor *ss) 586 + { 587 + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) 588 + return ss; 589 + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) 590 + return hs; 591 + return fs; 592 + } 593 + 594 + 595 + /* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */ 596 + static struct usb_string fsg_strings[] = { 597 + #ifndef FSG_NO_DEVICE_STRINGS 598 + {FSG_STRING_MANUFACTURER, fsg_string_manufacturer}, 599 + {FSG_STRING_PRODUCT, fsg_string_product}, 600 + {FSG_STRING_SERIAL, ""}, 601 + {FSG_STRING_CONFIG, fsg_string_config}, 602 + #endif 603 + {FSG_STRING_INTERFACE, fsg_string_interface}, 604 + {} 605 + }; 606 + 607 + static struct usb_gadget_strings fsg_stringtab = { 608 + .language = 0x0409, /* en-us */ 609 + .strings = fsg_strings, 610 + }; 611 + 612 + 613 + /*-------------------------------------------------------------------------*/ 614 + 615 + /* 616 + * If the next two routines are called while the gadget is registered, 617 + * the caller must own fsg->filesem for writing. 
618 + */ 619 + 620 + static void fsg_lun_close(struct fsg_lun *curlun) 621 + { 622 + if (curlun->filp) { 623 + LDBG(curlun, "close backing file\n"); 624 + fput(curlun->filp); 625 + curlun->filp = NULL; 626 + } 627 + } 628 + 629 + 630 + static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) 631 + { 632 + int ro; 633 + struct file *filp = NULL; 634 + int rc = -EINVAL; 635 + struct inode *inode = NULL; 636 + loff_t size; 637 + loff_t num_sectors; 638 + loff_t min_sectors; 639 + unsigned int blkbits; 640 + unsigned int blksize; 641 + 642 + /* R/W if we can, R/O if we must */ 643 + ro = curlun->initially_ro; 644 + if (!ro) { 645 + filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); 646 + if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES) 647 + ro = 1; 648 + } 649 + if (ro) 650 + filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0); 651 + if (IS_ERR(filp)) { 652 + LINFO(curlun, "unable to open backing file: %s\n", filename); 653 + return PTR_ERR(filp); 654 + } 655 + 656 + if (!(filp->f_mode & FMODE_WRITE)) 657 + ro = 1; 658 + 659 + inode = filp->f_path.dentry->d_inode; 660 + if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { 661 + LINFO(curlun, "invalid file type: %s\n", filename); 662 + goto out; 663 + } 664 + 665 + /* 666 + * If we can't read the file, it's no good. 667 + * If we can't write the file, use it read-only. 
668 + */ 669 + if (!(filp->f_op->read || filp->f_op->aio_read)) { 670 + LINFO(curlun, "file not readable: %s\n", filename); 671 + goto out; 672 + } 673 + if (!(filp->f_op->write || filp->f_op->aio_write)) 674 + ro = 1; 675 + 676 + size = i_size_read(inode->i_mapping->host); 677 + if (size < 0) { 678 + LINFO(curlun, "unable to find file size: %s\n", filename); 679 + rc = (int) size; 680 + goto out; 681 + } 682 + 683 + if (curlun->cdrom) { 684 + blksize = 2048; 685 + blkbits = 11; 686 + } else if (inode->i_bdev) { 687 + blksize = bdev_logical_block_size(inode->i_bdev); 688 + blkbits = blksize_bits(blksize); 689 + } else { 690 + blksize = 512; 691 + blkbits = 9; 692 + } 693 + 694 + num_sectors = size >> blkbits; /* File size in logic-block-size blocks */ 695 + min_sectors = 1; 696 + if (curlun->cdrom) { 697 + min_sectors = 300; /* Smallest track is 300 frames */ 698 + if (num_sectors >= 256*60*75) { 699 + num_sectors = 256*60*75 - 1; 700 + LINFO(curlun, "file too big: %s\n", filename); 701 + LINFO(curlun, "using only first %d blocks\n", 702 + (int) num_sectors); 703 + } 704 + } 705 + if (num_sectors < min_sectors) { 706 + LINFO(curlun, "file too small: %s\n", filename); 707 + rc = -ETOOSMALL; 708 + goto out; 709 + } 710 + 711 + if (fsg_lun_is_open(curlun)) 712 + fsg_lun_close(curlun); 713 + 714 + curlun->blksize = blksize; 715 + curlun->blkbits = blkbits; 716 + curlun->ro = ro; 717 + curlun->filp = filp; 718 + curlun->file_length = size; 719 + curlun->num_sectors = num_sectors; 720 + LDBG(curlun, "open backing file: %s\n", filename); 721 + return 0; 722 + 723 + out: 724 + fput(filp); 725 + return rc; 726 + } 727 + 728 + 729 + /*-------------------------------------------------------------------------*/ 730 + 731 + /* 732 + * Sync the file data, don't bother with the metadata. 733 + * This code was copied from fs/buffer.c:sys_fdatasync(). 
734 + */ 735 + static int fsg_lun_fsync_sub(struct fsg_lun *curlun) 736 + { 737 + struct file *filp = curlun->filp; 738 + 739 + if (curlun->ro || !filp) 740 + return 0; 741 + return vfs_fsync(filp, 1); 742 + } 743 + 744 + static void store_cdrom_address(u8 *dest, int msf, u32 addr) 745 + { 746 + if (msf) { 747 + /* Convert to Minutes-Seconds-Frames */ 748 + addr >>= 2; /* Convert to 2048-byte frames */ 749 + addr += 2*75; /* Lead-in occupies 2 seconds */ 750 + dest[3] = addr % 75; /* Frames */ 751 + addr /= 75; 752 + dest[2] = addr % 60; /* Seconds */ 753 + addr /= 60; 754 + dest[1] = addr; /* Minutes */ 755 + dest[0] = 0; /* Reserved */ 756 + } else { 757 + /* Absolute sector */ 758 + put_unaligned_be32(addr, dest); 759 + } 760 + } 761 + 762 + 763 + /*-------------------------------------------------------------------------*/ 764 + 765 + 766 + static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr, 767 + char *buf) 768 + { 769 + struct fsg_lun *curlun = fsg_lun_from_dev(dev); 770 + 771 + return sprintf(buf, "%d\n", fsg_lun_is_open(curlun) 772 + ? 
curlun->ro 773 + : curlun->initially_ro); 774 + } 775 + 776 + static ssize_t fsg_show_nofua(struct device *dev, struct device_attribute *attr, 777 + char *buf) 778 + { 779 + struct fsg_lun *curlun = fsg_lun_from_dev(dev); 780 + 781 + return sprintf(buf, "%u\n", curlun->nofua); 782 + } 783 + 784 + static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr, 785 + char *buf) 786 + { 787 + struct fsg_lun *curlun = fsg_lun_from_dev(dev); 788 + struct rw_semaphore *filesem = dev_get_drvdata(dev); 789 + char *p; 790 + ssize_t rc; 791 + 792 + down_read(filesem); 793 + if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */ 794 + p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1); 795 + if (IS_ERR(p)) 796 + rc = PTR_ERR(p); 797 + else { 798 + rc = strlen(p); 799 + memmove(buf, p, rc); 800 + buf[rc] = '\n'; /* Add a newline */ 801 + buf[++rc] = 0; 802 + } 803 + } else { /* No file, return 0 bytes */ 804 + *buf = 0; 805 + rc = 0; 806 + } 807 + up_read(filesem); 808 + return rc; 809 + } 810 + 811 + 812 + static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr, 813 + const char *buf, size_t count) 814 + { 815 + ssize_t rc; 816 + struct fsg_lun *curlun = fsg_lun_from_dev(dev); 817 + struct rw_semaphore *filesem = dev_get_drvdata(dev); 818 + unsigned ro; 819 + 820 + rc = kstrtouint(buf, 2, &ro); 821 + if (rc) 822 + return rc; 823 + 824 + /* 825 + * Allow the write-enable status to change only while the 826 + * backing file is closed. 
827 + */ 828 + down_read(filesem); 829 + if (fsg_lun_is_open(curlun)) { 830 + LDBG(curlun, "read-only status change prevented\n"); 831 + rc = -EBUSY; 832 + } else { 833 + curlun->ro = ro; 834 + curlun->initially_ro = ro; 835 + LDBG(curlun, "read-only status set to %d\n", curlun->ro); 836 + rc = count; 837 + } 838 + up_read(filesem); 839 + return rc; 840 + } 841 + 842 + static ssize_t fsg_store_nofua(struct device *dev, 843 + struct device_attribute *attr, 844 + const char *buf, size_t count) 845 + { 846 + struct fsg_lun *curlun = fsg_lun_from_dev(dev); 847 + unsigned nofua; 848 + int ret; 849 + 850 + ret = kstrtouint(buf, 2, &nofua); 851 + if (ret) 852 + return ret; 853 + 854 + /* Sync data when switching from async mode to sync */ 855 + if (!nofua && curlun->nofua) 856 + fsg_lun_fsync_sub(curlun); 857 + 858 + curlun->nofua = nofua; 859 + 860 + return count; 861 + } 862 + 863 + static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr, 864 + const char *buf, size_t count) 865 + { 866 + struct fsg_lun *curlun = fsg_lun_from_dev(dev); 867 + struct rw_semaphore *filesem = dev_get_drvdata(dev); 868 + int rc = 0; 869 + 870 + if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) { 871 + LDBG(curlun, "eject attempt prevented\n"); 872 + return -EBUSY; /* "Door is locked" */ 873 + } 874 + 875 + /* Remove a trailing newline */ 876 + if (count > 0 && buf[count-1] == '\n') 877 + ((char *) buf)[count-1] = 0; /* Ugh! */ 878 + 879 + /* Load new medium */ 880 + down_write(filesem); 881 + if (count > 0 && buf[0]) { 882 + /* fsg_lun_open() will close existing file if any. */ 883 + rc = fsg_lun_open(curlun, buf); 884 + if (rc == 0) 885 + curlun->unit_attention_data = 886 + SS_NOT_READY_TO_READY_TRANSITION; 887 + } else if (fsg_lun_is_open(curlun)) { 888 + fsg_lun_close(curlun); 889 + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; 890 + } 891 + up_write(filesem); 892 + return (rc < 0 ? rc : count); 893 + }
+986
drivers/staging/ccg/u_ether.c
··· 1 + /* 2 + * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack 3 + * 4 + * Copyright (C) 2003-2005,2008 David Brownell 5 + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger 6 + * Copyright (C) 2008 Nokia Corporation 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + */ 13 + 14 + /* #define VERBOSE_DEBUG */ 15 + 16 + #include <linux/kernel.h> 17 + #include <linux/gfp.h> 18 + #include <linux/device.h> 19 + #include <linux/ctype.h> 20 + #include <linux/etherdevice.h> 21 + #include <linux/ethtool.h> 22 + 23 + #include "u_ether.h" 24 + 25 + 26 + /* 27 + * This component encapsulates the Ethernet link glue needed to provide 28 + * one (!) network link through the USB gadget stack, normally "usb0". 29 + * 30 + * The control and data models are handled by the function driver which 31 + * connects to this code; such as CDC Ethernet (ECM or EEM), 32 + * "CDC Subset", or RNDIS. That includes all descriptor and endpoint 33 + * management. 34 + * 35 + * Link level addressing is handled by this component using module 36 + * parameters; if no such parameters are provided, random link level 37 + * addresses are used. Each end of the link uses one address. The 38 + * host end address is exported in various ways, and is often recorded 39 + * in configuration databases. 40 + * 41 + * The driver which assembles each configuration using such a link is 42 + * responsible for ensuring that each configuration includes at most one 43 + * instance of is network link. (The network layer provides ways for 44 + * this single "physical" link to be used by multiple virtual links.) 
45 + */ 46 + 47 + #define UETH__VERSION "29-May-2008" 48 + 49 + struct eth_dev { 50 + /* lock is held while accessing port_usb 51 + * or updating its backlink port_usb->ioport 52 + */ 53 + spinlock_t lock; 54 + struct gether *port_usb; 55 + 56 + struct net_device *net; 57 + struct usb_gadget *gadget; 58 + 59 + spinlock_t req_lock; /* guard {rx,tx}_reqs */ 60 + struct list_head tx_reqs, rx_reqs; 61 + atomic_t tx_qlen; 62 + 63 + struct sk_buff_head rx_frames; 64 + 65 + unsigned header_len; 66 + struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb); 67 + int (*unwrap)(struct gether *, 68 + struct sk_buff *skb, 69 + struct sk_buff_head *list); 70 + 71 + struct work_struct work; 72 + 73 + unsigned long todo; 74 + #define WORK_RX_MEMORY 0 75 + 76 + bool zlp; 77 + u8 host_mac[ETH_ALEN]; 78 + }; 79 + 80 + /*-------------------------------------------------------------------------*/ 81 + 82 + #define RX_EXTRA 20 /* bytes guarding against rx overflows */ 83 + 84 + #define DEFAULT_QLEN 2 /* double buffering by default */ 85 + 86 + static unsigned qmult = 5; 87 + module_param(qmult, uint, S_IRUGO|S_IWUSR); 88 + MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed"); 89 + 90 + /* for dual-speed hardware, use deeper queues at high/super speed */ 91 + static inline int qlen(struct usb_gadget *gadget) 92 + { 93 + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || 94 + gadget->speed == USB_SPEED_SUPER)) 95 + return qmult * DEFAULT_QLEN; 96 + else 97 + return DEFAULT_QLEN; 98 + } 99 + 100 + /*-------------------------------------------------------------------------*/ 101 + 102 + /* REVISIT there must be a better way than having two sets 103 + * of debug calls ... 104 + */ 105 + 106 + #undef DBG 107 + #undef VDBG 108 + #undef ERROR 109 + #undef INFO 110 + 111 + #define xprintk(d, level, fmt, args...) \ 112 + printk(level "%s: " fmt , (d)->net->name , ## args) 113 + 114 + #ifdef DEBUG 115 + #undef DEBUG 116 + #define DBG(dev, fmt, args...) 
\ 117 + xprintk(dev , KERN_DEBUG , fmt , ## args) 118 + #else 119 + #define DBG(dev, fmt, args...) \ 120 + do { } while (0) 121 + #endif /* DEBUG */ 122 + 123 + #ifdef VERBOSE_DEBUG 124 + #define VDBG DBG 125 + #else 126 + #define VDBG(dev, fmt, args...) \ 127 + do { } while (0) 128 + #endif /* DEBUG */ 129 + 130 + #define ERROR(dev, fmt, args...) \ 131 + xprintk(dev , KERN_ERR , fmt , ## args) 132 + #define INFO(dev, fmt, args...) \ 133 + xprintk(dev , KERN_INFO , fmt , ## args) 134 + 135 + /*-------------------------------------------------------------------------*/ 136 + 137 + /* NETWORK DRIVER HOOKUP (to the layer above this driver) */ 138 + 139 + static int ueth_change_mtu(struct net_device *net, int new_mtu) 140 + { 141 + struct eth_dev *dev = netdev_priv(net); 142 + unsigned long flags; 143 + int status = 0; 144 + 145 + /* don't change MTU on "live" link (peer won't know) */ 146 + spin_lock_irqsave(&dev->lock, flags); 147 + if (dev->port_usb) 148 + status = -EBUSY; 149 + else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN) 150 + status = -ERANGE; 151 + else 152 + net->mtu = new_mtu; 153 + spin_unlock_irqrestore(&dev->lock, flags); 154 + 155 + return status; 156 + } 157 + 158 + static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) 159 + { 160 + struct eth_dev *dev = netdev_priv(net); 161 + 162 + strlcpy(p->driver, "g_ether", sizeof p->driver); 163 + strlcpy(p->version, UETH__VERSION, sizeof p->version); 164 + strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version); 165 + strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info); 166 + } 167 + 168 + /* REVISIT can also support: 169 + * - WOL (by tracking suspends and issuing remote wakeup) 170 + * - msglevel (implies updated messaging) 171 + * - ... 
probably more ethtool ops 172 + */ 173 + 174 + static const struct ethtool_ops ops = { 175 + .get_drvinfo = eth_get_drvinfo, 176 + .get_link = ethtool_op_get_link, 177 + }; 178 + 179 + static void defer_kevent(struct eth_dev *dev, int flag) 180 + { 181 + if (test_and_set_bit(flag, &dev->todo)) 182 + return; 183 + if (!schedule_work(&dev->work)) 184 + ERROR(dev, "kevent %d may have been dropped\n", flag); 185 + else 186 + DBG(dev, "kevent %d scheduled\n", flag); 187 + } 188 + 189 + static void rx_complete(struct usb_ep *ep, struct usb_request *req); 190 + 191 + static int 192 + rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) 193 + { 194 + struct sk_buff *skb; 195 + int retval = -ENOMEM; 196 + size_t size = 0; 197 + struct usb_ep *out; 198 + unsigned long flags; 199 + 200 + spin_lock_irqsave(&dev->lock, flags); 201 + if (dev->port_usb) 202 + out = dev->port_usb->out_ep; 203 + else 204 + out = NULL; 205 + spin_unlock_irqrestore(&dev->lock, flags); 206 + 207 + if (!out) 208 + return -ENOTCONN; 209 + 210 + 211 + /* Padding up to RX_EXTRA handles minor disagreements with host. 212 + * Normally we use the USB "terminate on short read" convention; 213 + * so allow up to (N*maxpacket), since that memory is normally 214 + * already allocated. Some hardware doesn't deal well with short 215 + * reads (e.g. DMA must be N*maxpacket), so for now don't trim a 216 + * byte off the end (to force hardware errors on overflow). 217 + * 218 + * RNDIS uses internal framing, and explicitly allows senders to 219 + * pad to end-of-packet. That's potentially nice for speed, but 220 + * means receivers can't recover lost synch on their own (because 221 + * new packets don't only start after a short RX). 
222 + */ 223 + size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; 224 + size += dev->port_usb->header_len; 225 + size += out->maxpacket - 1; 226 + size -= size % out->maxpacket; 227 + 228 + if (dev->port_usb->is_fixed) 229 + size = max_t(size_t, size, dev->port_usb->fixed_out_len); 230 + 231 + skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); 232 + if (skb == NULL) { 233 + DBG(dev, "no rx skb\n"); 234 + goto enomem; 235 + } 236 + 237 + /* Some platforms perform better when IP packets are aligned, 238 + * but on at least one, checksumming fails otherwise. Note: 239 + * RNDIS headers involve variable numbers of LE32 values. 240 + */ 241 + skb_reserve(skb, NET_IP_ALIGN); 242 + 243 + req->buf = skb->data; 244 + req->length = size; 245 + req->complete = rx_complete; 246 + req->context = skb; 247 + 248 + retval = usb_ep_queue(out, req, gfp_flags); 249 + if (retval == -ENOMEM) 250 + enomem: 251 + defer_kevent(dev, WORK_RX_MEMORY); 252 + if (retval) { 253 + DBG(dev, "rx submit --> %d\n", retval); 254 + if (skb) 255 + dev_kfree_skb_any(skb); 256 + spin_lock_irqsave(&dev->req_lock, flags); 257 + list_add(&req->list, &dev->rx_reqs); 258 + spin_unlock_irqrestore(&dev->req_lock, flags); 259 + } 260 + return retval; 261 + } 262 + 263 + static void rx_complete(struct usb_ep *ep, struct usb_request *req) 264 + { 265 + struct sk_buff *skb = req->context, *skb2; 266 + struct eth_dev *dev = ep->driver_data; 267 + int status = req->status; 268 + 269 + switch (status) { 270 + 271 + /* normal completion */ 272 + case 0: 273 + skb_put(skb, req->actual); 274 + 275 + if (dev->unwrap) { 276 + unsigned long flags; 277 + 278 + spin_lock_irqsave(&dev->lock, flags); 279 + if (dev->port_usb) { 280 + status = dev->unwrap(dev->port_usb, 281 + skb, 282 + &dev->rx_frames); 283 + } else { 284 + dev_kfree_skb_any(skb); 285 + status = -ENOTCONN; 286 + } 287 + spin_unlock_irqrestore(&dev->lock, flags); 288 + } else { 289 + skb_queue_tail(&dev->rx_frames, skb); 290 + } 291 + skb = NULL; 292 + 293 + 
skb2 = skb_dequeue(&dev->rx_frames); 294 + while (skb2) { 295 + if (status < 0 296 + || ETH_HLEN > skb2->len 297 + || skb2->len > ETH_FRAME_LEN) { 298 + dev->net->stats.rx_errors++; 299 + dev->net->stats.rx_length_errors++; 300 + DBG(dev, "rx length %d\n", skb2->len); 301 + dev_kfree_skb_any(skb2); 302 + goto next_frame; 303 + } 304 + skb2->protocol = eth_type_trans(skb2, dev->net); 305 + dev->net->stats.rx_packets++; 306 + dev->net->stats.rx_bytes += skb2->len; 307 + 308 + /* no buffer copies needed, unless hardware can't 309 + * use skb buffers. 310 + */ 311 + status = netif_rx(skb2); 312 + next_frame: 313 + skb2 = skb_dequeue(&dev->rx_frames); 314 + } 315 + break; 316 + 317 + /* software-driven interface shutdown */ 318 + case -ECONNRESET: /* unlink */ 319 + case -ESHUTDOWN: /* disconnect etc */ 320 + VDBG(dev, "rx shutdown, code %d\n", status); 321 + goto quiesce; 322 + 323 + /* for hardware automagic (such as pxa) */ 324 + case -ECONNABORTED: /* endpoint reset */ 325 + DBG(dev, "rx %s reset\n", ep->name); 326 + defer_kevent(dev, WORK_RX_MEMORY); 327 + quiesce: 328 + dev_kfree_skb_any(skb); 329 + goto clean; 330 + 331 + /* data overrun */ 332 + case -EOVERFLOW: 333 + dev->net->stats.rx_over_errors++; 334 + /* FALLTHROUGH */ 335 + 336 + default: 337 + dev->net->stats.rx_errors++; 338 + DBG(dev, "rx status %d\n", status); 339 + break; 340 + } 341 + 342 + if (skb) 343 + dev_kfree_skb_any(skb); 344 + if (!netif_running(dev->net)) { 345 + clean: 346 + spin_lock(&dev->req_lock); 347 + list_add(&req->list, &dev->rx_reqs); 348 + spin_unlock(&dev->req_lock); 349 + req = NULL; 350 + } 351 + if (req) 352 + rx_submit(dev, req, GFP_ATOMIC); 353 + } 354 + 355 + static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) 356 + { 357 + unsigned i; 358 + struct usb_request *req; 359 + 360 + if (!n) 361 + return -ENOMEM; 362 + 363 + /* queue/recycle up to N requests */ 364 + i = n; 365 + list_for_each_entry(req, list, list) { 366 + if (i-- == 0) 367 + goto extra; 
368 + } 369 + while (i--) { 370 + req = usb_ep_alloc_request(ep, GFP_ATOMIC); 371 + if (!req) 372 + return list_empty(list) ? -ENOMEM : 0; 373 + list_add(&req->list, list); 374 + } 375 + return 0; 376 + 377 + extra: 378 + /* free extras */ 379 + for (;;) { 380 + struct list_head *next; 381 + 382 + next = req->list.next; 383 + list_del(&req->list); 384 + usb_ep_free_request(ep, req); 385 + 386 + if (next == list) 387 + break; 388 + 389 + req = container_of(next, struct usb_request, list); 390 + } 391 + return 0; 392 + } 393 + 394 + static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) 395 + { 396 + int status; 397 + 398 + spin_lock(&dev->req_lock); 399 + status = prealloc(&dev->tx_reqs, link->in_ep, n); 400 + if (status < 0) 401 + goto fail; 402 + status = prealloc(&dev->rx_reqs, link->out_ep, n); 403 + if (status < 0) 404 + goto fail; 405 + goto done; 406 + fail: 407 + DBG(dev, "can't alloc requests\n"); 408 + done: 409 + spin_unlock(&dev->req_lock); 410 + return status; 411 + } 412 + 413 + static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) 414 + { 415 + struct usb_request *req; 416 + unsigned long flags; 417 + 418 + /* fill unused rxq slots with some skb */ 419 + spin_lock_irqsave(&dev->req_lock, flags); 420 + while (!list_empty(&dev->rx_reqs)) { 421 + req = container_of(dev->rx_reqs.next, 422 + struct usb_request, list); 423 + list_del_init(&req->list); 424 + spin_unlock_irqrestore(&dev->req_lock, flags); 425 + 426 + if (rx_submit(dev, req, gfp_flags) < 0) { 427 + defer_kevent(dev, WORK_RX_MEMORY); 428 + return; 429 + } 430 + 431 + spin_lock_irqsave(&dev->req_lock, flags); 432 + } 433 + spin_unlock_irqrestore(&dev->req_lock, flags); 434 + } 435 + 436 + static void eth_work(struct work_struct *work) 437 + { 438 + struct eth_dev *dev = container_of(work, struct eth_dev, work); 439 + 440 + if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { 441 + if (netif_running(dev->net)) 442 + rx_fill(dev, GFP_KERNEL); 443 + } 444 + 445 + if 
(dev->todo) 446 + DBG(dev, "work done, flags = 0x%lx\n", dev->todo); 447 + } 448 + 449 + static void tx_complete(struct usb_ep *ep, struct usb_request *req) 450 + { 451 + struct sk_buff *skb = req->context; 452 + struct eth_dev *dev = ep->driver_data; 453 + 454 + switch (req->status) { 455 + default: 456 + dev->net->stats.tx_errors++; 457 + VDBG(dev, "tx err %d\n", req->status); 458 + /* FALLTHROUGH */ 459 + case -ECONNRESET: /* unlink */ 460 + case -ESHUTDOWN: /* disconnect etc */ 461 + break; 462 + case 0: 463 + dev->net->stats.tx_bytes += skb->len; 464 + } 465 + dev->net->stats.tx_packets++; 466 + 467 + spin_lock(&dev->req_lock); 468 + list_add(&req->list, &dev->tx_reqs); 469 + spin_unlock(&dev->req_lock); 470 + dev_kfree_skb_any(skb); 471 + 472 + atomic_dec(&dev->tx_qlen); 473 + if (netif_carrier_ok(dev->net)) 474 + netif_wake_queue(dev->net); 475 + } 476 + 477 + static inline int is_promisc(u16 cdc_filter) 478 + { 479 + return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; 480 + } 481 + 482 + static netdev_tx_t eth_start_xmit(struct sk_buff *skb, 483 + struct net_device *net) 484 + { 485 + struct eth_dev *dev = netdev_priv(net); 486 + int length = skb->len; 487 + int retval; 488 + struct usb_request *req = NULL; 489 + unsigned long flags; 490 + struct usb_ep *in; 491 + u16 cdc_filter; 492 + 493 + spin_lock_irqsave(&dev->lock, flags); 494 + if (dev->port_usb) { 495 + in = dev->port_usb->in_ep; 496 + cdc_filter = dev->port_usb->cdc_filter; 497 + } else { 498 + in = NULL; 499 + cdc_filter = 0; 500 + } 501 + spin_unlock_irqrestore(&dev->lock, flags); 502 + 503 + if (!in) { 504 + dev_kfree_skb_any(skb); 505 + return NETDEV_TX_OK; 506 + } 507 + 508 + /* apply outgoing CDC or RNDIS filters */ 509 + if (!is_promisc(cdc_filter)) { 510 + u8 *dest = skb->data; 511 + 512 + if (is_multicast_ether_addr(dest)) { 513 + u16 type; 514 + 515 + /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host 516 + * SET_ETHERNET_MULTICAST_FILTERS requests 517 + */ 518 + if 
(is_broadcast_ether_addr(dest)) 519 + type = USB_CDC_PACKET_TYPE_BROADCAST; 520 + else 521 + type = USB_CDC_PACKET_TYPE_ALL_MULTICAST; 522 + if (!(cdc_filter & type)) { 523 + dev_kfree_skb_any(skb); 524 + return NETDEV_TX_OK; 525 + } 526 + } 527 + /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ 528 + } 529 + 530 + spin_lock_irqsave(&dev->req_lock, flags); 531 + /* 532 + * this freelist can be empty if an interrupt triggered disconnect() 533 + * and reconfigured the gadget (shutting down this queue) after the 534 + * network stack decided to xmit but before we got the spinlock. 535 + */ 536 + if (list_empty(&dev->tx_reqs)) { 537 + spin_unlock_irqrestore(&dev->req_lock, flags); 538 + return NETDEV_TX_BUSY; 539 + } 540 + 541 + req = container_of(dev->tx_reqs.next, struct usb_request, list); 542 + list_del(&req->list); 543 + 544 + /* temporarily stop TX queue when the freelist empties */ 545 + if (list_empty(&dev->tx_reqs)) 546 + netif_stop_queue(net); 547 + spin_unlock_irqrestore(&dev->req_lock, flags); 548 + 549 + /* no buffer copies needed, unless the network stack did it 550 + * or the hardware can't use skb buffers. 551 + * or there's not enough space for extra headers we need 552 + */ 553 + if (dev->wrap) { 554 + unsigned long flags; 555 + 556 + spin_lock_irqsave(&dev->lock, flags); 557 + if (dev->port_usb) 558 + skb = dev->wrap(dev->port_usb, skb); 559 + spin_unlock_irqrestore(&dev->lock, flags); 560 + if (!skb) 561 + goto drop; 562 + 563 + length = skb->len; 564 + } 565 + req->buf = skb->data; 566 + req->context = skb; 567 + req->complete = tx_complete; 568 + 569 + /* NCM requires no zlp if transfer is dwNtbInMaxSize */ 570 + if (dev->port_usb->is_fixed && 571 + length == dev->port_usb->fixed_in_len && 572 + (length % in->maxpacket) == 0) 573 + req->zero = 0; 574 + else 575 + req->zero = 1; 576 + 577 + /* use zlp framing on tx for strict CDC-Ether conformance, 578 + * though any robust network rx path ignores extra padding. 
579 + * and some hardware doesn't like to write zlps. 580 + */ 581 + if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) 582 + length++; 583 + 584 + req->length = length; 585 + 586 + /* throttle high/super speed IRQ rate back slightly */ 587 + if (gadget_is_dualspeed(dev->gadget)) 588 + req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || 589 + dev->gadget->speed == USB_SPEED_SUPER) 590 + ? ((atomic_read(&dev->tx_qlen) % qmult) != 0) 591 + : 0; 592 + 593 + retval = usb_ep_queue(in, req, GFP_ATOMIC); 594 + switch (retval) { 595 + default: 596 + DBG(dev, "tx queue err %d\n", retval); 597 + break; 598 + case 0: 599 + net->trans_start = jiffies; 600 + atomic_inc(&dev->tx_qlen); 601 + } 602 + 603 + if (retval) { 604 + dev_kfree_skb_any(skb); 605 + drop: 606 + dev->net->stats.tx_dropped++; 607 + spin_lock_irqsave(&dev->req_lock, flags); 608 + if (list_empty(&dev->tx_reqs)) 609 + netif_start_queue(net); 610 + list_add(&req->list, &dev->tx_reqs); 611 + spin_unlock_irqrestore(&dev->req_lock, flags); 612 + } 613 + return NETDEV_TX_OK; 614 + } 615 + 616 + /*-------------------------------------------------------------------------*/ 617 + 618 + static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) 619 + { 620 + DBG(dev, "%s\n", __func__); 621 + 622 + /* fill the rx queue */ 623 + rx_fill(dev, gfp_flags); 624 + 625 + /* and open the tx floodgates */ 626 + atomic_set(&dev->tx_qlen, 0); 627 + netif_wake_queue(dev->net); 628 + } 629 + 630 + static int eth_open(struct net_device *net) 631 + { 632 + struct eth_dev *dev = netdev_priv(net); 633 + struct gether *link; 634 + 635 + DBG(dev, "%s\n", __func__); 636 + if (netif_carrier_ok(dev->net)) 637 + eth_start(dev, GFP_KERNEL); 638 + 639 + spin_lock_irq(&dev->lock); 640 + link = dev->port_usb; 641 + if (link && link->open) 642 + link->open(link); 643 + spin_unlock_irq(&dev->lock); 644 + 645 + return 0; 646 + } 647 + 648 + static int eth_stop(struct net_device *net) 649 + { 650 + struct eth_dev *dev = 
netdev_priv(net); 651 + unsigned long flags; 652 + 653 + VDBG(dev, "%s\n", __func__); 654 + netif_stop_queue(net); 655 + 656 + DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", 657 + dev->net->stats.rx_packets, dev->net->stats.tx_packets, 658 + dev->net->stats.rx_errors, dev->net->stats.tx_errors 659 + ); 660 + 661 + /* ensure there are no more active requests */ 662 + spin_lock_irqsave(&dev->lock, flags); 663 + if (dev->port_usb) { 664 + struct gether *link = dev->port_usb; 665 + 666 + if (link->close) 667 + link->close(link); 668 + 669 + /* NOTE: we have no abort-queue primitive we could use 670 + * to cancel all pending I/O. Instead, we disable then 671 + * reenable the endpoints ... this idiom may leave toggle 672 + * wrong, but that's a self-correcting error. 673 + * 674 + * REVISIT: we *COULD* just let the transfers complete at 675 + * their own pace; the network stack can handle old packets. 676 + * For the moment we leave this here, since it works. 677 + */ 678 + usb_ep_disable(link->in_ep); 679 + usb_ep_disable(link->out_ep); 680 + if (netif_carrier_ok(net)) { 681 + DBG(dev, "host still using in/out endpoints\n"); 682 + usb_ep_enable(link->in_ep); 683 + usb_ep_enable(link->out_ep); 684 + } 685 + } 686 + spin_unlock_irqrestore(&dev->lock, flags); 687 + 688 + return 0; 689 + } 690 + 691 + /*-------------------------------------------------------------------------*/ 692 + 693 + /* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */ 694 + static char *dev_addr; 695 + module_param(dev_addr, charp, S_IRUGO); 696 + MODULE_PARM_DESC(dev_addr, "Device Ethernet Address"); 697 + 698 + /* this address is invisible to ifconfig */ 699 + static char *host_addr; 700 + module_param(host_addr, charp, S_IRUGO); 701 + MODULE_PARM_DESC(host_addr, "Host Ethernet Address"); 702 + 703 + static int get_ether_addr(const char *str, u8 *dev_addr) 704 + { 705 + if (str) { 706 + unsigned i; 707 + 708 + for (i = 0; i < 6; i++) { 709 + unsigned char num; 710 + 711 
+ if ((*str == '.') || (*str == ':')) 712 + str++; 713 + num = hex_to_bin(*str++) << 4; 714 + num |= hex_to_bin(*str++); 715 + dev_addr [i] = num; 716 + } 717 + if (is_valid_ether_addr(dev_addr)) 718 + return 0; 719 + } 720 + eth_random_addr(dev_addr); 721 + return 1; 722 + } 723 + 724 + static struct eth_dev *the_dev; 725 + 726 + static const struct net_device_ops eth_netdev_ops = { 727 + .ndo_open = eth_open, 728 + .ndo_stop = eth_stop, 729 + .ndo_start_xmit = eth_start_xmit, 730 + .ndo_change_mtu = ueth_change_mtu, 731 + .ndo_set_mac_address = eth_mac_addr, 732 + .ndo_validate_addr = eth_validate_addr, 733 + }; 734 + 735 + static struct device_type gadget_type = { 736 + .name = "gadget", 737 + }; 738 + 739 + /** 740 + * gether_setup_name - initialize one ethernet-over-usb link 741 + * @g: gadget to associated with these links 742 + * @ethaddr: NULL, or a buffer in which the ethernet address of the 743 + * host side of the link is recorded 744 + * @netname: name for network device (for example, "usb") 745 + * Context: may sleep 746 + * 747 + * This sets up the single network link that may be exported by a 748 + * gadget driver using this framework. The link layer addresses are 749 + * set up using module parameters. 
750 + * 751 + * Returns negative errno, or zero on success 752 + */ 753 + int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN], 754 + const char *netname) 755 + { 756 + struct eth_dev *dev; 757 + struct net_device *net; 758 + int status; 759 + 760 + if (the_dev) 761 + return -EBUSY; 762 + 763 + net = alloc_etherdev(sizeof *dev); 764 + if (!net) 765 + return -ENOMEM; 766 + 767 + dev = netdev_priv(net); 768 + spin_lock_init(&dev->lock); 769 + spin_lock_init(&dev->req_lock); 770 + INIT_WORK(&dev->work, eth_work); 771 + INIT_LIST_HEAD(&dev->tx_reqs); 772 + INIT_LIST_HEAD(&dev->rx_reqs); 773 + 774 + skb_queue_head_init(&dev->rx_frames); 775 + 776 + /* network device setup */ 777 + dev->net = net; 778 + snprintf(net->name, sizeof(net->name), "%s%%d", netname); 779 + 780 + if (get_ether_addr(dev_addr, net->dev_addr)) 781 + dev_warn(&g->dev, 782 + "using random %s ethernet address\n", "self"); 783 + if (get_ether_addr(host_addr, dev->host_mac)) 784 + dev_warn(&g->dev, 785 + "using random %s ethernet address\n", "host"); 786 + 787 + if (ethaddr) 788 + memcpy(ethaddr, dev->host_mac, ETH_ALEN); 789 + 790 + net->netdev_ops = &eth_netdev_ops; 791 + 792 + SET_ETHTOOL_OPS(net, &ops); 793 + 794 + dev->gadget = g; 795 + SET_NETDEV_DEV(net, &g->dev); 796 + SET_NETDEV_DEVTYPE(net, &gadget_type); 797 + 798 + status = register_netdev(net); 799 + if (status < 0) { 800 + dev_dbg(&g->dev, "register_netdev failed, %d\n", status); 801 + free_netdev(net); 802 + } else { 803 + INFO(dev, "MAC %pM\n", net->dev_addr); 804 + INFO(dev, "HOST MAC %pM\n", dev->host_mac); 805 + 806 + the_dev = dev; 807 + 808 + /* two kinds of host-initiated state changes: 809 + * - iff DATA transfer is active, carrier is "on" 810 + * - tx queueing enabled if open *and* carrier is "on" 811 + */ 812 + netif_carrier_off(net); 813 + } 814 + 815 + return status; 816 + } 817 + 818 + /** 819 + * gether_cleanup - remove Ethernet-over-USB device 820 + * Context: may sleep 821 + * 822 + * This is called to free all 
resources allocated by @gether_setup(). 823 + */ 824 + void gether_cleanup(void) 825 + { 826 + if (!the_dev) 827 + return; 828 + 829 + unregister_netdev(the_dev->net); 830 + flush_work_sync(&the_dev->work); 831 + free_netdev(the_dev->net); 832 + 833 + the_dev = NULL; 834 + } 835 + 836 + 837 + /** 838 + * gether_connect - notify network layer that USB link is active 839 + * @link: the USB link, set up with endpoints, descriptors matching 840 + * current device speed, and any framing wrapper(s) set up. 841 + * Context: irqs blocked 842 + * 843 + * This is called to activate endpoints and let the network layer know 844 + * the connection is active ("carrier detect"). It may cause the I/O 845 + * queues to open and start letting network packets flow, but will in 846 + * any case activate the endpoints so that they respond properly to the 847 + * USB host. 848 + * 849 + * Verify net_device pointer returned using IS_ERR(). If it doesn't 850 + * indicate some error code (negative errno), ep->driver_data values 851 + * have been overwritten. 
852 + */ 853 + struct net_device *gether_connect(struct gether *link) 854 + { 855 + struct eth_dev *dev = the_dev; 856 + int result = 0; 857 + 858 + if (!dev) 859 + return ERR_PTR(-EINVAL); 860 + 861 + link->in_ep->driver_data = dev; 862 + result = usb_ep_enable(link->in_ep); 863 + if (result != 0) { 864 + DBG(dev, "enable %s --> %d\n", 865 + link->in_ep->name, result); 866 + goto fail0; 867 + } 868 + 869 + link->out_ep->driver_data = dev; 870 + result = usb_ep_enable(link->out_ep); 871 + if (result != 0) { 872 + DBG(dev, "enable %s --> %d\n", 873 + link->out_ep->name, result); 874 + goto fail1; 875 + } 876 + 877 + if (result == 0) 878 + result = alloc_requests(dev, link, qlen(dev->gadget)); 879 + 880 + if (result == 0) { 881 + dev->zlp = link->is_zlp_ok; 882 + DBG(dev, "qlen %d\n", qlen(dev->gadget)); 883 + 884 + dev->header_len = link->header_len; 885 + dev->unwrap = link->unwrap; 886 + dev->wrap = link->wrap; 887 + 888 + spin_lock(&dev->lock); 889 + dev->port_usb = link; 890 + link->ioport = dev; 891 + if (netif_running(dev->net)) { 892 + if (link->open) 893 + link->open(link); 894 + } else { 895 + if (link->close) 896 + link->close(link); 897 + } 898 + spin_unlock(&dev->lock); 899 + 900 + netif_carrier_on(dev->net); 901 + if (netif_running(dev->net)) 902 + eth_start(dev, GFP_ATOMIC); 903 + 904 + /* on error, disable any endpoints */ 905 + } else { 906 + (void) usb_ep_disable(link->out_ep); 907 + fail1: 908 + (void) usb_ep_disable(link->in_ep); 909 + } 910 + fail0: 911 + /* caller is responsible for cleanup on error */ 912 + if (result < 0) 913 + return ERR_PTR(result); 914 + return dev->net; 915 + } 916 + 917 + /** 918 + * gether_disconnect - notify network layer that USB link is inactive 919 + * @link: the USB link, on which gether_connect() was called 920 + * Context: irqs blocked 921 + * 922 + * This is called to deactivate endpoints and let the network layer know 923 + * the connection went inactive ("no carrier"). 
924 + * 925 + * On return, the state is as if gether_connect() had never been called. 926 + * The endpoints are inactive, and accordingly without active USB I/O. 927 + * Pointers to endpoint descriptors and endpoint private data are nulled. 928 + */ 929 + void gether_disconnect(struct gether *link) 930 + { 931 + struct eth_dev *dev = link->ioport; 932 + struct usb_request *req; 933 + 934 + WARN_ON(!dev); 935 + if (!dev) 936 + return; 937 + 938 + DBG(dev, "%s\n", __func__); 939 + 940 + netif_stop_queue(dev->net); 941 + netif_carrier_off(dev->net); 942 + 943 + /* disable endpoints, forcing (synchronous) completion 944 + * of all pending i/o. then free the request objects 945 + * and forget about the endpoints. 946 + */ 947 + usb_ep_disable(link->in_ep); 948 + spin_lock(&dev->req_lock); 949 + while (!list_empty(&dev->tx_reqs)) { 950 + req = container_of(dev->tx_reqs.next, 951 + struct usb_request, list); 952 + list_del(&req->list); 953 + 954 + spin_unlock(&dev->req_lock); 955 + usb_ep_free_request(link->in_ep, req); 956 + spin_lock(&dev->req_lock); 957 + } 958 + spin_unlock(&dev->req_lock); 959 + link->in_ep->driver_data = NULL; 960 + link->in_ep->desc = NULL; 961 + 962 + usb_ep_disable(link->out_ep); 963 + spin_lock(&dev->req_lock); 964 + while (!list_empty(&dev->rx_reqs)) { 965 + req = container_of(dev->rx_reqs.next, 966 + struct usb_request, list); 967 + list_del(&req->list); 968 + 969 + spin_unlock(&dev->req_lock); 970 + usb_ep_free_request(link->out_ep, req); 971 + spin_lock(&dev->req_lock); 972 + } 973 + spin_unlock(&dev->req_lock); 974 + link->out_ep->driver_data = NULL; 975 + link->out_ep->desc = NULL; 976 + 977 + /* finish forgetting about this USB link episode */ 978 + dev->header_len = 0; 979 + dev->unwrap = NULL; 980 + dev->wrap = NULL; 981 + 982 + spin_lock(&dev->lock); 983 + dev->port_usb = NULL; 984 + link->ioport = NULL; 985 + spin_unlock(&dev->lock); 986 + }
+154
drivers/staging/ccg/u_ether.h
··· 1 + /* 2 + * u_ether.h -- interface to USB gadget "ethernet link" utilities 3 + * 4 + * Copyright (C) 2003-2005,2008 David Brownell 5 + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger 6 + * Copyright (C) 2008 Nokia Corporation 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License as published by 10 + * the Free Software Foundation; either version 2 of the License, or 11 + * (at your option) any later version. 12 + */ 13 + 14 + #ifndef __U_ETHER_H 15 + #define __U_ETHER_H 16 + 17 + #include <linux/err.h> 18 + #include <linux/if_ether.h> 19 + #include <linux/usb/composite.h> 20 + #include <linux/usb/cdc.h> 21 + 22 + #include "gadget_chips.h" 23 + 24 + 25 + /* 26 + * This represents the USB side of an "ethernet" link, managed by a USB 27 + * function which provides control and (maybe) framing. Two functions 28 + * in different configurations could share the same ethernet link/netdev, 29 + * using different host interaction models. 30 + * 31 + * There is a current limitation that only one instance of this link may 32 + * be present in any given configuration. When that's a problem, network 33 + * layer facilities can be used to package multiple logical links on this 34 + * single "physical" one. 35 + */ 36 + struct gether { 37 + struct usb_function func; 38 + 39 + /* updated by gether_{connect,disconnect} */ 40 + struct eth_dev *ioport; 41 + 42 + /* endpoints handle full and/or high speeds */ 43 + struct usb_ep *in_ep; 44 + struct usb_ep *out_ep; 45 + 46 + bool is_zlp_ok; 47 + 48 + u16 cdc_filter; 49 + 50 + /* hooks for added framing, as needed for RNDIS and EEM. 
*/ 51 + u32 header_len; 52 + /* NCM requires fixed size bundles */ 53 + bool is_fixed; 54 + u32 fixed_out_len; 55 + u32 fixed_in_len; 56 + struct sk_buff *(*wrap)(struct gether *port, 57 + struct sk_buff *skb); 58 + int (*unwrap)(struct gether *port, 59 + struct sk_buff *skb, 60 + struct sk_buff_head *list); 61 + 62 + /* called on network open/close */ 63 + void (*open)(struct gether *); 64 + void (*close)(struct gether *); 65 + }; 66 + 67 + #define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ 68 + |USB_CDC_PACKET_TYPE_ALL_MULTICAST \ 69 + |USB_CDC_PACKET_TYPE_PROMISCUOUS \ 70 + |USB_CDC_PACKET_TYPE_DIRECTED) 71 + 72 + /* variant of gether_setup that allows customizing network device name */ 73 + int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN], 74 + const char *netname); 75 + 76 + /* netdev setup/teardown as directed by the gadget driver */ 77 + /* gether_setup - initialize one ethernet-over-usb link 78 + * @g: gadget to associated with these links 79 + * @ethaddr: NULL, or a buffer in which the ethernet address of the 80 + * host side of the link is recorded 81 + * Context: may sleep 82 + * 83 + * This sets up the single network link that may be exported by a 84 + * gadget driver using this framework. The link layer addresses are 85 + * set up using module parameters. 86 + * 87 + * Returns negative errno, or zero on success 88 + */ 89 + static inline int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]) 90 + { 91 + return gether_setup_name(g, ethaddr, "usb"); 92 + } 93 + 94 + void gether_cleanup(void); 95 + 96 + /* connect/disconnect is handled by individual functions */ 97 + struct net_device *gether_connect(struct gether *); 98 + void gether_disconnect(struct gether *); 99 + 100 + /* Some controllers can't support CDC Ethernet (ECM) ... */ 101 + static inline bool can_support_ecm(struct usb_gadget *gadget) 102 + { 103 + if (!gadget_supports_altsettings(gadget)) 104 + return false; 105 + 106 + /* Everything else is *presumably* fine ... 
but this is a bit 107 + * chancy, so be **CERTAIN** there are no hardware issues with 108 + * your controller. Add it above if it can't handle CDC. 109 + */ 110 + return true; 111 + } 112 + 113 + /* each configuration may bind one instance of an ethernet link */ 114 + int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); 115 + int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); 116 + int ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]); 117 + int eem_bind_config(struct usb_configuration *c); 118 + 119 + #ifdef USB_ETH_RNDIS 120 + 121 + int rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN], 122 + u32 vendorID, const char *manufacturer); 123 + 124 + #else 125 + 126 + static inline int 127 + rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN], 128 + u32 vendorID, const char *manufacturer) 129 + { 130 + return 0; 131 + } 132 + 133 + #endif 134 + 135 + /** 136 + * rndis_bind_config - add RNDIS network link to a configuration 137 + * @c: the configuration to support the network link 138 + * @ethaddr: a buffer in which the ethernet address of the host side 139 + * side of the link was recorded 140 + * Context: single threaded during gadget setup 141 + * 142 + * Returns zero on success, else negative errno. 143 + * 144 + * Caller must have called @gether_setup(). Caller is also responsible 145 + * for calling @gether_cleanup() before module unload. 146 + */ 147 + static inline int rndis_bind_config(struct usb_configuration *c, 148 + u8 ethaddr[ETH_ALEN]) 149 + { 150 + return rndis_bind_config_vendor(c, ethaddr, 0, NULL); 151 + } 152 + 153 + 154 + #endif /* __U_ETHER_H */
+1341
drivers/staging/ccg/u_serial.c
··· 1 + /* 2 + * u_serial.c - utilities for USB gadget "serial port"/TTY support 3 + * 4 + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) 5 + * Copyright (C) 2008 David Brownell 6 + * Copyright (C) 2008 by Nokia Corporation 7 + * 8 + * This code also borrows from usbserial.c, which is 9 + * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) 10 + * Copyright (C) 2000 Peter Berger (pberger@brimson.com) 11 + * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com) 12 + * 13 + * This software is distributed under the terms of the GNU General 14 + * Public License ("GPL") as published by the Free Software Foundation, 15 + * either version 2 of that License or (at your option) any later version. 16 + */ 17 + 18 + /* #define VERBOSE_DEBUG */ 19 + 20 + #include <linux/kernel.h> 21 + #include <linux/sched.h> 22 + #include <linux/interrupt.h> 23 + #include <linux/device.h> 24 + #include <linux/delay.h> 25 + #include <linux/tty.h> 26 + #include <linux/tty_flip.h> 27 + #include <linux/slab.h> 28 + #include <linux/export.h> 29 + 30 + #include "u_serial.h" 31 + 32 + 33 + /* 34 + * This component encapsulates the TTY layer glue needed to provide basic 35 + * "serial port" functionality through the USB gadget stack. Each such 36 + * port is exposed through a /dev/ttyGS* node. 37 + * 38 + * After initialization (gserial_setup), these TTY port devices stay 39 + * available until they are removed (gserial_cleanup). Each one may be 40 + * connected to a USB function (gserial_connect), or disconnected (with 41 + * gserial_disconnect) when the USB host issues a config change event. 42 + * Data can only flow when the port is connected to the host. 43 + * 44 + * A given TTY port can be made available in multiple configurations. 45 + * For example, each one might expose a ttyGS0 node which provides a 46 + * login application. In one case that might use CDC ACM interface 0, 47 + * while another configuration might use interface 3 for that. 
The 48 + * work to handle that (including descriptor management) is not part 49 + * of this component. 50 + * 51 + * Configurations may expose more than one TTY port. For example, if 52 + * ttyGS0 provides login service, then ttyGS1 might provide dialer access 53 + * for a telephone or fax link. And ttyGS2 might be something that just 54 + * needs a simple byte stream interface for some messaging protocol that 55 + * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. 56 + */ 57 + 58 + #define PREFIX "ttyGS" 59 + 60 + /* 61 + * gserial is the lifecycle interface, used by USB functions 62 + * gs_port is the I/O nexus, used by the tty driver 63 + * tty_struct links to the tty/filesystem framework 64 + * 65 + * gserial <---> gs_port ... links will be null when the USB link is 66 + * inactive; managed by gserial_{connect,disconnect}(). each gserial 67 + * instance can wrap its own USB control protocol. 68 + * gserial->ioport == usb_ep->driver_data ... gs_port 69 + * gs_port->port_usb ... gserial 70 + * 71 + * gs_port <---> tty_struct ... links will be null when the TTY file 72 + * isn't opened; managed by gs_open()/gs_close() 73 + * gserial->port_tty ... tty_struct 74 + * tty_struct->driver_data ... gserial 75 + */ 76 + 77 + /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the 78 + * next layer of buffering. For TX that's a circular buffer; for RX 79 + * consider it a NOP. A third layer is provided by the TTY code. 80 + */ 81 + #define QUEUE_SIZE 16 82 + #define WRITE_BUF_SIZE 8192 /* TX only */ 83 + 84 + /* circular buffer */ 85 + struct gs_buf { 86 + unsigned buf_size; 87 + char *buf_buf; 88 + char *buf_get; 89 + char *buf_put; 90 + }; 91 + 92 + /* 93 + * The port structure holds info for each port, one for each minor number 94 + * (and thus for each /dev/ node). 
95 + */ 96 + struct gs_port { 97 + struct tty_port port; 98 + spinlock_t port_lock; /* guard port_* access */ 99 + 100 + struct gserial *port_usb; 101 + 102 + bool openclose; /* open/close in progress */ 103 + u8 port_num; 104 + 105 + struct list_head read_pool; 106 + int read_started; 107 + int read_allocated; 108 + struct list_head read_queue; 109 + unsigned n_read; 110 + struct tasklet_struct push; 111 + 112 + struct list_head write_pool; 113 + int write_started; 114 + int write_allocated; 115 + struct gs_buf port_write_buf; 116 + wait_queue_head_t drain_wait; /* wait while writes drain */ 117 + 118 + /* REVISIT this state ... */ 119 + struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ 120 + }; 121 + 122 + /* increase N_PORTS if you need more */ 123 + #define N_PORTS 4 124 + static struct portmaster { 125 + struct mutex lock; /* protect open/close */ 126 + struct gs_port *port; 127 + } ports[N_PORTS]; 128 + static unsigned n_ports; 129 + 130 + #define GS_CLOSE_TIMEOUT 15 /* seconds */ 131 + 132 + 133 + 134 + #ifdef VERBOSE_DEBUG 135 + #define pr_vdebug(fmt, arg...) \ 136 + pr_debug(fmt, ##arg) 137 + #else 138 + #define pr_vdebug(fmt, arg...) \ 139 + ({ if (0) pr_debug(fmt, ##arg); }) 140 + #endif 141 + 142 + /*-------------------------------------------------------------------------*/ 143 + 144 + /* Circular Buffer */ 145 + 146 + /* 147 + * gs_buf_alloc 148 + * 149 + * Allocate a circular buffer and all associated memory. 150 + */ 151 + static int gs_buf_alloc(struct gs_buf *gb, unsigned size) 152 + { 153 + gb->buf_buf = kmalloc(size, GFP_KERNEL); 154 + if (gb->buf_buf == NULL) 155 + return -ENOMEM; 156 + 157 + gb->buf_size = size; 158 + gb->buf_put = gb->buf_buf; 159 + gb->buf_get = gb->buf_buf; 160 + 161 + return 0; 162 + } 163 + 164 + /* 165 + * gs_buf_free 166 + * 167 + * Free the buffer and all associated memory. 
168 + */ 169 + static void gs_buf_free(struct gs_buf *gb) 170 + { 171 + kfree(gb->buf_buf); 172 + gb->buf_buf = NULL; 173 + } 174 + 175 + /* 176 + * gs_buf_clear 177 + * 178 + * Clear out all data in the circular buffer. 179 + */ 180 + static void gs_buf_clear(struct gs_buf *gb) 181 + { 182 + gb->buf_get = gb->buf_put; 183 + /* equivalent to a get of all data available */ 184 + } 185 + 186 + /* 187 + * gs_buf_data_avail 188 + * 189 + * Return the number of bytes of data written into the circular 190 + * buffer. 191 + */ 192 + static unsigned gs_buf_data_avail(struct gs_buf *gb) 193 + { 194 + return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size; 195 + } 196 + 197 + /* 198 + * gs_buf_space_avail 199 + * 200 + * Return the number of bytes of space available in the circular 201 + * buffer. 202 + */ 203 + static unsigned gs_buf_space_avail(struct gs_buf *gb) 204 + { 205 + return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size; 206 + } 207 + 208 + /* 209 + * gs_buf_put 210 + * 211 + * Copy data data from a user buffer and put it into the circular buffer. 212 + * Restrict to the amount of space available. 213 + * 214 + * Return the number of bytes copied. 215 + */ 216 + static unsigned 217 + gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count) 218 + { 219 + unsigned len; 220 + 221 + len = gs_buf_space_avail(gb); 222 + if (count > len) 223 + count = len; 224 + 225 + if (count == 0) 226 + return 0; 227 + 228 + len = gb->buf_buf + gb->buf_size - gb->buf_put; 229 + if (count > len) { 230 + memcpy(gb->buf_put, buf, len); 231 + memcpy(gb->buf_buf, buf+len, count - len); 232 + gb->buf_put = gb->buf_buf + count - len; 233 + } else { 234 + memcpy(gb->buf_put, buf, count); 235 + if (count < len) 236 + gb->buf_put += count; 237 + else /* count == len */ 238 + gb->buf_put = gb->buf_buf; 239 + } 240 + 241 + return count; 242 + } 243 + 244 + /* 245 + * gs_buf_get 246 + * 247 + * Get data from the circular buffer and copy to the given buffer. 
248 + * Restrict to the amount of data available. 249 + * 250 + * Return the number of bytes copied. 251 + */ 252 + static unsigned 253 + gs_buf_get(struct gs_buf *gb, char *buf, unsigned count) 254 + { 255 + unsigned len; 256 + 257 + len = gs_buf_data_avail(gb); 258 + if (count > len) 259 + count = len; 260 + 261 + if (count == 0) 262 + return 0; 263 + 264 + len = gb->buf_buf + gb->buf_size - gb->buf_get; 265 + if (count > len) { 266 + memcpy(buf, gb->buf_get, len); 267 + memcpy(buf+len, gb->buf_buf, count - len); 268 + gb->buf_get = gb->buf_buf + count - len; 269 + } else { 270 + memcpy(buf, gb->buf_get, count); 271 + if (count < len) 272 + gb->buf_get += count; 273 + else /* count == len */ 274 + gb->buf_get = gb->buf_buf; 275 + } 276 + 277 + return count; 278 + } 279 + 280 + /*-------------------------------------------------------------------------*/ 281 + 282 + /* I/O glue between TTY (upper) and USB function (lower) driver layers */ 283 + 284 + /* 285 + * gs_alloc_req 286 + * 287 + * Allocate a usb_request and its buffer. Returns a pointer to the 288 + * usb_request or NULL if there is an error. 289 + */ 290 + struct usb_request * 291 + gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) 292 + { 293 + struct usb_request *req; 294 + 295 + req = usb_ep_alloc_request(ep, kmalloc_flags); 296 + 297 + if (req != NULL) { 298 + req->length = len; 299 + req->buf = kmalloc(len, kmalloc_flags); 300 + if (req->buf == NULL) { 301 + usb_ep_free_request(ep, req); 302 + return NULL; 303 + } 304 + } 305 + 306 + return req; 307 + } 308 + 309 + /* 310 + * gs_free_req 311 + * 312 + * Free a usb_request and its buffer. 313 + */ 314 + void gs_free_req(struct usb_ep *ep, struct usb_request *req) 315 + { 316 + kfree(req->buf); 317 + usb_ep_free_request(ep, req); 318 + } 319 + 320 + /* 321 + * gs_send_packet 322 + * 323 + * If there is data to send, a packet is built in the given 324 + * buffer and the size is returned. 
If there is no data to 325 + * send, 0 is returned. 326 + * 327 + * Called with port_lock held. 328 + */ 329 + static unsigned 330 + gs_send_packet(struct gs_port *port, char *packet, unsigned size) 331 + { 332 + unsigned len; 333 + 334 + len = gs_buf_data_avail(&port->port_write_buf); 335 + if (len < size) 336 + size = len; 337 + if (size != 0) 338 + size = gs_buf_get(&port->port_write_buf, packet, size); 339 + return size; 340 + } 341 + 342 + /* 343 + * gs_start_tx 344 + * 345 + * This function finds available write requests, calls 346 + * gs_send_packet to fill these packets with data, and 347 + * continues until either there are no more write requests 348 + * available or no more data to send. This function is 349 + * run whenever data arrives or write requests are available. 350 + * 351 + * Context: caller owns port_lock; port_usb is non-null. 352 + */ 353 + static int gs_start_tx(struct gs_port *port) 354 + /* 355 + __releases(&port->port_lock) 356 + __acquires(&port->port_lock) 357 + */ 358 + { 359 + struct list_head *pool = &port->write_pool; 360 + struct usb_ep *in = port->port_usb->in; 361 + int status = 0; 362 + bool do_tty_wake = false; 363 + 364 + while (!list_empty(pool)) { 365 + struct usb_request *req; 366 + int len; 367 + 368 + if (port->write_started >= QUEUE_SIZE) 369 + break; 370 + 371 + req = list_entry(pool->next, struct usb_request, list); 372 + len = gs_send_packet(port, req->buf, in->maxpacket); 373 + if (len == 0) { 374 + wake_up_interruptible(&port->drain_wait); 375 + break; 376 + } 377 + do_tty_wake = true; 378 + 379 + req->length = len; 380 + list_del(&req->list); 381 + req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0); 382 + 383 + pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", 384 + port->port_num, len, *((u8 *)req->buf), 385 + *((u8 *)req->buf+1), *((u8 *)req->buf+2)); 386 + 387 + /* Drop lock while we call out of driver; completions 388 + * could be issued while we do so. 
Disconnection may 389 + * happen too; maybe immediately before we queue this! 390 + * 391 + * NOTE that we may keep sending data for a while after 392 + * the TTY closed (dev->ioport->port_tty is NULL). 393 + */ 394 + spin_unlock(&port->port_lock); 395 + status = usb_ep_queue(in, req, GFP_ATOMIC); 396 + spin_lock(&port->port_lock); 397 + 398 + if (status) { 399 + pr_debug("%s: %s %s err %d\n", 400 + __func__, "queue", in->name, status); 401 + list_add(&req->list, pool); 402 + break; 403 + } 404 + 405 + port->write_started++; 406 + 407 + /* abort immediately after disconnect */ 408 + if (!port->port_usb) 409 + break; 410 + } 411 + 412 + if (do_tty_wake && port->port.tty) 413 + tty_wakeup(port->port.tty); 414 + return status; 415 + } 416 + 417 + /* 418 + * Context: caller owns port_lock, and port_usb is set 419 + */ 420 + static unsigned gs_start_rx(struct gs_port *port) 421 + /* 422 + __releases(&port->port_lock) 423 + __acquires(&port->port_lock) 424 + */ 425 + { 426 + struct list_head *pool = &port->read_pool; 427 + struct usb_ep *out = port->port_usb->out; 428 + 429 + while (!list_empty(pool)) { 430 + struct usb_request *req; 431 + int status; 432 + struct tty_struct *tty; 433 + 434 + /* no more rx if closed */ 435 + tty = port->port.tty; 436 + if (!tty) 437 + break; 438 + 439 + if (port->read_started >= QUEUE_SIZE) 440 + break; 441 + 442 + req = list_entry(pool->next, struct usb_request, list); 443 + list_del(&req->list); 444 + req->length = out->maxpacket; 445 + 446 + /* drop lock while we call out; the controller driver 447 + * may need to call us back (e.g. 
for disconnect) 448 + */ 449 + spin_unlock(&port->port_lock); 450 + status = usb_ep_queue(out, req, GFP_ATOMIC); 451 + spin_lock(&port->port_lock); 452 + 453 + if (status) { 454 + pr_debug("%s: %s %s err %d\n", 455 + __func__, "queue", out->name, status); 456 + list_add(&req->list, pool); 457 + break; 458 + } 459 + port->read_started++; 460 + 461 + /* abort immediately after disconnect */ 462 + if (!port->port_usb) 463 + break; 464 + } 465 + return port->read_started; 466 + } 467 + 468 + /* 469 + * RX tasklet takes data out of the RX queue and hands it up to the TTY 470 + * layer until it refuses to take any more data (or is throttled back). 471 + * Then it issues reads for any further data. 472 + * 473 + * If the RX queue becomes full enough that no usb_request is queued, 474 + * the OUT endpoint may begin NAKing as soon as its FIFO fills up. 475 + * So QUEUE_SIZE packets plus however many the FIFO holds (usually two) 476 + * can be buffered before the TTY layer's buffers (currently 64 KB). 
477 + */ 478 + static void gs_rx_push(unsigned long _port) 479 + { 480 + struct gs_port *port = (void *)_port; 481 + struct tty_struct *tty; 482 + struct list_head *queue = &port->read_queue; 483 + bool disconnect = false; 484 + bool do_push = false; 485 + 486 + /* hand any queued data to the tty */ 487 + spin_lock_irq(&port->port_lock); 488 + tty = port->port.tty; 489 + while (!list_empty(queue)) { 490 + struct usb_request *req; 491 + 492 + req = list_first_entry(queue, struct usb_request, list); 493 + 494 + /* discard data if tty was closed */ 495 + if (!tty) 496 + goto recycle; 497 + 498 + /* leave data queued if tty was rx throttled */ 499 + if (test_bit(TTY_THROTTLED, &tty->flags)) 500 + break; 501 + 502 + switch (req->status) { 503 + case -ESHUTDOWN: 504 + disconnect = true; 505 + pr_vdebug(PREFIX "%d: shutdown\n", port->port_num); 506 + break; 507 + 508 + default: 509 + /* presumably a transient fault */ 510 + pr_warning(PREFIX "%d: unexpected RX status %d\n", 511 + port->port_num, req->status); 512 + /* FALLTHROUGH */ 513 + case 0: 514 + /* normal completion */ 515 + break; 516 + } 517 + 518 + /* push data to (open) tty */ 519 + if (req->actual) { 520 + char *packet = req->buf; 521 + unsigned size = req->actual; 522 + unsigned n; 523 + int count; 524 + 525 + /* we may have pushed part of this packet already... 
*/ 526 + n = port->n_read; 527 + if (n) { 528 + packet += n; 529 + size -= n; 530 + } 531 + 532 + count = tty_insert_flip_string(tty, packet, size); 533 + if (count) 534 + do_push = true; 535 + if (count != size) { 536 + /* stop pushing; TTY layer can't handle more */ 537 + port->n_read += count; 538 + pr_vdebug(PREFIX "%d: rx block %d/%d\n", 539 + port->port_num, 540 + count, req->actual); 541 + break; 542 + } 543 + port->n_read = 0; 544 + } 545 + recycle: 546 + list_move(&req->list, &port->read_pool); 547 + port->read_started--; 548 + } 549 + 550 + /* Push from tty to ldisc; without low_latency set this is handled by 551 + * a workqueue, so we won't get callbacks and can hold port_lock 552 + */ 553 + if (tty && do_push) 554 + tty_flip_buffer_push(tty); 555 + 556 + 557 + /* We want our data queue to become empty ASAP, keeping data 558 + * in the tty and ldisc (not here). If we couldn't push any 559 + * this time around, there may be trouble unless there's an 560 + * implicit tty_unthrottle() call on its way... 561 + * 562 + * REVISIT we should probably add a timer to keep the tasklet 563 + * from starving ... but it's not clear that case ever happens. 564 + */ 565 + if (!list_empty(queue) && tty) { 566 + if (!test_bit(TTY_THROTTLED, &tty->flags)) { 567 + if (do_push) 568 + tasklet_schedule(&port->push); 569 + else 570 + pr_warning(PREFIX "%d: RX not scheduled?\n", 571 + port->port_num); 572 + } 573 + } 574 + 575 + /* If we're still connected, refill the USB RX queue. */ 576 + if (!disconnect && port->port_usb) 577 + gs_start_rx(port); 578 + 579 + spin_unlock_irq(&port->port_lock); 580 + } 581 + 582 + static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) 583 + { 584 + struct gs_port *port = ep->driver_data; 585 + 586 + /* Queue all received data until the tty layer is ready for it. 
*/ 587 + spin_lock(&port->port_lock); 588 + list_add_tail(&req->list, &port->read_queue); 589 + tasklet_schedule(&port->push); 590 + spin_unlock(&port->port_lock); 591 + } 592 + 593 + static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) 594 + { 595 + struct gs_port *port = ep->driver_data; 596 + 597 + spin_lock(&port->port_lock); 598 + list_add(&req->list, &port->write_pool); 599 + port->write_started--; 600 + 601 + switch (req->status) { 602 + default: 603 + /* presumably a transient fault */ 604 + pr_warning("%s: unexpected %s status %d\n", 605 + __func__, ep->name, req->status); 606 + /* FALL THROUGH */ 607 + case 0: 608 + /* normal completion */ 609 + gs_start_tx(port); 610 + break; 611 + 612 + case -ESHUTDOWN: 613 + /* disconnect */ 614 + pr_vdebug("%s: %s shutdown\n", __func__, ep->name); 615 + break; 616 + } 617 + 618 + spin_unlock(&port->port_lock); 619 + } 620 + 621 + static void gs_free_requests(struct usb_ep *ep, struct list_head *head, 622 + int *allocated) 623 + { 624 + struct usb_request *req; 625 + 626 + while (!list_empty(head)) { 627 + req = list_entry(head->next, struct usb_request, list); 628 + list_del(&req->list); 629 + gs_free_req(ep, req); 630 + if (allocated) 631 + (*allocated)--; 632 + } 633 + } 634 + 635 + static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, 636 + void (*fn)(struct usb_ep *, struct usb_request *), 637 + int *allocated) 638 + { 639 + int i; 640 + struct usb_request *req; 641 + int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE; 642 + 643 + /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't 644 + * do quite that many this time, don't fail ... we just won't 645 + * be as speedy as we might otherwise be. 646 + */ 647 + for (i = 0; i < n; i++) { 648 + req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); 649 + if (!req) 650 + return list_empty(head) ? 
-ENOMEM : 0; 651 + req->complete = fn; 652 + list_add_tail(&req->list, head); 653 + if (allocated) 654 + (*allocated)++; 655 + } 656 + return 0; 657 + } 658 + 659 + /** 660 + * gs_start_io - start USB I/O streams 661 + * @dev: encapsulates endpoints to use 662 + * Context: holding port_lock; port_tty and port_usb are non-null 663 + * 664 + * We only start I/O when something is connected to both sides of 665 + * this port. If nothing is listening on the host side, we may 666 + * be pointlessly filling up our TX buffers and FIFO. 667 + */ 668 + static int gs_start_io(struct gs_port *port) 669 + { 670 + struct list_head *head = &port->read_pool; 671 + struct usb_ep *ep = port->port_usb->out; 672 + int status; 673 + unsigned started; 674 + 675 + /* Allocate RX and TX I/O buffers. We can't easily do this much 676 + * earlier (with GFP_KERNEL) because the requests are coupled to 677 + * endpoints, as are the packet sizes we'll be using. Different 678 + * configurations may use different endpoints with a given port; 679 + * and high speed vs full speed changes packet sizes too. 
680 + */ 681 + status = gs_alloc_requests(ep, head, gs_read_complete, 682 + &port->read_allocated); 683 + if (status) 684 + return status; 685 + 686 + status = gs_alloc_requests(port->port_usb->in, &port->write_pool, 687 + gs_write_complete, &port->write_allocated); 688 + if (status) { 689 + gs_free_requests(ep, head, &port->read_allocated); 690 + return status; 691 + } 692 + 693 + /* queue read requests */ 694 + port->n_read = 0; 695 + started = gs_start_rx(port); 696 + 697 + /* unblock any pending writes into our circular buffer */ 698 + if (started) { 699 + tty_wakeup(port->port.tty); 700 + } else { 701 + gs_free_requests(ep, head, &port->read_allocated); 702 + gs_free_requests(port->port_usb->in, &port->write_pool, 703 + &port->write_allocated); 704 + status = -EIO; 705 + } 706 + 707 + return status; 708 + } 709 + 710 + /*-------------------------------------------------------------------------*/ 711 + 712 + /* TTY Driver */ 713 + 714 + /* 715 + * gs_open sets up the link between a gs_port and its associated TTY. 716 + * That link is broken *only* by TTY close(), and all driver methods 717 + * know that. 718 + */ 719 + static int gs_open(struct tty_struct *tty, struct file *file) 720 + { 721 + int port_num = tty->index; 722 + struct gs_port *port; 723 + int status; 724 + 725 + do { 726 + mutex_lock(&ports[port_num].lock); 727 + port = ports[port_num].port; 728 + if (!port) 729 + status = -ENODEV; 730 + else { 731 + spin_lock_irq(&port->port_lock); 732 + 733 + /* already open? Great. */ 734 + if (port->port.count) { 735 + status = 0; 736 + port->port.count++; 737 + 738 + /* currently opening/closing? wait ... */ 739 + } else if (port->openclose) { 740 + status = -EBUSY; 741 + 742 + /* ... 
else we do the work */ 743 + } else { 744 + status = -EAGAIN; 745 + port->openclose = true; 746 + } 747 + spin_unlock_irq(&port->port_lock); 748 + } 749 + mutex_unlock(&ports[port_num].lock); 750 + 751 + switch (status) { 752 + default: 753 + /* fully handled */ 754 + return status; 755 + case -EAGAIN: 756 + /* must do the work */ 757 + break; 758 + case -EBUSY: 759 + /* wait for EAGAIN task to finish */ 760 + msleep(1); 761 + /* REVISIT could have a waitchannel here, if 762 + * concurrent open performance is important 763 + */ 764 + break; 765 + } 766 + } while (status != -EAGAIN); 767 + 768 + /* Do the "real open" */ 769 + spin_lock_irq(&port->port_lock); 770 + 771 + /* allocate circular buffer on first open */ 772 + if (port->port_write_buf.buf_buf == NULL) { 773 + 774 + spin_unlock_irq(&port->port_lock); 775 + status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE); 776 + spin_lock_irq(&port->port_lock); 777 + 778 + if (status) { 779 + pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n", 780 + port->port_num, tty, file); 781 + port->openclose = false; 782 + goto exit_unlock_port; 783 + } 784 + } 785 + 786 + /* REVISIT if REMOVED (ports[].port NULL), abort the open 787 + * to let rmmod work faster (but this way isn't wrong). 
788 + */ 789 + 790 + /* REVISIT maybe wait for "carrier detect" */ 791 + 792 + tty->driver_data = port; 793 + port->port.tty = tty; 794 + 795 + port->port.count = 1; 796 + port->openclose = false; 797 + 798 + /* if connected, start the I/O stream */ 799 + if (port->port_usb) { 800 + struct gserial *gser = port->port_usb; 801 + 802 + pr_debug("gs_open: start ttyGS%d\n", port->port_num); 803 + gs_start_io(port); 804 + 805 + if (gser->connect) 806 + gser->connect(gser); 807 + } 808 + 809 + pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); 810 + 811 + status = 0; 812 + 813 + exit_unlock_port: 814 + spin_unlock_irq(&port->port_lock); 815 + return status; 816 + } 817 + 818 + static int gs_writes_finished(struct gs_port *p) 819 + { 820 + int cond; 821 + 822 + /* return true on disconnect or empty buffer */ 823 + spin_lock_irq(&p->port_lock); 824 + cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf); 825 + spin_unlock_irq(&p->port_lock); 826 + 827 + return cond; 828 + } 829 + 830 + static void gs_close(struct tty_struct *tty, struct file *file) 831 + { 832 + struct gs_port *port = tty->driver_data; 833 + struct gserial *gser; 834 + 835 + spin_lock_irq(&port->port_lock); 836 + 837 + if (port->port.count != 1) { 838 + if (port->port.count == 0) 839 + WARN_ON(1); 840 + else 841 + --port->port.count; 842 + goto exit; 843 + } 844 + 845 + pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file); 846 + 847 + /* mark port as closing but in use; we can drop port lock 848 + * and sleep if necessary 849 + */ 850 + port->openclose = true; 851 + port->port.count = 0; 852 + 853 + gser = port->port_usb; 854 + if (gser && gser->disconnect) 855 + gser->disconnect(gser); 856 + 857 + /* wait for circular write buffer to drain, disconnect, or at 858 + * most GS_CLOSE_TIMEOUT seconds; then discard the rest 859 + */ 860 + if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) { 861 + spin_unlock_irq(&port->port_lock); 862 + 
wait_event_interruptible_timeout(port->drain_wait, 863 + gs_writes_finished(port), 864 + GS_CLOSE_TIMEOUT * HZ); 865 + spin_lock_irq(&port->port_lock); 866 + gser = port->port_usb; 867 + } 868 + 869 + /* Iff we're disconnected, there can be no I/O in flight so it's 870 + * ok to free the circular buffer; else just scrub it. And don't 871 + * let the push tasklet fire again until we're re-opened. 872 + */ 873 + if (gser == NULL) 874 + gs_buf_free(&port->port_write_buf); 875 + else 876 + gs_buf_clear(&port->port_write_buf); 877 + 878 + tty->driver_data = NULL; 879 + port->port.tty = NULL; 880 + 881 + port->openclose = false; 882 + 883 + pr_debug("gs_close: ttyGS%d (%p,%p) done!\n", 884 + port->port_num, tty, file); 885 + 886 + wake_up_interruptible(&port->port.close_wait); 887 + exit: 888 + spin_unlock_irq(&port->port_lock); 889 + } 890 + 891 + static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count) 892 + { 893 + struct gs_port *port = tty->driver_data; 894 + unsigned long flags; 895 + int status; 896 + 897 + pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n", 898 + port->port_num, tty, count); 899 + 900 + spin_lock_irqsave(&port->port_lock, flags); 901 + if (count) 902 + count = gs_buf_put(&port->port_write_buf, buf, count); 903 + /* treat count == 0 as flush_chars() */ 904 + if (port->port_usb) 905 + status = gs_start_tx(port); 906 + spin_unlock_irqrestore(&port->port_lock, flags); 907 + 908 + return count; 909 + } 910 + 911 + static int gs_put_char(struct tty_struct *tty, unsigned char ch) 912 + { 913 + struct gs_port *port = tty->driver_data; 914 + unsigned long flags; 915 + int status; 916 + 917 + pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n", 918 + port->port_num, tty, ch, __builtin_return_address(0)); 919 + 920 + spin_lock_irqsave(&port->port_lock, flags); 921 + status = gs_buf_put(&port->port_write_buf, &ch, 1); 922 + spin_unlock_irqrestore(&port->port_lock, flags); 923 + 924 + return status; 925 + } 926 + 927 + 
static void gs_flush_chars(struct tty_struct *tty) 928 + { 929 + struct gs_port *port = tty->driver_data; 930 + unsigned long flags; 931 + 932 + pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty); 933 + 934 + spin_lock_irqsave(&port->port_lock, flags); 935 + if (port->port_usb) 936 + gs_start_tx(port); 937 + spin_unlock_irqrestore(&port->port_lock, flags); 938 + } 939 + 940 + static int gs_write_room(struct tty_struct *tty) 941 + { 942 + struct gs_port *port = tty->driver_data; 943 + unsigned long flags; 944 + int room = 0; 945 + 946 + spin_lock_irqsave(&port->port_lock, flags); 947 + if (port->port_usb) 948 + room = gs_buf_space_avail(&port->port_write_buf); 949 + spin_unlock_irqrestore(&port->port_lock, flags); 950 + 951 + pr_vdebug("gs_write_room: (%d,%p) room=%d\n", 952 + port->port_num, tty, room); 953 + 954 + return room; 955 + } 956 + 957 + static int gs_chars_in_buffer(struct tty_struct *tty) 958 + { 959 + struct gs_port *port = tty->driver_data; 960 + unsigned long flags; 961 + int chars = 0; 962 + 963 + spin_lock_irqsave(&port->port_lock, flags); 964 + chars = gs_buf_data_avail(&port->port_write_buf); 965 + spin_unlock_irqrestore(&port->port_lock, flags); 966 + 967 + pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n", 968 + port->port_num, tty, chars); 969 + 970 + return chars; 971 + } 972 + 973 + /* undo side effects of setting TTY_THROTTLED */ 974 + static void gs_unthrottle(struct tty_struct *tty) 975 + { 976 + struct gs_port *port = tty->driver_data; 977 + unsigned long flags; 978 + 979 + spin_lock_irqsave(&port->port_lock, flags); 980 + if (port->port_usb) { 981 + /* Kickstart read queue processing. We don't do xon/xoff, 982 + * rts/cts, or other handshaking with the host, but if the 983 + * read queue backs up enough we'll be NAKing OUT packets. 
984 + */ 985 + tasklet_schedule(&port->push); 986 + pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num); 987 + } 988 + spin_unlock_irqrestore(&port->port_lock, flags); 989 + } 990 + 991 + static int gs_break_ctl(struct tty_struct *tty, int duration) 992 + { 993 + struct gs_port *port = tty->driver_data; 994 + int status = 0; 995 + struct gserial *gser; 996 + 997 + pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n", 998 + port->port_num, duration); 999 + 1000 + spin_lock_irq(&port->port_lock); 1001 + gser = port->port_usb; 1002 + if (gser && gser->send_break) 1003 + status = gser->send_break(gser, duration); 1004 + spin_unlock_irq(&port->port_lock); 1005 + 1006 + return status; 1007 + } 1008 + 1009 + static const struct tty_operations gs_tty_ops = { 1010 + .open = gs_open, 1011 + .close = gs_close, 1012 + .write = gs_write, 1013 + .put_char = gs_put_char, 1014 + .flush_chars = gs_flush_chars, 1015 + .write_room = gs_write_room, 1016 + .chars_in_buffer = gs_chars_in_buffer, 1017 + .unthrottle = gs_unthrottle, 1018 + .break_ctl = gs_break_ctl, 1019 + }; 1020 + 1021 + /*-------------------------------------------------------------------------*/ 1022 + 1023 + static struct tty_driver *gs_tty_driver; 1024 + 1025 + static int 1026 + gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) 1027 + { 1028 + struct gs_port *port; 1029 + 1030 + port = kzalloc(sizeof(struct gs_port), GFP_KERNEL); 1031 + if (port == NULL) 1032 + return -ENOMEM; 1033 + 1034 + tty_port_init(&port->port); 1035 + spin_lock_init(&port->port_lock); 1036 + init_waitqueue_head(&port->drain_wait); 1037 + 1038 + tasklet_init(&port->push, gs_rx_push, (unsigned long) port); 1039 + 1040 + INIT_LIST_HEAD(&port->read_pool); 1041 + INIT_LIST_HEAD(&port->read_queue); 1042 + INIT_LIST_HEAD(&port->write_pool); 1043 + 1044 + port->port_num = port_num; 1045 + port->port_line_coding = *coding; 1046 + 1047 + ports[port_num].port = port; 1048 + 1049 + return 0; 1050 + } 1051 + 1052 + /** 1053 + * 
gserial_setup - initialize TTY driver for one or more ports 1054 + * @g: gadget to associate with these ports 1055 + * @count: how many ports to support 1056 + * Context: may sleep 1057 + * 1058 + * The TTY stack needs to know in advance how many devices it should 1059 + * plan to manage. Use this call to set up the ports you will be 1060 + * exporting through USB. Later, connect them to functions based 1061 + * on what configuration is activated by the USB host; and disconnect 1062 + * them as appropriate. 1063 + * 1064 + * An example would be a two-configuration device in which both 1065 + * configurations expose port 0, but through different functions. 1066 + * One configuration could even expose port 1 while the other 1067 + * one doesn't. 1068 + * 1069 + * Returns negative errno or zero. 1070 + */ 1071 + int gserial_setup(struct usb_gadget *g, unsigned count) 1072 + { 1073 + unsigned i; 1074 + struct usb_cdc_line_coding coding; 1075 + int status; 1076 + 1077 + if (count == 0 || count > N_PORTS) 1078 + return -EINVAL; 1079 + 1080 + gs_tty_driver = alloc_tty_driver(count); 1081 + if (!gs_tty_driver) 1082 + return -ENOMEM; 1083 + 1084 + gs_tty_driver->driver_name = "g_serial"; 1085 + gs_tty_driver->name = PREFIX; 1086 + /* uses dynamically assigned dev_t values */ 1087 + 1088 + gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; 1089 + gs_tty_driver->subtype = SERIAL_TYPE_NORMAL; 1090 + gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; 1091 + gs_tty_driver->init_termios = tty_std_termios; 1092 + 1093 + /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on 1094 + * MS-Windows. Otherwise, most of these flags shouldn't affect 1095 + * anything unless we were to actually hook up to a serial line. 
1096 + */ 1097 + gs_tty_driver->init_termios.c_cflag = 1098 + B9600 | CS8 | CREAD | HUPCL | CLOCAL; 1099 + gs_tty_driver->init_termios.c_ispeed = 9600; 1100 + gs_tty_driver->init_termios.c_ospeed = 9600; 1101 + 1102 + coding.dwDTERate = cpu_to_le32(9600); 1103 + coding.bCharFormat = 8; 1104 + coding.bParityType = USB_CDC_NO_PARITY; 1105 + coding.bDataBits = USB_CDC_1_STOP_BITS; 1106 + 1107 + tty_set_operations(gs_tty_driver, &gs_tty_ops); 1108 + 1109 + /* make devices be openable */ 1110 + for (i = 0; i < count; i++) { 1111 + mutex_init(&ports[i].lock); 1112 + status = gs_port_alloc(i, &coding); 1113 + if (status) { 1114 + count = i; 1115 + goto fail; 1116 + } 1117 + } 1118 + n_ports = count; 1119 + 1120 + /* export the driver ... */ 1121 + status = tty_register_driver(gs_tty_driver); 1122 + if (status) { 1123 + pr_err("%s: cannot register, err %d\n", 1124 + __func__, status); 1125 + goto fail; 1126 + } 1127 + 1128 + /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */ 1129 + for (i = 0; i < count; i++) { 1130 + struct device *tty_dev; 1131 + 1132 + tty_dev = tty_register_device(gs_tty_driver, i, &g->dev); 1133 + if (IS_ERR(tty_dev)) 1134 + pr_warning("%s: no classdev for port %d, err %ld\n", 1135 + __func__, i, PTR_ERR(tty_dev)); 1136 + } 1137 + 1138 + pr_debug("%s: registered %d ttyGS* device%s\n", __func__, 1139 + count, (count == 1) ? 
"" : "s"); 1140 + 1141 + return status; 1142 + fail: 1143 + while (count--) 1144 + kfree(ports[count].port); 1145 + put_tty_driver(gs_tty_driver); 1146 + gs_tty_driver = NULL; 1147 + return status; 1148 + } 1149 + 1150 + static int gs_closed(struct gs_port *port) 1151 + { 1152 + int cond; 1153 + 1154 + spin_lock_irq(&port->port_lock); 1155 + cond = (port->port.count == 0) && !port->openclose; 1156 + spin_unlock_irq(&port->port_lock); 1157 + return cond; 1158 + } 1159 + 1160 + /** 1161 + * gserial_cleanup - remove TTY-over-USB driver and devices 1162 + * Context: may sleep 1163 + * 1164 + * This is called to free all resources allocated by @gserial_setup(). 1165 + * Accordingly, it may need to wait until some open /dev/ files have 1166 + * closed. 1167 + * 1168 + * The caller must have issued @gserial_disconnect() for any ports 1169 + * that had previously been connected, so that there is never any 1170 + * I/O pending when it's called. 1171 + */ 1172 + void gserial_cleanup(void) 1173 + { 1174 + unsigned i; 1175 + struct gs_port *port; 1176 + 1177 + if (!gs_tty_driver) 1178 + return; 1179 + 1180 + /* start sysfs and /dev/ttyGS* node removal */ 1181 + for (i = 0; i < n_ports; i++) 1182 + tty_unregister_device(gs_tty_driver, i); 1183 + 1184 + for (i = 0; i < n_ports; i++) { 1185 + /* prevent new opens */ 1186 + mutex_lock(&ports[i].lock); 1187 + port = ports[i].port; 1188 + ports[i].port = NULL; 1189 + mutex_unlock(&ports[i].lock); 1190 + 1191 + tasklet_kill(&port->push); 1192 + 1193 + /* wait for old opens to finish */ 1194 + wait_event(port->port.close_wait, gs_closed(port)); 1195 + 1196 + WARN_ON(port->port_usb != NULL); 1197 + 1198 + kfree(port); 1199 + } 1200 + n_ports = 0; 1201 + 1202 + tty_unregister_driver(gs_tty_driver); 1203 + put_tty_driver(gs_tty_driver); 1204 + gs_tty_driver = NULL; 1205 + 1206 + pr_debug("%s: cleaned up ttyGS* support\n", __func__); 1207 + } 1208 + 1209 + /** 1210 + * gserial_connect - notify TTY I/O glue that USB link is active 1211 + * 
@gser: the function, set up with endpoints and descriptors 1212 + * @port_num: which port is active 1213 + * Context: any (usually from irq) 1214 + * 1215 + * This is called activate endpoints and let the TTY layer know that 1216 + * the connection is active ... not unlike "carrier detect". It won't 1217 + * necessarily start I/O queues; unless the TTY is held open by any 1218 + * task, there would be no point. However, the endpoints will be 1219 + * activated so the USB host can perform I/O, subject to basic USB 1220 + * hardware flow control. 1221 + * 1222 + * Caller needs to have set up the endpoints and USB function in @dev 1223 + * before calling this, as well as the appropriate (speed-specific) 1224 + * endpoint descriptors, and also have set up the TTY driver by calling 1225 + * @gserial_setup(). 1226 + * 1227 + * Returns negative errno or zero. 1228 + * On success, ep->driver_data will be overwritten. 1229 + */ 1230 + int gserial_connect(struct gserial *gser, u8 port_num) 1231 + { 1232 + struct gs_port *port; 1233 + unsigned long flags; 1234 + int status; 1235 + 1236 + if (!gs_tty_driver || port_num >= n_ports) 1237 + return -ENXIO; 1238 + 1239 + /* we "know" gserial_cleanup() hasn't been called */ 1240 + port = ports[port_num].port; 1241 + 1242 + /* activate the endpoints */ 1243 + status = usb_ep_enable(gser->in); 1244 + if (status < 0) 1245 + return status; 1246 + gser->in->driver_data = port; 1247 + 1248 + status = usb_ep_enable(gser->out); 1249 + if (status < 0) 1250 + goto fail_out; 1251 + gser->out->driver_data = port; 1252 + 1253 + /* then tell the tty glue that I/O can work */ 1254 + spin_lock_irqsave(&port->port_lock, flags); 1255 + gser->ioport = port; 1256 + port->port_usb = gser; 1257 + 1258 + /* REVISIT unclear how best to handle this state... 1259 + * we don't really couple it with the Linux TTY. 1260 + */ 1261 + gser->port_line_coding = port->port_line_coding; 1262 + 1263 + /* REVISIT if waiting on "carrier detect", signal. 
*/ 1264 + 1265 + /* if it's already open, start I/O ... and notify the serial 1266 + * protocol about open/close status (connect/disconnect). 1267 + */ 1268 + if (port->port.count) { 1269 + pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); 1270 + gs_start_io(port); 1271 + if (gser->connect) 1272 + gser->connect(gser); 1273 + } else { 1274 + if (gser->disconnect) 1275 + gser->disconnect(gser); 1276 + } 1277 + 1278 + spin_unlock_irqrestore(&port->port_lock, flags); 1279 + 1280 + return status; 1281 + 1282 + fail_out: 1283 + usb_ep_disable(gser->in); 1284 + gser->in->driver_data = NULL; 1285 + return status; 1286 + } 1287 + 1288 + /** 1289 + * gserial_disconnect - notify TTY I/O glue that USB link is inactive 1290 + * @gser: the function, on which gserial_connect() was called 1291 + * Context: any (usually from irq) 1292 + * 1293 + * This is called to deactivate endpoints and let the TTY layer know 1294 + * that the connection went inactive ... not unlike "hangup". 1295 + * 1296 + * On return, the state is as if gserial_connect() had never been called; 1297 + * there is no active USB I/O on these endpoints. 1298 + */ 1299 + void gserial_disconnect(struct gserial *gser) 1300 + { 1301 + struct gs_port *port = gser->ioport; 1302 + unsigned long flags; 1303 + 1304 + if (!port) 1305 + return; 1306 + 1307 + /* tell the TTY glue not to do I/O here any more */ 1308 + spin_lock_irqsave(&port->port_lock, flags); 1309 + 1310 + /* REVISIT as above: how best to track this? 
*/ 1311 + port->port_line_coding = gser->port_line_coding; 1312 + 1313 + port->port_usb = NULL; 1314 + gser->ioport = NULL; 1315 + if (port->port.count > 0 || port->openclose) { 1316 + wake_up_interruptible(&port->drain_wait); 1317 + if (port->port.tty) 1318 + tty_hangup(port->port.tty); 1319 + } 1320 + spin_unlock_irqrestore(&port->port_lock, flags); 1321 + 1322 + /* disable endpoints, aborting down any active I/O */ 1323 + usb_ep_disable(gser->out); 1324 + gser->out->driver_data = NULL; 1325 + 1326 + usb_ep_disable(gser->in); 1327 + gser->in->driver_data = NULL; 1328 + 1329 + /* finally, free any unused/unusable I/O buffers */ 1330 + spin_lock_irqsave(&port->port_lock, flags); 1331 + if (port->port.count == 0 && !port->openclose) 1332 + gs_buf_free(&port->port_write_buf); 1333 + gs_free_requests(gser->out, &port->read_pool, NULL); 1334 + gs_free_requests(gser->out, &port->read_queue, NULL); 1335 + gs_free_requests(gser->in, &port->write_pool, NULL); 1336 + 1337 + port->read_allocated = port->read_started = 1338 + port->write_allocated = port->write_started = 0; 1339 + 1340 + spin_unlock_irqrestore(&port->port_lock, flags); 1341 + }
+65
drivers/staging/ccg/u_serial.h
/*
 * u_serial.h - interface to USB gadget "serial port"/TTY utilities
 *
 * Copyright (C) 2008 David Brownell
 * Copyright (C) 2008 by Nokia Corporation
 *
 * This software is distributed under the terms of the GNU General
 * Public License ("GPL") as published by the Free Software Foundation,
 * either version 2 of that License or (at your option) any later version.
 */

#ifndef __U_SERIAL_H
#define __U_SERIAL_H

#include <linux/usb/composite.h>
#include <linux/usb/cdc.h>

/*
 * One non-multiplexed "serial" I/O port ... there can be several of these
 * on any given USB peripheral device, if it provides enough endpoints.
 *
 * The "u_serial" utility component exists to do one thing: manage TTY
 * style I/O using the USB peripheral endpoints listed here, including
 * hookups to sysfs and /dev for each logical "tty" device.
 *
 * REVISIT at least ACM could support tiocmget() if needed.
 *
 * REVISIT someday, allow multiplexing several TTYs over these endpoints.
 */
struct gserial {
	struct usb_function		func;

	/* port is managed by gserial_{connect,disconnect} */
	struct gs_port			*ioport;

	/* bulk endpoints carrying the port's byte stream:
	 * "in" is device-to-host (TX), "out" is host-to-device (RX)
	 */
	struct usb_ep			*in;
	struct usb_ep			*out;

	/* REVISIT avoid this CDC-ACM support harder ... */
	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */

	/* notification callbacks, invoked by the TTY glue on open/close
	 * and break requests; any of them may be left NULL
	 */
	void (*connect)(struct gserial *p);
	void (*disconnect)(struct gserial *p);
	int (*send_break)(struct gserial *p, int duration);
};

/* utilities to allocate/free request and buffer */
struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
void gs_free_req(struct usb_ep *, struct usb_request *req);

/* port setup/teardown is handled by gadget driver */
int gserial_setup(struct usb_gadget *g, unsigned n_ports);
void gserial_cleanup(void);

/* connect/disconnect is handled by individual functions */
int gserial_connect(struct gserial *, u8 port_num);
void gserial_disconnect(struct gserial *);

/* functions are bound to configurations by a config or gadget driver */
int acm_bind_config(struct usb_configuration *c, u8 port_num);
int gser_bind_config(struct usb_configuration *c, u8 port_num);
int obex_bind_config(struct usb_configuration *c, u8 port_num);

#endif /* __U_SERIAL_H */
+71
drivers/staging/ccg/usbstring.c
··· 1 + /* 2 + * Copyright (C) 2003 David Brownell 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU Lesser General Public License as published 6 + * by the Free Software Foundation; either version 2.1 of the License, or 7 + * (at your option) any later version. 8 + */ 9 + 10 + #include <linux/errno.h> 11 + #include <linux/kernel.h> 12 + #include <linux/list.h> 13 + #include <linux/string.h> 14 + #include <linux/device.h> 15 + #include <linux/init.h> 16 + #include <linux/nls.h> 17 + 18 + #include <linux/usb/ch9.h> 19 + #include <linux/usb/gadget.h> 20 + 21 + 22 + /** 23 + * usb_gadget_get_string - fill out a string descriptor 24 + * @table: of c strings encoded using UTF-8 25 + * @id: string id, from low byte of wValue in get string descriptor 26 + * @buf: at least 256 bytes, must be 16-bit aligned 27 + * 28 + * Finds the UTF-8 string matching the ID, and converts it into a 29 + * string descriptor in utf16-le. 30 + * Returns length of descriptor (always even) or negative errno 31 + * 32 + * If your driver needs stings in multiple languages, you'll probably 33 + * "switch (wIndex) { ... }" in your ep0 string descriptor logic, 34 + * using this routine after choosing which set of UTF-8 strings to use. 35 + * Note that US-ASCII is a strict subset of UTF-8; any string bytes with 36 + * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1 37 + * characters (which are also widely used in C strings). 38 + */ 39 + int 40 + usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) 41 + { 42 + struct usb_string *s; 43 + int len; 44 + 45 + /* descriptor 0 has the language id */ 46 + if (id == 0) { 47 + buf [0] = 4; 48 + buf [1] = USB_DT_STRING; 49 + buf [2] = (u8) table->language; 50 + buf [3] = (u8) (table->language >> 8); 51 + return 4; 52 + } 53 + for (s = table->strings; s && s->s; s++) 54 + if (s->id == id) 55 + break; 56 + 57 + /* unrecognized: stall. 
*/ 58 + if (!s || !s->s) 59 + return -EINVAL; 60 + 61 + /* string descriptors have length, tag, then UTF16-LE text */ 62 + len = min ((size_t) 126, strlen (s->s)); 63 + len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN, 64 + (wchar_t *) &buf[2], 126); 65 + if (len < 0) 66 + return -EINVAL; 67 + buf [0] = (len + 1) * 2; 68 + buf [1] = USB_DT_STRING; 69 + return buf [0]; 70 + } 71 +