Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.2-rc1 1616 lines 47 kB view raw
1/* 2 * WUSB Wire Adapter 3 * Data transfer and URB enqueing 4 * 5 * Copyright (C) 2005-2006 Intel Corporation 6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License version 10 * 2 as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 20 * 02110-1301, USA. 21 * 22 * 23 * How transfers work: get a buffer, break it up in segments (segment 24 * size is a multiple of the maxpacket size). For each segment issue a 25 * segment request (struct wa_xfer_*), then send the data buffer if 26 * out or nothing if in (all over the DTO endpoint). 27 * 28 * For each submitted segment request, a notification will come over 29 * the NEP endpoint and a transfer result (struct xfer_result) will 30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is 31 * data coming (inbound transfer), schedule a read and handle it. 32 * 33 * Sounds simple, it is a pain to implement. 34 * 35 * 36 * ENTRY POINTS 37 * 38 * FIXME 39 * 40 * LIFE CYCLE / STATE DIAGRAM 41 * 42 * FIXME 43 * 44 * THIS CODE IS DISGUSTING 45 * 46 * Warned you are; it's my second try and still not happy with it. 47 * 48 * NOTES: 49 * 50 * - No iso 51 * 52 * - Supports DMA xfers, control, bulk and maybe interrupt 53 * 54 * - Does not recycle unused rpipes 55 * 56 * An rpipe is assigned to an endpoint the first time it is used, 57 * and then it's there, assigned, until the endpoint is disabled 58 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the 59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore 60 * (should be a mutex). 61 * 62 * Two methods it could be done: 63 * 64 * (a) set up a timer every time an rpipe's use count drops to 1 65 * (which means unused) or when a transfer ends. Reset the 66 * timer when a xfer is queued. If the timer expires, release 67 * the rpipe [see rpipe_ep_disable()]. 68 * 69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()], 70 * when none are found go over the list, check their endpoint 71 * and their activity record (if no last-xfer-done-ts in the 72 * last x seconds) take it 73 * 74 * However, due to the fact that we have a set of limited 75 * resources (max-segments-at-the-same-time per xfer, 76 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end 77 * we are going to have to rebuild all this based on an scheduler, 78 * to where we have a list of transactions to do and based on the 79 * availability of the different required components (blocks, 80 * rpipes, segment slots, etc), we go scheduling them. Painful. 81 */ 82#include <linux/init.h> 83#include <linux/spinlock.h> 84#include <linux/slab.h> 85#include <linux/hash.h> 86#include <linux/ratelimit.h> 87#include <linux/export.h> 88 89#include "wa-hc.h" 90#include "wusbhc.h" 91 92enum { 93 WA_SEGS_MAX = 255, 94}; 95 96enum wa_seg_status { 97 WA_SEG_NOTREADY, 98 WA_SEG_READY, 99 WA_SEG_DELAYED, 100 WA_SEG_SUBMITTED, 101 WA_SEG_PENDING, 102 WA_SEG_DTI_PENDING, 103 WA_SEG_DONE, 104 WA_SEG_ERROR, 105 WA_SEG_ABORTED, 106}; 107 108static void wa_xfer_delayed_run(struct wa_rpipe *); 109 110/* 111 * Life cycle governed by 'struct urb' (the refcount of the struct is 112 * that of the 'struct urb' and usb_free_urb() would free the whole 113 * struct). 114 */ 115struct wa_seg { 116 struct urb urb; 117 struct urb *dto_urb; /* for data output? */ 118 struct list_head list_node; /* for rpipe->req_list */ 119 struct wa_xfer *xfer; /* out xfer */ 120 u8 index; /* which segment we are */ 121 enum wa_seg_status status; 122 ssize_t result; /* bytes xfered or error */ 123 struct wa_xfer_hdr xfer_hdr; 124 u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */ 125}; 126 127static void wa_seg_init(struct wa_seg *seg) 128{ 129 /* usb_init_urb() repeats a lot of work, so we do it here */ 130 kref_init(&seg->urb.kref); 131} 132 133/* 134 * Protected by xfer->lock 135 * 136 */ 137struct wa_xfer { 138 struct kref refcnt; 139 struct list_head list_node; 140 spinlock_t lock; 141 u32 id; 142 143 struct wahc *wa; /* Wire adapter we are plugged to */ 144 struct usb_host_endpoint *ep; 145 struct urb *urb; /* URB we are transferring for */ 146 struct wa_seg **seg; /* transfer segments */ 147 u8 segs, segs_submitted, segs_done; 148 unsigned is_inbound:1; 149 unsigned is_dma:1; 150 size_t seg_size; 151 int result; 152 153 gfp_t gfp; /* allocation mask */ 154 155 struct wusb_dev *wusb_dev; /* for activity timestamps */ 156}; 157 158static inline void wa_xfer_init(struct wa_xfer *xfer) 159{ 160 kref_init(&xfer->refcnt); 161 INIT_LIST_HEAD(&xfer->list_node); 162 spin_lock_init(&xfer->lock); 163} 164 165/* 166 * Destroy a transfer structure 167 * 168 * Note that the xfer->seg[index] thingies follow the URB life cycle, 169 * so we need to put them, not free them. 170 */ 171static void wa_xfer_destroy(struct kref *_xfer) 172{ 173 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); 174 if (xfer->seg) { 175 unsigned cnt; 176 for (cnt = 0; cnt < xfer->segs; cnt++) { 177 if (xfer->is_inbound) 178 usb_put_urb(xfer->seg[cnt]->dto_urb); 179 usb_put_urb(&xfer->seg[cnt]->urb); 180 } 181 } 182 kfree(xfer); 183} 184 185static void wa_xfer_get(struct wa_xfer *xfer) 186{ 187 kref_get(&xfer->refcnt); 188} 189 190static void wa_xfer_put(struct wa_xfer *xfer) 191{ 192 kref_put(&xfer->refcnt, wa_xfer_destroy); 193} 194 195/* 196 * xfer is referenced 197 * 198 * xfer->lock has to be unlocked 199 * 200 * We take xfer->lock for setting the result; this is a barrier 201 * against drivers/usb/core/hcd.c:unlink1() being called after we call 202 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a 203 * reference to the transfer. 204 */ 205static void wa_xfer_giveback(struct wa_xfer *xfer) 206{ 207 unsigned long flags; 208 209 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); 210 list_del_init(&xfer->list_node); 211 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); 212 /* FIXME: segmentation broken -- kills DWA */ 213 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); 214 wa_put(xfer->wa); 215 wa_xfer_put(xfer); 216} 217 218/* 219 * xfer is referenced 220 * 221 * xfer->lock has to be unlocked 222 */ 223static void wa_xfer_completion(struct wa_xfer *xfer) 224{ 225 if (xfer->wusb_dev) 226 wusb_dev_put(xfer->wusb_dev); 227 rpipe_put(xfer->ep->hcpriv); 228 wa_xfer_giveback(xfer); 229} 230 231/* 232 * If transfer is done, wrap it up and return true 233 * 234 * xfer->lock has to be locked 235 */ 236static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) 237{ 238 struct device *dev = &xfer->wa->usb_iface->dev; 239 unsigned result, cnt; 240 struct wa_seg *seg; 241 struct urb *urb = xfer->urb; 242 unsigned found_short = 0; 243 244 result = xfer->segs_done == xfer->segs_submitted; 245 if (result == 0) 246 goto out; 247 urb->actual_length = 0; 248 for (cnt = 0; cnt < xfer->segs; cnt++) { 249 seg = xfer->seg[cnt]; 250 switch (seg->status) { 251 case WA_SEG_DONE: 252 if (found_short && seg->result > 0) { 253 dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", 254 xfer, cnt, seg->result); 255 urb->status = -EINVAL; 256 goto out; 257 } 258 urb->actual_length += seg->result; 259 if (seg->result < xfer->seg_size 260 && cnt != xfer->segs-1) 261 found_short = 1; 262 dev_dbg(dev, "xfer %p#%u: DONE short %d " 263 "result %zu urb->actual_length %d\n", 264 xfer, seg->index, found_short, seg->result, 265 urb->actual_length); 266 break; 267 case WA_SEG_ERROR: 268 xfer->result = seg->result; 269 dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", 270 xfer, seg->index, seg->result); 271 goto out; 272 case WA_SEG_ABORTED: 273 dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", 274 xfer, seg->index, urb->status); 275 xfer->result = urb->status; 276 goto out; 277 default: 278 dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", 279 xfer, cnt, seg->status); 280 xfer->result = -EINVAL; 281 goto out; 282 } 283 } 284 xfer->result = 0; 285out: 286 return result; 287} 288 289/* 290 * Initialize a transfer's ID 291 * 292 * We need to use a sequential number; if we use the pointer or the 293 * hash of the pointer, it can repeat over sequential transfers and 294 * then it will confuse the HWA....wonder why in hell they put a 32 295 * bit handle in there then. 296 */ 297static void wa_xfer_id_init(struct wa_xfer *xfer) 298{ 299 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); 300} 301 302/* 303 * Return the xfer's ID associated with xfer 304 * 305 * Need to generate a 306 */ 307static u32 wa_xfer_id(struct wa_xfer *xfer) 308{ 309 return xfer->id; 310} 311 312/* 313 * Search for a transfer list ID on the HCD's URB list 314 * 315 * For 32 bit architectures, we use the pointer itself; for 64 bits, a 316 * 32-bit hash of the pointer. 317 * 318 * @returns NULL if not found. 319 */ 320static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id) 321{ 322 unsigned long flags; 323 struct wa_xfer *xfer_itr; 324 spin_lock_irqsave(&wa->xfer_list_lock, flags); 325 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) { 326 if (id == xfer_itr->id) { 327 wa_xfer_get(xfer_itr); 328 goto out; 329 } 330 } 331 xfer_itr = NULL; 332out: 333 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); 334 return xfer_itr; 335} 336 337struct wa_xfer_abort_buffer { 338 struct urb urb; 339 struct wa_xfer_abort cmd; 340}; 341 342static void __wa_xfer_abort_cb(struct urb *urb) 343{ 344 struct wa_xfer_abort_buffer *b = urb->context; 345 usb_put_urb(&b->urb); 346} 347 348/* 349 * Aborts an ongoing transaction 350 * 351 * Assumes the transfer is referenced and locked and in a submitted 352 * state (mainly that there is an endpoint/rpipe assigned). 353 * 354 * The callback (see above) does nothing but freeing up the data by 355 * putting the URB. Because the URB is allocated at the head of the 356 * struct, the whole space we allocated is kfreed. 357 * 358 * We'll get an 'aborted transaction' xfer result on DTI, that'll 359 * politely ignore because at this point the transaction has been 360 * marked as aborted already. 361 */ 362static void __wa_xfer_abort(struct wa_xfer *xfer) 363{ 364 int result; 365 struct device *dev = &xfer->wa->usb_iface->dev; 366 struct wa_xfer_abort_buffer *b; 367 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 368 369 b = kmalloc(sizeof(*b), GFP_ATOMIC); 370 if (b == NULL) 371 goto error_kmalloc; 372 b->cmd.bLength = sizeof(b->cmd); 373 b->cmd.bRequestType = WA_XFER_ABORT; 374 b->cmd.wRPipe = rpipe->descr.wRPipeIndex; 375 b->cmd.dwTransferID = wa_xfer_id(xfer); 376 377 usb_init_urb(&b->urb); 378 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, 379 usb_sndbulkpipe(xfer->wa->usb_dev, 380 xfer->wa->dto_epd->bEndpointAddress), 381 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b); 382 result = usb_submit_urb(&b->urb, GFP_ATOMIC); 383 if (result < 0) 384 goto error_submit; 385 return; /* callback frees! */ 386 387 388error_submit: 389 if (printk_ratelimit()) 390 dev_err(dev, "xfer %p: Can't submit abort request: %d\n", 391 xfer, result); 392 kfree(b); 393error_kmalloc: 394 return; 395 396} 397 398/* 399 * 400 * @returns < 0 on error, transfer segment request size if ok 401 */ 402static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, 403 enum wa_xfer_type *pxfer_type) 404{ 405 ssize_t result; 406 struct device *dev = &xfer->wa->usb_iface->dev; 407 size_t maxpktsize; 408 struct urb *urb = xfer->urb; 409 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 410 411 switch (rpipe->descr.bmAttribute & 0x3) { 412 case USB_ENDPOINT_XFER_CONTROL: 413 *pxfer_type = WA_XFER_TYPE_CTL; 414 result = sizeof(struct wa_xfer_ctl); 415 break; 416 case USB_ENDPOINT_XFER_INT: 417 case USB_ENDPOINT_XFER_BULK: 418 *pxfer_type = WA_XFER_TYPE_BI; 419 result = sizeof(struct wa_xfer_bi); 420 break; 421 case USB_ENDPOINT_XFER_ISOC: 422 dev_err(dev, "FIXME: ISOC not implemented\n"); 423 result = -ENOSYS; 424 goto error; 425 default: 426 /* never happens */ 427 BUG(); 428 result = -EINVAL; /* shut gcc up */ 429 }; 430 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; 431 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0; 432 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) 433 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1); 434 /* Compute the segment size and make sure it is a multiple of 435 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of 436 * a check (FIXME) */ 437 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize); 438 if (xfer->seg_size < maxpktsize) { 439 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize " 440 "%zu\n", xfer->seg_size, maxpktsize); 441 result = -EINVAL; 442 goto error; 443 } 444 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; 445 xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1) 446 / xfer->seg_size; 447 if (xfer->segs >= WA_SEGS_MAX) { 448 dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n", 449 (int)(urb->transfer_buffer_length / xfer->seg_size), 450 WA_SEGS_MAX); 451 result = -EINVAL; 452 goto error; 453 } 454 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) 455 xfer->segs = 1; 456error: 457 return result; 458} 459 460/* Fill in the common request header and xfer-type specific data. */ 461static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, 462 struct wa_xfer_hdr *xfer_hdr0, 463 enum wa_xfer_type xfer_type, 464 size_t xfer_hdr_size) 465{ 466 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 467 468 xfer_hdr0 = &xfer->seg[0]->xfer_hdr; 469 xfer_hdr0->bLength = xfer_hdr_size; 470 xfer_hdr0->bRequestType = xfer_type; 471 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; 472 xfer_hdr0->dwTransferID = wa_xfer_id(xfer); 473 xfer_hdr0->bTransferSegment = 0; 474 switch (xfer_type) { 475 case WA_XFER_TYPE_CTL: { 476 struct wa_xfer_ctl *xfer_ctl = 477 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr); 478 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0; 479 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet, 480 sizeof(xfer_ctl->baSetupData)); 481 break; 482 } 483 case WA_XFER_TYPE_BI: 484 break; 485 case WA_XFER_TYPE_ISO: 486 printk(KERN_ERR "FIXME: ISOC not implemented\n"); 487 default: 488 BUG(); 489 }; 490} 491 492/* 493 * Callback for the OUT data phase of the segment request 494 * 495 * Check wa_seg_cb(); most comments also apply here because this 496 * function does almost the same thing and they work closely 497 * together. 498 * 499 * If the seg request has failed but this DTO phase has succeeded, 500 * wa_seg_cb() has already failed the segment and moved the 501 * status to WA_SEG_ERROR, so this will go through 'case 0' and 502 * effectively do nothing. 503 */ 504static void wa_seg_dto_cb(struct urb *urb) 505{ 506 struct wa_seg *seg = urb->context; 507 struct wa_xfer *xfer = seg->xfer; 508 struct wahc *wa; 509 struct device *dev; 510 struct wa_rpipe *rpipe; 511 unsigned long flags; 512 unsigned rpipe_ready = 0; 513 u8 done = 0; 514 515 switch (urb->status) { 516 case 0: 517 spin_lock_irqsave(&xfer->lock, flags); 518 wa = xfer->wa; 519 dev = &wa->usb_iface->dev; 520 dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", 521 xfer, seg->index, urb->actual_length); 522 if (seg->status < WA_SEG_PENDING) 523 seg->status = WA_SEG_PENDING; 524 seg->result = urb->actual_length; 525 spin_unlock_irqrestore(&xfer->lock, flags); 526 break; 527 case -ECONNRESET: /* URB unlinked; no need to do anything */ 528 case -ENOENT: /* as it was done by the who unlinked us */ 529 break; 530 default: /* Other errors ... */ 531 spin_lock_irqsave(&xfer->lock, flags); 532 wa = xfer->wa; 533 dev = &wa->usb_iface->dev; 534 rpipe = xfer->ep->hcpriv; 535 dev_dbg(dev, "xfer %p#%u: data out error %d\n", 536 xfer, seg->index, urb->status); 537 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 538 EDC_ERROR_TIMEFRAME)){ 539 dev_err(dev, "DTO: URB max acceptable errors " 540 "exceeded, resetting device\n"); 541 wa_reset_all(wa); 542 } 543 if (seg->status != WA_SEG_ERROR) { 544 seg->status = WA_SEG_ERROR; 545 seg->result = urb->status; 546 xfer->segs_done++; 547 __wa_xfer_abort(xfer); 548 rpipe_ready = rpipe_avail_inc(rpipe); 549 done = __wa_xfer_is_done(xfer); 550 } 551 spin_unlock_irqrestore(&xfer->lock, flags); 552 if (done) 553 wa_xfer_completion(xfer); 554 if (rpipe_ready) 555 wa_xfer_delayed_run(rpipe); 556 } 557} 558 559/* 560 * Callback for the segment request 561 * 562 * If successful transition state (unless already transitioned or 563 * outbound transfer); otherwise, take a note of the error, mark this 564 * segment done and try completion. 565 * 566 * Note we don't access until we are sure that the transfer hasn't 567 * been cancelled (ECONNRESET, ENOENT), which could mean that 568 * seg->xfer could be already gone. 569 * 570 * We have to check before setting the status to WA_SEG_PENDING 571 * because sometimes the xfer result callback arrives before this 572 * callback (geeeeeeze), so it might happen that we are already in 573 * another state. As well, we don't set it if the transfer is inbound, 574 * as in that case, wa_seg_dto_cb will do it when the OUT data phase 575 * finishes. 576 */ 577static void wa_seg_cb(struct urb *urb) 578{ 579 struct wa_seg *seg = urb->context; 580 struct wa_xfer *xfer = seg->xfer; 581 struct wahc *wa; 582 struct device *dev; 583 struct wa_rpipe *rpipe; 584 unsigned long flags; 585 unsigned rpipe_ready; 586 u8 done = 0; 587 588 switch (urb->status) { 589 case 0: 590 spin_lock_irqsave(&xfer->lock, flags); 591 wa = xfer->wa; 592 dev = &wa->usb_iface->dev; 593 dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); 594 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) 595 seg->status = WA_SEG_PENDING; 596 spin_unlock_irqrestore(&xfer->lock, flags); 597 break; 598 case -ECONNRESET: /* URB unlinked; no need to do anything */ 599 case -ENOENT: /* as it was done by the who unlinked us */ 600 break; 601 default: /* Other errors ... */ 602 spin_lock_irqsave(&xfer->lock, flags); 603 wa = xfer->wa; 604 dev = &wa->usb_iface->dev; 605 rpipe = xfer->ep->hcpriv; 606 if (printk_ratelimit()) 607 dev_err(dev, "xfer %p#%u: request error %d\n", 608 xfer, seg->index, urb->status); 609 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 610 EDC_ERROR_TIMEFRAME)){ 611 dev_err(dev, "DTO: URB max acceptable errors " 612 "exceeded, resetting device\n"); 613 wa_reset_all(wa); 614 } 615 usb_unlink_urb(seg->dto_urb); 616 seg->status = WA_SEG_ERROR; 617 seg->result = urb->status; 618 xfer->segs_done++; 619 __wa_xfer_abort(xfer); 620 rpipe_ready = rpipe_avail_inc(rpipe); 621 done = __wa_xfer_is_done(xfer); 622 spin_unlock_irqrestore(&xfer->lock, flags); 623 if (done) 624 wa_xfer_completion(xfer); 625 if (rpipe_ready) 626 wa_xfer_delayed_run(rpipe); 627 } 628} 629 630/* 631 * Allocate the segs array and initialize each of them 632 * 633 * The segments are freed by wa_xfer_destroy() when the xfer use count 634 * drops to zero; however, because each segment is given the same life 635 * cycle as the USB URB it contains, it is actually freed by 636 * usb_put_urb() on the contained USB URB (twisted, eh?). 637 */ 638static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) 639{ 640 int result, cnt; 641 size_t alloc_size = sizeof(*xfer->seg[0]) 642 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; 643 struct usb_device *usb_dev = xfer->wa->usb_dev; 644 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; 645 struct wa_seg *seg; 646 size_t buf_itr, buf_size, buf_itr_size; 647 648 result = -ENOMEM; 649 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); 650 if (xfer->seg == NULL) 651 goto error_segs_kzalloc; 652 buf_itr = 0; 653 buf_size = xfer->urb->transfer_buffer_length; 654 for (cnt = 0; cnt < xfer->segs; cnt++) { 655 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC); 656 if (seg == NULL) 657 goto error_seg_kzalloc; 658 wa_seg_init(seg); 659 seg->xfer = xfer; 660 seg->index = cnt; 661 usb_fill_bulk_urb(&seg->urb, usb_dev, 662 usb_sndbulkpipe(usb_dev, 663 dto_epd->bEndpointAddress), 664 &seg->xfer_hdr, xfer_hdr_size, 665 wa_seg_cb, seg); 666 buf_itr_size = buf_size > xfer->seg_size ? 667 xfer->seg_size : buf_size; 668 if (xfer->is_inbound == 0 && buf_size > 0) { 669 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); 670 if (seg->dto_urb == NULL) 671 goto error_dto_alloc; 672 usb_fill_bulk_urb( 673 seg->dto_urb, usb_dev, 674 usb_sndbulkpipe(usb_dev, 675 dto_epd->bEndpointAddress), 676 NULL, 0, wa_seg_dto_cb, seg); 677 if (xfer->is_dma) { 678 seg->dto_urb->transfer_dma = 679 xfer->urb->transfer_dma + buf_itr; 680 seg->dto_urb->transfer_flags |= 681 URB_NO_TRANSFER_DMA_MAP; 682 } else 683 seg->dto_urb->transfer_buffer = 684 xfer->urb->transfer_buffer + buf_itr; 685 seg->dto_urb->transfer_buffer_length = buf_itr_size; 686 } 687 seg->status = WA_SEG_READY; 688 buf_itr += buf_itr_size; 689 buf_size -= buf_itr_size; 690 } 691 return 0; 692 693error_dto_alloc: 694 kfree(xfer->seg[cnt]); 695 cnt--; 696error_seg_kzalloc: 697 /* use the fact that cnt is left at were it failed */ 698 for (; cnt > 0; cnt--) { 699 if (xfer->is_inbound == 0) 700 kfree(xfer->seg[cnt]->dto_urb); 701 kfree(xfer->seg[cnt]); 702 } 703error_segs_kzalloc: 704 return result; 705} 706 707/* 708 * Allocates all the stuff needed to submit a transfer 709 * 710 * Breaks the whole data buffer in a list of segments, each one has a 711 * structure allocated to it and linked in xfer->seg[index] 712 * 713 * FIXME: merge setup_segs() and the last part of this function, no 714 * need to do two for loops when we could run everything in a 715 * single one 716 */ 717static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) 718{ 719 int result; 720 struct device *dev = &xfer->wa->usb_iface->dev; 721 enum wa_xfer_type xfer_type = 0; /* shut up GCC */ 722 size_t xfer_hdr_size, cnt, transfer_size; 723 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; 724 725 result = __wa_xfer_setup_sizes(xfer, &xfer_type); 726 if (result < 0) 727 goto error_setup_sizes; 728 xfer_hdr_size = result; 729 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); 730 if (result < 0) { 731 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n", 732 xfer, xfer->segs, result); 733 goto error_setup_segs; 734 } 735 /* Fill the first header */ 736 xfer_hdr0 = &xfer->seg[0]->xfer_hdr; 737 wa_xfer_id_init(xfer); 738 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); 739 740 /* Fill remainig headers */ 741 xfer_hdr = xfer_hdr0; 742 transfer_size = urb->transfer_buffer_length; 743 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? 744 xfer->seg_size : transfer_size; 745 transfer_size -= xfer->seg_size; 746 for (cnt = 1; cnt < xfer->segs; cnt++) { 747 xfer_hdr = &xfer->seg[cnt]->xfer_hdr; 748 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); 749 xfer_hdr->bTransferSegment = cnt; 750 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ? 751 cpu_to_le32(xfer->seg_size) 752 : cpu_to_le32(transfer_size); 753 xfer->seg[cnt]->status = WA_SEG_READY; 754 transfer_size -= xfer->seg_size; 755 } 756 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ 757 result = 0; 758error_setup_segs: 759error_setup_sizes: 760 return result; 761} 762 763/* 764 * 765 * 766 * rpipe->seg_lock is held! 767 */ 768static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, 769 struct wa_seg *seg) 770{ 771 int result; 772 result = usb_submit_urb(&seg->urb, GFP_ATOMIC); 773 if (result < 0) { 774 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n", 775 xfer, seg->index, result); 776 goto error_seg_submit; 777 } 778 if (seg->dto_urb) { 779 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); 780 if (result < 0) { 781 printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n", 782 xfer, seg->index, result); 783 goto error_dto_submit; 784 } 785 } 786 seg->status = WA_SEG_SUBMITTED; 787 rpipe_avail_dec(rpipe); 788 return 0; 789 790error_dto_submit: 791 usb_unlink_urb(&seg->urb); 792error_seg_submit: 793 seg->status = WA_SEG_ERROR; 794 seg->result = result; 795 return result; 796} 797 798/* 799 * Execute more queued request segments until the maximum concurrent allowed 800 * 801 * The ugly unlock/lock sequence on the error path is needed as the 802 * xfer->lock normally nests the seg_lock and not viceversa. 803 * 804 */ 805static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) 806{ 807 int result; 808 struct device *dev = &rpipe->wa->usb_iface->dev; 809 struct wa_seg *seg; 810 struct wa_xfer *xfer; 811 unsigned long flags; 812 813 spin_lock_irqsave(&rpipe->seg_lock, flags); 814 while (atomic_read(&rpipe->segs_available) > 0 815 && !list_empty(&rpipe->seg_list)) { 816 seg = list_entry(rpipe->seg_list.next, struct wa_seg, 817 list_node); 818 list_del(&seg->list_node); 819 xfer = seg->xfer; 820 result = __wa_seg_submit(rpipe, xfer, seg); 821 dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", 822 xfer, seg->index, atomic_read(&rpipe->segs_available), result); 823 if (unlikely(result < 0)) { 824 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 825 spin_lock_irqsave(&xfer->lock, flags); 826 __wa_xfer_abort(xfer); 827 xfer->segs_done++; 828 spin_unlock_irqrestore(&xfer->lock, flags); 829 spin_lock_irqsave(&rpipe->seg_lock, flags); 830 } 831 } 832 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 833} 834 835/* 836 * 837 * xfer->lock is taken 838 * 839 * On failure submitting we just stop submitting and return error; 840 * wa_urb_enqueue_b() will execute the completion path 841 */ 842static int __wa_xfer_submit(struct wa_xfer *xfer) 843{ 844 int result; 845 struct wahc *wa = xfer->wa; 846 struct device *dev = &wa->usb_iface->dev; 847 unsigned cnt; 848 struct wa_seg *seg; 849 unsigned long flags; 850 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 851 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests); 852 u8 available; 853 u8 empty; 854 855 spin_lock_irqsave(&wa->xfer_list_lock, flags); 856 list_add_tail(&xfer->list_node, &wa->xfer_list); 857 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); 858 859 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests); 860 result = 0; 861 spin_lock_irqsave(&rpipe->seg_lock, flags); 862 for (cnt = 0; cnt < xfer->segs; cnt++) { 863 available = atomic_read(&rpipe->segs_available); 864 empty = list_empty(&rpipe->seg_list); 865 seg = xfer->seg[cnt]; 866 dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", 867 xfer, cnt, available, empty, 868 available == 0 || !empty ? "delayed" : "submitted"); 869 if (available == 0 || !empty) { 870 dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); 871 seg->status = WA_SEG_DELAYED; 872 list_add_tail(&seg->list_node, &rpipe->seg_list); 873 } else { 874 result = __wa_seg_submit(rpipe, xfer, seg); 875 if (result < 0) { 876 __wa_xfer_abort(xfer); 877 goto error_seg_submit; 878 } 879 } 880 xfer->segs_submitted++; 881 } 882error_seg_submit: 883 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 884 return result; 885} 886 887/* 888 * Second part of a URB/transfer enqueuement 889 * 890 * Assumes this comes from wa_urb_enqueue() [maybe through 891 * wa_urb_enqueue_run()]. At this point: 892 * 893 * xfer->wa filled and refcounted 894 * xfer->ep filled with rpipe refcounted if 895 * delayed == 0 896 * xfer->urb filled and refcounted (this is the case when called 897 * from wa_urb_enqueue() as we come from usb_submit_urb() 898 * and when called by wa_urb_enqueue_run(), as we took an 899 * extra ref dropped by _run() after we return). 900 * xfer->gfp filled 901 * 902 * If we fail at __wa_xfer_submit(), then we just check if we are done 903 * and if so, we run the completion procedure. However, if we are not 904 * yet done, we do nothing and wait for the completion handlers from 905 * the submitted URBs or from the xfer-result path to kick in. If xfer 906 * result never kicks in, the xfer will timeout from the USB code and 907 * dequeue() will be called. 908 */ 909static void wa_urb_enqueue_b(struct wa_xfer *xfer) 910{ 911 int result; 912 unsigned long flags; 913 struct urb *urb = xfer->urb; 914 struct wahc *wa = xfer->wa; 915 struct wusbhc *wusbhc = wa->wusb; 916 struct wusb_dev *wusb_dev; 917 unsigned done; 918 919 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); 920 if (result < 0) 921 goto error_rpipe_get; 922 result = -ENODEV; 923 /* FIXME: segmentation broken -- kills DWA */ 924 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ 925 if (urb->dev == NULL) { 926 mutex_unlock(&wusbhc->mutex); 927 goto error_dev_gone; 928 } 929 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); 930 if (wusb_dev == NULL) { 931 mutex_unlock(&wusbhc->mutex); 932 goto error_dev_gone; 933 } 934 mutex_unlock(&wusbhc->mutex); 935 936 spin_lock_irqsave(&xfer->lock, flags); 937 xfer->wusb_dev = wusb_dev; 938 result = urb->status; 939 if (urb->status != -EINPROGRESS) 940 goto error_dequeued; 941 942 result = __wa_xfer_setup(xfer, urb); 943 if (result < 0) 944 goto error_xfer_setup; 945 result = __wa_xfer_submit(xfer); 946 if (result < 0) 947 goto error_xfer_submit; 948 spin_unlock_irqrestore(&xfer->lock, flags); 949 return; 950 951 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() 952 * does a wa_xfer_put() that will call wa_xfer_destroy() and clean 953 * upundo setup(). 954 */ 955error_xfer_setup: 956error_dequeued: 957 spin_unlock_irqrestore(&xfer->lock, flags); 958 /* FIXME: segmentation broken, kills DWA */ 959 if (wusb_dev) 960 wusb_dev_put(wusb_dev); 961error_dev_gone: 962 rpipe_put(xfer->ep->hcpriv); 963error_rpipe_get: 964 xfer->result = result; 965 wa_xfer_giveback(xfer); 966 return; 967 968error_xfer_submit: 969 done = __wa_xfer_is_done(xfer); 970 xfer->result = result; 971 spin_unlock_irqrestore(&xfer->lock, flags); 972 if (done) 973 wa_xfer_completion(xfer); 974} 975 976/* 977 * Execute the delayed transfers in the Wire Adapter @wa 978 * 979 * We need to be careful here, as dequeue() could be called in the 980 * middle. That's why we do the whole thing under the 981 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock 982 * and then checks the list -- so as we would be acquiring in inverse 983 * order, we just drop the lock once we have the xfer and reacquire it 984 * later. 985 */ 986void wa_urb_enqueue_run(struct work_struct *ws) 987{ 988 struct wahc *wa = container_of(ws, struct wahc, xfer_work); 989 struct wa_xfer *xfer, *next; 990 struct urb *urb; 991 992 spin_lock_irq(&wa->xfer_list_lock); 993 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, 994 list_node) { 995 list_del_init(&xfer->list_node); 996 spin_unlock_irq(&wa->xfer_list_lock); 997 998 urb = xfer->urb; 999 wa_urb_enqueue_b(xfer); 1000 usb_put_urb(urb); /* taken when queuing */ 1001 1002 spin_lock_irq(&wa->xfer_list_lock); 1003 } 1004 spin_unlock_irq(&wa->xfer_list_lock); 1005} 1006EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); 1007 1008/* 1009 * Submit a transfer to the Wire Adapter in a delayed way 1010 * 1011 * The process of enqueuing involves possible sleeps() [see 1012 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are 1013 * in an atomic section, we defer the enqueue_b() call--else we call direct. 1014 * 1015 * @urb: We own a reference to it done by the HCI Linux USB stack that 1016 * will be given up by calling usb_hcd_giveback_urb() or by 1017 * returning error from this function -> ergo we don't have to 1018 * refcount it. 1019 */ 1020int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, 1021 struct urb *urb, gfp_t gfp) 1022{ 1023 int result; 1024 struct device *dev = &wa->usb_iface->dev; 1025 struct wa_xfer *xfer; 1026 unsigned long my_flags; 1027 unsigned cant_sleep = irqs_disabled() | in_atomic(); 1028 1029 if (urb->transfer_buffer == NULL 1030 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 1031 && urb->transfer_buffer_length != 0) { 1032 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb); 1033 dump_stack(); 1034 } 1035 1036 result = -ENOMEM; 1037 xfer = kzalloc(sizeof(*xfer), gfp); 1038 if (xfer == NULL) 1039 goto error_kmalloc; 1040 1041 result = -ENOENT; 1042 if (urb->status != -EINPROGRESS) /* cancelled */ 1043 goto error_dequeued; /* before starting? */ 1044 wa_xfer_init(xfer); 1045 xfer->wa = wa_get(wa); 1046 xfer->urb = urb; 1047 xfer->gfp = gfp; 1048 xfer->ep = ep; 1049 urb->hcpriv = xfer; 1050 1051 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", 1052 xfer, urb, urb->pipe, urb->transfer_buffer_length, 1053 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", 1054 urb->pipe & USB_DIR_IN ? "inbound" : "outbound", 1055 cant_sleep ? "deferred" : "inline"); 1056 1057 if (cant_sleep) { 1058 usb_get_urb(urb); 1059 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); 1060 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); 1061 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1062 queue_work(wusbd, &wa->xfer_work); 1063 } else { 1064 wa_urb_enqueue_b(xfer); 1065 } 1066 return 0; 1067 1068error_dequeued: 1069 kfree(xfer); 1070error_kmalloc: 1071 return result; 1072} 1073EXPORT_SYMBOL_GPL(wa_urb_enqueue); 1074 1075/* 1076 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion 1077 * handler] is called. 1078 * 1079 * Until a transfer goes successfully through wa_urb_enqueue() it 1080 * needs to be dequeued with completion calling; when stuck in delayed 1081 * or before wa_xfer_setup() is called, we need to do completion. 1082 * 1083 * not setup If there is no hcpriv yet, that means that that enqueue 1084 * still had no time to set the xfer up. Because 1085 * urb->status should be other than -EINPROGRESS, 1086 * enqueue() will catch that and bail out. 1087 * 1088 * If the transfer has gone through setup, we just need to clean it 1089 * up. If it has gone through submit(), we have to abort it [with an 1090 * asynch request] and then make sure we cancel each segment. 1091 * 1092 */ 1093int wa_urb_dequeue(struct wahc *wa, struct urb *urb) 1094{ 1095 unsigned long flags, flags2; 1096 struct wa_xfer *xfer; 1097 struct wa_seg *seg; 1098 struct wa_rpipe *rpipe; 1099 unsigned cnt; 1100 unsigned rpipe_ready = 0; 1101 1102 xfer = urb->hcpriv; 1103 if (xfer == NULL) { 1104 /* NOthing setup yet enqueue will see urb->status != 1105 * -EINPROGRESS (by hcd layer) and bail out with 1106 * error, no need to do completion 1107 */ 1108 BUG_ON(urb->status == -EINPROGRESS); 1109 goto out; 1110 } 1111 spin_lock_irqsave(&xfer->lock, flags); 1112 rpipe = xfer->ep->hcpriv; 1113 /* Check the delayed list -> if there, release and complete */ 1114 spin_lock_irqsave(&wa->xfer_list_lock, flags2); 1115 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) 1116 goto dequeue_delayed; 1117 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); 1118 if (xfer->seg == NULL) /* still hasn't reached */ 1119 goto out_unlock; /* setup(), enqueue_b() completes */ 1120 /* Ok, the xfer is in flight already, it's been setup and submitted.*/ 1121 __wa_xfer_abort(xfer); 1122 for (cnt = 0; cnt < xfer->segs; cnt++) { 1123 seg = xfer->seg[cnt]; 1124 switch (seg->status) { 1125 case WA_SEG_NOTREADY: 1126 case WA_SEG_READY: 1127 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", 1128 xfer, cnt, seg->status); 1129 WARN_ON(1); 1130 break; 1131 case WA_SEG_DELAYED: 1132 seg->status = WA_SEG_ABORTED; 1133 spin_lock_irqsave(&rpipe->seg_lock, flags2); 1134 list_del(&seg->list_node); 1135 xfer->segs_done++; 1136 rpipe_ready = rpipe_avail_inc(rpipe); 1137 spin_unlock_irqrestore(&rpipe->seg_lock, flags2); 1138 break; 1139 case WA_SEG_SUBMITTED: 1140 seg->status = WA_SEG_ABORTED; 1141 usb_unlink_urb(&seg->urb); 1142 if (xfer->is_inbound == 0) 1143 usb_unlink_urb(seg->dto_urb); 1144 xfer->segs_done++; 1145 rpipe_ready = rpipe_avail_inc(rpipe); 1146 break; 1147 case WA_SEG_PENDING: 1148 seg->status = WA_SEG_ABORTED; 1149 xfer->segs_done++; 1150 rpipe_ready = rpipe_avail_inc(rpipe); 1151 break; 1152 case WA_SEG_DTI_PENDING: 1153 usb_unlink_urb(wa->dti_urb); 1154 seg->status = WA_SEG_ABORTED; 1155 xfer->segs_done++; 1156 rpipe_ready = rpipe_avail_inc(rpipe); 1157 break; 1158 case WA_SEG_DONE: 1159 case WA_SEG_ERROR: 1160 case WA_SEG_ABORTED: 1161 break; 1162 } 1163 } 1164 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ 1165 __wa_xfer_is_done(xfer); 1166 spin_unlock_irqrestore(&xfer->lock, flags); 1167 wa_xfer_completion(xfer); 1168 if (rpipe_ready) 1169 wa_xfer_delayed_run(rpipe); 1170 return 0; 1171 1172out_unlock: 1173 spin_unlock_irqrestore(&xfer->lock, flags); 1174out: 1175 return 0; 1176 1177dequeue_delayed: 1178 list_del_init(&xfer->list_node); 1179 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); 1180 xfer->result = urb->status; 1181 spin_unlock_irqrestore(&xfer->lock, flags); 1182 wa_xfer_giveback(xfer); 1183 usb_put_urb(urb); /* we got a ref in enqueue() */ 1184 return 0; 1185} 1186EXPORT_SYMBOL_GPL(wa_urb_dequeue); 1187 1188/* 1189 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno 1190 * codes 1191 * 1192 * Positive errno values are internal inconsistencies and should be 1193 * flagged louder. Negative are to be passed up to the user in the 1194 * normal way. 1195 * 1196 * @status: USB WA status code -- high two bits are stripped. 1197 */ 1198static int wa_xfer_status_to_errno(u8 status) 1199{ 1200 int errno; 1201 u8 real_status = status; 1202 static int xlat[] = { 1203 [WA_XFER_STATUS_SUCCESS] = 0, 1204 [WA_XFER_STATUS_HALTED] = -EPIPE, 1205 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS, 1206 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW, 1207 [WA_XFER_RESERVED] = EINVAL, 1208 [WA_XFER_STATUS_NOT_FOUND] = 0, 1209 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, 1210 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, 1211 [WA_XFER_STATUS_ABORTED] = -EINTR, 1212 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, 1213 [WA_XFER_INVALID_FORMAT] = EINVAL, 1214 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, 1215 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL, 1216 }; 1217 status &= 0x3f; 1218 1219 if (status == 0) 1220 return 0; 1221 if (status >= ARRAY_SIZE(xlat)) { 1222 printk_ratelimited(KERN_ERR "%s(): BUG? " 1223 "Unknown WA transfer status 0x%02x\n", 1224 __func__, real_status); 1225 return -EINVAL; 1226 } 1227 errno = xlat[status]; 1228 if (unlikely(errno > 0)) { 1229 printk_ratelimited(KERN_ERR "%s(): BUG? " 1230 "Inconsistent WA status: 0x%02x\n", 1231 __func__, real_status); 1232 errno = -errno; 1233 } 1234 return errno; 1235} 1236 1237/* 1238 * Process a xfer result completion message 1239 * 1240 * inbound transfers: need to schedule a DTI read 1241 * 1242 * FIXME: this functio needs to be broken up in parts 1243 */ 1244static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) 1245{ 1246 int result; 1247 struct device *dev = &wa->usb_iface->dev; 1248 unsigned long flags; 1249 u8 seg_idx; 1250 struct wa_seg *seg; 1251 struct wa_rpipe *rpipe; 1252 struct wa_xfer_result *xfer_result = wa->xfer_result; 1253 u8 done = 0; 1254 u8 usb_status; 1255 unsigned rpipe_ready = 0; 1256 1257 spin_lock_irqsave(&xfer->lock, flags); 1258 seg_idx = xfer_result->bTransferSegment & 0x7f; 1259 if (unlikely(seg_idx >= xfer->segs)) 1260 goto error_bad_seg; 1261 seg = xfer->seg[seg_idx]; 1262 rpipe = xfer->ep->hcpriv; 1263 usb_status = xfer_result->bTransferStatus; 1264 dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", 1265 xfer, seg_idx, usb_status, seg->status); 1266 if (seg->status == WA_SEG_ABORTED 1267 || seg->status == WA_SEG_ERROR) /* already handled */ 1268 goto segment_aborted; 1269 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ 1270 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ 1271 if (seg->status != WA_SEG_PENDING) { 1272 if (printk_ratelimit()) 1273 dev_err(dev, "xfer %p#%u: Bad segment state %u\n", 1274 xfer, seg_idx, seg->status); 1275 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ 1276 } 1277 if (usb_status & 0x80) { 1278 seg->result = wa_xfer_status_to_errno(usb_status); 1279 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n", 1280 xfer, seg->index, usb_status); 1281 goto error_complete; 1282 } 1283 /* FIXME: we ignore warnings, tally them for stats */ 1284 if (usb_status & 0x40) /* Warning?... */ 1285 usb_status = 0; /* ... pass */ 1286 if (xfer->is_inbound) { /* IN data phase: read to buffer */ 1287 seg->status = WA_SEG_DTI_PENDING; 1288 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); 1289 if (xfer->is_dma) { 1290 wa->buf_in_urb->transfer_dma = 1291 xfer->urb->transfer_dma 1292 + seg_idx * xfer->seg_size; 1293 wa->buf_in_urb->transfer_flags 1294 |= URB_NO_TRANSFER_DMA_MAP; 1295 } else { 1296 wa->buf_in_urb->transfer_buffer = 1297 xfer->urb->transfer_buffer 1298 + seg_idx * xfer->seg_size; 1299 wa->buf_in_urb->transfer_flags 1300 &= ~URB_NO_TRANSFER_DMA_MAP; 1301 } 1302 wa->buf_in_urb->transfer_buffer_length = 1303 le32_to_cpu(xfer_result->dwTransferLength); 1304 wa->buf_in_urb->context = seg; 1305 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); 1306 if (result < 0) 1307 goto error_submit_buf_in; 1308 } else { 1309 /* OUT data phase, complete it -- */ 1310 seg->status = WA_SEG_DONE; 1311 seg->result = le32_to_cpu(xfer_result->dwTransferLength); 1312 xfer->segs_done++; 1313 rpipe_ready = rpipe_avail_inc(rpipe); 1314 done = __wa_xfer_is_done(xfer); 1315 } 1316 spin_unlock_irqrestore(&xfer->lock, flags); 1317 if (done) 1318 wa_xfer_completion(xfer); 1319 if (rpipe_ready) 1320 wa_xfer_delayed_run(rpipe); 1321 return; 1322 1323error_submit_buf_in: 1324 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { 1325 dev_err(dev, "DTI: URB max acceptable errors " 1326 "exceeded, resetting device\n"); 1327 wa_reset_all(wa); 1328 } 1329 if (printk_ratelimit()) 1330 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n", 1331 xfer, seg_idx, result); 1332 seg->result = result; 1333error_complete: 1334 seg->status = WA_SEG_ERROR; 1335 xfer->segs_done++; 1336 rpipe_ready = rpipe_avail_inc(rpipe); 1337 __wa_xfer_abort(xfer); 1338 done = __wa_xfer_is_done(xfer); 1339 spin_unlock_irqrestore(&xfer->lock, flags); 1340 if (done) 1341 wa_xfer_completion(xfer); 1342 if (rpipe_ready) 1343 wa_xfer_delayed_run(rpipe); 1344 return; 1345 1346error_bad_seg: 1347 spin_unlock_irqrestore(&xfer->lock, flags); 1348 wa_urb_dequeue(wa, xfer->urb); 1349 if (printk_ratelimit()) 1350 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); 1351 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { 1352 dev_err(dev, "DTI: URB max acceptable errors " 1353 "exceeded, resetting device\n"); 1354 wa_reset_all(wa); 1355 } 1356 return; 1357 1358segment_aborted: 1359 /* nothing to do, as the aborter did the completion */ 1360 spin_unlock_irqrestore(&xfer->lock, flags); 1361} 1362 1363/* 1364 * Callback for the IN data phase 1365 * 1366 * If successful transition state; otherwise, take a note of the 1367 * error, mark this segment done and try completion. 1368 * 1369 * Note we don't access until we are sure that the transfer hasn't 1370 * been cancelled (ECONNRESET, ENOENT), which could mean that 1371 * seg->xfer could be already gone. 1372 */ 1373static void wa_buf_in_cb(struct urb *urb) 1374{ 1375 struct wa_seg *seg = urb->context; 1376 struct wa_xfer *xfer = seg->xfer; 1377 struct wahc *wa; 1378 struct device *dev; 1379 struct wa_rpipe *rpipe; 1380 unsigned rpipe_ready; 1381 unsigned long flags; 1382 u8 done = 0; 1383 1384 switch (urb->status) { 1385 case 0: 1386 spin_lock_irqsave(&xfer->lock, flags); 1387 wa = xfer->wa; 1388 dev = &wa->usb_iface->dev; 1389 rpipe = xfer->ep->hcpriv; 1390 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n", 1391 xfer, seg->index, (size_t)urb->actual_length); 1392 seg->status = WA_SEG_DONE; 1393 seg->result = urb->actual_length; 1394 xfer->segs_done++; 1395 rpipe_ready = rpipe_avail_inc(rpipe); 1396 done = __wa_xfer_is_done(xfer); 1397 spin_unlock_irqrestore(&xfer->lock, flags); 1398 if (done) 1399 wa_xfer_completion(xfer); 1400 if (rpipe_ready) 1401 wa_xfer_delayed_run(rpipe); 1402 break; 1403 case -ECONNRESET: /* URB unlinked; no need to do anything */ 1404 case -ENOENT: /* as it was done by the who unlinked us */ 1405 break; 1406 default: /* Other errors ... */ 1407 spin_lock_irqsave(&xfer->lock, flags); 1408 wa = xfer->wa; 1409 dev = &wa->usb_iface->dev; 1410 rpipe = xfer->ep->hcpriv; 1411 if (printk_ratelimit()) 1412 dev_err(dev, "xfer %p#%u: data in error %d\n", 1413 xfer, seg->index, urb->status); 1414 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 1415 EDC_ERROR_TIMEFRAME)){ 1416 dev_err(dev, "DTO: URB max acceptable errors " 1417 "exceeded, resetting device\n"); 1418 wa_reset_all(wa); 1419 } 1420 seg->status = WA_SEG_ERROR; 1421 seg->result = urb->status; 1422 xfer->segs_done++; 1423 rpipe_ready = rpipe_avail_inc(rpipe); 1424 __wa_xfer_abort(xfer); 1425 done = __wa_xfer_is_done(xfer); 1426 spin_unlock_irqrestore(&xfer->lock, flags); 1427 if (done) 1428 wa_xfer_completion(xfer); 1429 if (rpipe_ready) 1430 wa_xfer_delayed_run(rpipe); 1431 } 1432} 1433 1434/* 1435 * Handle an incoming transfer result buffer 1436 * 1437 * Given a transfer result buffer, it completes the transfer (possibly 1438 * scheduling and buffer in read) and then resubmits the DTI URB for a 1439 * new transfer result read. 1440 * 1441 * 1442 * The xfer_result DTI URB state machine 1443 * 1444 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In) 1445 * 1446 * We start in OFF mode, the first xfer_result notification [through 1447 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to 1448 * read. 1449 * 1450 * We receive a buffer -- if it is not a xfer_result, we complain and 1451 * repost the DTI-URB. If it is a xfer_result then do the xfer seg 1452 * request accounting. If it is an IN segment, we move to RBI and post 1453 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will 1454 * repost the DTI-URB and move to RXR state. if there was no IN 1455 * segment, it will repost the DTI-URB. 1456 * 1457 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many 1458 * errors) in the URBs. 1459 */ 1460static void wa_xfer_result_cb(struct urb *urb) 1461{ 1462 int result; 1463 struct wahc *wa = urb->context; 1464 struct device *dev = &wa->usb_iface->dev; 1465 struct wa_xfer_result *xfer_result; 1466 u32 xfer_id; 1467 struct wa_xfer *xfer; 1468 u8 usb_status; 1469 1470 BUG_ON(wa->dti_urb != urb); 1471 switch (wa->dti_urb->status) { 1472 case 0: 1473 /* We have a xfer result buffer; check it */ 1474 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", 1475 urb->actual_length, urb->transfer_buffer); 1476 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { 1477 dev_err(dev, "DTI Error: xfer result--bad size " 1478 "xfer result (%d bytes vs %zu needed)\n", 1479 urb->actual_length, sizeof(*xfer_result)); 1480 break; 1481 } 1482 xfer_result = wa->xfer_result; 1483 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { 1484 dev_err(dev, "DTI Error: xfer result--" 1485 "bad header length %u\n", 1486 xfer_result->hdr.bLength); 1487 break; 1488 } 1489 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { 1490 dev_err(dev, "DTI Error: xfer result--" 1491 "bad header type 0x%02x\n", 1492 xfer_result->hdr.bNotifyType); 1493 break; 1494 } 1495 usb_status = xfer_result->bTransferStatus & 0x3f; 1496 if (usb_status == WA_XFER_STATUS_ABORTED 1497 || usb_status == WA_XFER_STATUS_NOT_FOUND) 1498 /* taken care of already */ 1499 break; 1500 xfer_id = xfer_result->dwTransferID; 1501 xfer = wa_xfer_get_by_id(wa, xfer_id); 1502 if (xfer == NULL) { 1503 /* FIXME: transaction might have been cancelled */ 1504 dev_err(dev, "DTI Error: xfer result--" 1505 "unknown xfer 0x%08x (status 0x%02x)\n", 1506 xfer_id, usb_status); 1507 break; 1508 } 1509 wa_xfer_result_chew(wa, xfer); 1510 wa_xfer_put(xfer); 1511 break; 1512 case -ENOENT: /* (we killed the URB)...so, no broadcast */ 1513 case -ESHUTDOWN: /* going away! */ 1514 dev_dbg(dev, "DTI: going down! %d\n", urb->status); 1515 goto out; 1516 default: 1517 /* Unknown error */ 1518 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, 1519 EDC_ERROR_TIMEFRAME)) { 1520 dev_err(dev, "DTI: URB max acceptable errors " 1521 "exceeded, resetting device\n"); 1522 wa_reset_all(wa); 1523 goto out; 1524 } 1525 if (printk_ratelimit()) 1526 dev_err(dev, "DTI: URB error %d\n", urb->status); 1527 break; 1528 } 1529 /* Resubmit the DTI URB */ 1530 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); 1531 if (result < 0) { 1532 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " 1533 "resetting\n", result); 1534 wa_reset_all(wa); 1535 } 1536out: 1537 return; 1538} 1539 1540/* 1541 * Transfer complete notification 1542 * 1543 * Called from the notif.c code. We get a notification on EP2 saying 1544 * that some endpoint has some transfer result data available. We are 1545 * about to read it. 1546 * 1547 * To speed up things, we always have a URB reading the DTI URB; we 1548 * don't really set it up and start it until the first xfer complete 1549 * notification arrives, which is what we do here. 1550 * 1551 * Follow up in wa_xfer_result_cb(), as that's where the whole state 1552 * machine starts. 1553 * 1554 * So here we just initialize the DTI URB for reading transfer result 1555 * notifications and also the buffer-in URB, for reading buffers. Then 1556 * we just submit the DTI URB. 1557 * 1558 * @wa shall be referenced 1559 */ 1560void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) 1561{ 1562 int result; 1563 struct device *dev = &wa->usb_iface->dev; 1564 struct wa_notif_xfer *notif_xfer; 1565 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; 1566 1567 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); 1568 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); 1569 1570 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) { 1571 /* FIXME: hardcoded limitation, adapt */ 1572 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n", 1573 notif_xfer->bEndpoint, dti_epd->bEndpointAddress); 1574 goto error; 1575 } 1576 if (wa->dti_urb != NULL) /* DTI URB already started */ 1577 goto out; 1578 1579 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); 1580 if (wa->dti_urb == NULL) { 1581 dev_err(dev, "Can't allocate DTI URB\n"); 1582 goto error_dti_urb_alloc; 1583 } 1584 usb_fill_bulk_urb( 1585 wa->dti_urb, wa->usb_dev, 1586 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), 1587 wa->xfer_result, wa->xfer_result_size, 1588 wa_xfer_result_cb, wa); 1589 1590 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL); 1591 if (wa->buf_in_urb == NULL) { 1592 dev_err(dev, "Can't allocate BUF-IN URB\n"); 1593 goto error_buf_in_urb_alloc; 1594 } 1595 usb_fill_bulk_urb( 1596 wa->buf_in_urb, wa->usb_dev, 1597 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), 1598 NULL, 0, wa_buf_in_cb, wa); 1599 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); 1600 if (result < 0) { 1601 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), " 1602 "resetting\n", result); 1603 goto error_dti_urb_submit; 1604 } 1605out: 1606 return; 1607 1608error_dti_urb_submit: 1609 usb_put_urb(wa->buf_in_urb); 1610error_buf_in_urb_alloc: 1611 usb_put_urb(wa->dti_urb); 1612 wa->dti_urb = NULL; 1613error_dti_urb_alloc: 1614error: 1615 wa_reset_all(wa); 1616}