Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.29-rc4

/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, it is a pain to implement.
 *
 *
 * ENTRY POINTS
 *
 *   FIXME
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   Warned you are; it's my second try and still not happy with it.
 *
 * NOTES:
 *
 *   - No iso
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it's there, assigned, until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two methods it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when a xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *         when none are found go over the list, check their endpoint
 *         and their activity record (if no last-xfer-done-ts in the
 *         last x seconds) take it
 *
 *     However, due to the fact that we have a set of limited
 *     resources (max-segments-at-the-same-time per xfer,
 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *     we are going to have to rebuild all this based on a scheduler,
 *     where we have a list of transactions to do and, based on the
 *     availability of the different required components (blocks,
 *     rpipes, segment slots, etc), we go scheduling them. Painful.
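 *
 * A worked example of the segment math (hypothetical descriptor
 * values, not taken from any real device): with wBlocks = 4, a
 * bRPipeBlockSize of 10 (blocks of 1 << 9 = 512 bytes) and
 * wMaxPacketSize = 512, __wa_xfer_setup_sizes() below computes
 *
 *     seg_size = 4 * (1 << 9) = 2048   [already a maxpacket multiple]
 *     segs = DIV_ROUND_UP(5000, 2048) = 3   [for a 5000 byte URB]
 *
 * so a 5000 byte outbound URB goes out as three segment requests
 * carrying 2048, 2048 and 904 bytes of data over the DTO endpoint.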
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/hash.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
        WA_SEGS_MAX = 255,
};

enum wa_seg_status {
        WA_SEG_NOTREADY,
        WA_SEG_READY,
        WA_SEG_DELAYED,
        WA_SEG_SUBMITTED,
        WA_SEG_PENDING,
        WA_SEG_DTI_PENDING,
        WA_SEG_DONE,
        WA_SEG_ERROR,
        WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);

/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
        struct urb urb;
        struct urb *dto_urb;            /* for data output? */
        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* our xfer */
        u8 index;                       /* which segment we are */
        enum wa_seg_status status;
        ssize_t result;                 /* bytes xfered or error */
        struct wa_xfer_hdr xfer_hdr;
        u8 xfer_extra[];                /* extra space for xfer_hdr_ctl */
};

static void wa_seg_init(struct wa_seg *seg)
{
        /* usb_init_urb() repeats a lot of work, so we do it here */
        kref_init(&seg->urb.kref);
}

/*
 * Protected by xfer->lock
 */
struct wa_xfer {
        struct kref refcnt;
        struct list_head list_node;
        spinlock_t lock;
        u32 id;

        struct wahc *wa;                /* Wire adapter we are plugged to */
        struct usb_host_endpoint *ep;
        struct urb *urb;                /* URB we are transferring for */
        struct wa_seg **seg;            /* transfer segments */
        u8 segs, segs_submitted, segs_done;
        unsigned is_inbound:1;
        unsigned is_dma:1;
        size_t seg_size;
        int result;

        gfp_t gfp;                      /* allocation mask */

        struct wusb_dev *wusb_dev;      /* for activity timestamps */
};

static inline void wa_xfer_init(struct wa_xfer *xfer)
{
        kref_init(&xfer->refcnt);
        INIT_LIST_HEAD(&xfer->list_node);
        spin_lock_init(&xfer->lock);
}

/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
                        if (xfer->is_inbound)
                                usb_put_urb(xfer->seg[cnt]->dto_urb);
                        usb_put_urb(&xfer->seg[cnt]->urb);
                }
        }
        kfree(xfer);
}

static void wa_xfer_get(struct wa_xfer *xfer)
{
        kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
        kref_put(&xfer->refcnt, wa_xfer_destroy);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
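 *
 * As an illustration of the race this closes: without the lock,
 * usb_hcd_giveback_urb() could run and free the URB between
 * wa_urb_dequeue() reading urb->hcpriv and taking its reference,
 * leaving the dequeue path with a dangling xfer pointer.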
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
        /* FIXME: segmentation broken -- kills DWA */
        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
        wa_put(xfer->wa);
        wa_xfer_put(xfer);
}

/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
        if (xfer->wusb_dev)
                wusb_dev_put(xfer->wusb_dev);
        rpipe_put(xfer->ep->hcpriv);
        wa_xfer_giveback(xfer);
}

/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
        struct device *dev = &xfer->wa->usb_iface->dev;
        unsigned result, cnt;
        struct wa_seg *seg;
        struct urb *urb = xfer->urb;
        unsigned found_short = 0;

        result = xfer->segs_done == xfer->segs_submitted;
        if (result == 0)
                goto out;
        urb->actual_length = 0;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_DONE:
                        if (found_short && seg->result > 0) {
                                dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
                                        xfer, cnt, seg->result);
                                urb->status = -EINVAL;
                                goto out;
                        }
                        urb->actual_length += seg->result;
                        if (seg->result < xfer->seg_size
                            && cnt != xfer->segs-1)
                                found_short = 1;
                        dev_dbg(dev, "xfer %p#%u: DONE short %d "
                                "result %zu urb->actual_length %d\n",
                                xfer, seg->index, found_short, seg->result,
                                urb->actual_length);
                        break;
                case WA_SEG_ERROR:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
                                xfer, seg->index, seg->result);
                        goto out;
                case WA_SEG_ABORTED:
                        dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
                                xfer, seg->index, urb->status);
                        xfer->result = urb->status;
                        goto out;
                default:
                        dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
                                 xfer, cnt, seg->status);
                        xfer->result = -EINVAL;
                        goto out;
                }
        }
        xfer->result = 0;
out:
        return result;
}

/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA....wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}

/*
 * Return the ID associated with xfer
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
        return xfer->id;
}

/*
 * Search the wire adapter's transfer list for the xfer with a given ID
 *
 * @returns NULL if not found.
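 *
 * The reference this takes is owned by the caller; a minimal usage
 * sketch (this is how wa_xfer_result_cb() below uses it):
 *
 *     xfer = wa_xfer_get_by_id(wa, xfer_result->dwTransferID);
 *     if (xfer == NULL)
 *             return;                  [result for an unknown xfer]
 *     wa_xfer_result_chew(wa, xfer);
 *     wa_xfer_put(xfer);               [drop the search reference]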
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
        unsigned long flags;
        struct wa_xfer *xfer_itr;
        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
                if (id == xfer_itr->id) {
                        wa_xfer_get(xfer_itr);
                        goto out;
                }
        }
        xfer_itr = NULL;
out:
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
        return xfer_itr;
}

struct wa_xfer_abort_buffer {
        struct urb urb;
        struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
        struct wa_xfer_abort_buffer *b = urb->context;
        usb_put_urb(&b->urb);
}

/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        struct wa_xfer_abort_buffer *b;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        b = kmalloc(sizeof(*b), GFP_ATOMIC);
        if (b == NULL)
                goto error_kmalloc;
        b->cmd.bLength = sizeof(b->cmd);
        b->cmd.bRequestType = WA_XFER_ABORT;
        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
        b->cmd.dwTransferID = wa_xfer_id(xfer);

        usb_init_urb(&b->urb);
        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
                usb_sndbulkpipe(xfer->wa->usb_dev,
                                xfer->wa->dto_epd->bEndpointAddress),
                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
        if (result < 0)
                goto error_submit;
        return;                         /* callback frees! */

error_submit:
        if (printk_ratelimit())
                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
                        xfer, result);
        kfree(b);
error_kmalloc:
        return;
}

/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                                     enum wa_xfer_type *pxfer_type)
{
        ssize_t result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        size_t maxpktsize;
        struct urb *urb = xfer->urb;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        switch (rpipe->descr.bmAttribute & 0x3) {
        case USB_ENDPOINT_XFER_CONTROL:
                *pxfer_type = WA_XFER_TYPE_CTL;
                result = sizeof(struct wa_xfer_ctl);
                break;
        case USB_ENDPOINT_XFER_INT:
        case USB_ENDPOINT_XFER_BULK:
                *pxfer_type = WA_XFER_TYPE_BI;
                result = sizeof(struct wa_xfer_bi);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                dev_err(dev, "FIXME: ISOC not implemented\n");
                result = -ENOSYS;
                goto error;
        default:
                /* never happens */
                BUG();
                result = -EINVAL;       /* shut gcc up */
        }
        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
        /* Compute the segment size and make sure it is a multiple of
         * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
         * a check (FIXME) */
        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
        if (xfer->seg_size < maxpktsize) {
                dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
                        "%zu\n", xfer->seg_size, maxpktsize);
                result = -EINVAL;
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
        xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
                / xfer->seg_size;
        if (xfer->segs >= WA_SEGS_MAX) {
                dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
                        (int)(urb->transfer_buffer_length / xfer->seg_size),
                        WA_SEGS_MAX);
                result = -EINVAL;
                goto error;
        }
        if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
                xfer->segs = 1;
error:
        return result;
}

/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
                                 struct wa_xfer_hdr *xfer_hdr0,
                                 enum wa_xfer_type xfer_type,
                                 size_t xfer_hdr_size)
{
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        xfer_hdr0->bLength = xfer_hdr_size;
        xfer_hdr0->bRequestType = xfer_type;
        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
        xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
        xfer_hdr0->bTransferSegment = 0;
        switch (xfer_type) {
        case WA_XFER_TYPE_CTL: {
                struct wa_xfer_ctl *xfer_ctl =
                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
                BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
                       && xfer->urb->setup_packet == NULL);
                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
                       sizeof(xfer_ctl->baSetupData));
                break;
        }
        case WA_XFER_TYPE_BI:
                break;
        case WA_XFER_TYPE_ISO:
                printk(KERN_ERR "FIXME: ISOC not implemented\n");
        default:
                BUG();
        }
}

/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
                        xfer, seg->index, urb->actual_length);
                if (seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                seg->result = urb->actual_length;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the one who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data out error %d\n",
                        xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                if (seg->status != WA_SEG_ERROR) {
                        seg->status = WA_SEG_ERROR;
                        seg->result = urb->status;
                        xfer->segs_done++;
                        __wa_xfer_abort(xfer);
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        done = __wa_xfer_is_done(xfer);
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is
 * outbound, as in that case wa_seg_dto_cb will do it when the OUT
 * data phase finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the one who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: request error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                usb_unlink_urb(seg->dto_urb);
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
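 *
 * For reference, each allocation holds, in order (see alloc_size in
 * __wa_xfer_setup_segs() below):
 *
 *     struct wa_seg {
 *             struct urb urb;                  [refcount owns the block]
 *             ...
 *             struct wa_xfer_hdr xfer_hdr;     [common header]
 *             u8 xfer_extra[];                 [rest of the type-specific
 *                                               header, xfer_hdr_size total]
 *     };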
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
        int result, cnt;
        size_t alloc_size = sizeof(*xfer->seg[0])
                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
        struct usb_device *usb_dev = xfer->wa->usb_dev;
        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
        struct wa_seg *seg;
        size_t buf_itr, buf_size, buf_itr_size;

        result = -ENOMEM;
        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
        if (xfer->seg == NULL)
                goto error_segs_kzalloc;
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
                if (seg == NULL)
                        goto error_seg_kzalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
                usb_fill_bulk_urb(&seg->urb, usb_dev,
                                  usb_sndbulkpipe(usb_dev,
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
                                  wa_seg_cb, seg);
                buf_itr_size = buf_size > xfer->seg_size ?
                        xfer->seg_size : buf_size;
                if (xfer->is_inbound == 0 && buf_size > 0) {
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
                        usb_fill_bulk_urb(
                                seg->dto_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                                dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);
                        if (xfer->is_dma) {
                                seg->dto_urb->transfer_dma =
                                        xfer->urb->transfer_dma + buf_itr;
                                seg->dto_urb->transfer_flags |=
                                        URB_NO_TRANSFER_DMA_MAP;
                        } else
                                seg->dto_urb->transfer_buffer =
                                        xfer->urb->transfer_buffer + buf_itr;
                        seg->dto_urb->transfer_buffer_length = buf_itr_size;
                }
                seg->status = WA_SEG_READY;
                buf_itr += buf_itr_size;
                buf_size -= buf_itr_size;
        }
        return 0;

error_dto_alloc:
        kfree(xfer->seg[cnt]);
        cnt--;
error_seg_kzalloc:
        /* use the fact that cnt is left at where it failed */
        for (; cnt > 0; cnt--) {
                if (xfer->is_inbound == 0)
                        kfree(xfer->seg[cnt]->dto_urb);
                kfree(xfer->seg[cnt]);
        }
error_segs_kzalloc:
        return result;
}

/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments; each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function; no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
        size_t xfer_hdr_size, cnt, transfer_size;
        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
        if (result < 0)
                goto error_setup_sizes;
        xfer_hdr_size = result;
        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
        if (result < 0) {
                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
                        xfer, xfer->segs, result);
                goto error_setup_segs;
        }
        /* Fill the first header */
        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        wa_xfer_id_init(xfer);
        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

        /* Fill remaining headers */
        xfer_hdr = xfer_hdr0;
        transfer_size = urb->transfer_buffer_length;
        xfer_hdr0->dwTransferLength = cpu_to_le32(transfer_size > xfer->seg_size ?
                xfer->seg_size : transfer_size);
        transfer_size -= xfer->seg_size;
        for (cnt = 1; cnt < xfer->segs; cnt++) {
                xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
                memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                xfer_hdr->bTransferSegment = cnt;
                xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
                        cpu_to_le32(xfer->seg_size)
                        : cpu_to_le32(transfer_size);
                xfer->seg[cnt]->status = WA_SEG_READY;
                transfer_size -= xfer->seg_size;
        }
        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
        result = 0;
error_setup_segs:
error_setup_sizes:
        return result;
}

/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg)
{
        int result;
        result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
        if (result < 0) {
                printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
                       xfer, seg->index, result);
                goto error_seg_submit;
        }
        if (seg->dto_urb) {
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
                        printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
                               xfer, seg->index, result);
                        goto error_dto_submit;
                }
        }
        seg->status = WA_SEG_SUBMITTED;
        rpipe_avail_dec(rpipe);
        return 0;

error_dto_submit:
        usb_unlink_urb(&seg->urb);
error_seg_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
        return result;
}

/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
        int result;
        struct device *dev = &rpipe->wa->usb_iface->dev;
        struct wa_seg *seg;
        struct wa_xfer *xfer;
        unsigned long flags;

        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
               && !list_empty(&rpipe->seg_list)) {
                seg = list_entry(rpipe->seg_list.next, struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
                result = __wa_seg_submit(rpipe, xfer, seg);
                dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
                        xfer, seg->index, atomic_read(&rpipe->segs_available), result);
                if (unlikely(result < 0)) {
                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
                        spin_lock_irqsave(&xfer->lock, flags);
                        __wa_xfer_abort(xfer);
                        xfer->segs_done++;
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        spin_lock_irqsave(&rpipe->seg_lock, flags);
                }
        }
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}

/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
        int result;
        struct wahc *wa = xfer->wa;
        struct device *dev = &wa->usb_iface->dev;
        unsigned cnt;
        struct wa_seg *seg;
        unsigned long flags;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
        u8 available;
        u8 empty;

        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_add_tail(&xfer->list_node, &wa->xfer_list);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
        result = 0;
        spin_lock_irqsave(&rpipe->seg_lock, flags);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                available = atomic_read(&rpipe->segs_available);
                empty = list_empty(&rpipe->seg_list);
                seg = xfer->seg[cnt];
                dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
                        xfer, cnt, available, empty,
                        available == 0 || !empty ? "delayed" : "submitted");
                if (available == 0 || !empty) {
                        dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
                        seg->status = WA_SEG_DELAYED;
                        list_add_tail(&seg->list_node, &rpipe->seg_list);
                } else {
                        result = __wa_seg_submit(rpipe, xfer, seg);
                        if (result < 0) {
                                __wa_xfer_abort(xfer);
                                goto error_seg_submit;
                        }
                }
                xfer->segs_submitted++;
        }
error_seg_submit:
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
        return result;
}

/*
 * Second part of a URB/transfer enqueueing
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa     filled and refcounted
 * xfer->ep     filled, with the rpipe refcounted if
 *              delayed == 0
 * xfer->urb    filled and refcounted (this is the case when called
 *              from wa_urb_enqueue(), as we come from usb_submit_urb(),
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp    filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If the
 * xfer result never kicks in, the xfer will time out in the USB code
 * and dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
        int result;
        unsigned long flags;
        struct urb *urb = xfer->urb;
        struct wahc *wa = xfer->wa;
        struct wusbhc *wusbhc = wa->wusb;
        struct wusb_dev *wusb_dev;
        unsigned done;

        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
        if (result < 0)
                goto error_rpipe_get;
        result = -ENODEV;
        /* FIXME: segmentation broken -- kills DWA */
        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
        if (urb->dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
        if (wusb_dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                goto error_dev_gone;
        }
        mutex_unlock(&wusbhc->mutex);

        spin_lock_irqsave(&xfer->lock, flags);
        xfer->wusb_dev = wusb_dev;
        result = urb->status;
        if (urb->status != -EINPROGRESS)
                goto error_dequeued;

        result = __wa_xfer_setup(xfer, urb);
        if (result < 0)
                goto error_xfer_setup;
        result = __wa_xfer_submit(xfer);
        if (result < 0)
                goto error_xfer_submit;
        spin_unlock_irqrestore(&xfer->lock, flags);
        return;

        /* This is basically wa_xfer_completion() broken up;
         * wa_xfer_giveback() does a wa_xfer_put() that will call
         * wa_xfer_destroy() and clean up / undo setup().
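         *
         * In other words, the label chain below hand-unrolls
         * wa_xfer_completion() [wusb_dev_put(), rpipe_put(),
         * wa_xfer_giveback()], skipping whatever was never set up
         * on the path that failed.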
         */
error_xfer_setup:
error_dequeued:
        spin_unlock_irqrestore(&xfer->lock, flags);
        /* FIXME: segmentation broken, kills DWA */
        if (wusb_dev)
                wusb_dev_put(wusb_dev);
error_dev_gone:
        rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
        xfer->result = result;
        wa_xfer_giveback(xfer);
        return;

error_xfer_submit:
        done = __wa_xfer_is_done(xfer);
        xfer->result = result;
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
}

/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;

        spin_lock_irq(&wa->xfer_list_lock);
        list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
                                 list_node) {
                list_del_init(&xfer->list_node);
                spin_unlock_irq(&wa->xfer_list_lock);

                urb = xfer->urb;
                wa_urb_enqueue_b(xfer);
                usb_put_urb(urb);       /* taken when queuing */

                spin_lock_irq(&wa->xfer_list_lock);
        }
        spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);

/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueueing involves possible sleeps [see
 * wa_urb_enqueue_b(), for the rpipe_get() and the mutex_lock()]. If
 * we are in an atomic section, we defer the enqueue_b() call to a
 * workqueue; otherwise we call it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                   struct urb *urb, gfp_t gfp)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer *xfer;
        unsigned long my_flags;
        unsigned cant_sleep = irqs_disabled() | in_atomic();

        if (urb->transfer_buffer == NULL
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
            && urb->transfer_buffer_length != 0) {
                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
                dump_stack();
        }

        result = -ENOMEM;
        xfer = kzalloc(sizeof(*xfer), gfp);
        if (xfer == NULL)
                goto error_kmalloc;

        result = -ENOENT;
        if (urb->status != -EINPROGRESS)        /* cancelled */
                goto error_dequeued;            /* before starting? */
        wa_xfer_init(xfer);
        xfer->wa = wa_get(wa);
        xfer->urb = urb;
        xfer->gfp = gfp;
        xfer->ep = ep;
        urb->hcpriv = xfer;

        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
                xfer, urb, urb->pipe, urb->transfer_buffer_length,
                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
                cant_sleep ? "deferred" : "inline");
"deferred" : "inline"); 1053 1054 if (cant_sleep) { 1055 usb_get_urb(urb); 1056 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); 1057 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); 1058 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1059 queue_work(wusbd, &wa->xfer_work); 1060 } else { 1061 wa_urb_enqueue_b(xfer); 1062 } 1063 return 0; 1064 1065error_dequeued: 1066 kfree(xfer); 1067error_kmalloc: 1068 return result; 1069} 1070EXPORT_SYMBOL_GPL(wa_urb_enqueue); 1071 1072/* 1073 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion 1074 * handler] is called. 1075 * 1076 * Until a transfer goes successfully through wa_urb_enqueue() it 1077 * needs to be dequeued with completion calling; when stuck in delayed 1078 * or before wa_xfer_setup() is called, we need to do completion. 1079 * 1080 * not setup If there is no hcpriv yet, that means that that enqueue 1081 * still had no time to set the xfer up. Because 1082 * urb->status should be other than -EINPROGRESS, 1083 * enqueue() will catch that and bail out. 1084 * 1085 * If the transfer has gone through setup, we just need to clean it 1086 * up. If it has gone through submit(), we have to abort it [with an 1087 * asynch request] and then make sure we cancel each segment. 1088 * 1089 */ 1090int wa_urb_dequeue(struct wahc *wa, struct urb *urb) 1091{ 1092 unsigned long flags, flags2; 1093 struct wa_xfer *xfer; 1094 struct wa_seg *seg; 1095 struct wa_rpipe *rpipe; 1096 unsigned cnt; 1097 unsigned rpipe_ready = 0; 1098 1099 xfer = urb->hcpriv; 1100 if (xfer == NULL) { 1101 /* NOthing setup yet enqueue will see urb->status != 1102 * -EINPROGRESS (by hcd layer) and bail out with 1103 * error, no need to do completion 1104 */ 1105 BUG_ON(urb->status == -EINPROGRESS); 1106 goto out; 1107 } 1108 spin_lock_irqsave(&xfer->lock, flags); 1109 rpipe = xfer->ep->hcpriv; 1110 /* Check the delayed list -> if there, release and complete */ 1111 spin_lock_irqsave(&wa->xfer_list_lock, flags2); 1112 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) 1113 goto dequeue_delayed; 1114 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); 1115 if (xfer->seg == NULL) /* still hasn't reached */ 1116 goto out_unlock; /* setup(), enqueue_b() completes */ 1117 /* Ok, the xfer is in flight already, it's been setup and submitted.*/ 1118 __wa_xfer_abort(xfer); 1119 for (cnt = 0; cnt < xfer->segs; cnt++) { 1120 seg = xfer->seg[cnt]; 1121 switch (seg->status) { 1122 case WA_SEG_NOTREADY: 1123 case WA_SEG_READY: 1124 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", 1125 xfer, cnt, seg->status); 1126 WARN_ON(1); 1127 break; 1128 case WA_SEG_DELAYED: 1129 seg->status = WA_SEG_ABORTED; 1130 spin_lock_irqsave(&rpipe->seg_lock, flags2); 1131 list_del(&seg->list_node); 1132 xfer->segs_done++; 1133 rpipe_ready = rpipe_avail_inc(rpipe); 1134 spin_unlock_irqrestore(&rpipe->seg_lock, flags2); 1135 break; 1136 case WA_SEG_SUBMITTED: 1137 seg->status = WA_SEG_ABORTED; 1138 usb_unlink_urb(&seg->urb); 1139 if (xfer->is_inbound == 0) 1140 usb_unlink_urb(seg->dto_urb); 1141 xfer->segs_done++; 1142 rpipe_ready = rpipe_avail_inc(rpipe); 1143 break; 1144 case WA_SEG_PENDING: 1145 seg->status = WA_SEG_ABORTED; 1146 xfer->segs_done++; 1147 rpipe_ready = rpipe_avail_inc(rpipe); 1148 break; 1149 case WA_SEG_DTI_PENDING: 1150 usb_unlink_urb(wa->dti_urb); 1151 seg->status = WA_SEG_ABORTED; 1152 xfer->segs_done++; 1153 rpipe_ready = rpipe_avail_inc(rpipe); 1154 break; 1155 case WA_SEG_DONE: 1156 case WA_SEG_ERROR: 1157 case WA_SEG_ABORTED: 1158 break; 1159 } 1160 } 
        xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
        __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return 0;

out_unlock:
        spin_unlock_irqrestore(&xfer->lock, flags);
out:
        return 0;

dequeue_delayed:
        list_del_init(&xfer->list_node);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
        xfer->result = urb->status;
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_giveback(xfer);
        usb_put_urb(urb);               /* we got a ref in enqueue() */
        return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);

/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
        int errno;
        u8 real_status = status;
        static int xlat[] = {
                [WA_XFER_STATUS_SUCCESS] = 0,
                [WA_XFER_STATUS_HALTED] = -EPIPE,
                [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS,
                [WA_XFER_STATUS_BABBLE] = -EOVERFLOW,
                [WA_XFER_RESERVED] = EINVAL,
                [WA_XFER_STATUS_NOT_FOUND] = 0,
                [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
                [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ,
                [WA_XFER_STATUS_ABORTED] = -EINTR,
                [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL,
                [WA_XFER_INVALID_FORMAT] = EINVAL,
                [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL,
                [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL,
        };
        status &= 0x3f;

        if (status == 0)
                return 0;
        if (status >= ARRAY_SIZE(xlat)) {
                if (printk_ratelimit())
                        printk(KERN_ERR "%s(): BUG? "
                               "Unknown WA transfer status 0x%02x\n",
                               __func__, real_status);
                return -EINVAL;
        }
        errno = xlat[status];
        if (unlikely(errno > 0)) {
                if (printk_ratelimit())
                        printk(KERN_ERR "%s(): BUG? "
" 1229 "Inconsistent WA status: 0x%02x\n", 1230 __func__, real_status); 1231 errno = -errno; 1232 } 1233 return errno; 1234} 1235 1236/* 1237 * Process a xfer result completion message 1238 * 1239 * inbound transfers: need to schedule a DTI read 1240 * 1241 * FIXME: this functio needs to be broken up in parts 1242 */ 1243static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer) 1244{ 1245 int result; 1246 struct device *dev = &wa->usb_iface->dev; 1247 unsigned long flags; 1248 u8 seg_idx; 1249 struct wa_seg *seg; 1250 struct wa_rpipe *rpipe; 1251 struct wa_xfer_result *xfer_result = wa->xfer_result; 1252 u8 done = 0; 1253 u8 usb_status; 1254 unsigned rpipe_ready = 0; 1255 1256 spin_lock_irqsave(&xfer->lock, flags); 1257 seg_idx = xfer_result->bTransferSegment & 0x7f; 1258 if (unlikely(seg_idx >= xfer->segs)) 1259 goto error_bad_seg; 1260 seg = xfer->seg[seg_idx]; 1261 rpipe = xfer->ep->hcpriv; 1262 usb_status = xfer_result->bTransferStatus; 1263 dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", 1264 xfer, seg_idx, usb_status, seg->status); 1265 if (seg->status == WA_SEG_ABORTED 1266 || seg->status == WA_SEG_ERROR) /* already handled */ 1267 goto segment_aborted; 1268 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ 1269 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ 1270 if (seg->status != WA_SEG_PENDING) { 1271 if (printk_ratelimit()) 1272 dev_err(dev, "xfer %p#%u: Bad segment state %u\n", 1273 xfer, seg_idx, seg->status); 1274 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ 1275 } 1276 if (usb_status & 0x80) { 1277 seg->result = wa_xfer_status_to_errno(usb_status); 1278 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n", 1279 xfer, seg->index, usb_status); 1280 goto error_complete; 1281 } 1282 /* FIXME: we ignore warnings, tally them for stats */ 1283 if (usb_status & 0x40) /* Warning?... */ 1284 usb_status = 0; /* ... 
        if (xfer->is_inbound) {         /* IN data phase: read to buffer */
                seg->status = WA_SEG_DTI_PENDING;
                BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
                if (xfer->is_dma) {
                        wa->buf_in_urb->transfer_dma =
                                xfer->urb->transfer_dma
                                + seg_idx * xfer->seg_size;
                        wa->buf_in_urb->transfer_flags
                                |= URB_NO_TRANSFER_DMA_MAP;
                } else {
                        wa->buf_in_urb->transfer_buffer =
                                xfer->urb->transfer_buffer
                                + seg_idx * xfer->seg_size;
                        wa->buf_in_urb->transfer_flags
                                &= ~URB_NO_TRANSFER_DMA_MAP;
                }
                wa->buf_in_urb->transfer_buffer_length =
                        le32_to_cpu(xfer_result->dwTransferLength);
                wa->buf_in_urb->context = seg;
                result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
                if (result < 0)
                        goto error_submit_buf_in;
        } else {
                /* OUT data phase, complete it -- */
                seg->status = WA_SEG_DONE;
                seg->result = le32_to_cpu(xfer_result->dwTransferLength);
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_submit_buf_in:
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
                        xfer, seg_idx, result);
        seg->result = result;
error_complete:
        seg->status = WA_SEG_ERROR;
        xfer->segs_done++;
        rpipe_ready = rpipe_avail_inc(rpipe);
        __wa_xfer_abort(xfer);
        done = __wa_xfer_is_done(xfer);
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        return;

error_bad_seg:
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_urb_dequeue(wa, xfer->urb);
        if (printk_ratelimit())
                dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
        if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                dev_err(dev, "DTI: URB max acceptable errors "
                        "exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        return;

segment_aborted:
        /* nothing to do, as the aborter did the completion */
        spin_unlock_irqrestore(&xfer->lock, flags);
}

/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
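 *
 * The error leg below follows the same fixed sequence every other
 * callback in this file uses: mark the segment, bump segs_done,
 * return the rpipe slot, abort the xfer and test for completion,
 * all under xfer->lock; then, with the lock dropped, run
 * wa_xfer_completion() and wa_xfer_delayed_run() as needed.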
 */
static void wa_buf_in_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned rpipe_ready;
        unsigned long flags;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
                        xfer, seg->index, (size_t)urb->actual_length);
                seg->status = WA_SEG_DONE;
                seg->result = urb->actual_length;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the one who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p#%u: data in error %d\n",
                                xfer, seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                seg->status = WA_SEG_ERROR;
                seg->result = urb->status;
                xfer->segs_done++;
                rpipe_ready = rpipe_avail_inc(rpipe);
                __wa_xfer_abort(xfer);
                done = __wa_xfer_is_done(xfer);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
}

/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode; the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result, then we do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move back to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
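 *
 * The same machine, compactly (errors can knock either active state
 * back to OFF):
 *
 *     OFF --first notification--> RXR --IN seg result--> RBI
 *      ^                           |  ^                   |
 *      |                           |  +--BUF-IN complete--+
 *      +--ENOENT/ESHUTDOWN/errors--+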
 */
static void wa_xfer_result_cb(struct urb *urb)
{
        int result;
        struct wahc *wa = urb->context;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer_result *xfer_result;
        u32 xfer_id;
        struct wa_xfer *xfer;
        u8 usb_status;

        BUG_ON(wa->dti_urb != urb);
        switch (wa->dti_urb->status) {
        case 0:
                /* We have a xfer result buffer; check it */
                dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
                        urb->actual_length, urb->transfer_buffer);
                if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--bad size "
                                "xfer result (%d bytes vs %zu needed)\n",
                                urb->actual_length, sizeof(*xfer_result));
                        break;
                }
                xfer_result = wa->xfer_result;
                if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header length %u\n",
                                xfer_result->hdr.bLength);
                        break;
                }
                if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
                        dev_err(dev, "DTI Error: xfer result--"
                                "bad header type 0x%02x\n",
                                xfer_result->hdr.bNotifyType);
                        break;
                }
                usb_status = xfer_result->bTransferStatus & 0x3f;
                if (usb_status == WA_XFER_STATUS_ABORTED
                    || usb_status == WA_XFER_STATUS_NOT_FOUND)
                        /* taken care of already */
                        break;
                xfer_id = xfer_result->dwTransferID;
                xfer = wa_xfer_get_by_id(wa, xfer_id);
                if (xfer == NULL) {
                        /* FIXME: transaction might have been cancelled */
                        dev_err(dev, "DTI Error: xfer result--"
                                "unknown xfer 0x%08x (status 0x%02x)\n",
                                xfer_id, usb_status);
                        break;
                }
                wa_xfer_result_chew(wa, xfer);
                wa_xfer_put(xfer);
                break;
        case -ENOENT:           /* (we killed the URB)...so, no broadcast */
        case -ESHUTDOWN:        /* going away! */
                dev_dbg(dev, "DTI: going down! %d\n", urb->status);
                goto out;
        default:
                /* Unknown error */
                if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "DTI: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                        goto out;
                }
                if (printk_ratelimit())
                        dev_err(dev, "DTI: URB error %d\n", urb->status);
                break;
        }
        /* Resubmit the DTI URB */
        result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                wa_reset_all(wa);
        }
out:
        return;
}

/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always keep a URB reading the DTI endpoint;
 * we don't really set it up and start it until the first xfer
 * complete notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
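 *
 * Note the BUF-IN URB allocated here is not submitted yet; it is
 * armed and posted per segment from wa_xfer_result_chew() when a
 * transfer result announces inbound data.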
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_notif_xfer *notif_xfer;
        const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

        notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
        BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

        if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
                /* FIXME: hardcoded limitation, adapt */
                dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
                        notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
                goto error;
        }
        if (wa->dti_urb != NULL)        /* DTI URB already started */
                goto out;

        wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->dti_urb == NULL) {
                dev_err(dev, "Can't allocate DTI URB\n");
                goto error_dti_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->dti_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                wa->xfer_result, wa->xfer_result_size,
                wa_xfer_result_cb, wa);

        wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
        if (wa->buf_in_urb == NULL) {
                dev_err(dev, "Can't allocate BUF-IN URB\n");
                goto error_buf_in_urb_alloc;
        }
        usb_fill_bulk_urb(
                wa->buf_in_urb, wa->usb_dev,
                usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
                NULL, 0, wa_buf_in_cb, wa);
        result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
        if (result < 0) {
                dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
                        "resetting\n", result);
                goto error_dti_urb_submit;
        }
out:
        return;

error_dti_urb_submit:
        usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
        usb_put_urb(wa->dti_urb);
        wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
        wa_reset_all(wa);
}
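
/*
 * Usage sketch: how a host-controller driver is expected to hook the
 * entry points exported above (a minimal sketch in the spirit of the
 * HWA HC driver; the wrapper and conversion-helper names here are
 * illustrative only, check the real HC driver for the exact code):
 *
 *     static int my_hc_urb_enqueue(struct usb_hcd *usb_hcd,
 *                                  struct urb *urb, gfp_t gfp)
 *     {
 *             struct wahc *wa = my_hcd_to_wahc(usb_hcd);
 *             return wa_urb_enqueue(wa, urb->ep, urb, gfp);
 *     }
 *
 *     static int my_hc_urb_dequeue(struct usb_hcd *usb_hcd,
 *                                  struct urb *urb, int status)
 *     {
 *             struct wahc *wa = my_hcd_to_wahc(usb_hcd);
 *             return wa_urb_dequeue(wa, urb);
 *     }
 *
 * wa_urb_enqueue_run() is the work function behind wa->xfer_work and
 * drains wa->xfer_delayed_list; wa_handle_notif_xfer() is called from
 * the notification (NEP) handling code when a transfer-result
 * notification arrives.
 */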