Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.14-rc4 2693 lines 82 kB view raw
1/* 2 * WUSB Wire Adapter 3 * Data transfer and URB enqueing 4 * 5 * Copyright (C) 2005-2006 Intel Corporation 6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License version 10 * 2 as published by the Free Software Foundation. 11 * 12 * This program is distributed in the hope that it will be useful, 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * GNU General Public License for more details. 16 * 17 * You should have received a copy of the GNU General Public License 18 * along with this program; if not, write to the Free Software 19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 20 * 02110-1301, USA. 21 * 22 * 23 * How transfers work: get a buffer, break it up in segments (segment 24 * size is a multiple of the maxpacket size). For each segment issue a 25 * segment request (struct wa_xfer_*), then send the data buffer if 26 * out or nothing if in (all over the DTO endpoint). 27 * 28 * For each submitted segment request, a notification will come over 29 * the NEP endpoint and a transfer result (struct xfer_result) will 30 * arrive in the DTI URB. Read it, get the xfer ID, see if there is 31 * data coming (inbound transfer), schedule a read and handle it. 32 * 33 * Sounds simple, it is a pain to implement. 34 * 35 * 36 * ENTRY POINTS 37 * 38 * FIXME 39 * 40 * LIFE CYCLE / STATE DIAGRAM 41 * 42 * FIXME 43 * 44 * THIS CODE IS DISGUSTING 45 * 46 * Warned you are; it's my second try and still not happy with it. 47 * 48 * NOTES: 49 * 50 * - No iso 51 * 52 * - Supports DMA xfers, control, bulk and maybe interrupt 53 * 54 * - Does not recycle unused rpipes 55 * 56 * An rpipe is assigned to an endpoint the first time it is used, 57 * and then it's there, assigned, until the endpoint is disabled 58 * (destroyed [{h,d}wahc_op_ep_disable()]. The assignment of the 59 * rpipe to the endpoint is done under the wa->rpipe_sem semaphore 60 * (should be a mutex). 61 * 62 * Two methods it could be done: 63 * 64 * (a) set up a timer every time an rpipe's use count drops to 1 65 * (which means unused) or when a transfer ends. Reset the 66 * timer when a xfer is queued. If the timer expires, release 67 * the rpipe [see rpipe_ep_disable()]. 68 * 69 * (b) when looking for free rpipes to attach [rpipe_get_by_ep()], 70 * when none are found go over the list, check their endpoint 71 * and their activity record (if no last-xfer-done-ts in the 72 * last x seconds) take it 73 * 74 * However, due to the fact that we have a set of limited 75 * resources (max-segments-at-the-same-time per xfer, 76 * xfers-per-ripe, blocks-per-rpipe, rpipes-per-host), at the end 77 * we are going to have to rebuild all this based on an scheduler, 78 * to where we have a list of transactions to do and based on the 79 * availability of the different required components (blocks, 80 * rpipes, segment slots, etc), we go scheduling them. Painful. 81 */ 82#include <linux/spinlock.h> 83#include <linux/slab.h> 84#include <linux/hash.h> 85#include <linux/ratelimit.h> 86#include <linux/export.h> 87#include <linux/scatterlist.h> 88 89#include "wa-hc.h" 90#include "wusbhc.h" 91 92enum { 93 /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. 
*/ 94 WA_SEGS_MAX = 128, 95}; 96 97enum wa_seg_status { 98 WA_SEG_NOTREADY, 99 WA_SEG_READY, 100 WA_SEG_DELAYED, 101 WA_SEG_SUBMITTED, 102 WA_SEG_PENDING, 103 WA_SEG_DTI_PENDING, 104 WA_SEG_DONE, 105 WA_SEG_ERROR, 106 WA_SEG_ABORTED, 107}; 108 109static void wa_xfer_delayed_run(struct wa_rpipe *); 110static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting); 111 112/* 113 * Life cycle governed by 'struct urb' (the refcount of the struct is 114 * that of the 'struct urb' and usb_free_urb() would free the whole 115 * struct). 116 */ 117struct wa_seg { 118 struct urb tr_urb; /* transfer request urb. */ 119 struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */ 120 struct urb *dto_urb; /* for data output. */ 121 struct list_head list_node; /* for rpipe->req_list */ 122 struct wa_xfer *xfer; /* out xfer */ 123 u8 index; /* which segment we are */ 124 int isoc_frame_count; /* number of isoc frames in this segment. */ 125 int isoc_frame_offset; /* starting frame offset in the xfer URB. */ 126 /* Isoc frame that the current transfer buffer corresponds to. */ 127 int isoc_frame_index; 128 int isoc_size; /* size of all isoc frames sent by this seg. */ 129 enum wa_seg_status status; 130 ssize_t result; /* bytes xfered or error */ 131 struct wa_xfer_hdr xfer_hdr; 132}; 133 134static inline void wa_seg_init(struct wa_seg *seg) 135{ 136 usb_init_urb(&seg->tr_urb); 137 138 /* set the remaining memory to 0. */ 139 memset(((void *)seg) + sizeof(seg->tr_urb), 0, 140 sizeof(*seg) - sizeof(seg->tr_urb)); 141} 142 143/* 144 * Protected by xfer->lock 145 * 146 */ 147struct wa_xfer { 148 struct kref refcnt; 149 struct list_head list_node; 150 spinlock_t lock; 151 u32 id; 152 153 struct wahc *wa; /* Wire adapter we are plugged to */ 154 struct usb_host_endpoint *ep; 155 struct urb *urb; /* URB we are transferring for */ 156 struct wa_seg **seg; /* transfer segments */ 157 u8 segs, segs_submitted, segs_done; 158 unsigned is_inbound:1; 159 unsigned is_dma:1; 160 size_t seg_size; 161 int result; 162 163 gfp_t gfp; /* allocation mask */ 164 165 struct wusb_dev *wusb_dev; /* for activity timestamps */ 166}; 167 168static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, 169 struct wa_seg *seg, int curr_iso_frame); 170 171static inline void wa_xfer_init(struct wa_xfer *xfer) 172{ 173 kref_init(&xfer->refcnt); 174 INIT_LIST_HEAD(&xfer->list_node); 175 spin_lock_init(&xfer->lock); 176} 177 178/* 179 * Destroy a transfer structure 180 * 181 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing 182 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs. 183 */ 184static void wa_xfer_destroy(struct kref *_xfer) 185{ 186 struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt); 187 if (xfer->seg) { 188 unsigned cnt; 189 for (cnt = 0; cnt < xfer->segs; cnt++) { 190 struct wa_seg *seg = xfer->seg[cnt]; 191 if (seg) { 192 usb_free_urb(seg->isoc_pack_desc_urb); 193 if (seg->dto_urb) { 194 kfree(seg->dto_urb->sg); 195 usb_free_urb(seg->dto_urb); 196 } 197 usb_free_urb(&seg->tr_urb); 198 } 199 } 200 kfree(xfer->seg); 201 } 202 kfree(xfer); 203} 204 205static void wa_xfer_get(struct wa_xfer *xfer) 206{ 207 kref_get(&xfer->refcnt); 208} 209 210static void wa_xfer_put(struct wa_xfer *xfer) 211{ 212 kref_put(&xfer->refcnt, wa_xfer_destroy); 213} 214 215/* 216 * Try to get exclusive access to the DTO endpoint resource. Return true 217 * if successful. 
218 */ 219static inline int __wa_dto_try_get(struct wahc *wa) 220{ 221 return (test_and_set_bit(0, &wa->dto_in_use) == 0); 222} 223 224/* Release the DTO endpoint resource. */ 225static inline void __wa_dto_put(struct wahc *wa) 226{ 227 clear_bit_unlock(0, &wa->dto_in_use); 228} 229 230/* Service RPIPEs that are waiting on the DTO resource. */ 231static void wa_check_for_delayed_rpipes(struct wahc *wa) 232{ 233 unsigned long flags; 234 int dto_waiting = 0; 235 struct wa_rpipe *rpipe; 236 237 spin_lock_irqsave(&wa->rpipe_lock, flags); 238 while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) { 239 rpipe = list_first_entry(&wa->rpipe_delayed_list, 240 struct wa_rpipe, list_node); 241 __wa_xfer_delayed_run(rpipe, &dto_waiting); 242 /* remove this RPIPE from the list if it is not waiting. */ 243 if (!dto_waiting) { 244 pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n", 245 __func__, 246 le16_to_cpu(rpipe->descr.wRPipeIndex)); 247 list_del_init(&rpipe->list_node); 248 } 249 } 250 spin_unlock_irqrestore(&wa->rpipe_lock, flags); 251} 252 253/* add this RPIPE to the end of the delayed RPIPE list. */ 254static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe) 255{ 256 unsigned long flags; 257 258 spin_lock_irqsave(&wa->rpipe_lock, flags); 259 /* add rpipe to the list if it is not already on it. */ 260 if (list_empty(&rpipe->list_node)) { 261 pr_debug("%s: adding RPIPE %d to the delayed list.\n", 262 __func__, le16_to_cpu(rpipe->descr.wRPipeIndex)); 263 list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list); 264 } 265 spin_unlock_irqrestore(&wa->rpipe_lock, flags); 266} 267 268/* 269 * xfer is referenced 270 * 271 * xfer->lock has to be unlocked 272 * 273 * We take xfer->lock for setting the result; this is a barrier 274 * against drivers/usb/core/hcd.c:unlink1() being called after we call 275 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a 276 * reference to the transfer. 277 */ 278static void wa_xfer_giveback(struct wa_xfer *xfer) 279{ 280 unsigned long flags; 281 282 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); 283 list_del_init(&xfer->list_node); 284 usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb); 285 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); 286 /* FIXME: segmentation broken -- kills DWA */ 287 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); 288 wa_put(xfer->wa); 289 wa_xfer_put(xfer); 290} 291 292/* 293 * xfer is referenced 294 * 295 * xfer->lock has to be unlocked 296 */ 297static void wa_xfer_completion(struct wa_xfer *xfer) 298{ 299 if (xfer->wusb_dev) 300 wusb_dev_put(xfer->wusb_dev); 301 rpipe_put(xfer->ep->hcpriv); 302 wa_xfer_giveback(xfer); 303} 304 305/* 306 * Initialize a transfer's ID 307 * 308 * We need to use a sequential number; if we use the pointer or the 309 * hash of the pointer, it can repeat over sequential transfers and 310 * then it will confuse the HWA....wonder why in hell they put a 32 311 * bit handle in there then. 312 */ 313static void wa_xfer_id_init(struct wa_xfer *xfer) 314{ 315 xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count); 316} 317 318/* Return the xfer's ID. */ 319static inline u32 wa_xfer_id(struct wa_xfer *xfer) 320{ 321 return xfer->id; 322} 323 324/* Return the xfer's ID in transport format (little endian). 
*/ 325static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer) 326{ 327 return cpu_to_le32(xfer->id); 328} 329 330/* 331 * If transfer is done, wrap it up and return true 332 * 333 * xfer->lock has to be locked 334 */ 335static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) 336{ 337 struct device *dev = &xfer->wa->usb_iface->dev; 338 unsigned result, cnt; 339 struct wa_seg *seg; 340 struct urb *urb = xfer->urb; 341 unsigned found_short = 0; 342 343 result = xfer->segs_done == xfer->segs_submitted; 344 if (result == 0) 345 goto out; 346 urb->actual_length = 0; 347 for (cnt = 0; cnt < xfer->segs; cnt++) { 348 seg = xfer->seg[cnt]; 349 switch (seg->status) { 350 case WA_SEG_DONE: 351 if (found_short && seg->result > 0) { 352 dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n", 353 xfer, wa_xfer_id(xfer), cnt, 354 seg->result); 355 urb->status = -EINVAL; 356 goto out; 357 } 358 urb->actual_length += seg->result; 359 if (!(usb_pipeisoc(xfer->urb->pipe)) 360 && seg->result < xfer->seg_size 361 && cnt != xfer->segs-1) 362 found_short = 1; 363 dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d " 364 "result %zu urb->actual_length %d\n", 365 xfer, wa_xfer_id(xfer), seg->index, found_short, 366 seg->result, urb->actual_length); 367 break; 368 case WA_SEG_ERROR: 369 xfer->result = seg->result; 370 dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zu(0x%08zX)\n", 371 xfer, wa_xfer_id(xfer), seg->index, seg->result, 372 seg->result); 373 goto out; 374 case WA_SEG_ABORTED: 375 xfer->result = seg->result; 376 dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zu(0x%08zX)\n", 377 xfer, wa_xfer_id(xfer), seg->index, seg->result, 378 seg->result); 379 goto out; 380 default: 381 dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n", 382 xfer, wa_xfer_id(xfer), cnt, seg->status); 383 xfer->result = -EINVAL; 384 goto out; 385 } 386 } 387 xfer->result = 0; 388out: 389 return result; 390} 391 392/* 393 * Search for a transfer list ID on the HCD's URB list 394 * 395 * For 32 bit architectures, we use the pointer itself; for 64 bits, a 396 * 32-bit hash of the pointer. 397 * 398 * @returns NULL if not found. 399 */ 400static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id) 401{ 402 unsigned long flags; 403 struct wa_xfer *xfer_itr; 404 spin_lock_irqsave(&wa->xfer_list_lock, flags); 405 list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) { 406 if (id == xfer_itr->id) { 407 wa_xfer_get(xfer_itr); 408 goto out; 409 } 410 } 411 xfer_itr = NULL; 412out: 413 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); 414 return xfer_itr; 415} 416 417struct wa_xfer_abort_buffer { 418 struct urb urb; 419 struct wa_xfer_abort cmd; 420}; 421 422static void __wa_xfer_abort_cb(struct urb *urb) 423{ 424 struct wa_xfer_abort_buffer *b = urb->context; 425 usb_put_urb(&b->urb); 426} 427 428/* 429 * Aborts an ongoing transaction 430 * 431 * Assumes the transfer is referenced and locked and in a submitted 432 * state (mainly that there is an endpoint/rpipe assigned). 433 * 434 * The callback (see above) does nothing but freeing up the data by 435 * putting the URB. Because the URB is allocated at the head of the 436 * struct, the whole space we allocated is kfreed. 
* 437 */ 438static int __wa_xfer_abort(struct wa_xfer *xfer) 439{ 440 int result = -ENOMEM; 441 struct device *dev = &xfer->wa->usb_iface->dev; 442 struct wa_xfer_abort_buffer *b; 443 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 444 445 b = kmalloc(sizeof(*b), GFP_ATOMIC); 446 if (b == NULL) 447 goto error_kmalloc; 448 b->cmd.bLength = sizeof(b->cmd); 449 b->cmd.bRequestType = WA_XFER_ABORT; 450 b->cmd.wRPipe = rpipe->descr.wRPipeIndex; 451 b->cmd.dwTransferID = wa_xfer_id_le32(xfer); 452 453 usb_init_urb(&b->urb); 454 usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev, 455 usb_sndbulkpipe(xfer->wa->usb_dev, 456 xfer->wa->dto_epd->bEndpointAddress), 457 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b); 458 result = usb_submit_urb(&b->urb, GFP_ATOMIC); 459 if (result < 0) 460 goto error_submit; 461 return result; /* callback frees! */ 462 463 464error_submit: 465 if (printk_ratelimit()) 466 dev_err(dev, "xfer %p: Can't submit abort request: %d\n", 467 xfer, result); 468 kfree(b); 469error_kmalloc: 470 return result; 471 472} 473 474/* 475 * Calculate the number of isoc frames starting from isoc_frame_offset 476 * that will fit a in transfer segment. 477 */ 478static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer, 479 int isoc_frame_offset, int *total_size) 480{ 481 int segment_size = 0, frame_count = 0; 482 int index = isoc_frame_offset; 483 struct usb_iso_packet_descriptor *iso_frame_desc = 484 xfer->urb->iso_frame_desc; 485 486 while ((index < xfer->urb->number_of_packets) 487 && ((segment_size + iso_frame_desc[index].length) 488 <= xfer->seg_size)) { 489 /* 490 * For Alereon HWA devices, only include an isoc frame in an 491 * out segment if it is physically contiguous with the previous 492 * frame. This is required because those devices expect 493 * the isoc frames to be sent as a single USB transaction as 494 * opposed to one transaction per frame with standard HWA. 495 */ 496 if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) 497 && (xfer->is_inbound == 0) 498 && (index > isoc_frame_offset) 499 && ((iso_frame_desc[index - 1].offset + 500 iso_frame_desc[index - 1].length) != 501 iso_frame_desc[index].offset)) 502 break; 503 504 /* this frame fits. count it. */ 505 ++frame_count; 506 segment_size += iso_frame_desc[index].length; 507 508 /* move to the next isoc frame. */ 509 ++index; 510 } 511 512 *total_size = segment_size; 513 return frame_count; 514} 515 516/* 517 * 518 * @returns < 0 on error, transfer segment request size if ok 519 */ 520static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer, 521 enum wa_xfer_type *pxfer_type) 522{ 523 ssize_t result; 524 struct device *dev = &xfer->wa->usb_iface->dev; 525 size_t maxpktsize; 526 struct urb *urb = xfer->urb; 527 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 528 529 switch (rpipe->descr.bmAttribute & 0x3) { 530 case USB_ENDPOINT_XFER_CONTROL: 531 *pxfer_type = WA_XFER_TYPE_CTL; 532 result = sizeof(struct wa_xfer_ctl); 533 break; 534 case USB_ENDPOINT_XFER_INT: 535 case USB_ENDPOINT_XFER_BULK: 536 *pxfer_type = WA_XFER_TYPE_BI; 537 result = sizeof(struct wa_xfer_bi); 538 break; 539 case USB_ENDPOINT_XFER_ISOC: 540 *pxfer_type = WA_XFER_TYPE_ISO; 541 result = sizeof(struct wa_xfer_hwaiso); 542 break; 543 default: 544 /* never happens */ 545 BUG(); 546 result = -EINVAL; /* shut gcc up */ 547 } 548 xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0; 549 xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 
1 : 0; 550 551 maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize); 552 xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks) 553 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1); 554 /* Compute the segment size and make sure it is a multiple of 555 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of 556 * a check (FIXME) */ 557 if (xfer->seg_size < maxpktsize) { 558 dev_err(dev, 559 "HW BUG? seg_size %zu smaller than maxpktsize %zu\n", 560 xfer->seg_size, maxpktsize); 561 result = -EINVAL; 562 goto error; 563 } 564 xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize; 565 if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) { 566 int index = 0; 567 568 xfer->segs = 0; 569 /* 570 * loop over urb->number_of_packets to determine how many 571 * xfer segments will be needed to send the isoc frames. 572 */ 573 while (index < urb->number_of_packets) { 574 int seg_size; /* don't care. */ 575 index += __wa_seg_calculate_isoc_frame_count(xfer, 576 index, &seg_size); 577 ++xfer->segs; 578 } 579 } else { 580 xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, 581 xfer->seg_size); 582 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) 583 xfer->segs = 1; 584 } 585 586 if (xfer->segs > WA_SEGS_MAX) { 587 dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n", 588 (urb->transfer_buffer_length/xfer->seg_size), 589 WA_SEGS_MAX); 590 result = -EINVAL; 591 goto error; 592 } 593error: 594 return result; 595} 596 597static void __wa_setup_isoc_packet_descr( 598 struct wa_xfer_packet_info_hwaiso *packet_desc, 599 struct wa_xfer *xfer, 600 struct wa_seg *seg) { 601 struct usb_iso_packet_descriptor *iso_frame_desc = 602 xfer->urb->iso_frame_desc; 603 int frame_index; 604 605 /* populate isoc packet descriptor. */ 606 packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO; 607 packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) + 608 (sizeof(packet_desc->PacketLength[0]) * 609 seg->isoc_frame_count)); 610 for (frame_index = 0; frame_index < seg->isoc_frame_count; 611 ++frame_index) { 612 int offset_index = frame_index + seg->isoc_frame_offset; 613 packet_desc->PacketLength[frame_index] = 614 cpu_to_le16(iso_frame_desc[offset_index].length); 615 } 616} 617 618 619/* Fill in the common request header and xfer-type specific data. */ 620static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, 621 struct wa_xfer_hdr *xfer_hdr0, 622 enum wa_xfer_type xfer_type, 623 size_t xfer_hdr_size) 624{ 625 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 626 struct wa_seg *seg = xfer->seg[0]; 627 628 xfer_hdr0 = &seg->xfer_hdr; 629 xfer_hdr0->bLength = xfer_hdr_size; 630 xfer_hdr0->bRequestType = xfer_type; 631 xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex; 632 xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer); 633 xfer_hdr0->bTransferSegment = 0; 634 switch (xfer_type) { 635 case WA_XFER_TYPE_CTL: { 636 struct wa_xfer_ctl *xfer_ctl = 637 container_of(xfer_hdr0, struct wa_xfer_ctl, hdr); 638 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0; 639 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet, 640 sizeof(xfer_ctl->baSetupData)); 641 break; 642 } 643 case WA_XFER_TYPE_BI: 644 break; 645 case WA_XFER_TYPE_ISO: { 646 struct wa_xfer_hwaiso *xfer_iso = 647 container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr); 648 struct wa_xfer_packet_info_hwaiso *packet_desc = 649 ((void *)xfer_iso) + xfer_hdr_size; 650 651 /* populate the isoc section of the transfer request. */ 652 xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count); 653 /* populate isoc packet descriptor. 
*/ 654 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); 655 break; 656 } 657 default: 658 BUG(); 659 }; 660} 661 662/* 663 * Callback for the OUT data phase of the segment request 664 * 665 * Check wa_seg_tr_cb(); most comments also apply here because this 666 * function does almost the same thing and they work closely 667 * together. 668 * 669 * If the seg request has failed but this DTO phase has succeeded, 670 * wa_seg_tr_cb() has already failed the segment and moved the 671 * status to WA_SEG_ERROR, so this will go through 'case 0' and 672 * effectively do nothing. 673 */ 674static void wa_seg_dto_cb(struct urb *urb) 675{ 676 struct wa_seg *seg = urb->context; 677 struct wa_xfer *xfer = seg->xfer; 678 struct wahc *wa; 679 struct device *dev; 680 struct wa_rpipe *rpipe; 681 unsigned long flags; 682 unsigned rpipe_ready = 0; 683 int data_send_done = 1, release_dto = 0, holding_dto = 0; 684 u8 done = 0; 685 int result; 686 687 /* free the sg if it was used. */ 688 kfree(urb->sg); 689 urb->sg = NULL; 690 691 spin_lock_irqsave(&xfer->lock, flags); 692 wa = xfer->wa; 693 dev = &wa->usb_iface->dev; 694 if (usb_pipeisoc(xfer->urb->pipe)) { 695 /* Alereon HWA sends all isoc frames in a single transfer. */ 696 if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) 697 seg->isoc_frame_index += seg->isoc_frame_count; 698 else 699 seg->isoc_frame_index += 1; 700 if (seg->isoc_frame_index < seg->isoc_frame_count) { 701 data_send_done = 0; 702 holding_dto = 1; /* checked in error cases. */ 703 /* 704 * if this is the last isoc frame of the segment, we 705 * can release DTO after sending this frame. 706 */ 707 if ((seg->isoc_frame_index + 1) >= 708 seg->isoc_frame_count) 709 release_dto = 1; 710 } 711 dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n", 712 wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, 713 holding_dto, release_dto); 714 } 715 spin_unlock_irqrestore(&xfer->lock, flags); 716 717 switch (urb->status) { 718 case 0: 719 spin_lock_irqsave(&xfer->lock, flags); 720 seg->result += urb->actual_length; 721 if (data_send_done) { 722 dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n", 723 wa_xfer_id(xfer), seg->index, seg->result); 724 if (seg->status < WA_SEG_PENDING) 725 seg->status = WA_SEG_PENDING; 726 } else { 727 /* should only hit this for isoc xfers. */ 728 /* 729 * Populate the dto URB with the next isoc frame buffer, 730 * send the URB and release DTO if we no longer need it. 731 */ 732 __wa_populate_dto_urb_isoc(xfer, seg, 733 seg->isoc_frame_offset + seg->isoc_frame_index); 734 735 /* resubmit the URB with the next isoc frame. */ 736 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); 737 if (result < 0) { 738 dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n", 739 wa_xfer_id(xfer), seg->index, result); 740 spin_unlock_irqrestore(&xfer->lock, flags); 741 goto error_dto_submit; 742 } 743 } 744 spin_unlock_irqrestore(&xfer->lock, flags); 745 if (release_dto) { 746 __wa_dto_put(wa); 747 wa_check_for_delayed_rpipes(wa); 748 } 749 break; 750 case -ECONNRESET: /* URB unlinked; no need to do anything */ 751 case -ENOENT: /* as it was done by the who unlinked us */ 752 if (holding_dto) { 753 __wa_dto_put(wa); 754 wa_check_for_delayed_rpipes(wa); 755 } 756 break; 757 default: /* Other errors ... 
*/ 758 dev_err(dev, "xfer 0x%08X#%u: data out error %d\n", 759 wa_xfer_id(xfer), seg->index, urb->status); 760 goto error_default; 761 } 762 763 return; 764 765error_dto_submit: 766error_default: 767 spin_lock_irqsave(&xfer->lock, flags); 768 rpipe = xfer->ep->hcpriv; 769 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 770 EDC_ERROR_TIMEFRAME)){ 771 dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n"); 772 wa_reset_all(wa); 773 } 774 if (seg->status != WA_SEG_ERROR) { 775 seg->status = WA_SEG_ERROR; 776 seg->result = urb->status; 777 xfer->segs_done++; 778 __wa_xfer_abort(xfer); 779 rpipe_ready = rpipe_avail_inc(rpipe); 780 done = __wa_xfer_is_done(xfer); 781 } 782 spin_unlock_irqrestore(&xfer->lock, flags); 783 if (holding_dto) { 784 __wa_dto_put(wa); 785 wa_check_for_delayed_rpipes(wa); 786 } 787 if (done) 788 wa_xfer_completion(xfer); 789 if (rpipe_ready) 790 wa_xfer_delayed_run(rpipe); 791 792} 793 794/* 795 * Callback for the isoc packet descriptor phase of the segment request 796 * 797 * Check wa_seg_tr_cb(); most comments also apply here because this 798 * function does almost the same thing and they work closely 799 * together. 800 * 801 * If the seg request has failed but this phase has succeeded, 802 * wa_seg_tr_cb() has already failed the segment and moved the 803 * status to WA_SEG_ERROR, so this will go through 'case 0' and 804 * effectively do nothing. 805 */ 806static void wa_seg_iso_pack_desc_cb(struct urb *urb) 807{ 808 struct wa_seg *seg = urb->context; 809 struct wa_xfer *xfer = seg->xfer; 810 struct wahc *wa; 811 struct device *dev; 812 struct wa_rpipe *rpipe; 813 unsigned long flags; 814 unsigned rpipe_ready = 0; 815 u8 done = 0; 816 817 switch (urb->status) { 818 case 0: 819 spin_lock_irqsave(&xfer->lock, flags); 820 wa = xfer->wa; 821 dev = &wa->usb_iface->dev; 822 dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n", 823 wa_xfer_id(xfer), seg->index); 824 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) 825 seg->status = WA_SEG_PENDING; 826 spin_unlock_irqrestore(&xfer->lock, flags); 827 break; 828 case -ECONNRESET: /* URB unlinked; no need to do anything */ 829 case -ENOENT: /* as it was done by the who unlinked us */ 830 break; 831 default: /* Other errors ... */ 832 spin_lock_irqsave(&xfer->lock, flags); 833 wa = xfer->wa; 834 dev = &wa->usb_iface->dev; 835 rpipe = xfer->ep->hcpriv; 836 pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n", 837 wa_xfer_id(xfer), seg->index, urb->status); 838 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 839 EDC_ERROR_TIMEFRAME)){ 840 dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n"); 841 wa_reset_all(wa); 842 } 843 if (seg->status != WA_SEG_ERROR) { 844 usb_unlink_urb(seg->dto_urb); 845 seg->status = WA_SEG_ERROR; 846 seg->result = urb->status; 847 xfer->segs_done++; 848 __wa_xfer_abort(xfer); 849 rpipe_ready = rpipe_avail_inc(rpipe); 850 done = __wa_xfer_is_done(xfer); 851 } 852 spin_unlock_irqrestore(&xfer->lock, flags); 853 if (done) 854 wa_xfer_completion(xfer); 855 if (rpipe_ready) 856 wa_xfer_delayed_run(rpipe); 857 } 858} 859 860/* 861 * Callback for the segment request 862 * 863 * If successful transition state (unless already transitioned or 864 * outbound transfer); otherwise, take a note of the error, mark this 865 * segment done and try completion. 866 * 867 * Note we don't access until we are sure that the transfer hasn't 868 * been cancelled (ECONNRESET, ENOENT), which could mean that 869 * seg->xfer could be already gone. 
870 * 871 * We have to check before setting the status to WA_SEG_PENDING 872 * because sometimes the xfer result callback arrives before this 873 * callback (geeeeeeze), so it might happen that we are already in 874 * another state. As well, we don't set it if the transfer is not inbound, 875 * as in that case, wa_seg_dto_cb will do it when the OUT data phase 876 * finishes. 877 */ 878static void wa_seg_tr_cb(struct urb *urb) 879{ 880 struct wa_seg *seg = urb->context; 881 struct wa_xfer *xfer = seg->xfer; 882 struct wahc *wa; 883 struct device *dev; 884 struct wa_rpipe *rpipe; 885 unsigned long flags; 886 unsigned rpipe_ready; 887 u8 done = 0; 888 889 switch (urb->status) { 890 case 0: 891 spin_lock_irqsave(&xfer->lock, flags); 892 wa = xfer->wa; 893 dev = &wa->usb_iface->dev; 894 dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n", 895 xfer, wa_xfer_id(xfer), seg->index); 896 if (xfer->is_inbound && 897 seg->status < WA_SEG_PENDING && 898 !(usb_pipeisoc(xfer->urb->pipe))) 899 seg->status = WA_SEG_PENDING; 900 spin_unlock_irqrestore(&xfer->lock, flags); 901 break; 902 case -ECONNRESET: /* URB unlinked; no need to do anything */ 903 case -ENOENT: /* as it was done by the who unlinked us */ 904 break; 905 default: /* Other errors ... */ 906 spin_lock_irqsave(&xfer->lock, flags); 907 wa = xfer->wa; 908 dev = &wa->usb_iface->dev; 909 rpipe = xfer->ep->hcpriv; 910 if (printk_ratelimit()) 911 dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n", 912 xfer, wa_xfer_id(xfer), seg->index, 913 urb->status); 914 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 915 EDC_ERROR_TIMEFRAME)){ 916 dev_err(dev, "DTO: URB max acceptable errors " 917 "exceeded, resetting device\n"); 918 wa_reset_all(wa); 919 } 920 usb_unlink_urb(seg->isoc_pack_desc_urb); 921 usb_unlink_urb(seg->dto_urb); 922 seg->status = WA_SEG_ERROR; 923 seg->result = urb->status; 924 xfer->segs_done++; 925 __wa_xfer_abort(xfer); 926 rpipe_ready = rpipe_avail_inc(rpipe); 927 done = __wa_xfer_is_done(xfer); 928 spin_unlock_irqrestore(&xfer->lock, flags); 929 if (done) 930 wa_xfer_completion(xfer); 931 if (rpipe_ready) 932 wa_xfer_delayed_run(rpipe); 933 } 934} 935 936/* 937 * Allocate an SG list to store bytes_to_transfer bytes and copy the 938 * subset of the in_sg that matches the buffer subset 939 * we are about to transfer. 940 */ 941static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg, 942 const unsigned int bytes_transferred, 943 const unsigned int bytes_to_transfer, unsigned int *out_num_sgs) 944{ 945 struct scatterlist *out_sg; 946 unsigned int bytes_processed = 0, offset_into_current_page_data = 0, 947 nents; 948 struct scatterlist *current_xfer_sg = in_sg; 949 struct scatterlist *current_seg_sg, *last_seg_sg; 950 951 /* skip previously transferred pages. */ 952 while ((current_xfer_sg) && 953 (bytes_processed < bytes_transferred)) { 954 bytes_processed += current_xfer_sg->length; 955 956 /* advance the sg if current segment starts on or past the 957 next page. */ 958 if (bytes_processed <= bytes_transferred) 959 current_xfer_sg = sg_next(current_xfer_sg); 960 } 961 962 /* the data for the current segment starts in current_xfer_sg. 963 calculate the offset. */ 964 if (bytes_processed > bytes_transferred) { 965 offset_into_current_page_data = current_xfer_sg->length - 966 (bytes_processed - bytes_transferred); 967 } 968 969 /* calculate the number of pages needed by this segment. 
*/ 970 nents = DIV_ROUND_UP((bytes_to_transfer + 971 offset_into_current_page_data + 972 current_xfer_sg->offset), 973 PAGE_SIZE); 974 975 out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC); 976 if (out_sg) { 977 sg_init_table(out_sg, nents); 978 979 /* copy the portion of the incoming SG that correlates to the 980 * data to be transferred by this segment to the segment SG. */ 981 last_seg_sg = current_seg_sg = out_sg; 982 bytes_processed = 0; 983 984 /* reset nents and calculate the actual number of sg entries 985 needed. */ 986 nents = 0; 987 while ((bytes_processed < bytes_to_transfer) && 988 current_seg_sg && current_xfer_sg) { 989 unsigned int page_len = min((current_xfer_sg->length - 990 offset_into_current_page_data), 991 (bytes_to_transfer - bytes_processed)); 992 993 sg_set_page(current_seg_sg, sg_page(current_xfer_sg), 994 page_len, 995 current_xfer_sg->offset + 996 offset_into_current_page_data); 997 998 bytes_processed += page_len; 999 1000 last_seg_sg = current_seg_sg; 1001 current_seg_sg = sg_next(current_seg_sg); 1002 current_xfer_sg = sg_next(current_xfer_sg); 1003 1004 /* only the first page may require additional offset. */ 1005 offset_into_current_page_data = 0; 1006 nents++; 1007 } 1008 1009 /* update num_sgs and terminate the list since we may have 1010 * concatenated pages. */ 1011 sg_mark_end(last_seg_sg); 1012 *out_num_sgs = nents; 1013 } 1014 1015 return out_sg; 1016} 1017 1018/* 1019 * Populate DMA buffer info for the isoc dto urb. 1020 */ 1021static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer, 1022 struct wa_seg *seg, int curr_iso_frame) 1023{ 1024 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1025 seg->dto_urb->sg = NULL; 1026 seg->dto_urb->num_sgs = 0; 1027 /* dto urb buffer address pulled from iso_frame_desc. */ 1028 seg->dto_urb->transfer_dma = xfer->urb->transfer_dma + 1029 xfer->urb->iso_frame_desc[curr_iso_frame].offset; 1030 /* The Alereon HWA sends a single URB with all isoc segs. */ 1031 if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) 1032 seg->dto_urb->transfer_buffer_length = seg->isoc_size; 1033 else 1034 seg->dto_urb->transfer_buffer_length = 1035 xfer->urb->iso_frame_desc[curr_iso_frame].length; 1036} 1037 1038/* 1039 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb. 1040 */ 1041static int __wa_populate_dto_urb(struct wa_xfer *xfer, 1042 struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size) 1043{ 1044 int result = 0; 1045 1046 if (xfer->is_dma) { 1047 seg->dto_urb->transfer_dma = 1048 xfer->urb->transfer_dma + buf_itr_offset; 1049 seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1050 seg->dto_urb->sg = NULL; 1051 seg->dto_urb->num_sgs = 0; 1052 } else { 1053 /* do buffer or SG processing. */ 1054 seg->dto_urb->transfer_flags &= 1055 ~URB_NO_TRANSFER_DMA_MAP; 1056 /* this should always be 0 before a resubmit. */ 1057 seg->dto_urb->num_mapped_sgs = 0; 1058 1059 if (xfer->urb->transfer_buffer) { 1060 seg->dto_urb->transfer_buffer = 1061 xfer->urb->transfer_buffer + 1062 buf_itr_offset; 1063 seg->dto_urb->sg = NULL; 1064 seg->dto_urb->num_sgs = 0; 1065 } else { 1066 seg->dto_urb->transfer_buffer = NULL; 1067 1068 /* 1069 * allocate an SG list to store seg_size bytes 1070 * and copy the subset of the xfer->urb->sg that 1071 * matches the buffer subset we are about to 1072 * read. 
1073 */ 1074 seg->dto_urb->sg = wa_xfer_create_subset_sg( 1075 xfer->urb->sg, 1076 buf_itr_offset, buf_itr_size, 1077 &(seg->dto_urb->num_sgs)); 1078 if (!(seg->dto_urb->sg)) 1079 result = -ENOMEM; 1080 } 1081 } 1082 seg->dto_urb->transfer_buffer_length = buf_itr_size; 1083 1084 return result; 1085} 1086 1087/* 1088 * Allocate the segs array and initialize each of them 1089 * 1090 * The segments are freed by wa_xfer_destroy() when the xfer use count 1091 * drops to zero; however, because each segment is given the same life 1092 * cycle as the USB URB it contains, it is actually freed by 1093 * usb_put_urb() on the contained USB URB (twisted, eh?). 1094 */ 1095static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size) 1096{ 1097 int result, cnt, iso_frame_offset; 1098 size_t alloc_size = sizeof(*xfer->seg[0]) 1099 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size; 1100 struct usb_device *usb_dev = xfer->wa->usb_dev; 1101 const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd; 1102 struct wa_seg *seg; 1103 size_t buf_itr, buf_size, buf_itr_size; 1104 int isoc_frame_offset = 0; 1105 1106 result = -ENOMEM; 1107 xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC); 1108 if (xfer->seg == NULL) 1109 goto error_segs_kzalloc; 1110 buf_itr = 0; 1111 buf_size = xfer->urb->transfer_buffer_length; 1112 iso_frame_offset = 0; 1113 for (cnt = 0; cnt < xfer->segs; cnt++) { 1114 size_t iso_pkt_descr_size = 0; 1115 int seg_isoc_frame_count = 0, seg_isoc_size = 0; 1116 1117 /* 1118 * Adjust the size of the segment object to contain space for 1119 * the isoc packet descriptor buffer. 1120 */ 1121 if (usb_pipeisoc(xfer->urb->pipe)) { 1122 seg_isoc_frame_count = 1123 __wa_seg_calculate_isoc_frame_count(xfer, 1124 isoc_frame_offset, &seg_isoc_size); 1125 1126 iso_pkt_descr_size = 1127 sizeof(struct wa_xfer_packet_info_hwaiso) + 1128 (seg_isoc_frame_count * sizeof(__le16)); 1129 } 1130 seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size, 1131 GFP_ATOMIC); 1132 if (seg == NULL) 1133 goto error_seg_kmalloc; 1134 wa_seg_init(seg); 1135 seg->xfer = xfer; 1136 seg->index = cnt; 1137 usb_fill_bulk_urb(&seg->tr_urb, usb_dev, 1138 usb_sndbulkpipe(usb_dev, 1139 dto_epd->bEndpointAddress), 1140 &seg->xfer_hdr, xfer_hdr_size, 1141 wa_seg_tr_cb, seg); 1142 buf_itr_size = min(buf_size, xfer->seg_size); 1143 1144 if (usb_pipeisoc(xfer->urb->pipe)) { 1145 seg->isoc_frame_count = seg_isoc_frame_count; 1146 seg->isoc_frame_offset = isoc_frame_offset; 1147 seg->isoc_size = seg_isoc_size; 1148 /* iso packet descriptor. */ 1149 seg->isoc_pack_desc_urb = 1150 usb_alloc_urb(0, GFP_ATOMIC); 1151 if (seg->isoc_pack_desc_urb == NULL) 1152 goto error_iso_pack_desc_alloc; 1153 /* 1154 * The buffer for the isoc packet descriptor starts 1155 * after the transfer request header in the 1156 * segment object memory buffer. 1157 */ 1158 usb_fill_bulk_urb( 1159 seg->isoc_pack_desc_urb, usb_dev, 1160 usb_sndbulkpipe(usb_dev, 1161 dto_epd->bEndpointAddress), 1162 (void *)(&seg->xfer_hdr) + 1163 xfer_hdr_size, 1164 iso_pkt_descr_size, 1165 wa_seg_iso_pack_desc_cb, seg); 1166 1167 /* adjust starting frame offset for next seg. */ 1168 isoc_frame_offset += seg_isoc_frame_count; 1169 } 1170 1171 if (xfer->is_inbound == 0 && buf_size > 0) { 1172 /* outbound data. 
*/ 1173 seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC); 1174 if (seg->dto_urb == NULL) 1175 goto error_dto_alloc; 1176 usb_fill_bulk_urb( 1177 seg->dto_urb, usb_dev, 1178 usb_sndbulkpipe(usb_dev, 1179 dto_epd->bEndpointAddress), 1180 NULL, 0, wa_seg_dto_cb, seg); 1181 1182 if (usb_pipeisoc(xfer->urb->pipe)) { 1183 /* 1184 * Fill in the xfer buffer information for the 1185 * first isoc frame. Subsequent frames in this 1186 * segment will be filled in and sent from the 1187 * DTO completion routine, if needed. 1188 */ 1189 __wa_populate_dto_urb_isoc(xfer, seg, 1190 seg->isoc_frame_offset); 1191 } else { 1192 /* fill in the xfer buffer information. */ 1193 result = __wa_populate_dto_urb(xfer, seg, 1194 buf_itr, buf_itr_size); 1195 if (result < 0) 1196 goto error_seg_outbound_populate; 1197 1198 buf_itr += buf_itr_size; 1199 buf_size -= buf_itr_size; 1200 } 1201 } 1202 seg->status = WA_SEG_READY; 1203 } 1204 return 0; 1205 1206 /* 1207 * Free the memory for the current segment which failed to init. 1208 * Use the fact that cnt is left at were it failed. The remaining 1209 * segments will be cleaned up by wa_xfer_destroy. 1210 */ 1211error_seg_outbound_populate: 1212 usb_free_urb(xfer->seg[cnt]->dto_urb); 1213error_dto_alloc: 1214 usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb); 1215error_iso_pack_desc_alloc: 1216 kfree(xfer->seg[cnt]); 1217 xfer->seg[cnt] = NULL; 1218error_seg_kmalloc: 1219error_segs_kzalloc: 1220 return result; 1221} 1222 1223/* 1224 * Allocates all the stuff needed to submit a transfer 1225 * 1226 * Breaks the whole data buffer in a list of segments, each one has a 1227 * structure allocated to it and linked in xfer->seg[index] 1228 * 1229 * FIXME: merge setup_segs() and the last part of this function, no 1230 * need to do two for loops when we could run everything in a 1231 * single one 1232 */ 1233static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) 1234{ 1235 int result; 1236 struct device *dev = &xfer->wa->usb_iface->dev; 1237 enum wa_xfer_type xfer_type = 0; /* shut up GCC */ 1238 size_t xfer_hdr_size, cnt, transfer_size; 1239 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; 1240 1241 result = __wa_xfer_setup_sizes(xfer, &xfer_type); 1242 if (result < 0) 1243 goto error_setup_sizes; 1244 xfer_hdr_size = result; 1245 result = __wa_xfer_setup_segs(xfer, xfer_hdr_size); 1246 if (result < 0) { 1247 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n", 1248 xfer, xfer->segs, result); 1249 goto error_setup_segs; 1250 } 1251 /* Fill the first header */ 1252 xfer_hdr0 = &xfer->seg[0]->xfer_hdr; 1253 wa_xfer_id_init(xfer); 1254 __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size); 1255 1256 /* Fill remaining headers */ 1257 xfer_hdr = xfer_hdr0; 1258 if (xfer_type == WA_XFER_TYPE_ISO) { 1259 xfer_hdr0->dwTransferLength = 1260 cpu_to_le32(xfer->seg[0]->isoc_size); 1261 for (cnt = 1; cnt < xfer->segs; cnt++) { 1262 struct wa_xfer_packet_info_hwaiso *packet_desc; 1263 struct wa_seg *seg = xfer->seg[cnt]; 1264 struct wa_xfer_hwaiso *xfer_iso; 1265 1266 xfer_hdr = &seg->xfer_hdr; 1267 xfer_iso = container_of(xfer_hdr, 1268 struct wa_xfer_hwaiso, hdr); 1269 packet_desc = ((void *)xfer_hdr) + xfer_hdr_size; 1270 /* 1271 * Copy values from the 0th header. Segment specific 1272 * values are set below. 
1273 */ 1274 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); 1275 xfer_hdr->bTransferSegment = cnt; 1276 xfer_hdr->dwTransferLength = 1277 cpu_to_le32(seg->isoc_size); 1278 xfer_iso->dwNumOfPackets = 1279 cpu_to_le32(seg->isoc_frame_count); 1280 __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); 1281 seg->status = WA_SEG_READY; 1282 } 1283 } else { 1284 transfer_size = urb->transfer_buffer_length; 1285 xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ? 1286 cpu_to_le32(xfer->seg_size) : 1287 cpu_to_le32(transfer_size); 1288 transfer_size -= xfer->seg_size; 1289 for (cnt = 1; cnt < xfer->segs; cnt++) { 1290 xfer_hdr = &xfer->seg[cnt]->xfer_hdr; 1291 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size); 1292 xfer_hdr->bTransferSegment = cnt; 1293 xfer_hdr->dwTransferLength = 1294 transfer_size > xfer->seg_size ? 1295 cpu_to_le32(xfer->seg_size) 1296 : cpu_to_le32(transfer_size); 1297 xfer->seg[cnt]->status = WA_SEG_READY; 1298 transfer_size -= xfer->seg_size; 1299 } 1300 } 1301 xfer_hdr->bTransferSegment |= 0x80; /* this is the last segment */ 1302 result = 0; 1303error_setup_segs: 1304error_setup_sizes: 1305 return result; 1306} 1307 1308/* 1309 * 1310 * 1311 * rpipe->seg_lock is held! 1312 */ 1313static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, 1314 struct wa_seg *seg, int *dto_done) 1315{ 1316 int result; 1317 1318 /* default to done unless we encounter a multi-frame isoc segment. */ 1319 *dto_done = 1; 1320 1321 /* submit the transfer request. */ 1322 result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC); 1323 if (result < 0) { 1324 pr_err("%s: xfer %p#%u: REQ submit failed: %d\n", 1325 __func__, xfer, seg->index, result); 1326 goto error_seg_submit; 1327 } 1328 /* submit the isoc packet descriptor if present. */ 1329 if (seg->isoc_pack_desc_urb) { 1330 result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC); 1331 seg->isoc_frame_index = 0; 1332 if (result < 0) { 1333 pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n", 1334 __func__, xfer, seg->index, result); 1335 goto error_iso_pack_desc_submit; 1336 } 1337 } 1338 /* submit the out data if this is an out request. */ 1339 if (seg->dto_urb) { 1340 struct wahc *wa = xfer->wa; 1341 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); 1342 if (result < 0) { 1343 pr_err("%s: xfer %p#%u: DTO submit failed: %d\n", 1344 __func__, xfer, seg->index, result); 1345 goto error_dto_submit; 1346 } 1347 /* 1348 * If this segment contains more than one isoc frame, hold 1349 * onto the dto resource until we send all frames. 1350 * Only applies to non-Alereon devices. 1351 */ 1352 if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0) 1353 && (seg->isoc_frame_count > 1)) 1354 *dto_done = 0; 1355 } 1356 seg->status = WA_SEG_SUBMITTED; 1357 rpipe_avail_dec(rpipe); 1358 return 0; 1359 1360error_dto_submit: 1361 usb_unlink_urb(seg->isoc_pack_desc_urb); 1362error_iso_pack_desc_submit: 1363 usb_unlink_urb(&seg->tr_urb); 1364error_seg_submit: 1365 seg->status = WA_SEG_ERROR; 1366 seg->result = result; 1367 *dto_done = 1; 1368 return result; 1369} 1370 1371/* 1372 * Execute more queued request segments until the maximum concurrent allowed. 1373 * Return true if the DTO resource was acquired and released. 1374 * 1375 * The ugly unlock/lock sequence on the error path is needed as the 1376 * xfer->lock normally nests the seg_lock and not viceversa. 
1377 */ 1378static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting) 1379{ 1380 int result, dto_acquired = 0, dto_done = 0; 1381 struct device *dev = &rpipe->wa->usb_iface->dev; 1382 struct wa_seg *seg; 1383 struct wa_xfer *xfer; 1384 unsigned long flags; 1385 1386 *dto_waiting = 0; 1387 1388 spin_lock_irqsave(&rpipe->seg_lock, flags); 1389 while (atomic_read(&rpipe->segs_available) > 0 1390 && !list_empty(&rpipe->seg_list) 1391 && (dto_acquired = __wa_dto_try_get(rpipe->wa))) { 1392 seg = list_first_entry(&(rpipe->seg_list), struct wa_seg, 1393 list_node); 1394 list_del(&seg->list_node); 1395 xfer = seg->xfer; 1396 result = __wa_seg_submit(rpipe, xfer, seg, &dto_done); 1397 /* release the dto resource if this RPIPE is done with it. */ 1398 if (dto_done) 1399 __wa_dto_put(rpipe->wa); 1400 dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n", 1401 xfer, wa_xfer_id(xfer), seg->index, 1402 atomic_read(&rpipe->segs_available), result); 1403 if (unlikely(result < 0)) { 1404 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 1405 spin_lock_irqsave(&xfer->lock, flags); 1406 __wa_xfer_abort(xfer); 1407 xfer->segs_done++; 1408 spin_unlock_irqrestore(&xfer->lock, flags); 1409 spin_lock_irqsave(&rpipe->seg_lock, flags); 1410 } 1411 } 1412 /* 1413 * Mark this RPIPE as waiting if dto was not acquired, there are 1414 * delayed segs and no active transfers to wake us up later. 1415 */ 1416 if (!dto_acquired && !list_empty(&rpipe->seg_list) 1417 && (atomic_read(&rpipe->segs_available) == 1418 le16_to_cpu(rpipe->descr.wRequests))) 1419 *dto_waiting = 1; 1420 1421 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 1422 1423 return dto_done; 1424} 1425 1426static void wa_xfer_delayed_run(struct wa_rpipe *rpipe) 1427{ 1428 int dto_waiting; 1429 int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting); 1430 1431 /* 1432 * If this RPIPE is waiting on the DTO resource, add it to the tail of 1433 * the waiting list. 1434 * Otherwise, if the WA DTO resource was acquired and released by 1435 * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire 1436 * DTO and failed during that time. Check the delayed list and process 1437 * any waiters. Start searching from the next RPIPE index. 
1438 */ 1439 if (dto_waiting) 1440 wa_add_delayed_rpipe(rpipe->wa, rpipe); 1441 else if (dto_done) 1442 wa_check_for_delayed_rpipes(rpipe->wa); 1443} 1444 1445/* 1446 * 1447 * xfer->lock is taken 1448 * 1449 * On failure submitting we just stop submitting and return error; 1450 * wa_urb_enqueue_b() will execute the completion path 1451 */ 1452static int __wa_xfer_submit(struct wa_xfer *xfer) 1453{ 1454 int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0; 1455 struct wahc *wa = xfer->wa; 1456 struct device *dev = &wa->usb_iface->dev; 1457 unsigned cnt; 1458 struct wa_seg *seg; 1459 unsigned long flags; 1460 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 1461 size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests); 1462 u8 available; 1463 u8 empty; 1464 1465 spin_lock_irqsave(&wa->xfer_list_lock, flags); 1466 list_add_tail(&xfer->list_node, &wa->xfer_list); 1467 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); 1468 1469 BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests); 1470 result = 0; 1471 spin_lock_irqsave(&rpipe->seg_lock, flags); 1472 for (cnt = 0; cnt < xfer->segs; cnt++) { 1473 int delay_seg = 1; 1474 1475 available = atomic_read(&rpipe->segs_available); 1476 empty = list_empty(&rpipe->seg_list); 1477 seg = xfer->seg[cnt]; 1478 if (available && empty) { 1479 /* 1480 * Only attempt to acquire DTO if we have a segment 1481 * to send. 1482 */ 1483 dto_acquired = __wa_dto_try_get(rpipe->wa); 1484 if (dto_acquired) { 1485 delay_seg = 0; 1486 result = __wa_seg_submit(rpipe, xfer, seg, 1487 &dto_done); 1488 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n", 1489 xfer, wa_xfer_id(xfer), cnt, available, 1490 empty); 1491 if (dto_done) 1492 __wa_dto_put(rpipe->wa); 1493 1494 if (result < 0) { 1495 __wa_xfer_abort(xfer); 1496 goto error_seg_submit; 1497 } 1498 } 1499 } 1500 1501 if (delay_seg) { 1502 dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n", 1503 xfer, wa_xfer_id(xfer), cnt, available, empty); 1504 seg->status = WA_SEG_DELAYED; 1505 list_add_tail(&seg->list_node, &rpipe->seg_list); 1506 } 1507 xfer->segs_submitted++; 1508 } 1509error_seg_submit: 1510 /* 1511 * Mark this RPIPE as waiting if dto was not acquired, there are 1512 * delayed segs and no active transfers to wake us up later. 1513 */ 1514 if (!dto_acquired && !list_empty(&rpipe->seg_list) 1515 && (atomic_read(&rpipe->segs_available) == 1516 le16_to_cpu(rpipe->descr.wRequests))) 1517 dto_waiting = 1; 1518 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 1519 1520 if (dto_waiting) 1521 wa_add_delayed_rpipe(rpipe->wa, rpipe); 1522 else if (dto_done) 1523 wa_check_for_delayed_rpipes(rpipe->wa); 1524 1525 return result; 1526} 1527 1528/* 1529 * Second part of a URB/transfer enqueuement 1530 * 1531 * Assumes this comes from wa_urb_enqueue() [maybe through 1532 * wa_urb_enqueue_run()]. At this point: 1533 * 1534 * xfer->wa filled and refcounted 1535 * xfer->ep filled with rpipe refcounted if 1536 * delayed == 0 1537 * xfer->urb filled and refcounted (this is the case when called 1538 * from wa_urb_enqueue() as we come from usb_submit_urb() 1539 * and when called by wa_urb_enqueue_run(), as we took an 1540 * extra ref dropped by _run() after we return). 1541 * xfer->gfp filled 1542 * 1543 * If we fail at __wa_xfer_submit(), then we just check if we are done 1544 * and if so, we run the completion procedure. However, if we are not 1545 * yet done, we do nothing and wait for the completion handlers from 1546 * the submitted URBs or from the xfer-result path to kick in. 
If xfer 1547 * result never kicks in, the xfer will timeout from the USB code and 1548 * dequeue() will be called. 1549 */ 1550static int wa_urb_enqueue_b(struct wa_xfer *xfer) 1551{ 1552 int result; 1553 unsigned long flags; 1554 struct urb *urb = xfer->urb; 1555 struct wahc *wa = xfer->wa; 1556 struct wusbhc *wusbhc = wa->wusb; 1557 struct wusb_dev *wusb_dev; 1558 unsigned done; 1559 1560 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); 1561 if (result < 0) { 1562 pr_err("%s: error_rpipe_get\n", __func__); 1563 goto error_rpipe_get; 1564 } 1565 result = -ENODEV; 1566 /* FIXME: segmentation broken -- kills DWA */ 1567 mutex_lock(&wusbhc->mutex); /* get a WUSB dev */ 1568 if (urb->dev == NULL) { 1569 mutex_unlock(&wusbhc->mutex); 1570 pr_err("%s: error usb dev gone\n", __func__); 1571 goto error_dev_gone; 1572 } 1573 wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); 1574 if (wusb_dev == NULL) { 1575 mutex_unlock(&wusbhc->mutex); 1576 dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n", 1577 __func__); 1578 goto error_dev_gone; 1579 } 1580 mutex_unlock(&wusbhc->mutex); 1581 1582 spin_lock_irqsave(&xfer->lock, flags); 1583 xfer->wusb_dev = wusb_dev; 1584 result = urb->status; 1585 if (urb->status != -EINPROGRESS) { 1586 dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__); 1587 goto error_dequeued; 1588 } 1589 1590 result = __wa_xfer_setup(xfer, urb); 1591 if (result < 0) { 1592 dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__); 1593 goto error_xfer_setup; 1594 } 1595 result = __wa_xfer_submit(xfer); 1596 if (result < 0) { 1597 dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__); 1598 goto error_xfer_submit; 1599 } 1600 spin_unlock_irqrestore(&xfer->lock, flags); 1601 return 0; 1602 1603 /* 1604 * this is basically wa_xfer_completion() broken up wa_xfer_giveback() 1605 * does a wa_xfer_put() that will call wa_xfer_destroy() and undo 1606 * setup(). 1607 */ 1608error_xfer_setup: 1609error_dequeued: 1610 spin_unlock_irqrestore(&xfer->lock, flags); 1611 /* FIXME: segmentation broken, kills DWA */ 1612 if (wusb_dev) 1613 wusb_dev_put(wusb_dev); 1614error_dev_gone: 1615 rpipe_put(xfer->ep->hcpriv); 1616error_rpipe_get: 1617 xfer->result = result; 1618 return result; 1619 1620error_xfer_submit: 1621 done = __wa_xfer_is_done(xfer); 1622 xfer->result = result; 1623 spin_unlock_irqrestore(&xfer->lock, flags); 1624 if (done) 1625 wa_xfer_completion(xfer); 1626 /* return success since the completion routine will run. */ 1627 return 0; 1628} 1629 1630/* 1631 * Execute the delayed transfers in the Wire Adapter @wa 1632 * 1633 * We need to be careful here, as dequeue() could be called in the 1634 * middle. That's why we do the whole thing under the 1635 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock 1636 * and then checks the list -- so as we would be acquiring in inverse 1637 * order, we move the delayed list to a separate list while locked and then 1638 * submit them without the list lock held. 
1639 */ 1640void wa_urb_enqueue_run(struct work_struct *ws) 1641{ 1642 struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work); 1643 struct wa_xfer *xfer, *next; 1644 struct urb *urb; 1645 LIST_HEAD(tmp_list); 1646 1647 /* Create a copy of the wa->xfer_delayed_list while holding the lock */ 1648 spin_lock_irq(&wa->xfer_list_lock); 1649 list_cut_position(&tmp_list, &wa->xfer_delayed_list, 1650 wa->xfer_delayed_list.prev); 1651 spin_unlock_irq(&wa->xfer_list_lock); 1652 1653 /* 1654 * enqueue from temp list without list lock held since wa_urb_enqueue_b 1655 * can take xfer->lock as well as lock mutexes. 1656 */ 1657 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { 1658 list_del_init(&xfer->list_node); 1659 1660 urb = xfer->urb; 1661 if (wa_urb_enqueue_b(xfer) < 0) 1662 wa_xfer_giveback(xfer); 1663 usb_put_urb(urb); /* taken when queuing */ 1664 } 1665} 1666EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); 1667 1668/* 1669 * Process the errored transfers on the Wire Adapter outside of interrupt. 1670 */ 1671void wa_process_errored_transfers_run(struct work_struct *ws) 1672{ 1673 struct wahc *wa = container_of(ws, struct wahc, xfer_error_work); 1674 struct wa_xfer *xfer, *next; 1675 LIST_HEAD(tmp_list); 1676 1677 pr_info("%s: Run delayed STALL processing.\n", __func__); 1678 1679 /* Create a copy of the wa->xfer_errored_list while holding the lock */ 1680 spin_lock_irq(&wa->xfer_list_lock); 1681 list_cut_position(&tmp_list, &wa->xfer_errored_list, 1682 wa->xfer_errored_list.prev); 1683 spin_unlock_irq(&wa->xfer_list_lock); 1684 1685 /* 1686 * run rpipe_clear_feature_stalled from temp list without list lock 1687 * held. 1688 */ 1689 list_for_each_entry_safe(xfer, next, &tmp_list, list_node) { 1690 struct usb_host_endpoint *ep; 1691 unsigned long flags; 1692 struct wa_rpipe *rpipe; 1693 1694 spin_lock_irqsave(&xfer->lock, flags); 1695 ep = xfer->ep; 1696 rpipe = ep->hcpriv; 1697 spin_unlock_irqrestore(&xfer->lock, flags); 1698 1699 /* clear RPIPE feature stalled without holding a lock. */ 1700 rpipe_clear_feature_stalled(wa, ep); 1701 1702 /* complete the xfer. This removes it from the tmp list. */ 1703 wa_xfer_completion(xfer); 1704 1705 /* check for work. */ 1706 wa_xfer_delayed_run(rpipe); 1707 } 1708} 1709EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run); 1710 1711/* 1712 * Submit a transfer to the Wire Adapter in a delayed way 1713 * 1714 * The process of enqueuing involves possible sleeps() [see 1715 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are 1716 * in an atomic section, we defer the enqueue_b() call--else we call direct. 1717 * 1718 * @urb: We own a reference to it done by the HCI Linux USB stack that 1719 * will be given up by calling usb_hcd_giveback_urb() or by 1720 * returning error from this function -> ergo we don't have to 1721 * refcount it. 1722 */ 1723int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, 1724 struct urb *urb, gfp_t gfp) 1725{ 1726 int result; 1727 struct device *dev = &wa->usb_iface->dev; 1728 struct wa_xfer *xfer; 1729 unsigned long my_flags; 1730 unsigned cant_sleep = irqs_disabled() | in_atomic(); 1731 1732 if ((urb->transfer_buffer == NULL) 1733 && (urb->sg == NULL) 1734 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 1735 && urb->transfer_buffer_length != 0) { 1736 dev_err(dev, "BUG? 
urb %p: NULL xfer buffer & NODMA\n", urb); 1737 dump_stack(); 1738 } 1739 1740 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); 1741 result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb); 1742 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1743 if (result < 0) 1744 goto error_link_urb; 1745 1746 result = -ENOMEM; 1747 xfer = kzalloc(sizeof(*xfer), gfp); 1748 if (xfer == NULL) 1749 goto error_kmalloc; 1750 1751 result = -ENOENT; 1752 if (urb->status != -EINPROGRESS) /* cancelled */ 1753 goto error_dequeued; /* before starting? */ 1754 wa_xfer_init(xfer); 1755 xfer->wa = wa_get(wa); 1756 xfer->urb = urb; 1757 xfer->gfp = gfp; 1758 xfer->ep = ep; 1759 urb->hcpriv = xfer; 1760 1761 dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", 1762 xfer, urb, urb->pipe, urb->transfer_buffer_length, 1763 urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", 1764 urb->pipe & USB_DIR_IN ? "inbound" : "outbound", 1765 cant_sleep ? "deferred" : "inline"); 1766 1767 if (cant_sleep) { 1768 usb_get_urb(urb); 1769 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); 1770 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list); 1771 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1772 queue_work(wusbd, &wa->xfer_enqueue_work); 1773 } else { 1774 result = wa_urb_enqueue_b(xfer); 1775 if (result < 0) { 1776 /* 1777 * URB submit/enqueue failed. Clean up, return an 1778 * error and do not run the callback. This avoids 1779 * an infinite submit/complete loop. 1780 */ 1781 dev_err(dev, "%s: URB enqueue failed: %d\n", 1782 __func__, result); 1783 wa_put(xfer->wa); 1784 wa_xfer_put(xfer); 1785 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); 1786 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); 1787 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1788 return result; 1789 } 1790 } 1791 return 0; 1792 1793error_dequeued: 1794 kfree(xfer); 1795error_kmalloc: 1796 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); 1797 usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); 1798 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); 1799error_link_urb: 1800 return result; 1801} 1802EXPORT_SYMBOL_GPL(wa_urb_enqueue); 1803 1804/* 1805 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion 1806 * handler] is called. 1807 * 1808 * Until a transfer goes successfully through wa_urb_enqueue() it 1809 * needs to be dequeued with completion calling; when stuck in delayed 1810 * or before wa_xfer_setup() is called, we need to do completion. 1811 * 1812 * not setup If there is no hcpriv yet, that means that that enqueue 1813 * still had no time to set the xfer up. Because 1814 * urb->status should be other than -EINPROGRESS, 1815 * enqueue() will catch that and bail out. 1816 * 1817 * If the transfer has gone through setup, we just need to clean it 1818 * up. If it has gone through submit(), we have to abort it [with an 1819 * asynch request] and then make sure we cancel each segment. 1820 * 1821 */ 1822int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) 1823{ 1824 unsigned long flags, flags2; 1825 struct wa_xfer *xfer; 1826 struct wa_seg *seg; 1827 struct wa_rpipe *rpipe; 1828 unsigned cnt, done = 0, xfer_abort_pending; 1829 unsigned rpipe_ready = 0; 1830 int result; 1831 1832 /* check if it is safe to unlink. 
*/ 1833 spin_lock_irqsave(&wa->xfer_list_lock, flags); 1834 result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status); 1835 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); 1836 if (result) 1837 return result; 1838 1839 xfer = urb->hcpriv; 1840 if (xfer == NULL) { 1841 /* 1842 * Nothing setup yet enqueue will see urb->status != 1843 * -EINPROGRESS (by hcd layer) and bail out with 1844 * error, no need to do completion 1845 */ 1846 BUG_ON(urb->status == -EINPROGRESS); 1847 goto out; 1848 } 1849 spin_lock_irqsave(&xfer->lock, flags); 1850 pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer)); 1851 rpipe = xfer->ep->hcpriv; 1852 if (rpipe == NULL) { 1853 pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s", 1854 __func__, xfer, wa_xfer_id(xfer), 1855 "Probably already aborted.\n" ); 1856 result = -ENOENT; 1857 goto out_unlock; 1858 } 1859 /* Check the delayed list -> if there, release and complete */ 1860 spin_lock_irqsave(&wa->xfer_list_lock, flags2); 1861 if (!list_empty(&xfer->list_node) && xfer->seg == NULL) 1862 goto dequeue_delayed; 1863 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); 1864 if (xfer->seg == NULL) /* still hasn't reached */ 1865 goto out_unlock; /* setup(), enqueue_b() completes */ 1866 /* Ok, the xfer is in flight already, it's been setup and submitted.*/ 1867 xfer_abort_pending = __wa_xfer_abort(xfer) >= 0; 1868 for (cnt = 0; cnt < xfer->segs; cnt++) { 1869 seg = xfer->seg[cnt]; 1870 pr_debug("%s: xfer id 0x%08X#%d status = %d\n", 1871 __func__, wa_xfer_id(xfer), cnt, seg->status); 1872 switch (seg->status) { 1873 case WA_SEG_NOTREADY: 1874 case WA_SEG_READY: 1875 printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n", 1876 xfer, cnt, seg->status); 1877 WARN_ON(1); 1878 break; 1879 case WA_SEG_DELAYED: 1880 /* 1881 * delete from rpipe delayed list. If no segments on 1882 * this xfer have been submitted, __wa_xfer_is_done will 1883 * trigger a giveback below. Otherwise, the submitted 1884 * segments will be completed in the DTI interrupt. 1885 */ 1886 seg->status = WA_SEG_ABORTED; 1887 seg->result = -ENOENT; 1888 spin_lock_irqsave(&rpipe->seg_lock, flags2); 1889 list_del(&seg->list_node); 1890 xfer->segs_done++; 1891 spin_unlock_irqrestore(&rpipe->seg_lock, flags2); 1892 break; 1893 case WA_SEG_DONE: 1894 case WA_SEG_ERROR: 1895 case WA_SEG_ABORTED: 1896 break; 1897 /* 1898 * In the states below, the HWA device already knows 1899 * about the transfer. If an abort request was sent, 1900 * allow the HWA to process it and wait for the 1901 * results. Otherwise, the DTI state and seg completed 1902 * counts can get out of sync. 1903 */ 1904 case WA_SEG_SUBMITTED: 1905 case WA_SEG_PENDING: 1906 case WA_SEG_DTI_PENDING: 1907 /* 1908 * Check if the abort was successfully sent. This could 1909 * be false if the HWA has been removed but we haven't 1910 * gotten the disconnect notification yet. 
1911 */ 1912 if (!xfer_abort_pending) { 1913 seg->status = WA_SEG_ABORTED; 1914 rpipe_ready = rpipe_avail_inc(rpipe); 1915 xfer->segs_done++; 1916 } 1917 break; 1918 } 1919 } 1920 xfer->result = urb->status; /* -ENOENT or -ECONNRESET */ 1921 done = __wa_xfer_is_done(xfer); 1922 spin_unlock_irqrestore(&xfer->lock, flags); 1923 if (done) 1924 wa_xfer_completion(xfer); 1925 if (rpipe_ready) 1926 wa_xfer_delayed_run(rpipe); 1927 return result; 1928 1929out_unlock: 1930 spin_unlock_irqrestore(&xfer->lock, flags); 1931out: 1932 return result; 1933 1934dequeue_delayed: 1935 list_del_init(&xfer->list_node); 1936 spin_unlock_irqrestore(&wa->xfer_list_lock, flags2); 1937 xfer->result = urb->status; 1938 spin_unlock_irqrestore(&xfer->lock, flags); 1939 wa_xfer_giveback(xfer); 1940 usb_put_urb(urb); /* we got a ref in enqueue() */ 1941 return 0; 1942} 1943EXPORT_SYMBOL_GPL(wa_urb_dequeue); 1944 1945/* 1946 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno 1947 * codes 1948 * 1949 * Positive errno values are internal inconsistencies and should be 1950 * flagged louder. Negative are to be passed up to the user in the 1951 * normal way. 1952 * 1953 * @status: USB WA status code -- high two bits are stripped. 1954 */ 1955static int wa_xfer_status_to_errno(u8 status) 1956{ 1957 int errno; 1958 u8 real_status = status; 1959 static int xlat[] = { 1960 [WA_XFER_STATUS_SUCCESS] = 0, 1961 [WA_XFER_STATUS_HALTED] = -EPIPE, 1962 [WA_XFER_STATUS_DATA_BUFFER_ERROR] = -ENOBUFS, 1963 [WA_XFER_STATUS_BABBLE] = -EOVERFLOW, 1964 [WA_XFER_RESERVED] = EINVAL, 1965 [WA_XFER_STATUS_NOT_FOUND] = 0, 1966 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, 1967 [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, 1968 [WA_XFER_STATUS_ABORTED] = -ENOENT, 1969 [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, 1970 [WA_XFER_INVALID_FORMAT] = EINVAL, 1971 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, 1972 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] = EINVAL, 1973 }; 1974 status &= 0x3f; 1975 1976 if (status == 0) 1977 return 0; 1978 if (status >= ARRAY_SIZE(xlat)) { 1979 printk_ratelimited(KERN_ERR "%s(): BUG? " 1980 "Unknown WA transfer status 0x%02x\n", 1981 __func__, real_status); 1982 return -EINVAL; 1983 } 1984 errno = xlat[status]; 1985 if (unlikely(errno > 0)) { 1986 printk_ratelimited(KERN_ERR "%s(): BUG? " 1987 "Inconsistent WA status: 0x%02x\n", 1988 __func__, real_status); 1989 errno = -errno; 1990 } 1991 return errno; 1992} 1993 1994/* 1995 * If a last segment flag and/or a transfer result error is encountered, 1996 * no other segment transfer results will be returned from the device. 1997 * Mark the remaining submitted or pending xfers as completed so that 1998 * the xfer will complete cleanly. 1999 */ 2000static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, 2001 struct wa_seg *incoming_seg, enum wa_seg_status status) 2002{ 2003 int index; 2004 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 2005 2006 for (index = incoming_seg->index + 1; index < xfer->segs_submitted; 2007 index++) { 2008 struct wa_seg *current_seg = xfer->seg[index]; 2009 2010 BUG_ON(current_seg == NULL); 2011 2012 switch (current_seg->status) { 2013 case WA_SEG_SUBMITTED: 2014 case WA_SEG_PENDING: 2015 case WA_SEG_DTI_PENDING: 2016 rpipe_avail_inc(rpipe); 2017 /* 2018 * do not increment RPIPE avail for the WA_SEG_DELAYED case 2019 * since it has not been submitted to the RPIPE. 
2020 */ 2021 case WA_SEG_DELAYED: 2022 xfer->segs_done++; 2023 current_seg->status = status; 2024 break; 2025 case WA_SEG_ABORTED: 2026 break; 2027 default: 2028 WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n", 2029 __func__, wa_xfer_id(xfer), index, 2030 current_seg->status); 2031 break; 2032 } 2033 } 2034} 2035 2036/* Populate the wa->buf_in_urb based on the current isoc transfer state. */ 2037static void __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer, 2038 struct wa_seg *seg, int curr_iso_frame) 2039{ 2040 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); 2041 2042 /* this should always be 0 before a resubmit. */ 2043 wa->buf_in_urb->num_mapped_sgs = 0; 2044 wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma + 2045 xfer->urb->iso_frame_desc[curr_iso_frame].offset; 2046 wa->buf_in_urb->transfer_buffer_length = 2047 xfer->urb->iso_frame_desc[curr_iso_frame].length; 2048 wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 2049 wa->buf_in_urb->transfer_buffer = NULL; 2050 wa->buf_in_urb->sg = NULL; 2051 wa->buf_in_urb->num_sgs = 0; 2052 wa->buf_in_urb->context = seg; 2053} 2054 2055/* Populate the wa->buf_in_urb based on the current transfer state. */ 2056static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer, 2057 unsigned int seg_idx, unsigned int bytes_transferred) 2058{ 2059 int result = 0; 2060 struct wa_seg *seg = xfer->seg[seg_idx]; 2061 2062 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); 2063 /* this should always be 0 before a resubmit. */ 2064 wa->buf_in_urb->num_mapped_sgs = 0; 2065 2066 if (xfer->is_dma) { 2067 wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma 2068 + (seg_idx * xfer->seg_size); 2069 wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 2070 wa->buf_in_urb->transfer_buffer = NULL; 2071 wa->buf_in_urb->sg = NULL; 2072 wa->buf_in_urb->num_sgs = 0; 2073 } else { 2074 /* do buffer or SG processing. */ 2075 wa->buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; 2076 2077 if (xfer->urb->transfer_buffer) { 2078 wa->buf_in_urb->transfer_buffer = 2079 xfer->urb->transfer_buffer 2080 + (seg_idx * xfer->seg_size); 2081 wa->buf_in_urb->sg = NULL; 2082 wa->buf_in_urb->num_sgs = 0; 2083 } else { 2084 /* allocate an SG list to store seg_size bytes 2085 and copy the subset of the xfer->urb->sg 2086 that matches the buffer subset we are 2087 about to read. 
*/ 2088 wa->buf_in_urb->sg = wa_xfer_create_subset_sg( 2089 xfer->urb->sg, 2090 seg_idx * xfer->seg_size, 2091 bytes_transferred, 2092 &(wa->buf_in_urb->num_sgs)); 2093 2094 if (!(wa->buf_in_urb->sg)) { 2095 wa->buf_in_urb->num_sgs = 0; 2096 result = -ENOMEM; 2097 } 2098 wa->buf_in_urb->transfer_buffer = NULL; 2099 } 2100 } 2101 wa->buf_in_urb->transfer_buffer_length = bytes_transferred; 2102 wa->buf_in_urb->context = seg; 2103 2104 return result; 2105} 2106 2107/* 2108 * Process a xfer result completion message 2109 * 2110 * inbound transfers: need to schedule a buf_in_urb read 2111 * 2112 * FIXME: this function needs to be broken up in parts 2113 */ 2114static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, 2115 struct wa_xfer_result *xfer_result) 2116{ 2117 int result; 2118 struct device *dev = &wa->usb_iface->dev; 2119 unsigned long flags; 2120 unsigned int seg_idx; 2121 struct wa_seg *seg; 2122 struct wa_rpipe *rpipe; 2123 unsigned done = 0; 2124 u8 usb_status; 2125 unsigned rpipe_ready = 0; 2126 unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength); 2127 2128 spin_lock_irqsave(&xfer->lock, flags); 2129 seg_idx = xfer_result->bTransferSegment & 0x7f; 2130 if (unlikely(seg_idx >= xfer->segs)) 2131 goto error_bad_seg; 2132 seg = xfer->seg[seg_idx]; 2133 rpipe = xfer->ep->hcpriv; 2134 usb_status = xfer_result->bTransferStatus; 2135 dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n", 2136 xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status); 2137 if (seg->status == WA_SEG_ABORTED 2138 || seg->status == WA_SEG_ERROR) /* already handled */ 2139 goto segment_aborted; 2140 if (seg->status == WA_SEG_SUBMITTED) /* ops, got here */ 2141 seg->status = WA_SEG_PENDING; /* before wa_seg{_dto}_cb() */ 2142 if (seg->status != WA_SEG_PENDING) { 2143 if (printk_ratelimit()) 2144 dev_err(dev, "xfer %p#%u: Bad segment state %u\n", 2145 xfer, seg_idx, seg->status); 2146 seg->status = WA_SEG_PENDING; /* workaround/"fix" it */ 2147 } 2148 if (usb_status & 0x80) { 2149 seg->result = wa_xfer_status_to_errno(usb_status); 2150 dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n", 2151 xfer, xfer->id, seg->index, usb_status); 2152 seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ? 2153 WA_SEG_ABORTED : WA_SEG_ERROR; 2154 goto error_complete; 2155 } 2156 /* FIXME: we ignore warnings, tally them for stats */ 2157 if (usb_status & 0x40) /* Warning?... */ 2158 usb_status = 0; /* ... pass */ 2159 /* 2160 * If the last segment bit is set, complete the remaining segments. 2161 * When the current segment is completed, either in wa_buf_in_cb for 2162 * transfers with data or below for no data, the xfer will complete. 2163 */ 2164 if (xfer_result->bTransferSegment & 0x80) 2165 wa_complete_remaining_xfer_segs(xfer, seg, WA_SEG_DONE); 2166 if (usb_pipeisoc(xfer->urb->pipe) 2167 && (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) { 2168 /* set up WA state to read the isoc packet status next. 
*/ 2169 wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer); 2170 wa->dti_isoc_xfer_seg = seg_idx; 2171 wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING; 2172 } else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe) 2173 && (bytes_transferred > 0)) { 2174 /* IN data phase: read to buffer */ 2175 seg->status = WA_SEG_DTI_PENDING; 2176 result = wa_populate_buf_in_urb(wa, xfer, seg_idx, 2177 bytes_transferred); 2178 if (result < 0) 2179 goto error_buf_in_populate; 2180 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); 2181 if (result < 0) 2182 goto error_submit_buf_in; 2183 } else { 2184 /* OUT data phase or no data, complete it -- */ 2185 seg->status = WA_SEG_DONE; 2186 seg->result = bytes_transferred; 2187 xfer->segs_done++; 2188 rpipe_ready = rpipe_avail_inc(rpipe); 2189 done = __wa_xfer_is_done(xfer); 2190 } 2191 spin_unlock_irqrestore(&xfer->lock, flags); 2192 if (done) 2193 wa_xfer_completion(xfer); 2194 if (rpipe_ready) 2195 wa_xfer_delayed_run(rpipe); 2196 return; 2197 2198error_submit_buf_in: 2199 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { 2200 dev_err(dev, "DTI: URB max acceptable errors " 2201 "exceeded, resetting device\n"); 2202 wa_reset_all(wa); 2203 } 2204 if (printk_ratelimit()) 2205 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n", 2206 xfer, seg_idx, result); 2207 seg->result = result; 2208 kfree(wa->buf_in_urb->sg); 2209 wa->buf_in_urb->sg = NULL; 2210error_buf_in_populate: 2211 __wa_xfer_abort(xfer); 2212 seg->status = WA_SEG_ERROR; 2213error_complete: 2214 xfer->segs_done++; 2215 rpipe_ready = rpipe_avail_inc(rpipe); 2216 wa_complete_remaining_xfer_segs(xfer, seg, seg->status); 2217 done = __wa_xfer_is_done(xfer); 2218 /* 2219 * queue work item to clear STALL for control endpoints. 2220 * Otherwise, let endpoint_reset take care of it. 2221 */ 2222 if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) && 2223 usb_endpoint_xfer_control(&xfer->ep->desc) && 2224 done) { 2225 2226 dev_info(dev, "Control EP stall. Queue delayed work.\n"); 2227 spin_lock_irq(&wa->xfer_list_lock); 2228 /* move xfer from xfer_list to xfer_errored_list. 
*/ 2229 list_move_tail(&xfer->list_node, &wa->xfer_errored_list); 2230 spin_unlock_irq(&wa->xfer_list_lock); 2231 spin_unlock_irqrestore(&xfer->lock, flags); 2232 queue_work(wusbd, &wa->xfer_error_work); 2233 } else { 2234 spin_unlock_irqrestore(&xfer->lock, flags); 2235 if (done) 2236 wa_xfer_completion(xfer); 2237 if (rpipe_ready) 2238 wa_xfer_delayed_run(rpipe); 2239 } 2240 2241 return; 2242 2243error_bad_seg: 2244 spin_unlock_irqrestore(&xfer->lock, flags); 2245 wa_urb_dequeue(wa, xfer->urb, -ENOENT); 2246 if (printk_ratelimit()) 2247 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); 2248 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { 2249 dev_err(dev, "DTI: URB max acceptable errors " 2250 "exceeded, resetting device\n"); 2251 wa_reset_all(wa); 2252 } 2253 return; 2254 2255segment_aborted: 2256 /* nothing to do, as the aborter did the completion */ 2257 spin_unlock_irqrestore(&xfer->lock, flags); 2258} 2259 2260/* 2261 * Process a isochronous packet status message 2262 * 2263 * inbound transfers: need to schedule a buf_in_urb read 2264 */ 2265static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb) 2266{ 2267 struct device *dev = &wa->usb_iface->dev; 2268 struct wa_xfer_packet_status_hwaiso *packet_status; 2269 struct wa_xfer_packet_status_len_hwaiso *status_array; 2270 struct wa_xfer *xfer; 2271 unsigned long flags; 2272 struct wa_seg *seg; 2273 struct wa_rpipe *rpipe; 2274 unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index; 2275 unsigned first_frame_index = 0, rpipe_ready = 0; 2276 int expected_size; 2277 2278 /* We have a xfer result buffer; check it */ 2279 dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n", 2280 urb->actual_length, urb->transfer_buffer); 2281 packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf); 2282 if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) { 2283 dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n", 2284 packet_status->bPacketType); 2285 goto error_parse_buffer; 2286 } 2287 xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress); 2288 if (xfer == NULL) { 2289 dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n", 2290 wa->dti_isoc_xfer_in_progress); 2291 goto error_parse_buffer; 2292 } 2293 spin_lock_irqsave(&xfer->lock, flags); 2294 if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs)) 2295 goto error_bad_seg; 2296 seg = xfer->seg[wa->dti_isoc_xfer_seg]; 2297 rpipe = xfer->ep->hcpriv; 2298 expected_size = sizeof(*packet_status) + 2299 (sizeof(packet_status->PacketStatus[0]) * 2300 seg->isoc_frame_count); 2301 if (urb->actual_length != expected_size) { 2302 dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %d needed)\n", 2303 urb->actual_length, expected_size); 2304 goto error_bad_seg; 2305 } 2306 if (le16_to_cpu(packet_status->wLength) != expected_size) { 2307 dev_err(dev, "DTI Error: isoc packet status--bad length %u\n", 2308 le16_to_cpu(packet_status->wLength)); 2309 goto error_bad_seg; 2310 } 2311 /* write isoc packet status and lengths back to the xfer urb. 
*/ 2312 status_array = packet_status->PacketStatus; 2313 xfer->urb->start_frame = 2314 wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd); 2315 for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) { 2316 struct usb_iso_packet_descriptor *iso_frame_desc = 2317 xfer->urb->iso_frame_desc; 2318 const int urb_frame_index = 2319 seg->isoc_frame_offset + seg_index; 2320 2321 iso_frame_desc[urb_frame_index].status = 2322 wa_xfer_status_to_errno( 2323 le16_to_cpu(status_array[seg_index].PacketStatus)); 2324 iso_frame_desc[urb_frame_index].actual_length = 2325 le16_to_cpu(status_array[seg_index].PacketLength); 2326 /* track the number of frames successfully transferred. */ 2327 if (iso_frame_desc[urb_frame_index].actual_length > 0) { 2328 /* save the starting frame index for buf_in_urb. */ 2329 if (!data_frame_count) 2330 first_frame_index = seg_index; 2331 ++data_frame_count; 2332 } 2333 } 2334 2335 if (xfer->is_inbound && data_frame_count) { 2336 int result; 2337 2338 seg->isoc_frame_index = first_frame_index; 2339 /* submit a read URB for the first frame with data. */ 2340 __wa_populate_buf_in_urb_isoc(wa, xfer, seg, 2341 seg->isoc_frame_index + seg->isoc_frame_offset); 2342 2343 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); 2344 if (result < 0) { 2345 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)", 2346 result); 2347 wa_reset_all(wa); 2348 } else if (data_frame_count > 1) 2349 /* If we need to read multiple frames, set DTI busy. */ 2350 dti_busy = 1; 2351 } else { 2352 /* OUT transfer or no more IN data, complete it -- */ 2353 seg->status = WA_SEG_DONE; 2354 xfer->segs_done++; 2355 rpipe_ready = rpipe_avail_inc(rpipe); 2356 done = __wa_xfer_is_done(xfer); 2357 } 2358 spin_unlock_irqrestore(&xfer->lock, flags); 2359 wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING; 2360 if (done) 2361 wa_xfer_completion(xfer); 2362 if (rpipe_ready) 2363 wa_xfer_delayed_run(rpipe); 2364 wa_xfer_put(xfer); 2365 return dti_busy; 2366 2367error_bad_seg: 2368 spin_unlock_irqrestore(&xfer->lock, flags); 2369 wa_xfer_put(xfer); 2370error_parse_buffer: 2371 return dti_busy; 2372} 2373 2374/* 2375 * Callback for the IN data phase 2376 * 2377 * If successful transition state; otherwise, take a note of the 2378 * error, mark this segment done and try completion. 2379 * 2380 * Note we don't access until we are sure that the transfer hasn't 2381 * been cancelled (ECONNRESET, ENOENT), which could mean that 2382 * seg->xfer could be already gone. 2383 */ 2384static void wa_buf_in_cb(struct urb *urb) 2385{ 2386 struct wa_seg *seg = urb->context; 2387 struct wa_xfer *xfer = seg->xfer; 2388 struct wahc *wa; 2389 struct device *dev; 2390 struct wa_rpipe *rpipe; 2391 unsigned rpipe_ready = 0, seg_index, isoc_data_frame_count = 0; 2392 unsigned long flags; 2393 u8 done = 0; 2394 2395 /* free the sg if it was used. */ 2396 kfree(urb->sg); 2397 urb->sg = NULL; 2398 2399 spin_lock_irqsave(&xfer->lock, flags); 2400 wa = xfer->wa; 2401 dev = &wa->usb_iface->dev; 2402 2403 if (usb_pipeisoc(xfer->urb->pipe)) { 2404 /* 2405 * Find the next isoc frame with data. Bail out after 2406 * isoc_data_frame_count > 1 since there is no need to walk 2407 * the entire frame array. We just need to know if 2408 * isoc_data_frame_count is 0, 1, or >1. 
2409 */ 2410 seg_index = seg->isoc_frame_index + 1; 2411 while ((seg_index < seg->isoc_frame_count) 2412 && (isoc_data_frame_count <= 1)) { 2413 struct usb_iso_packet_descriptor *iso_frame_desc = 2414 xfer->urb->iso_frame_desc; 2415 const int urb_frame_index = 2416 seg->isoc_frame_offset + seg_index; 2417 2418 if (iso_frame_desc[urb_frame_index].actual_length > 0) { 2419 /* save the index of the next frame with data */ 2420 if (!isoc_data_frame_count) 2421 seg->isoc_frame_index = seg_index; 2422 ++isoc_data_frame_count; 2423 } 2424 ++seg_index; 2425 } 2426 } 2427 spin_unlock_irqrestore(&xfer->lock, flags); 2428 2429 switch (urb->status) { 2430 case 0: 2431 spin_lock_irqsave(&xfer->lock, flags); 2432 2433 seg->result += urb->actual_length; 2434 if (isoc_data_frame_count > 0) { 2435 int result; 2436 /* submit a read URB for the first frame with data. */ 2437 __wa_populate_buf_in_urb_isoc(wa, xfer, seg, 2438 seg->isoc_frame_index + seg->isoc_frame_offset); 2439 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); 2440 if (result < 0) { 2441 dev_err(dev, "DTI Error: Could not submit buf in URB (%d)", 2442 result); 2443 wa_reset_all(wa); 2444 } 2445 } else { 2446 rpipe = xfer->ep->hcpriv; 2447 seg->status = WA_SEG_DONE; 2448 dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n", 2449 xfer, seg->index, seg->result); 2450 xfer->segs_done++; 2451 rpipe_ready = rpipe_avail_inc(rpipe); 2452 done = __wa_xfer_is_done(xfer); 2453 } 2454 spin_unlock_irqrestore(&xfer->lock, flags); 2455 if (done) 2456 wa_xfer_completion(xfer); 2457 if (rpipe_ready) 2458 wa_xfer_delayed_run(rpipe); 2459 break; 2460 case -ECONNRESET: /* URB unlinked; no need to do anything */ 2461 case -ENOENT: /* as it was done by the who unlinked us */ 2462 break; 2463 default: /* Other errors ... */ 2464 spin_lock_irqsave(&xfer->lock, flags); 2465 rpipe = xfer->ep->hcpriv; 2466 if (printk_ratelimit()) 2467 dev_err(dev, "xfer %p#%u: data in error %d\n", 2468 xfer, seg->index, urb->status); 2469 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 2470 EDC_ERROR_TIMEFRAME)){ 2471 dev_err(dev, "DTO: URB max acceptable errors " 2472 "exceeded, resetting device\n"); 2473 wa_reset_all(wa); 2474 } 2475 seg->status = WA_SEG_ERROR; 2476 seg->result = urb->status; 2477 xfer->segs_done++; 2478 rpipe_ready = rpipe_avail_inc(rpipe); 2479 __wa_xfer_abort(xfer); 2480 done = __wa_xfer_is_done(xfer); 2481 spin_unlock_irqrestore(&xfer->lock, flags); 2482 if (done) 2483 wa_xfer_completion(xfer); 2484 if (rpipe_ready) 2485 wa_xfer_delayed_run(rpipe); 2486 } 2487 /* 2488 * If we are in this callback and isoc_data_frame_count > 0, it means 2489 * that the dti_urb submission was delayed in wa_dti_cb. Once 2490 * isoc_data_frame_count gets to 1, we can submit the deferred URB 2491 * since the last buf_in_urb was just submitted. 2492 */ 2493 if (isoc_data_frame_count == 1) { 2494 int result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); 2495 if (result < 0) { 2496 dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n", 2497 result); 2498 wa_reset_all(wa); 2499 } 2500 } 2501} 2502 2503/* 2504 * Handle an incoming transfer result buffer 2505 * 2506 * Given a transfer result buffer, it completes the transfer (possibly 2507 * scheduling and buffer in read) and then resubmits the DTI URB for a 2508 * new transfer result read. 
2509 * 2510 * 2511 * The xfer_result DTI URB state machine 2512 * 2513 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In) 2514 * 2515 * We start in OFF mode, the first xfer_result notification [through 2516 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to 2517 * read. 2518 * 2519 * We receive a buffer -- if it is not a xfer_result, we complain and 2520 * repost the DTI-URB. If it is a xfer_result then do the xfer seg 2521 * request accounting. If it is an IN segment, we move to RBI and post 2522 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will 2523 * repost the DTI-URB and move to RXR state. if there was no IN 2524 * segment, it will repost the DTI-URB. 2525 * 2526 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many 2527 * errors) in the URBs. 2528 */ 2529static void wa_dti_cb(struct urb *urb) 2530{ 2531 int result, dti_busy = 0; 2532 struct wahc *wa = urb->context; 2533 struct device *dev = &wa->usb_iface->dev; 2534 u32 xfer_id; 2535 u8 usb_status; 2536 2537 BUG_ON(wa->dti_urb != urb); 2538 switch (wa->dti_urb->status) { 2539 case 0: 2540 if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) { 2541 struct wa_xfer_result *xfer_result; 2542 struct wa_xfer *xfer; 2543 2544 /* We have a xfer result buffer; check it */ 2545 dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", 2546 urb->actual_length, urb->transfer_buffer); 2547 if (urb->actual_length != sizeof(*xfer_result)) { 2548 dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n", 2549 urb->actual_length, 2550 sizeof(*xfer_result)); 2551 break; 2552 } 2553 xfer_result = (struct wa_xfer_result *)(wa->dti_buf); 2554 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) { 2555 dev_err(dev, "DTI Error: xfer result--bad header length %u\n", 2556 xfer_result->hdr.bLength); 2557 break; 2558 } 2559 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) { 2560 dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n", 2561 xfer_result->hdr.bNotifyType); 2562 break; 2563 } 2564 usb_status = xfer_result->bTransferStatus & 0x3f; 2565 if (usb_status == WA_XFER_STATUS_NOT_FOUND) 2566 /* taken care of already */ 2567 break; 2568 xfer_id = le32_to_cpu(xfer_result->dwTransferID); 2569 xfer = wa_xfer_get_by_id(wa, xfer_id); 2570 if (xfer == NULL) { 2571 /* FIXME: transaction not found. */ 2572 dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n", 2573 xfer_id, usb_status); 2574 break; 2575 } 2576 wa_xfer_result_chew(wa, xfer, xfer_result); 2577 wa_xfer_put(xfer); 2578 } else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) { 2579 dti_busy = wa_process_iso_packet_status(wa, urb); 2580 } else { 2581 dev_err(dev, "DTI Error: unexpected EP state = %d\n", 2582 wa->dti_state); 2583 } 2584 break; 2585 case -ENOENT: /* (we killed the URB)...so, no broadcast */ 2586 case -ESHUTDOWN: /* going away! */ 2587 dev_dbg(dev, "DTI: going down! %d\n", urb->status); 2588 goto out; 2589 default: 2590 /* Unknown error */ 2591 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, 2592 EDC_ERROR_TIMEFRAME)) { 2593 dev_err(dev, "DTI: URB max acceptable errors " 2594 "exceeded, resetting device\n"); 2595 wa_reset_all(wa); 2596 goto out; 2597 } 2598 if (printk_ratelimit()) 2599 dev_err(dev, "DTI: URB error %d\n", urb->status); 2600 break; 2601 } 2602 2603 /* Resubmit the DTI URB if we are not busy processing isoc in frames. 
*/ 2604 if (!dti_busy) { 2605 result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC); 2606 if (result < 0) { 2607 dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n", 2608 result); 2609 wa_reset_all(wa); 2610 } 2611 } 2612out: 2613 return; 2614} 2615 2616/* 2617 * Transfer complete notification 2618 * 2619 * Called from the notif.c code. We get a notification on EP2 saying 2620 * that some endpoint has some transfer result data available. We are 2621 * about to read it. 2622 * 2623 * To speed up things, we always have a URB reading the DTI URB; we 2624 * don't really set it up and start it until the first xfer complete 2625 * notification arrives, which is what we do here. 2626 * 2627 * Follow up in wa_dti_cb(), as that's where the whole state 2628 * machine starts. 2629 * 2630 * So here we just initialize the DTI URB for reading transfer result 2631 * notifications and also the buffer-in URB, for reading buffers. Then 2632 * we just submit the DTI URB. 2633 * 2634 * @wa shall be referenced 2635 */ 2636void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr) 2637{ 2638 int result; 2639 struct device *dev = &wa->usb_iface->dev; 2640 struct wa_notif_xfer *notif_xfer; 2641 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; 2642 2643 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); 2644 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); 2645 2646 if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) { 2647 /* FIXME: hardcoded limitation, adapt */ 2648 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n", 2649 notif_xfer->bEndpoint, dti_epd->bEndpointAddress); 2650 goto error; 2651 } 2652 if (wa->dti_urb != NULL) /* DTI URB already started */ 2653 goto out; 2654 2655 wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL); 2656 if (wa->dti_urb == NULL) { 2657 dev_err(dev, "Can't allocate DTI URB\n"); 2658 goto error_dti_urb_alloc; 2659 } 2660 usb_fill_bulk_urb( 2661 wa->dti_urb, wa->usb_dev, 2662 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), 2663 wa->dti_buf, wa->dti_buf_size, 2664 wa_dti_cb, wa); 2665 2666 wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL); 2667 if (wa->buf_in_urb == NULL) { 2668 dev_err(dev, "Can't allocate BUF-IN URB\n"); 2669 goto error_buf_in_urb_alloc; 2670 } 2671 usb_fill_bulk_urb( 2672 wa->buf_in_urb, wa->usb_dev, 2673 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint), 2674 NULL, 0, wa_buf_in_cb, wa); 2675 result = usb_submit_urb(wa->dti_urb, GFP_KERNEL); 2676 if (result < 0) { 2677 dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n", 2678 result); 2679 goto error_dti_urb_submit; 2680 } 2681out: 2682 return; 2683 2684error_dti_urb_submit: 2685 usb_put_urb(wa->buf_in_urb); 2686 wa->buf_in_urb = NULL; 2687error_buf_in_urb_alloc: 2688 usb_put_urb(wa->dti_urb); 2689 wa->dti_urb = NULL; 2690error_dti_urb_alloc: 2691error: 2692 wa_reset_all(wa); 2693}
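/*
 * Editor's illustrative sketch (not part of the driver).  Two of the ideas
 * documented above are easy to misread from the code alone: (a) the WA
 * status -> errno translation, where the two high bits of bTransferStatus
 * (error/warning flags) are stripped and any positive table entry marks an
 * internal inconsistency, and (b) the xfer_result DTI URB state machine
 * (OFF | RXR | RBI) described before wa_dti_cb().  The standalone,
 * user-space C sketch below models both under assumed, hypothetical names
 * (wa_status_to_errno_sketch, dti_state_sketch, dti_step); the status code
 * values are assumed for illustration only -- see the real definitions in
 * the WA headers.  It is a reading aid, not kernel code.
 */
#include <errno.h>
#include <stdio.h>

/* Assumed subset of WUSB 1.0 Table 8.15 status codes, for the sketch only. */
enum {
	WA_STS_SUCCESS			= 0x00,
	WA_STS_HALTED			= 0x01,
	WA_STS_DATA_BUFFER_ERROR	= 0x02,
	WA_STS_BABBLE			= 0x03,
	WA_STS_ABORTED			= 0x08,
};

/*
 * Sketch of the translation pattern: strip the error/warning flag bits,
 * look the code up, and treat any positive table value as an internal
 * inconsistency that gets negated before being passed up.
 */
static int wa_status_to_errno_sketch(unsigned char status)
{
	static const int xlat[] = {
		[WA_STS_SUCCESS]		= 0,
		[WA_STS_HALTED]			= -EPIPE,
		[WA_STS_DATA_BUFFER_ERROR]	= -ENOBUFS,
		[WA_STS_BABBLE]			= -EOVERFLOW,
		[WA_STS_ABORTED]		= -ENOENT,
	};
	int err;

	status &= 0x3f;			/* drop the error/warning flag bits */
	if (status >= sizeof(xlat) / sizeof(xlat[0]))
		return -EINVAL;		/* unknown code: flag it upstream */
	err = xlat[status];
	if (err > 0)			/* positive entry = table bug */
		err = -err;
	return err;
}

/* DTI URB states: OFF -> RXR on the first notification, then RXR <-> RBI. */
enum dti_state_sketch { DTI_OFF, DTI_RXR, DTI_RBI };
enum dti_event { EV_NOTIF_XFER, EV_XFER_RESULT_IN_SEG, EV_XFER_RESULT_OTHER,
		 EV_BUF_IN_DONE, EV_SHUTDOWN };

static enum dti_state_sketch dti_step(enum dti_state_sketch s, enum dti_event e)
{
	switch (e) {
	case EV_NOTIF_XFER:		/* first notification posts the DTI URB */
		return (s == DTI_OFF) ? DTI_RXR : s;
	case EV_XFER_RESULT_IN_SEG:	/* IN segment: post a BUF-IN URB */
		return (s == DTI_RXR) ? DTI_RBI : s;
	case EV_XFER_RESULT_OTHER:	/* OUT or no data: just repost the DTI URB */
		return s;
	case EV_BUF_IN_DONE:		/* BUF-IN complete: back to reading results */
		return (s == DTI_RBI) ? DTI_RXR : s;
	case EV_SHUTDOWN:		/* ENOENT/ESHUTDOWN or too many errors */
		return DTI_OFF;
	}
	return s;
}

int main(void)
{
	enum dti_state_sketch s = DTI_OFF;

	s = dti_step(s, EV_NOTIF_XFER);		/* OFF -> RXR */
	s = dti_step(s, EV_XFER_RESULT_IN_SEG);	/* RXR -> RBI */
	s = dti_step(s, EV_BUF_IN_DONE);	/* RBI -> RXR */
	printf("state after an IN-segment round trip: %d\n", s);
	printf("HALTED maps to errno %d\n",
	       wa_status_to_errno_sketch(WA_STS_HALTED));
	return 0;
}
/*
 * Usage note: compiled on its own (e.g. with any C99 compiler), the sketch
 * prints the state after one IN-segment round trip and the errno chosen for
 * a halted RPIPE; the real driver performs the same transitions and
 * translation, but under the locking and error-recovery paths shown above.
 */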