Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

USB: xhci: Correct assumptions about number of rings per endpoint.

Much of the xHCI driver code assumes that endpoints only have one ring.
Now an endpoint can have one ring per enabled stream ID, so correct that
assumption. Use functions that translate the stream_id field in the URB
or the DMA address of a TRB into the correct stream ring.

Correct the polling loop to print out all enabled stream rings. Make the
URB cancellation routine find the correct stream ring if the URB has
stream_id set. Make sure the URB enqueueing routine does the same. Also
correct the code that handles stalled/halted endpoints.

Check that commands and registers that can take stream IDs handle them
properly. That includes ringing an endpoint doorbell, resetting a
stalled/halted endpoint, and setting a transfer ring dequeue pointer
(since that command can set the dequeue pointer in a stream context or an
endpoint context).

Correct the transfer event handler to translate a TRB DMA address into the
stream ring it was enqueued to. Make the code that allocates and prepares TD
structures add the TD to the right td_list for the stream ring. Make
sure the code to give the first TRB in a TD to the hardware manipulates
the correct stream ring.

When an endpoint stalls, store the stream ID of the stream ring that
stalled in the xhci_virt_ep structure. Use that instead of the stream ID
in the URB, since an URB may be re-used after it is given back after a
non-control endpoint stall.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Sarah Sharp and committed by
Greg Kroah-Hartman
e9df17eb 8df75f42

+280 -55
+24
drivers/usb/host/xhci-dbg.c
··· 364 364 xhci_debug_segment(xhci, seg); 365 365 } 366 366 367 + void xhci_dbg_ep_rings(struct xhci_hcd *xhci, 368 + unsigned int slot_id, unsigned int ep_index, 369 + struct xhci_virt_ep *ep) 370 + { 371 + int i; 372 + struct xhci_ring *ring; 373 + 374 + if (ep->ep_state & EP_HAS_STREAMS) { 375 + for (i = 1; i < ep->stream_info->num_streams; i++) { 376 + ring = ep->stream_info->stream_rings[i]; 377 + xhci_dbg(xhci, "Dev %d endpoint %d stream ID %d:\n", 378 + slot_id, ep_index, i); 379 + xhci_debug_segment(xhci, ring->deq_seg); 380 + } 381 + } else { 382 + ring = ep->ring; 383 + if (!ring) 384 + return; 385 + xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", 386 + slot_id, ep_index); 387 + xhci_debug_segment(xhci, ring->deq_seg); 388 + } 389 + } 390 + 367 391 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst) 368 392 { 369 393 u32 addr = (u32) erst->erst_dma_addr;
+73 -1
drivers/usb/host/xhci-mem.c
··· 353 353 mem_flags, dma); 354 354 } 355 355 356 + struct xhci_ring *xhci_dma_to_transfer_ring( 357 + struct xhci_virt_ep *ep, 358 + u64 address) 359 + { 360 + if (ep->ep_state & EP_HAS_STREAMS) 361 + return radix_tree_lookup(&ep->stream_info->trb_address_map, 362 + address >> SEGMENT_SHIFT); 363 + return ep->ring; 364 + } 365 + 366 + /* Only use this when you know stream_info is valid */ 356 367 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 357 - struct xhci_ring *dma_to_stream_ring( 368 + static struct xhci_ring *dma_to_stream_ring( 358 369 struct xhci_stream_info *stream_info, 359 370 u64 address) 360 371 { ··· 373 362 address >> SEGMENT_SHIFT); 374 363 } 375 364 #endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */ 365 + 366 + struct xhci_ring *xhci_stream_id_to_ring( 367 + struct xhci_virt_device *dev, 368 + unsigned int ep_index, 369 + unsigned int stream_id) 370 + { 371 + struct xhci_virt_ep *ep = &dev->eps[ep_index]; 372 + 373 + if (stream_id == 0) 374 + return ep->ring; 375 + if (!ep->stream_info) 376 + return NULL; 377 + 378 + if (stream_id > ep->stream_info->num_streams) 379 + return NULL; 380 + return ep->stream_info->stream_rings[stream_id]; 381 + } 382 + 383 + struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, 384 + unsigned int slot_id, unsigned int ep_index, 385 + unsigned int stream_id) 386 + { 387 + struct xhci_virt_ep *ep; 388 + 389 + ep = &xhci->devs[slot_id]->eps[ep_index]; 390 + /* Common case: no streams */ 391 + if (!(ep->ep_state & EP_HAS_STREAMS)) 392 + return ep->ring; 393 + 394 + if (stream_id == 0) { 395 + xhci_warn(xhci, 396 + "WARN: Slot ID %u, ep index %u has streams, " 397 + "but URB has no stream ID.\n", 398 + slot_id, ep_index); 399 + return NULL; 400 + } 401 + 402 + if (stream_id < ep->stream_info->num_streams) 403 + return ep->stream_info->stream_rings[stream_id]; 404 + 405 + xhci_warn(xhci, 406 + "WARN: Slot ID %u, ep index %u has " 407 + "stream IDs 1 to %u allocated, " 408 + "but stream ID %u is requested.\n", 409 + slot_id, 
ep_index, 410 + ep->stream_info->num_streams - 1, 411 + stream_id); 412 + return NULL; 413 + } 414 + 415 + /* Get the right ring for the given URB. 416 + * If the endpoint supports streams, boundary check the URB's stream ID. 417 + * If the endpoint doesn't support streams, return the singular endpoint ring. 418 + */ 419 + struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, 420 + struct urb *urb) 421 + { 422 + return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id, 423 + xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id); 424 + } 376 425 377 426 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 378 427 static int xhci_test_radix_tree(struct xhci_hcd *xhci, ··· 586 515 cur_ring = stream_info->stream_rings[cur_stream]; 587 516 if (!cur_ring) 588 517 goto cleanup_rings; 518 + cur_ring->stream_id = cur_stream; 589 519 /* Set deq ptr, cycle bit, and stream context type */ 590 520 addr = cur_ring->first_seg->dma | 591 521 SCT_FOR_CTX(SCT_PRI_TR) |
+147 -45
drivers/usb/host/xhci-ring.c
··· 312 312 313 313 static void ring_ep_doorbell(struct xhci_hcd *xhci, 314 314 unsigned int slot_id, 315 - unsigned int ep_index) 315 + unsigned int ep_index, 316 + unsigned int stream_id) 316 317 { 317 318 struct xhci_virt_ep *ep; 318 319 unsigned int ep_state; ··· 332 331 if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING) 333 332 && !(ep_state & EP_HALTED)) { 334 333 field = xhci_readl(xhci, db_addr) & DB_MASK; 335 - xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); 334 + field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id); 335 + xhci_writel(xhci, field, db_addr); 336 336 /* Flush PCI posted writes - FIXME Matthew Wilcox says this 337 337 * isn't time-critical and we shouldn't make the CPU wait for 338 338 * the flush. 339 339 */ 340 340 xhci_readl(xhci, db_addr); 341 + } 342 + } 343 + 344 + /* Ring the doorbell for any rings with pending URBs */ 345 + static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, 346 + unsigned int slot_id, 347 + unsigned int ep_index) 348 + { 349 + unsigned int stream_id; 350 + struct xhci_virt_ep *ep; 351 + 352 + ep = &xhci->devs[slot_id]->eps[ep_index]; 353 + 354 + /* A ring has pending URBs if its TD list is not empty */ 355 + if (!(ep->ep_state & EP_HAS_STREAMS)) { 356 + if (!(list_empty(&ep->ring->td_list))) 357 + ring_ep_doorbell(xhci, slot_id, ep_index, 0); 358 + return; 359 + } 360 + 361 + for (stream_id = 1; stream_id < ep->stream_info->num_streams; 362 + stream_id++) { 363 + struct xhci_stream_info *stream_info = ep->stream_info; 364 + if (!list_empty(&stream_info->stream_rings[stream_id]->td_list)) 365 + ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 341 366 } 342 367 } 343 368 ··· 409 382 */ 410 383 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 411 384 unsigned int slot_id, unsigned int ep_index, 412 - struct xhci_td *cur_td, struct xhci_dequeue_state *state) 385 + unsigned int stream_id, struct xhci_td *cur_td, 386 + struct xhci_dequeue_state *state) 413 387 { 414 
388 struct xhci_virt_device *dev = xhci->devs[slot_id]; 415 - struct xhci_ring *ep_ring = dev->eps[ep_index].ring; 389 + struct xhci_ring *ep_ring; 416 390 struct xhci_generic_trb *trb; 417 391 struct xhci_ep_ctx *ep_ctx; 418 392 dma_addr_t addr; 419 393 394 + ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, 395 + ep_index, stream_id); 396 + if (!ep_ring) { 397 + xhci_warn(xhci, "WARN can't find new dequeue state " 398 + "for invalid stream ID %u.\n", 399 + stream_id); 400 + return; 401 + } 420 402 state->new_cycle_state = 0; 421 403 xhci_dbg(xhci, "Finding segment containing stopped TRB.\n"); 422 404 state->new_deq_seg = find_trb_seg(cur_td->start_seg, ··· 505 469 } 506 470 507 471 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 508 - unsigned int ep_index, struct xhci_segment *deq_seg, 472 + unsigned int ep_index, unsigned int stream_id, 473 + struct xhci_segment *deq_seg, 509 474 union xhci_trb *deq_ptr, u32 cycle_state); 510 475 511 476 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 512 477 unsigned int slot_id, unsigned int ep_index, 478 + unsigned int stream_id, 513 479 struct xhci_dequeue_state *deq_state) 514 480 { 515 481 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; ··· 523 485 deq_state->new_deq_ptr, 524 486 (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr), 525 487 deq_state->new_cycle_state); 526 - queue_set_tr_deq(xhci, slot_id, ep_index, 488 + queue_set_tr_deq(xhci, slot_id, ep_index, stream_id, 527 489 deq_state->new_deq_seg, 528 490 deq_state->new_deq_ptr, 529 491 (u32) deq_state->new_cycle_state); ··· 591 553 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 592 554 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 593 555 ep = &xhci->devs[slot_id]->eps[ep_index]; 594 - ep_ring = ep->ring; 595 556 596 557 if (list_empty(&ep->cancelled_td_list)) { 597 558 xhci_stop_watchdog_timer_in_irq(xhci, ep); 598 - ring_ep_doorbell(xhci, slot_id, ep_index); 559 + 
ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 599 560 return; 600 561 } 601 562 ··· 608 571 xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n", 609 572 cur_td->first_trb, 610 573 (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb)); 574 + ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb); 575 + if (!ep_ring) { 576 + /* This shouldn't happen unless a driver is mucking 577 + * with the stream ID after submission. This will 578 + * leave the TD on the hardware ring, and the hardware 579 + * will try to execute it, and may access a buffer 580 + * that has already been freed. In the best case, the 581 + * hardware will execute it, and the event handler will 582 + * ignore the completion event for that TD, since it was 583 + * removed from the td_list for that endpoint. In 584 + * short, don't muck with the stream ID after 585 + * submission. 586 + */ 587 + xhci_warn(xhci, "WARN Cancelled URB %p " 588 + "has invalid stream ID %u.\n", 589 + cur_td->urb, 590 + cur_td->urb->stream_id); 591 + goto remove_finished_td; 592 + } 611 593 /* 612 594 * If we stopped on the TD we need to cancel, then we have to 613 595 * move the xHC endpoint ring dequeue pointer past this TD. 614 596 */ 615 597 if (cur_td == ep->stopped_td) 616 - xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td, 617 - &deq_state); 598 + xhci_find_new_dequeue_state(xhci, slot_id, ep_index, 599 + cur_td->urb->stream_id, 600 + cur_td, &deq_state); 618 601 else 619 602 td_to_noop(xhci, ep_ring, cur_td); 603 + remove_finished_td: 620 604 /* 621 605 * The event handler won't see a completion for this TD anymore, 622 606 * so remove it from the endpoint ring's TD list. 
Keep it in ··· 651 593 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 652 594 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 653 595 xhci_queue_new_dequeue_state(xhci, 654 - slot_id, ep_index, &deq_state); 596 + slot_id, ep_index, 597 + ep->stopped_td->urb->stream_id, 598 + &deq_state); 655 599 xhci_ring_cmd_db(xhci); 656 600 } else { 657 - /* Otherwise just ring the doorbell to restart the ring */ 658 - ring_ep_doorbell(xhci, slot_id, ep_index); 601 + /* Otherwise ring the doorbell(s) to restart queued transfers */ 602 + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 659 603 } 660 604 ep->stopped_td = NULL; 661 605 ep->stopped_trb = NULL; ··· 817 757 { 818 758 unsigned int slot_id; 819 759 unsigned int ep_index; 760 + unsigned int stream_id; 820 761 struct xhci_ring *ep_ring; 821 762 struct xhci_virt_device *dev; 822 763 struct xhci_ep_ctx *ep_ctx; ··· 825 764 826 765 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 827 766 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 767 + stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]); 828 768 dev = xhci->devs[slot_id]; 829 - ep_ring = dev->eps[ep_index].ring; 769 + 770 + ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); 771 + if (!ep_ring) { 772 + xhci_warn(xhci, "WARN Set TR deq ptr command for " 773 + "freed stream ID %u\n", 774 + stream_id); 775 + /* XXX: Harmless??? 
*/ 776 + dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 777 + return; 778 + } 779 + 830 780 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 831 781 slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 832 782 ··· 882 810 } 883 811 884 812 dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; 885 - ring_ep_doorbell(xhci, slot_id, ep_index); 813 + /* Restart any rings with pending URBs */ 814 + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 886 815 } 887 816 888 817 static void handle_reset_ep_completion(struct xhci_hcd *xhci, ··· 892 819 { 893 820 int slot_id; 894 821 unsigned int ep_index; 895 - struct xhci_ring *ep_ring; 896 822 897 823 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 898 824 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 899 - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 900 825 /* This command will only fail if the endpoint wasn't halted, 901 826 * but we don't care. 902 827 */ ··· 912 841 false); 913 842 xhci_ring_cmd_db(xhci); 914 843 } else { 915 - /* Clear our internal halted state and restart the ring */ 844 + /* Clear our internal halted state and restart the ring(s) */ 916 845 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 917 - ring_ep_doorbell(xhci, slot_id, ep_index); 846 + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 918 847 } 919 848 } 920 849 ··· 1000 929 /* Input ctx add_flags are the endpoint index plus one */ 1001 930 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; 1002 931 /* A usb_set_interface() call directly after clearing a halted 1003 - * condition may race on this quirky hardware. 1004 - * Not worth worrying about, since this is prototype hardware. 932 + * condition may race on this quirky hardware. Not worth 933 + * worrying about, since this is prototype hardware. Not sure 934 + * if this will work for streams, but streams support was 935 + * untested on this prototype. 
1005 936 */ 1006 937 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 1007 938 ep_index != (unsigned int) -1 && ··· 1016 943 xhci_dbg(xhci, "Completed config ep cmd - " 1017 944 "last ep index = %d, state = %d\n", 1018 945 ep_index, ep_state); 1019 - /* Clear our internal halted state and restart ring */ 946 + /* Clear internal halted state and restart ring(s) */ 1020 947 xhci->devs[slot_id]->eps[ep_index].ep_state &= 1021 948 ~EP_HALTED; 1022 - ring_ep_doorbell(xhci, slot_id, ep_index); 949 + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); 1023 950 break; 1024 951 } 1025 952 bandwidth_change: ··· 1152 1079 1153 1080 static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, 1154 1081 unsigned int slot_id, unsigned int ep_index, 1082 + unsigned int stream_id, 1155 1083 struct xhci_td *td, union xhci_trb *event_trb) 1156 1084 { 1157 1085 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index]; 1158 1086 ep->ep_state |= EP_HALTED; 1159 1087 ep->stopped_td = td; 1160 1088 ep->stopped_trb = event_trb; 1089 + ep->stopped_stream = stream_id; 1161 1090 1162 1091 xhci_queue_reset_ep(xhci, slot_id, ep_index); 1163 1092 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); ··· 1244 1169 ep_index = TRB_TO_EP_ID(event->flags) - 1; 1245 1170 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); 1246 1171 ep = &xdev->eps[ep_index]; 1247 - ep_ring = ep->ring; 1172 + ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1248 1173 ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1249 1174 if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 1250 - xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 1175 + xhci_err(xhci, "ERROR Transfer event for disabled endpoint " 1176 + "or incorrect stream ring\n"); 1251 1177 return -ENODEV; 1252 1178 } 1253 1179 ··· 1379 1303 td->urb->actual_length = 0; 1380 1304 1381 1305 xhci_cleanup_halted_endpoint(xhci, 1382 - slot_id, ep_index, td, event_trb); 1306 + slot_id, ep_index, 
0, td, event_trb); 1383 1307 goto td_cleanup; 1384 1308 } 1385 1309 /* ··· 1528 1452 */ 1529 1453 ep->stopped_td = td; 1530 1454 ep->stopped_trb = event_trb; 1455 + ep->stopped_stream = ep_ring->stream_id; 1531 1456 } else if (xhci_requires_manual_halt_cleanup(xhci, 1532 1457 ep_ctx, trb_comp_code)) { 1533 1458 /* Other types of errors halt the endpoint, but the ··· 1537 1460 * xHCI hardware manually. 1538 1461 */ 1539 1462 xhci_cleanup_halted_endpoint(xhci, 1540 - slot_id, ep_index, td, event_trb); 1463 + slot_id, ep_index, ep_ring->stream_id, td, event_trb); 1541 1464 } else { 1542 1465 /* Update ring dequeue pointer */ 1543 1466 while (ep_ring->dequeue != td->last_trb) ··· 1733 1656 static int prepare_transfer(struct xhci_hcd *xhci, 1734 1657 struct xhci_virt_device *xdev, 1735 1658 unsigned int ep_index, 1659 + unsigned int stream_id, 1736 1660 unsigned int num_trbs, 1737 1661 struct urb *urb, 1738 1662 struct xhci_td **td, 1739 1663 gfp_t mem_flags) 1740 1664 { 1741 1665 int ret; 1666 + struct xhci_ring *ep_ring; 1742 1667 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1743 - ret = prepare_ring(xhci, xdev->eps[ep_index].ring, 1668 + 1669 + ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id); 1670 + if (!ep_ring) { 1671 + xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n", 1672 + stream_id); 1673 + return -EINVAL; 1674 + } 1675 + 1676 + ret = prepare_ring(xhci, ep_ring, 1744 1677 ep_ctx->ep_info & EP_STATE_MASK, 1745 1678 num_trbs, mem_flags); 1746 1679 if (ret) ··· 1770 1683 (*td)->urb = urb; 1771 1684 urb->hcpriv = (void *) (*td); 1772 1685 /* Add this TD to the tail of the endpoint ring's TD list */ 1773 - list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list); 1774 - (*td)->start_seg = xdev->eps[ep_index].ring->enq_seg; 1775 - (*td)->first_trb = xdev->eps[ep_index].ring->enqueue; 1686 + list_add_tail(&(*td)->td_list, &ep_ring->td_list); 1687 + (*td)->start_seg = ep_ring->enq_seg; 1688 + 
(*td)->first_trb = ep_ring->enqueue; 1776 1689 1777 1690 return 0; 1778 1691 } ··· 1838 1751 } 1839 1752 1840 1753 static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, 1841 - unsigned int ep_index, int start_cycle, 1754 + unsigned int ep_index, unsigned int stream_id, int start_cycle, 1842 1755 struct xhci_generic_trb *start_trb, struct xhci_td *td) 1843 1756 { 1844 1757 /* ··· 1847 1760 */ 1848 1761 wmb(); 1849 1762 start_trb->field[3] |= start_cycle; 1850 - ring_ep_doorbell(xhci, slot_id, ep_index); 1763 + ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); 1851 1764 } 1852 1765 1853 1766 /* ··· 1921 1834 struct xhci_generic_trb *start_trb; 1922 1835 int start_cycle; 1923 1836 1924 - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1837 + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 1838 + if (!ep_ring) 1839 + return -EINVAL; 1840 + 1925 1841 num_trbs = count_sg_trbs_needed(xhci, urb); 1926 1842 num_sgs = urb->num_sgs; 1927 1843 1928 1844 trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id], 1929 - ep_index, num_trbs, urb, &td, mem_flags); 1845 + ep_index, urb->stream_id, 1846 + num_trbs, urb, &td, mem_flags); 1930 1847 if (trb_buff_len < 0) 1931 1848 return trb_buff_len; 1932 1849 /* ··· 2039 1948 } while (running_total < urb->transfer_buffer_length); 2040 1949 2041 1950 check_trb_math(urb, num_trbs, running_total); 2042 - giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 1951 + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 1952 + start_cycle, start_trb, td); 2043 1953 return 0; 2044 1954 } 2045 1955 ··· 2062 1970 if (urb->num_sgs) 2063 1971 return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); 2064 1972 2065 - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 1973 + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 1974 + if (!ep_ring) 1975 + return -EINVAL; 2066 1976 2067 1977 num_trbs = 0; 2068 1978 /* How much data is (potentially) left before the 64KB boundary? 
*/ ··· 2091 1997 (unsigned long long)urb->transfer_dma, 2092 1998 num_trbs); 2093 1999 2094 - ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, 2000 + ret = prepare_transfer(xhci, xhci->devs[slot_id], 2001 + ep_index, urb->stream_id, 2095 2002 num_trbs, urb, &td, mem_flags); 2096 2003 if (ret < 0) 2097 2004 return ret; ··· 2162 2067 } while (running_total < urb->transfer_buffer_length); 2163 2068 2164 2069 check_trb_math(urb, num_trbs, running_total); 2165 - giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2070 + giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id, 2071 + start_cycle, start_trb, td); 2166 2072 return 0; 2167 2073 } 2168 2074 ··· 2180 2084 u32 field, length_field; 2181 2085 struct xhci_td *td; 2182 2086 2183 - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 2087 + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 2088 + if (!ep_ring) 2089 + return -EINVAL; 2184 2090 2185 2091 /* 2186 2092 * Need to copy setup packet into setup TRB, so we can't use the setup ··· 2203 2105 */ 2204 2106 if (urb->transfer_buffer_length > 0) 2205 2107 num_trbs++; 2206 - ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs, 2207 - urb, &td, mem_flags); 2108 + ret = prepare_transfer(xhci, xhci->devs[slot_id], 2109 + ep_index, urb->stream_id, 2110 + num_trbs, urb, &td, mem_flags); 2208 2111 if (ret < 0) 2209 2112 return ret; 2210 2113 ··· 2260 2161 /* Event on completion */ 2261 2162 field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 2262 2163 2263 - giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td); 2164 + giveback_first_trb(xhci, slot_id, ep_index, 0, 2165 + start_cycle, start_trb, td); 2264 2166 return 0; 2265 2167 } 2266 2168 ··· 2373 2273 * This should not be used for endpoints that have streams enabled. 
2374 2274 */ 2375 2275 static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id, 2376 - unsigned int ep_index, struct xhci_segment *deq_seg, 2276 + unsigned int ep_index, unsigned int stream_id, 2277 + struct xhci_segment *deq_seg, 2377 2278 union xhci_trb *deq_ptr, u32 cycle_state) 2378 2279 { 2379 2280 dma_addr_t addr; 2380 2281 u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id); 2381 2282 u32 trb_ep_index = EP_ID_FOR_TRB(ep_index); 2283 + u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id); 2382 2284 u32 type = TRB_TYPE(TRB_SET_DEQ); 2383 2285 2384 2286 addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr); ··· 2391 2289 return 0; 2392 2290 } 2393 2291 return queue_command(xhci, lower_32_bits(addr) | cycle_state, 2394 - upper_32_bits(addr), 0, 2292 + upper_32_bits(addr), trb_stream_id, 2395 2293 trb_slot_id | trb_ep_index | type, false); 2396 2294 } 2397 2295
+11 -8
drivers/usb/host/xhci.c
··· 353 353 if (!xhci->devs[i]) 354 354 continue; 355 355 for (j = 0; j < 31; ++j) { 356 - struct xhci_ring *ring = xhci->devs[i]->eps[j].ring; 357 - if (!ring) 358 - continue; 359 - xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); 360 - xhci_debug_segment(xhci, ring->deq_seg); 356 + xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]); 361 357 } 362 358 } 363 359 ··· 835 839 xhci_debug_ring(xhci, xhci->event_ring); 836 840 ep_index = xhci_get_endpoint_index(&urb->ep->desc); 837 841 ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; 838 - ep_ring = ep->ring; 842 + ep_ring = xhci_urb_to_transfer_ring(xhci, urb); 843 + if (!ep_ring) { 844 + ret = -EINVAL; 845 + goto done; 846 + } 847 + 839 848 xhci_dbg(xhci, "Endpoint ring:\n"); 840 849 xhci_debug_ring(xhci, ep_ring); 841 850 td = (struct xhci_td *) urb->hcpriv; ··· 1384 1383 * or it will attempt to resend it on the next doorbell ring. 1385 1384 */ 1386 1385 xhci_find_new_dequeue_state(xhci, udev->slot_id, 1387 - ep_index, ep->stopped_td, 1386 + ep_index, ep->stopped_stream, ep->stopped_td, 1388 1387 &deq_state); 1389 1388 1390 1389 /* HW with the reset endpoint quirk will use the saved dequeue state to ··· 1393 1392 if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) { 1394 1393 xhci_dbg(xhci, "Queueing new dequeue state\n"); 1395 1394 xhci_queue_new_dequeue_state(xhci, udev->slot_id, 1396 - ep_index, &deq_state); 1395 + ep_index, ep->stopped_stream, &deq_state); 1397 1396 } else { 1398 1397 /* Better hope no one uses the input context between now and the 1399 1398 * reset endpoint completion! 1399 + * XXX: No idea how this hardware will react when stream rings 1400 + * are enabled. 1400 1401 */ 1401 1402 xhci_dbg(xhci, "Setting up input context for " 1402 1403 "configure endpoint command\n");
+25 -1
drivers/usb/host/xhci.h
··· 444 444 445 445 /* Endpoint Target - bits 0:7 */ 446 446 #define EPI_TO_DB(p) (((p) + 1) & 0xff) 447 + #define STREAM_ID_TO_DB(p) (((p) & 0xffff) << 16) 447 448 448 449 449 450 /** ··· 715 714 /* The TRB that was last reported in a stopped endpoint ring */ 716 715 union xhci_trb *stopped_trb; 717 716 struct xhci_td *stopped_td; 717 + unsigned int stopped_stream; 718 718 /* Watchdog timer for stop endpoint command to cancel URBs */ 719 719 struct timer_list stop_cmd_timer; 720 720 int stop_cmds_pending; ··· 872 870 /* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */ 873 871 #define TRB_TO_EP_INDEX(p) ((((p) & (0x1f << 16)) >> 16) - 1) 874 872 #define EP_ID_FOR_TRB(p) ((((p) + 1) & 0x1f) << 16) 873 + 874 + /* Set TR Dequeue Pointer command TRB fields */ 875 + #define TRB_TO_STREAM_ID(p) ((((p) & (0xffff << 16)) >> 16)) 876 + #define STREAM_ID_FOR_TRB(p) ((((p)) & 0xffff) << 16) 875 877 876 878 877 879 /* Port Status Change Event TRB fields */ ··· 1046 1040 * if we own the TRB (if we are the consumer). See section 4.9.1. 
1047 1041 */ 1048 1042 u32 cycle_state; 1043 + unsigned int stream_id; 1049 1044 }; 1050 1045 1051 1046 struct xhci_erst_entry { ··· 1272 1265 void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); 1273 1266 char *xhci_get_slot_state(struct xhci_hcd *xhci, 1274 1267 struct xhci_container_ctx *ctx); 1268 + void xhci_dbg_ep_rings(struct xhci_hcd *xhci, 1269 + unsigned int slot_id, unsigned int ep_index, 1270 + struct xhci_virt_ep *ep); 1275 1271 1276 1272 /* xHCI memory management */ 1277 1273 void xhci_mem_cleanup(struct xhci_hcd *xhci); ··· 1312 1302 void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci, 1313 1303 struct xhci_ep_ctx *ep_ctx, 1314 1304 struct xhci_virt_ep *ep); 1305 + struct xhci_ring *xhci_dma_to_transfer_ring( 1306 + struct xhci_virt_ep *ep, 1307 + u64 address); 1308 + struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci, 1309 + struct urb *urb); 1310 + struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, 1311 + unsigned int slot_id, unsigned int ep_index, 1312 + unsigned int stream_id); 1313 + struct xhci_ring *xhci_stream_id_to_ring( 1314 + struct xhci_virt_device *dev, 1315 + unsigned int ep_index, 1316 + unsigned int stream_id); 1315 1317 struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, 1316 1318 bool allocate_in_ctx, bool allocate_completion, 1317 1319 gfp_t mem_flags); ··· 1396 1374 int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id); 1397 1375 void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, 1398 1376 unsigned int slot_id, unsigned int ep_index, 1399 - struct xhci_td *cur_td, struct xhci_dequeue_state *state); 1377 + unsigned int stream_id, struct xhci_td *cur_td, 1378 + struct xhci_dequeue_state *state); 1400 1379 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci, 1401 1380 unsigned int slot_id, unsigned int ep_index, 1381 + unsigned int stream_id, 1402 1382 struct xhci_dequeue_state *deq_state); 1403 1383 void 
xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, 1404 1384 struct usb_device *udev, unsigned int ep_index);