Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

USB: xhci: Control transfer support.

Allow device drivers to enqueue URBs to control endpoints on devices under
an xHCI host controller. Each control transfer is represented by a
series of Transfer Descriptors (TDs) written to an endpoint ring. There
is one TD for the Setup phase, (optionally) one TD for the Data phase, and
one TD for the Status phase.

Enqueue these TDs onto the endpoint ring that represents the control
endpoint. The host controller hardware will return an event on the event
ring that points to the (DMA) address of one of the TDs on the endpoint
ring. If the transfer was successful, the transfer event TRB will have a
completion code of success, and it will point to the Status phase TD.
Anything else is considered an error.

This should work for control endpoints besides the default endpoint, but
that hasn't been tested.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

Authored by Sarah Sharp; committed by Greg Kroah-Hartman.
d0e96f5a 6d65b78a

+506 -3
+93
drivers/usb/host/xhci-hcd.c
··· 509 509 510 510 /*-------------------------------------------------------------------------*/ 511 511 512 + /** 513 + * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and 514 + * HCDs. Find the index for an endpoint given its descriptor. Use the return 515 + * value to right shift 1 for the bitmask. 516 + * 517 + * Index = (epnum * 2) + direction - 1, 518 + * where direction = 0 for OUT, 1 for IN. 519 + * For control endpoints, the IN index is used (OUT index is unused), so 520 + * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2) 521 + */ 522 + unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc) 523 + { 524 + unsigned int index; 525 + if (usb_endpoint_xfer_control(desc)) 526 + index = (unsigned int) (usb_endpoint_num(desc)*2); 527 + else 528 + index = (unsigned int) (usb_endpoint_num(desc)*2) + 529 + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1; 530 + return index; 531 + } 532 + 533 + /* Returns 1 if the arguments are OK; 534 + * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 
535 + */ 536 + int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, 537 + struct usb_host_endpoint *ep, int check_ep, const char *func) { 538 + if (!hcd || (check_ep && !ep) || !udev) { 539 + printk(KERN_DEBUG "xHCI %s called with invalid args\n", 540 + func); 541 + return -EINVAL; 542 + } 543 + if (!udev->parent) { 544 + printk(KERN_DEBUG "xHCI %s called for root hub\n", 545 + func); 546 + return 0; 547 + } 548 + if (!udev->slot_id) { 549 + printk(KERN_DEBUG "xHCI %s called with unaddressed device\n", 550 + func); 551 + return -EINVAL; 552 + } 553 + return 1; 554 + } 555 + 556 + /* 557 + * non-error returns are a promise to giveback() the urb later 558 + * we drop ownership so next owner (or urb unlink) can get it 559 + */ 560 + int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) 561 + { 562 + struct xhci_hcd *xhci = hcd_to_xhci(hcd); 563 + unsigned long flags; 564 + int ret = 0; 565 + unsigned int slot_id, ep_index; 566 + 567 + if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0) 568 + return -EINVAL; 569 + 570 + slot_id = urb->dev->slot_id; 571 + ep_index = xhci_get_endpoint_index(&urb->ep->desc); 572 + /* Only support ep 0 control transfers for now */ 573 + if (ep_index != 0) { 574 + xhci_dbg(xhci, "WARN: urb submitted to unsupported ep %x\n", 575 + urb->ep->desc.bEndpointAddress); 576 + return -ENOSYS; 577 + } 578 + 579 + spin_lock_irqsave(&xhci->lock, flags); 580 + if (!xhci->devs || !xhci->devs[slot_id]) { 581 + if (!in_interrupt()) 582 + dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n"); 583 + return -EINVAL; 584 + } 585 + if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) { 586 + if (!in_interrupt()) 587 + xhci_dbg(xhci, "urb submitted during PCI suspend\n"); 588 + ret = -ESHUTDOWN; 589 + goto exit; 590 + } 591 + ret = queue_ctrl_tx(xhci, mem_flags, urb, slot_id, ep_index); 592 + exit: 593 + spin_unlock_irqrestore(&xhci->lock, flags); 594 + return ret; 595 + } 596 + 597 + /* 
Remove from hardware lists 598 + * completions normally happen asynchronously 599 + */ 600 + int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) 601 + { 602 + return -ENOSYS; 603 + } 604 + 512 605 /* 513 606 * At this point, the struct usb_device is about to go away, the device has 514 607 * disconnected, and all traffic has been stopped and the endpoints have been
+2
drivers/usb/host/xhci-mem.c
··· 141 141 if (!ring) 142 142 return 0; 143 143 144 + INIT_LIST_HEAD(&ring->td_list); 144 145 if (num_segs == 0) 145 146 return ring; 146 147 ··· 189 188 return 0; 190 189 } 191 190 191 + /* All the xhci_tds in the ring's TD list should be freed at this point */ 192 192 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 193 193 { 194 194 struct xhci_virt_device *dev;
+2
drivers/usb/host/xhci-pci.c
··· 111 111 /* 112 112 * managing i/o requests and associated device resources 113 113 */ 114 + .urb_enqueue = xhci_urb_enqueue, 115 + .urb_dequeue = xhci_urb_dequeue, 114 116 .alloc_dev = xhci_alloc_dev, 115 117 .free_dev = xhci_free_dev, 116 118 .address_device = xhci_address_device,
+383
drivers/usb/host/xhci-ring.c
··· 321 321 } 322 322 323 323 /* 324 + * This TD is defined by the TRBs starting at start_trb in start_seg and ending 325 + * at end_trb, which may be in another segment. If the suspect DMA address is a 326 + * TRB in this TD, this function returns that TRB's segment. Otherwise it 327 + * returns 0. 328 + */ 329 + static struct xhci_segment *trb_in_td( 330 + struct xhci_segment *start_seg, 331 + union xhci_trb *start_trb, 332 + union xhci_trb *end_trb, 333 + dma_addr_t suspect_dma) 334 + { 335 + dma_addr_t start_dma; 336 + dma_addr_t end_seg_dma; 337 + dma_addr_t end_trb_dma; 338 + struct xhci_segment *cur_seg; 339 + 340 + start_dma = trb_virt_to_dma(start_seg, start_trb); 341 + cur_seg = start_seg; 342 + 343 + do { 344 + /* 345 + * Last TRB is a link TRB (unless we start inserting links in 346 + * the middle, FIXME if you do) 347 + */ 348 + end_seg_dma = trb_virt_to_dma(cur_seg, &start_seg->trbs[TRBS_PER_SEGMENT - 2]); 349 + /* If the end TRB isn't in this segment, this is set to 0 */ 350 + end_trb_dma = trb_virt_to_dma(cur_seg, end_trb); 351 + 352 + if (end_trb_dma > 0) { 353 + /* The end TRB is in this segment, so suspect should be here */ 354 + if (start_dma <= end_trb_dma) { 355 + if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma) 356 + return cur_seg; 357 + } else { 358 + /* Case for one segment with 359 + * a TD wrapped around to the top 360 + */ 361 + if ((suspect_dma >= start_dma && 362 + suspect_dma <= end_seg_dma) || 363 + (suspect_dma >= cur_seg->dma && 364 + suspect_dma <= end_trb_dma)) 365 + return cur_seg; 366 + } 367 + return 0; 368 + } else { 369 + /* Might still be somewhere in this segment */ 370 + if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma) 371 + return cur_seg; 372 + } 373 + cur_seg = cur_seg->next; 374 + start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]); 375 + } while (1); 376 + 377 + } 378 + 379 + /* 380 + * If this function returns an error condition, it means it got a Transfer 381 + * event with a corrupted 
Slot ID, Endpoint ID, or TRB DMA address. 382 + * At this point, the host controller is probably hosed and should be reset. 383 + */ 384 + static int handle_tx_event(struct xhci_hcd *xhci, 385 + struct xhci_transfer_event *event) 386 + { 387 + struct xhci_virt_device *xdev; 388 + struct xhci_ring *ep_ring; 389 + int ep_index; 390 + struct xhci_td *td = 0; 391 + dma_addr_t event_dma; 392 + struct xhci_segment *event_seg; 393 + union xhci_trb *event_trb; 394 + struct urb *urb = NULL; 395 + int status = -EINPROGRESS; 396 + 397 + xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; 398 + if (!xdev) { 399 + xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n"); 400 + return -ENODEV; 401 + } 402 + 403 + /* Endpoint ID is 1 based, our index is zero based */ 404 + ep_index = TRB_TO_EP_ID(event->flags) - 1; 405 + ep_ring = xdev->ep_rings[ep_index]; 406 + if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 407 + xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 408 + return -ENODEV; 409 + } 410 + 411 + event_dma = event->buffer[0]; 412 + if (event->buffer[1] != 0) 413 + xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n"); 414 + 415 + /* This TRB should be in the TD at the head of this ring's TD list */ 416 + if (list_empty(&ep_ring->td_list)) { 417 + xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", 418 + TRB_TO_SLOT_ID(event->flags), ep_index); 419 + xhci_dbg(xhci, "Event TRB with TRB type ID %u\n", 420 + (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10); 421 + xhci_print_trb_offsets(xhci, (union xhci_trb *) event); 422 + urb = NULL; 423 + goto cleanup; 424 + } 425 + td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list); 426 + 427 + /* Is this a TRB in the currently executing TD? */ 428 + event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue, 429 + td->last_trb, event_dma); 430 + if (!event_seg) { 431 + /* HC is busted, give up! 
*/ 432 + xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n"); 433 + return -ESHUTDOWN; 434 + } 435 + event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)]; 436 + 437 + /* Now update the urb's actual_length and give back to the core */ 438 + /* Was this a control transfer? */ 439 + if (usb_endpoint_xfer_control(&td->urb->ep->desc)) { 440 + xhci_debug_trb(xhci, xhci->event_ring->dequeue); 441 + switch (GET_COMP_CODE(event->transfer_len)) { 442 + case COMP_SUCCESS: 443 + if (event_trb == ep_ring->dequeue) { 444 + xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n"); 445 + status = -ESHUTDOWN; 446 + } else if (event_trb != td->last_trb) { 447 + xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n"); 448 + status = -ESHUTDOWN; 449 + } else { 450 + xhci_dbg(xhci, "Successful control transfer!\n"); 451 + status = 0; 452 + } 453 + break; 454 + case COMP_SHORT_TX: 455 + xhci_warn(xhci, "WARN: short transfer on control ep\n"); 456 + status = -EREMOTEIO; 457 + break; 458 + case COMP_STALL: 459 + xhci_warn(xhci, "WARN: Stalled control ep\n"); 460 + status = -EPIPE; 461 + break; 462 + case COMP_TRB_ERR: 463 + xhci_warn(xhci, "WARN: TRB error on control ep\n"); 464 + status = -EILSEQ; 465 + break; 466 + case COMP_TX_ERR: 467 + xhci_warn(xhci, "WARN: transfer error on control ep\n"); 468 + status = -EPROTO; 469 + break; 470 + case COMP_DB_ERR: 471 + xhci_warn(xhci, "WARN: HC couldn't access mem fast enough on control TX\n"); 472 + status = -ENOSR; 473 + break; 474 + default: 475 + xhci_dbg(xhci, "ERROR Unknown event condition, HC probably busted\n"); 476 + goto cleanup; 477 + } 478 + /* 479 + * Did we transfer any data, despite the errors that might have 480 + * happened? I.e. did we get past the setup stage? 
481 + */ 482 + if (event_trb != ep_ring->dequeue) { 483 + /* The event was for the status stage */ 484 + if (event_trb == td->last_trb) { 485 + td->urb->actual_length = td->urb->transfer_buffer_length; 486 + } else { 487 + /* The event was for the data stage */ 488 + td->urb->actual_length = td->urb->transfer_buffer_length - 489 + TRB_LEN(event->transfer_len); 490 + } 491 + } 492 + while (ep_ring->dequeue != td->last_trb) 493 + inc_deq(xhci, ep_ring, false); 494 + inc_deq(xhci, ep_ring, false); 495 + 496 + /* Clean up the endpoint's TD list */ 497 + urb = td->urb; 498 + list_del(&td->td_list); 499 + kfree(td); 500 + } else { 501 + xhci_dbg(xhci, "FIXME do something for non-control transfers\n"); 502 + } 503 + cleanup: 504 + inc_deq(xhci, xhci->event_ring, true); 505 + set_hc_event_deq(xhci); 506 + 507 + if (urb) { 508 + usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb); 509 + spin_unlock(&xhci->lock); 510 + usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status); 511 + spin_lock(&xhci->lock); 512 + } 513 + return 0; 514 + } 515 + 516 + /* 324 517 * This function handles all OS-owned events on the event ring. It may drop 325 518 * xhci->lock between event processing (e.g. to pass up port status changes). 326 519 */ ··· 521 328 { 522 329 union xhci_trb *event; 523 330 int update_ptrs = 1; 331 + int ret; 524 332 525 333 if (!xhci->event_ring || !xhci->event_ring->dequeue) { 526 334 xhci->error_bitmask |= 1 << 1; ··· 545 351 handle_port_status(xhci, event); 546 352 update_ptrs = 0; 547 353 break; 354 + case TRB_TYPE(TRB_TRANSFER): 355 + ret = handle_tx_event(xhci, &event->trans_event); 356 + if (ret < 0) 357 + xhci->error_bitmask |= 1 << 9; 358 + else 359 + update_ptrs = 0; 360 + break; 548 361 default: 549 362 xhci->error_bitmask |= 1 << 3; 550 363 } ··· 564 363 /* Are there more items on the event ring? */ 565 364 handle_event(xhci); 566 365 } 366 + 367 + /**** Endpoint Ring Operations ****/ 567 368 568 369 /* 569 370 * Generic function for queueing a TRB on a ring. 
··· 584 381 trb->field[3] = field4; 585 382 inc_enq(xhci, ring, consumer); 586 383 } 384 + 385 + /* 386 + * Does various checks on the endpoint ring, and makes it ready to queue num_trbs. 387 + * FIXME allocate segments if the ring is full. 388 + */ 389 + static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, 390 + u32 ep_state, unsigned int num_trbs, gfp_t mem_flags) 391 + { 392 + /* Make sure the endpoint has been added to xHC schedule */ 393 + xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state); 394 + switch (ep_state) { 395 + case EP_STATE_DISABLED: 396 + /* 397 + * USB core changed config/interfaces without notifying us, 398 + * or hardware is reporting the wrong state. 399 + */ 400 + xhci_warn(xhci, "WARN urb submitted to disabled ep\n"); 401 + return -ENOENT; 402 + case EP_STATE_HALTED: 403 + case EP_STATE_ERROR: 404 + xhci_warn(xhci, "WARN waiting for halt or error on ep " 405 + "to be cleared\n"); 406 + /* FIXME event handling code for error needs to clear it */ 407 + /* XXX not sure if this should be -ENOENT or not */ 408 + return -EINVAL; 409 + case EP_STATE_STOPPED: 410 + case EP_STATE_RUNNING: 411 + break; 412 + default: 413 + xhci_err(xhci, "ERROR unknown endpoint state for ep\n"); 414 + /* 415 + * FIXME issue Configure Endpoint command to try to get the HC 416 + * back into a known state. 
417 + */ 418 + return -EINVAL; 419 + } 420 + if (!room_on_ring(xhci, ep_ring, num_trbs)) { 421 + /* FIXME allocate more room */ 422 + xhci_err(xhci, "ERROR no room on ep ring\n"); 423 + return -ENOMEM; 424 + } 425 + return 0; 426 + } 427 + 428 + int xhci_prepare_transfer(struct xhci_hcd *xhci, 429 + struct xhci_virt_device *xdev, 430 + unsigned int ep_index, 431 + unsigned int num_trbs, 432 + struct urb *urb, 433 + struct xhci_td **td, 434 + gfp_t mem_flags) 435 + { 436 + int ret; 437 + 438 + ret = prepare_ring(xhci, xdev->ep_rings[ep_index], 439 + xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, 440 + num_trbs, mem_flags); 441 + if (ret) 442 + return ret; 443 + *td = kzalloc(sizeof(struct xhci_td), mem_flags); 444 + if (!*td) 445 + return -ENOMEM; 446 + INIT_LIST_HEAD(&(*td)->td_list); 447 + 448 + ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb); 449 + if (unlikely(ret)) { 450 + kfree(*td); 451 + return ret; 452 + } 453 + 454 + (*td)->urb = urb; 455 + urb->hcpriv = (void *) (*td); 456 + /* Add this TD to the tail of the endpoint ring's TD list */ 457 + list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list); 458 + 459 + return 0; 460 + } 461 + 462 + /* Caller must have locked xhci->lock */ 463 + int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 464 + struct urb *urb, int slot_id, unsigned int ep_index) 465 + { 466 + struct xhci_ring *ep_ring; 467 + int num_trbs; 468 + int ret; 469 + struct usb_ctrlrequest *setup; 470 + struct xhci_generic_trb *start_trb; 471 + int start_cycle; 472 + u32 field; 473 + struct xhci_td *td; 474 + 475 + ep_ring = xhci->devs[slot_id]->ep_rings[ep_index]; 476 + 477 + /* 478 + * Need to copy setup packet into setup TRB, so we can't use the setup 479 + * DMA address. 
480 + */ 481 + if (!urb->setup_packet) 482 + return -EINVAL; 483 + 484 + if (!in_interrupt()) 485 + xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n", 486 + slot_id, ep_index); 487 + /* 1 TRB for setup, 1 for status */ 488 + num_trbs = 2; 489 + /* 490 + * Don't need to check if we need additional event data and normal TRBs, 491 + * since data in control transfers will never get bigger than 16MB 492 + * XXX: can we get a buffer that crosses 64KB boundaries? 493 + */ 494 + if (urb->transfer_buffer_length > 0) 495 + num_trbs++; 496 + ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs, 497 + urb, &td, mem_flags); 498 + if (ret < 0) 499 + return ret; 500 + 501 + /* 502 + * Don't give the first TRB to the hardware (by toggling the cycle bit) 503 + * until we've finished creating all the other TRBs. The ring's cycle 504 + * state may change as we enqueue the other TRBs, so save it too. 505 + */ 506 + start_trb = &ep_ring->enqueue->generic; 507 + start_cycle = ep_ring->cycle_state; 508 + 509 + /* Queue setup TRB - see section 6.4.1.2.1 */ 510 + /* FIXME better way to translate setup_packet into two u32 fields? */ 511 + setup = (struct usb_ctrlrequest *) urb->setup_packet; 512 + queue_trb(xhci, ep_ring, false, 513 + /* FIXME endianness is probably going to bite my ass here. 
*/ 514 + setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16, 515 + setup->wIndex | setup->wLength << 16, 516 + TRB_LEN(8) | TRB_INTR_TARGET(0), 517 + /* Immediate data in pointer */ 518 + TRB_IDT | TRB_TYPE(TRB_SETUP)); 519 + 520 + /* If there's data, queue data TRBs */ 521 + field = 0; 522 + if (urb->transfer_buffer_length > 0) { 523 + if (setup->bRequestType & USB_DIR_IN) 524 + field |= TRB_DIR_IN; 525 + queue_trb(xhci, ep_ring, false, 526 + lower_32_bits(urb->transfer_dma), 527 + upper_32_bits(urb->transfer_dma), 528 + TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0), 529 + /* Event on short tx */ 530 + field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state); 531 + } 532 + 533 + /* Save the DMA address of the last TRB in the TD */ 534 + td->last_trb = ep_ring->enqueue; 535 + 536 + /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */ 537 + /* If the device sent data, the status stage is an OUT transfer */ 538 + if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN) 539 + field = 0; 540 + else 541 + field = TRB_DIR_IN; 542 + queue_trb(xhci, ep_ring, false, 543 + 0, 544 + 0, 545 + TRB_INTR_TARGET(0), 546 + /* Event on completion */ 547 + field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state); 548 + 549 + /* 550 + * Pass all the TRBs to the hardware at once and make sure this write 551 + * isn't reordered. 552 + */ 553 + wmb(); 554 + start_trb->field[3] |= start_cycle; 555 + field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK; 556 + xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]); 557 + /* Flush PCI posted writes */ 558 + xhci_readl(xhci, &xhci->dba->doorbell[slot_id]); 559 + 560 + return 0; 561 + } 562 + 563 + /**** Command Ring Operations ****/ 587 564 588 565 /* Generic function for queueing a command TRB on the command ring */ 589 566 static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
+26 -3
drivers/usb/host/xhci.h
··· 448 448 #define DB_STREAM_ID_HOST 0x0 449 449 #define DB_MASK (0xff << 8) 450 450 451 + /* Endpoint Target - bits 0:7 */ 452 + #define EPI_TO_DB(p) (((p) + 1) & 0xff) 453 + 451 454 452 455 /** 453 456 * struct xhci_slot_ctx ··· 555 552 * 4 - TRB error 556 553 * 5-7 - reserved 557 554 */ 558 - #define EP_STATE (0xf) 555 + #define EP_STATE_MASK (0xf) 556 + #define EP_STATE_DISABLED 0 557 + #define EP_STATE_RUNNING 1 558 + #define EP_STATE_HALTED 2 559 + #define EP_STATE_STOPPED 3 560 + #define EP_STATE_ERROR 4 559 561 /* Mult - Max number of burtst within an interval, in EP companion desc. */ 560 562 #define EP_MULT(p) ((p & 0x3) << 8) 561 563 /* bits 10:14 are Max Primary Streams */ 562 564 /* bit 15 is Linear Stream Array */ 563 565 /* Interval - period between requests to an endpoint - 125u increments. */ 564 - #define EP_INTERVAL (0xff << 16) 566 + #define EP_INTERVAL (0xff << 16) 565 567 566 568 /* ep_info2 bitmasks */ 567 569 /* ··· 626 618 dma_addr_t in_ctx_dma; 627 619 /* FIXME when stream support is added */ 628 620 struct xhci_ring *ep_rings[31]; 629 - dma_addr_t ep_dma[31]; 630 621 /* Status of the last command issued for this device */ 631 622 u32 cmd_status; 632 623 }; ··· 663 656 /* This field is interpreted differently based on the type of TRB */ 664 657 u32 flags; 665 658 } __attribute__ ((packed)); 659 + 660 + /** Transfer Event bit fields **/ 661 + #define TRB_TO_EP_ID(p) (((p) >> 16) & 0x1f) 666 662 667 663 /* Completion Code - only applicable for some types of TRBs */ 668 664 #define COMP_CODE_MASK (0xff << 24) ··· 887 877 #define TRBS_PER_SEGMENT 64 888 878 #define SEGMENT_SIZE (TRBS_PER_SEGMENT*16) 889 879 880 + struct xhci_td { 881 + struct list_head td_list; 882 + struct urb *urb; 883 + union xhci_trb *last_trb; 884 + }; 885 + 890 886 struct xhci_segment { 891 887 union xhci_trb *trbs; 892 888 /* private to HCD */ ··· 908 892 union xhci_trb *dequeue; 909 893 struct xhci_segment *deq_seg; 910 894 unsigned int deq_updates; 895 + struct 
list_head td_list; 911 896 /* 912 897 * Write the cycle state into the TRB cycle field to give ownership of 913 898 * the TRB to the host controller (if we are the producer), or to check ··· 1059 1042 void xhci_print_registers(struct xhci_hcd *xhci); 1060 1043 void xhci_dbg_regs(struct xhci_hcd *xhci); 1061 1044 void xhci_print_run_regs(struct xhci_hcd *xhci); 1045 + void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb); 1046 + void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb); 1062 1047 void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg); 1063 1048 void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring); 1064 1049 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); ··· 1074 1055 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); 1075 1056 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); 1076 1057 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev); 1058 + unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc); 1077 1059 1078 1060 #ifdef CONFIG_PCI 1079 1061 /* xHCI PCI glue */ ··· 1094 1074 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); 1095 1075 void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); 1096 1076 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); 1077 + int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags); 1078 + int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); 1097 1079 1098 1080 /* xHCI ring, segment, TRB, and TD functions */ 1099 1081 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); ··· 1105 1083 void set_hc_event_deq(struct xhci_hcd *xhci); 1106 1084 int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id); 1107 1085 int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); 1086 + int 
queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index); 1108 1087 1109 1088 /* xHCI roothub code */ 1110 1089 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,