Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

USB: xhci: Allocate and address USB devices

xHCI needs to get a "Slot ID" from the host controller and allocate other
data structures for every USB device. Make usb_alloc_dev() and
usb_release_dev() allocate and free these device structures. After
setting up the xHC device structures, usb_alloc_dev() must wait for the
hardware to respond to an Enable Slot command. usb_release_dev() fires off
a Disable Slot command and does not wait for it to complete.

When the USB core wants to choose an address for the device, the xHCI
driver must issue a Set Address command and wait for an event for that
command.

Signed-off-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Sarah Sharp and committed by
Greg Kroah-Hartman
3ffbba95 c6515272

+590 -29
+79
drivers/usb/host/xhci-dbg.c
··· 410 410 val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[1]); 411 411 xhci_dbg(xhci, "// xHC command ring deq ptr high bits = 0x%x\n", val); 412 412 } 413 + 414 + void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) 415 + { 416 + int i, j; 417 + int last_ep_ctx = 31; 418 + /* Fields are 32 bits wide, DMA addresses are in bytes */ 419 + int field_size = 32 / 8; 420 + 421 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - drop flags\n", 422 + (unsigned int) &ctx->drop_flags, 423 + dma, ctx->drop_flags); 424 + dma += field_size; 425 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - add flags\n", 426 + (unsigned int) &ctx->add_flags, 427 + dma, ctx->add_flags); 428 + dma += field_size; 429 + for (i = 0; i > 6; ++i) { 430 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n", 431 + (unsigned int) &ctx->rsvd[i], 432 + dma, ctx->rsvd[i], i); 433 + dma += field_size; 434 + } 435 + 436 + xhci_dbg(xhci, "Slot Context:\n"); 437 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info\n", 438 + (unsigned int) &ctx->slot.dev_info, 439 + dma, ctx->slot.dev_info); 440 + dma += field_size; 441 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_info2\n", 442 + (unsigned int) &ctx->slot.dev_info2, 443 + dma, ctx->slot.dev_info2); 444 + dma += field_size; 445 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tt_info\n", 446 + (unsigned int) &ctx->slot.tt_info, 447 + dma, ctx->slot.tt_info); 448 + dma += field_size; 449 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - dev_state\n", 450 + (unsigned int) &ctx->slot.dev_state, 451 + dma, ctx->slot.dev_state); 452 + dma += field_size; 453 + for (i = 0; i > 4; ++i) { 454 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n", 455 + (unsigned int) &ctx->slot.reserved[i], 456 + dma, ctx->slot.reserved[i], i); 457 + dma += field_size; 458 + } 459 + 460 + if (last_ep < 31) 461 + last_ep_ctx = last_ep + 1; 462 + for (i = 0; i < last_ep_ctx; ++i) { 463 + 
xhci_dbg(xhci, "Endpoint %02d Context:\n", i); 464 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info\n", 465 + (unsigned int) &ctx->ep[i].ep_info, 466 + dma, ctx->ep[i].ep_info); 467 + dma += field_size; 468 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - ep_info2\n", 469 + (unsigned int) &ctx->ep[i].ep_info2, 470 + dma, ctx->ep[i].ep_info2); 471 + dma += field_size; 472 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[0]\n", 473 + (unsigned int) &ctx->ep[i].deq[0], 474 + dma, ctx->ep[i].deq[0]); 475 + dma += field_size; 476 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - deq[1]\n", 477 + (unsigned int) &ctx->ep[i].deq[1], 478 + dma, ctx->ep[i].deq[1]); 479 + dma += field_size; 480 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - tx_info\n", 481 + (unsigned int) &ctx->ep[i].tx_info, 482 + dma, ctx->ep[i].tx_info); 483 + dma += field_size; 484 + for (j = 0; j < 3; ++j) { 485 + xhci_dbg(xhci, "@%08x (virt) @%08x (dma) %#08x - rsvd[%d]\n", 486 + (unsigned int) &ctx->ep[i].reserved[j], 487 + dma, ctx->ep[i].reserved[j], j); 488 + dma += field_size; 489 + } 490 + } 491 + }
+201
drivers/usb/host/xhci-hcd.c
··· 318 318 xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg); 319 319 xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); 320 320 xhci_dbg_cmd_ptrs(xhci); 321 + for (i = 0; i < MAX_HC_SLOTS; ++i) { 322 + if (xhci->devs[i]) { 323 + for (j = 0; j < 31; ++j) { 324 + if (xhci->devs[i]->ep_rings[j]) { 325 + xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j); 326 + xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg); 327 + } 328 + } 329 + } 330 + } 321 331 322 332 if (xhci->noops_submitted != NUM_TEST_NOOPS) 323 333 if (setup_one_noop(xhci)) ··· 508 498 } 509 499 510 500 /*-------------------------------------------------------------------------*/ 501 + 502 + /* 503 + * At this point, the struct usb_device is about to go away, the device has 504 + * disconnected, and all traffic has been stopped and the endpoints have been 505 + * disabled. Free any HC data structures associated with that device. 506 + */ 507 + void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 508 + { 509 + struct xhci_hcd *xhci = hcd_to_xhci(hcd); 510 + unsigned long flags; 511 + 512 + if (udev->slot_id == 0) 513 + return; 514 + 515 + spin_lock_irqsave(&xhci->lock, flags); 516 + if (queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) { 517 + spin_unlock_irqrestore(&xhci->lock, flags); 518 + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 519 + return; 520 + } 521 + ring_cmd_db(xhci); 522 + spin_unlock_irqrestore(&xhci->lock, flags); 523 + /* 524 + * Event command completion handler will free any data structures 525 + * associated with the slot 526 + */ 527 + } 528 + 529 + /* 530 + * Returns 0 if the xHC ran out of device slots, the Enable Slot command 531 + * timed out, or allocating memory failed. Returns 1 on success. 
532 + */ 533 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) 534 + { 535 + struct xhci_hcd *xhci = hcd_to_xhci(hcd); 536 + unsigned long flags; 537 + int timeleft; 538 + int ret; 539 + 540 + spin_lock_irqsave(&xhci->lock, flags); 541 + ret = queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); 542 + if (ret) { 543 + spin_unlock_irqrestore(&xhci->lock, flags); 544 + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 545 + return 0; 546 + } 547 + ring_cmd_db(xhci); 548 + spin_unlock_irqrestore(&xhci->lock, flags); 549 + 550 + /* XXX: how much time for xHC slot assignment? */ 551 + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 552 + USB_CTRL_SET_TIMEOUT); 553 + if (timeleft <= 0) { 554 + xhci_warn(xhci, "%s while waiting for a slot\n", 555 + timeleft == 0 ? "Timeout" : "Signal"); 556 + /* FIXME cancel the enable slot request */ 557 + return 0; 558 + } 559 + 560 + spin_lock_irqsave(&xhci->lock, flags); 561 + if (!xhci->slot_id) { 562 + xhci_err(xhci, "Error while assigning device slot ID\n"); 563 + spin_unlock_irqrestore(&xhci->lock, flags); 564 + return 0; 565 + } 566 + if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) { 567 + /* Disable slot, if we can do it without mem alloc */ 568 + xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n"); 569 + if (!queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) 570 + ring_cmd_db(xhci); 571 + spin_unlock_irqrestore(&xhci->lock, flags); 572 + return 0; 573 + } 574 + udev->slot_id = xhci->slot_id; 575 + /* Is this a LS or FS device under a HS hub? */ 576 + /* Hub or peripherial? */ 577 + spin_unlock_irqrestore(&xhci->lock, flags); 578 + return 1; 579 + } 580 + 581 + /* 582 + * Issue an Address Device command (which will issue a SetAddress request to 583 + * the device). 584 + * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so 585 + * we should only issue and wait on one address command at the same time. 
586 + * 587 + * We add one to the device address issued by the hardware because the USB core 588 + * uses address 1 for the root hubs (even though they're not really devices). 589 + */ 590 + int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) 591 + { 592 + unsigned long flags; 593 + int timeleft; 594 + struct xhci_virt_device *virt_dev; 595 + int ret = 0; 596 + struct xhci_hcd *xhci = hcd_to_xhci(hcd); 597 + u32 temp; 598 + 599 + if (!udev->slot_id) { 600 + xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); 601 + return -EINVAL; 602 + } 603 + 604 + spin_lock_irqsave(&xhci->lock, flags); 605 + virt_dev = xhci->devs[udev->slot_id]; 606 + 607 + /* If this is a Set Address to an unconfigured device, setup ep 0 */ 608 + if (!udev->config) 609 + xhci_setup_addressable_virt_dev(xhci, udev); 610 + /* Otherwise, assume the core has the device configured how it wants */ 611 + 612 + ret = queue_address_device(xhci, virt_dev->in_ctx_dma, udev->slot_id); 613 + if (ret) { 614 + spin_unlock_irqrestore(&xhci->lock, flags); 615 + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); 616 + return ret; 617 + } 618 + ring_cmd_db(xhci); 619 + spin_unlock_irqrestore(&xhci->lock, flags); 620 + 621 + /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */ 622 + timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, 623 + USB_CTRL_SET_TIMEOUT); 624 + /* FIXME: From section 4.3.4: "Software shall be responsible for timing 625 + * the SetAddress() "recovery interval" required by USB and aborting the 626 + * command on a timeout. 627 + */ 628 + if (timeleft <= 0) { 629 + xhci_warn(xhci, "%s while waiting for a slot\n", 630 + timeleft == 0 ? 
"Timeout" : "Signal"); 631 + /* FIXME cancel the address device command */ 632 + return -ETIME; 633 + } 634 + 635 + spin_lock_irqsave(&xhci->lock, flags); 636 + switch (virt_dev->cmd_status) { 637 + case COMP_CTX_STATE: 638 + case COMP_EBADSLT: 639 + xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n", 640 + udev->slot_id); 641 + ret = -EINVAL; 642 + break; 643 + case COMP_TX_ERR: 644 + dev_warn(&udev->dev, "Device not responding to set address.\n"); 645 + ret = -EPROTO; 646 + break; 647 + case COMP_SUCCESS: 648 + xhci_dbg(xhci, "Successful Address Device command\n"); 649 + break; 650 + default: 651 + xhci_err(xhci, "ERROR: unexpected command completion " 652 + "code 0x%x.\n", virt_dev->cmd_status); 653 + ret = -EINVAL; 654 + break; 655 + } 656 + if (ret) { 657 + spin_unlock_irqrestore(&xhci->lock, flags); 658 + return ret; 659 + } 660 + temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[0]); 661 + xhci_dbg(xhci, "Op regs DCBAA ptr[0] = %#08x\n", temp); 662 + temp = xhci_readl(xhci, &xhci->op_regs->dcbaa_ptr[1]); 663 + xhci_dbg(xhci, "Op regs DCBAA ptr[1] = %#08x\n", temp); 664 + xhci_dbg(xhci, "Slot ID %d dcbaa entry[0] @%08x = %#08x\n", 665 + udev->slot_id, 666 + (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id], 667 + xhci->dcbaa->dev_context_ptrs[2*udev->slot_id]); 668 + xhci_dbg(xhci, "Slot ID %d dcbaa entry[1] @%08x = %#08x\n", 669 + udev->slot_id, 670 + (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1], 671 + xhci->dcbaa->dev_context_ptrs[2*udev->slot_id+1]); 672 + xhci_dbg(xhci, "Output Context DMA address = %#08x\n", 673 + virt_dev->out_ctx_dma); 674 + xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 675 + xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); 676 + xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 677 + xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); 678 + /* 679 + * USB core uses address 1 for the roothubs, so we add one to the 680 + * address 
given back to us by the HC. 681 + */ 682 + udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; 683 + /* FIXME: Zero the input context control for later use? */ 684 + spin_unlock_irqrestore(&xhci->lock, flags); 685 + 686 + xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 687 + /* XXX Meh, not sure if anyone else but choose_address uses this. */ 688 + set_bit(udev->devnum, udev->bus->devmap.devicemap); 689 + 690 + return 0; 691 + } 511 692 512 693 int xhci_get_frame(struct usb_hcd *hcd) 513 694 {
+199 -5
drivers/usb/host/xhci-mem.c
··· 188 188 return 0; 189 189 } 190 190 191 + void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 192 + { 193 + struct xhci_virt_device *dev; 194 + int i; 195 + 196 + /* Slot ID 0 is reserved */ 197 + if (slot_id == 0 || !xhci->devs[slot_id]) 198 + return; 199 + 200 + dev = xhci->devs[slot_id]; 201 + xhci->dcbaa->dev_context_ptrs[2*slot_id] = 0; 202 + xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; 203 + if (!dev) 204 + return; 205 + 206 + for (i = 0; i < 31; ++i) 207 + if (dev->ep_rings[i]) 208 + xhci_ring_free(xhci, dev->ep_rings[i]); 209 + 210 + if (dev->in_ctx) 211 + dma_pool_free(xhci->device_pool, 212 + dev->in_ctx, dev->in_ctx_dma); 213 + if (dev->out_ctx) 214 + dma_pool_free(xhci->device_pool, 215 + dev->out_ctx, dev->out_ctx_dma); 216 + kfree(xhci->devs[slot_id]); 217 + xhci->devs[slot_id] = 0; 218 + } 219 + 220 + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 221 + struct usb_device *udev, gfp_t flags) 222 + { 223 + dma_addr_t dma; 224 + struct xhci_virt_device *dev; 225 + 226 + /* Slot ID 0 is reserved */ 227 + if (slot_id == 0 || xhci->devs[slot_id]) { 228 + xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); 229 + return 0; 230 + } 231 + 232 + xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); 233 + if (!xhci->devs[slot_id]) 234 + return 0; 235 + dev = xhci->devs[slot_id]; 236 + 237 + /* Allocate the (output) device context that will be used in the HC */ 238 + dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 239 + if (!dev->out_ctx) 240 + goto fail; 241 + dev->out_ctx_dma = dma; 242 + xhci_dbg(xhci, "Slot %d output ctx = 0x%x (dma)\n", slot_id, dma); 243 + memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); 244 + 245 + /* Allocate the (input) device context for address device command */ 246 + dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 247 + if (!dev->in_ctx) 248 + goto fail; 249 + dev->in_ctx_dma = dma; 250 + xhci_dbg(xhci, "Slot %d input ctx = 0x%x (dma)\n", slot_id, dma); 251 + 
memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); 252 + 253 + /* Allocate endpoint 0 ring */ 254 + dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); 255 + if (!dev->ep_rings[0]) 256 + goto fail; 257 + 258 + /* 259 + * Point to output device context in dcbaa; skip the output control 260 + * context, which is eight 32 bit fields (or 32 bytes long) 261 + */ 262 + xhci->dcbaa->dev_context_ptrs[2*slot_id] = 263 + (u32) dev->out_ctx_dma + (32); 264 + xhci_dbg(xhci, "Set slot id %d dcbaa entry 0x%x to 0x%x\n", 265 + slot_id, 266 + (unsigned int) &xhci->dcbaa->dev_context_ptrs[2*slot_id], 267 + dev->out_ctx_dma); 268 + xhci->dcbaa->dev_context_ptrs[2*slot_id + 1] = 0; 269 + 270 + return 1; 271 + fail: 272 + xhci_free_virt_device(xhci, slot_id); 273 + return 0; 274 + } 275 + 276 + /* Setup an xHCI virtual device for a Set Address command */ 277 + int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) 278 + { 279 + struct xhci_virt_device *dev; 280 + struct xhci_ep_ctx *ep0_ctx; 281 + struct usb_device *top_dev; 282 + 283 + dev = xhci->devs[udev->slot_id]; 284 + /* Slot ID 0 is reserved */ 285 + if (udev->slot_id == 0 || !dev) { 286 + xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", 287 + udev->slot_id); 288 + return -EINVAL; 289 + } 290 + ep0_ctx = &dev->in_ctx->ep[0]; 291 + 292 + /* 2) New slot context and endpoint 0 context are valid*/ 293 + dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 294 + 295 + /* 3) Only the control endpoint is valid - one endpoint context */ 296 + dev->in_ctx->slot.dev_info |= LAST_CTX(1); 297 + 298 + switch (udev->speed) { 299 + case USB_SPEED_SUPER: 300 + dev->in_ctx->slot.dev_info |= (u32) udev->route; 301 + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; 302 + break; 303 + case USB_SPEED_HIGH: 304 + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; 305 + break; 306 + case USB_SPEED_FULL: 307 + dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; 308 + break; 309 + case USB_SPEED_LOW: 310 + 
dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; 311 + break; 312 + case USB_SPEED_VARIABLE: 313 + xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); 314 + return -EINVAL; 315 + break; 316 + default: 317 + /* Speed was set earlier, this shouldn't happen. */ 318 + BUG(); 319 + } 320 + /* Find the root hub port this device is under */ 321 + for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 322 + top_dev = top_dev->parent) 323 + /* Found device below root hub */; 324 + dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 325 + xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 326 + 327 + /* Is this a LS/FS device under a HS hub? */ 328 + /* 329 + * FIXME: I don't think this is right, where does the TT info for the 330 + * roothub or parent hub come from? 331 + */ 332 + if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 333 + udev->tt) { 334 + dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; 335 + dev->in_ctx->slot.tt_info |= udev->ttport << 8; 336 + } 337 + xhci_dbg(xhci, "udev->tt = 0x%x\n", (unsigned int) udev->tt); 338 + xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); 339 + 340 + /* Step 4 - ring already allocated */ 341 + /* Step 5 */ 342 + ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); 343 + /* 344 + * See section 4.3 bullet 6: 345 + * The default Max Packet size for ep0 is "8 bytes for a USB2 346 + * LS/FS/HS device or 512 bytes for a USB3 SS device" 347 + * XXX: Not sure about wireless USB devices. 
348 + */ 349 + if (udev->speed == USB_SPEED_SUPER) 350 + ep0_ctx->ep_info2 |= MAX_PACKET(512); 351 + else 352 + ep0_ctx->ep_info2 |= MAX_PACKET(8); 353 + /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ 354 + ep0_ctx->ep_info2 |= MAX_BURST(0); 355 + ep0_ctx->ep_info2 |= ERROR_COUNT(3); 356 + 357 + ep0_ctx->deq[0] = 358 + dev->ep_rings[0]->first_seg->dma; 359 + ep0_ctx->deq[0] |= dev->ep_rings[0]->cycle_state; 360 + ep0_ctx->deq[1] = 0; 361 + 362 + /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ 363 + 364 + return 0; 365 + } 366 + 191 367 void xhci_mem_cleanup(struct xhci_hcd *xhci) 192 368 { 193 369 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 194 370 int size; 195 - 196 - /* XXX: Free all the segments in the various rings */ 371 + int i; 197 372 198 373 /* Free the Event Ring Segment Table and the actual Event Ring */ 199 374 xhci_writel(xhci, 0, &xhci->ir_set->erst_size); ··· 393 218 xhci_ring_free(xhci, xhci->cmd_ring); 394 219 xhci->cmd_ring = NULL; 395 220 xhci_dbg(xhci, "Freed command ring\n"); 221 + 222 + for (i = 1; i < MAX_HC_SLOTS; ++i) 223 + xhci_free_virt_device(xhci, i); 224 + 396 225 if (xhci->segment_pool) 397 226 dma_pool_destroy(xhci->segment_pool); 398 227 xhci->segment_pool = NULL; 399 228 xhci_dbg(xhci, "Freed segment pool\n"); 229 + 230 + if (xhci->device_pool) 231 + dma_pool_destroy(xhci->device_pool); 232 + xhci->device_pool = NULL; 233 + xhci_dbg(xhci, "Freed device context pool\n"); 234 + 400 235 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); 401 236 xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); 402 237 if (xhci->dcbaa) 403 238 pci_free_consistent(pdev, sizeof(*xhci->dcbaa), 404 239 xhci->dcbaa, xhci->dcbaa->dma); 405 240 xhci->dcbaa = NULL; 241 + 406 242 xhci->page_size = 0; 407 243 xhci->page_shift = 0; 408 244 } ··· 466 280 goto fail; 467 281 memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); 468 282 xhci->dcbaa->dma = dma; 469 - xhci_dbg(xhci, "// Setting device context 
base array address to 0x%x\n", 470 - xhci->dcbaa->dma); 283 + xhci_dbg(xhci, "// Device context base array address = 0x%x (DMA), 0x%x (virt)\n", 284 + xhci->dcbaa->dma, (unsigned int) xhci->dcbaa); 471 285 xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); 472 286 xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); 473 287 ··· 479 293 */ 480 294 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 481 295 SEGMENT_SIZE, 64, xhci->page_size); 482 - if (!xhci->segment_pool) 296 + /* See Table 46 and Note on Figure 55 */ 297 + /* FIXME support 64-byte contexts */ 298 + xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 299 + sizeof(struct xhci_device_control), 300 + 64, xhci->page_size); 301 + if (!xhci->segment_pool || !xhci->device_pool) 483 302 goto fail; 484 303 485 304 /* Set up the command ring to have one segments for now. */ ··· 576 385 * something other than the default (~1ms minimum between interrupts). 577 386 * See section 5.5.1.2. 578 387 */ 388 + init_completion(&xhci->addr_dev); 389 + for (i = 0; i < MAX_HC_SLOTS; ++i) 390 + xhci->devs[i] = 0; 579 391 580 392 return 0; 581 393 fail:
+7
drivers/usb/host/xhci-pci.c
··· 109 109 .shutdown = xhci_shutdown, 110 110 111 111 /* 112 + * managing i/o requests and associated device resources 113 + */ 114 + .alloc_dev = xhci_alloc_dev, 115 + .free_dev = xhci_free_dev, 116 + .address_device = xhci_address_device, 117 + 118 + /* 112 119 * scheduling support 113 120 */ 114 121 .get_frame_number = xhci_get_frame,
+30 -4
drivers/usb/host/xhci-ring.c
··· 252 252 static void handle_cmd_completion(struct xhci_hcd *xhci, 253 253 struct xhci_event_cmd *event) 254 254 { 255 + int slot_id = TRB_TO_SLOT_ID(event->flags); 255 256 u64 cmd_dma; 256 257 dma_addr_t cmd_dequeue_dma; 257 - 258 - /* Check completion code */ 259 - if (GET_COMP_CODE(event->status) != COMP_SUCCESS) 260 - xhci_dbg(xhci, "WARN: unsuccessful no-op command\n"); 261 258 262 259 cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0]; 263 260 cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg, ··· 270 273 return; 271 274 } 272 275 switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) { 276 + case TRB_TYPE(TRB_ENABLE_SLOT): 277 + if (GET_COMP_CODE(event->status) == COMP_SUCCESS) 278 + xhci->slot_id = slot_id; 279 + else 280 + xhci->slot_id = 0; 281 + complete(&xhci->addr_dev); 282 + break; 283 + case TRB_TYPE(TRB_DISABLE_SLOT): 284 + if (xhci->devs[slot_id]) 285 + xhci_free_virt_device(xhci, slot_id); 286 + break; 287 + case TRB_TYPE(TRB_ADDR_DEV): 288 + xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status); 289 + complete(&xhci->addr_dev); 290 + break; 273 291 case TRB_TYPE(TRB_CMD_NOOP): 274 292 ++xhci->noops_handled; 275 293 break; ··· 411 399 return NULL; 412 400 xhci->noops_submitted++; 413 401 return ring_cmd_db; 402 + } 403 + 404 + /* Queue a slot enable or disable request on the command ring */ 405 + int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id) 406 + { 407 + return queue_command(xhci, 0, 0, 0, 408 + TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id)); 409 + } 410 + 411 + /* Queue an address device command TRB */ 412 + int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id) 413 + { 414 + return queue_command(xhci, in_ctx_ptr, 0, 0, 415 + TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)); 414 416 }
+74 -20
drivers/usb/host/xhci.h
··· 285 285 * 4 - super speed 286 286 * 5-15 reserved 287 287 */ 288 - #define DEV_SPEED_MASK (0xf<<10) 288 + #define DEV_SPEED_MASK (0xf << 10) 289 + #define XDEV_FS (0x1 << 10) 290 + #define XDEV_LS (0x2 << 10) 291 + #define XDEV_HS (0x3 << 10) 292 + #define XDEV_SS (0x4 << 10) 289 293 #define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10)) 290 - #define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == (0x1<<10)) 291 - #define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == (0x2<<10)) 292 - #define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == (0x3<<10)) 293 - #define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == (0x4<<10)) 294 + #define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS) 295 + #define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS) 296 + #define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS) 297 + #define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS) 298 + /* Bits 20:23 in the Slot Context are the speed for the device */ 299 + #define SLOT_SPEED_FS (XDEV_FS << 10) 300 + #define SLOT_SPEED_LS (XDEV_LS << 10) 301 + #define SLOT_SPEED_HS (XDEV_HS << 10) 302 + #define SLOT_SPEED_SS (XDEV_SS << 10) 294 303 /* Port Indicator Control */ 295 304 #define PORT_LED_OFF (0 << 14) 296 305 #define PORT_LED_AMBER (1 << 14) ··· 480 471 /* Set if the device is a hub - bit 26 */ 481 472 #define DEV_HUB (0x1 << 26) 482 473 /* Index of the last valid endpoint context in this device context - 27:31 */ 483 - #define LAST_EP_MASK (0x1f << 27) 484 - #define LAST_EP(p) ((p) << 27) 474 + #define LAST_CTX_MASK (0x1f << 27) 475 + #define LAST_CTX(p) ((p) << 27) 476 + #define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1) 477 + /* Plus one for the slot context flag */ 478 + #define EPI_TO_FLAG(p) (1 << ((p) + 1)) 479 + #define SLOT_FLAG (1 << 0) 480 + #define EP0_FLAG (1 << 1) 485 481 486 482 /* dev_info2 bitmasks */ 487 483 /* Max Exit Latency (ms) - worst case time to wake up all links in dev path */ 488 484 #define MAX_EXIT (0xffff) 489 485 /* Root hub port number 
that is needed to access the USB device */ 490 - #define ROOT_HUB_PORT (0xff << 16) 486 + #define ROOT_HUB_PORT(p) (((p) & 0xff) << 16) 491 487 492 488 /* tt_info bitmasks */ 493 489 /* ··· 509 495 510 496 /* dev_state bitmasks */ 511 497 /* USB device address - assigned by the HC */ 512 - #define DEV_ADDR (0xff) 498 + #define DEV_ADDR_MASK (0xff) 513 499 /* bits 8:26 reserved */ 514 500 /* Slot state */ 515 501 #define SLOT_STATE (0x1f << 27) ··· 521 507 * @ep_info2: information on endpoint type, max packet size, max burst size, 522 508 * error count, and whether the HC will force an event for all 523 509 * transactions. 524 - * @ep_ring: 64-bit ring address. If the endpoint only defines one flow, 525 - * this points to the endpoint transfer ring. Otherwise, it points 526 - * to a flow context array, which has a ring pointer for each flow. 527 - * @intr_target: 528 - * 64-bit address of the Interrupter Target that will receive 529 - * events from this endpoint. 510 + * @deq: 64-bit ring dequeue pointer address. If the endpoint only 511 + * defines one stream, this points to the endpoint transfer ring. 512 + * Otherwise, it points to a stream context array, which has a 513 + * ring pointer for each flow. 514 + * @tx_info: 515 + * Average TRB lengths for the endpoint ring and 516 + * max payload within an Endpoint Service Interval Time (ESIT). 530 517 * 531 518 * Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context 532 519 * structures. 
If the HC uses 64-byte contexts, there is an additional 32 bytes ··· 536 521 struct xhci_ep_ctx { 537 522 u32 ep_info; 538 523 u32 ep_info2; 539 - /* 64-bit endpoint ring address */ 540 - u32 ep_ring[2]; 541 - /* 64-bit address of the interrupter target */ 542 - u32 intr_target[2]; 524 + u32 deq[2]; 525 + u32 tx_info; 543 526 /* offset 0x14 - 0x1f reserved for HC internal use */ 544 - u32 reserved[2]; 527 + u32 reserved[3]; 545 528 } __attribute__ ((packed)); 546 529 547 530 /* ep_info bitmasks */ ··· 600 587 #define DROP_EP(x) (0x1 << x) 601 588 /* add context bitmasks */ 602 589 #define ADD_EP(x) (0x1 << x) 590 + 591 + 592 + struct xhci_virt_device { 593 + /* 594 + * Commands to the hardware are passed an "input context" that 595 + * tells the hardware what to change in its data structures. 596 + * The hardware will return changes in an "output context" that 597 + * software must allocate for the hardware. We need to keep 598 + * track of input and output contexts separately because 599 + * these commands might fail and we don't trust the hardware. 
600 + */ 601 + struct xhci_device_control *out_ctx; 602 + dma_addr_t out_ctx_dma; 603 + /* Used for addressing devices and configuration changes */ 604 + struct xhci_device_control *in_ctx; 605 + dma_addr_t in_ctx_dma; 606 + /* FIXME when stream support is added */ 607 + struct xhci_ring *ep_rings[31]; 608 + dma_addr_t ep_dma[31]; 609 + /* Status of the last command issued for this device */ 610 + u32 cmd_status; 611 + }; 603 612 604 613 605 614 /** ··· 746 711 u32 flags; 747 712 } __attribute__ ((packed)); 748 713 714 + /* flags bitmasks */ 715 + /* bits 16:23 are the virtual function ID */ 716 + /* bits 24:31 are the slot ID */ 717 + #define TRB_TO_SLOT_ID(p) (((p) & (0xff<<24)) >> 24) 718 + #define SLOT_ID_FOR_TRB(p) (((p) & 0xff) << 24) 749 719 750 720 /* Port Status Change Event TRB fields */ 751 721 /* Port ID - bits 31:24 */ ··· 971 931 struct xhci_ring *cmd_ring; 972 932 struct xhci_ring *event_ring; 973 933 struct xhci_erst erst; 934 + /* slot enabling and address device helpers */ 935 + struct completion addr_dev; 936 + int slot_id; 937 + /* Internal mirror of the HW's dcbaa */ 938 + struct xhci_virt_device *devs[MAX_HC_SLOTS]; 974 939 975 940 /* DMA pools */ 976 941 struct dma_pool *device_pool; ··· 1047 1002 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); 1048 1003 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); 1049 1004 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); 1005 + void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); 1050 1006 1051 1007 /* xHCI memory managment */ 1052 1008 void xhci_mem_cleanup(struct xhci_hcd *xhci); 1053 1009 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags); 1010 + void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id); 1011 + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags); 1012 + int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device 
*udev); 1054 1013 1055 1014 #ifdef CONFIG_PCI 1056 1015 /* xHCI PCI glue */ ··· 1071 1022 void xhci_shutdown(struct usb_hcd *hcd); 1072 1023 int xhci_get_frame(struct usb_hcd *hcd); 1073 1024 irqreturn_t xhci_irq(struct usb_hcd *hcd); 1025 + int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev); 1026 + void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev); 1027 + int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev); 1074 1028 1075 1029 /* xHCI ring, segment, TRB, and TD functions */ 1076 1030 dma_addr_t trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); ··· 1081 1029 void *setup_one_noop(struct xhci_hcd *xhci); 1082 1030 void handle_event(struct xhci_hcd *xhci); 1083 1031 void set_hc_event_deq(struct xhci_hcd *xhci); 1032 + int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id); 1033 + int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); 1084 1034 1085 1035 /* xHCI roothub code */ 1086 1036 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,