Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.31-rc9 (1382 lines, 44 kB)
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/irq.h>
#include <linux/module.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)	/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	xhci_dbg(xhci, "// Halt the HC\n");
	/* Disable all interrupts from the host controller */
	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);

	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}

/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here? Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
}

/*
 * Stop the HC from processing the endpoint queues.
 */
static void xhci_quiesce(struct xhci_hcd *xhci)
{
	/*
	 * Queues are per endpoint, so we need to disable an endpoint or slot.
	 *
	 * To disable a slot, we need to insert a disable slot command on the
	 * command ring and ring the doorbell. This will also free any internal
	 * resources associated with the slot (which might not be what we want).
	 *
	 * A Release Endpoint command sounds better - doesn't free internal HC
	 * memory, but removes the endpoints from the schedule and releases the
	 * bandwidth, disables the doorbells, and clears the endpoint enable
	 * flag. Usually used prior to a set interface command.
	 *
	 * TODO: Implement after command ring code is done.
	 */
	BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
	xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
}

#if 0
/* Set up MSI-X table for entry 0 (may claim other entries later) */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	xhci->msix_count = 0;
	/* XXX: did I do this right? ixgbe does kcalloc for more than one */
	xhci->msix_entries = kmalloc(sizeof(struct msix_entry), GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}
	xhci->msix_entries[0].entry = 0;

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	/*
	 * Pass the xhci pointer value as the request_irq "cookie".
	 * If more irqs are added, this will need to be unique for each one.
	 */
	ret = request_irq(xhci->msix_entries[0].vector, &xhci_irq, 0,
			"xHCI", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "Failed to allocate MSI-X interrupt\n");
		goto disable_msix;
	}
	xhci_dbg(xhci, "Finished setting up MSI-X\n");
	return 0;

disable_msix:
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}

/* XXX: code duplication; can xhci_setup_msix call this? */
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	if (!xhci->msix_entries)
		return;

	free_irq(xhci->msix_entries[0].vector, xhci);
	pci_disable_msix(pdev);
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	xhci_dbg(xhci, "Finished cleaning up MSI-X\n");
}
#endif

/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}

/*
 * Called in interrupt context when there might be work
 * queued on the event ring
 *
 * xhci->lock must be held by caller.
 */
static void xhci_work(struct xhci_hcd *xhci)
{
	u32 temp;
	u64 temp_64;

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp |= STS_EINT;
	xhci_writel(xhci, temp, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */

	/* Acknowledge the interrupt */
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	temp |= 0x3;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_pending);
	/* Flush posted writes */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);

	/* FIXME this should be a delayed service routine that clears the EHB */
	xhci_handle_event(xhci);

	/* Clear the event handler busy flag (RW1C); the event ring should be empty. */
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci_write_64(xhci, temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
	/* Flush posted writes -- FIXME is this necessary? */
	xhci_readl(xhci, &xhci->ir_set->irq_pending);
}

/*-------------------------------------------------------------------------*/

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 temp, temp2;
	union xhci_trb *trb;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	temp2 = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	if (temp == 0xffffffff && temp2 == 0xffffffff)
		goto hw_died;

	if (!(temp & STS_EINT) && !ER_IRQ_PENDING(temp2)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	xhci_dbg(xhci, "op reg status = %08x\n", temp);
	xhci_dbg(xhci, "ir set irq_pending = %08x\n", temp2);
	xhci_dbg(xhci, "Event ring dequeue ptr:\n");
	xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
			(unsigned long long)xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
			lower_32_bits(trb->link.segment_ptr),
			upper_32_bits(trb->link.segment_ptr),
			(unsigned int) trb->link.intr_target,
			(unsigned int) trb->link.control);

	if (temp & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		xhci_to_hcd(xhci)->state = HC_STATE_HALT;
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	xhci_work(xhci);
	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (xhci->devs[i]) {
			for (j = 0; j < 31; ++j) {
				if (xhci->devs[i]->ep_rings[j]) {
					xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
					xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
				}
			}
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Setup MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	void (*doorbell)(struct xhci_hcd *) = NULL;

	hcd->uses_new_polling = 1;
	hcd->poll_rh = 0;

	xhci_dbg(xhci, "xhci_run\n");
#if 0	/* FIXME: MSI not setup yet */
	/* Do this at the very last minute */
	ret = xhci_setup_msix(xhci);
	if (!ret)
		return ret;

	return -ENOSYS;
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);
	/* Flush PCI posted writes */
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp);
	if (doorbell)
		(*doorbell)(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	if (HC_IS_RUNNING(hcd->state))
		xhci_quiesce(xhci);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0	/* No MSI yet */
	xhci_cleanup_msix(xhci);
#endif
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, xhci->ir_set, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

#if 0
	xhci_cleanup_msix(xhci);
#endif

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}

/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;
	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, const char *func) {
	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}
	if (!udev->slot_id) {
		printk(KERN_DEBUG "xHCI %s called with unaddressed device\n",
				func);
		return -EINVAL;
	}
	return 1;
}

/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	spin_lock_irqsave(&xhci->lock, flags);
	if (!xhci->devs || !xhci->devs[slot_id]) {
		if (!in_interrupt())
			dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
		ret = -EINVAL;
		goto exit;
	}
	if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}
	if (usb_endpoint_xfer_control(&urb->ep->desc))
		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	else
		ret = -EINVAL;
exit:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret;
	struct xhci_hcd *xhci;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);
	td = (struct xhci_td *) urb->hcpriv;

	ep_ring->cancels_pending++;
	list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (ep_ring->cancels_pending == 1) {
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags = ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}

	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so documentation
	 * for usb_set_interface() and usb_set_configuration() claim).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_KERNEL) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors. We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}

static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched. Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}

/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint(). If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface. Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	int timeleft;
	unsigned long flags;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return -EINVAL;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			&virt_dev->cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		/* Callee should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	xhci_zero_in_ctx(xhci, virt_dev);
	/* Free any old rings */
	for (i = 1; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			xhci_ring_free(xhci, virt_dev->ep_rings[i]);
			virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
			virt_dev->new_ep_rings[i] = NULL;
		}
	}

	return ret;
}

void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	if (!xhci->devs || !xhci->devs[udev->slot_id]) {
		xhci_warn(xhci, "xHCI %s called with unaddressed device\n",
				__func__);
		return;
	}
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->new_ep_rings[i]) {
			xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
			virt_dev->new_ep_rings[i] = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}

/* Deal with stalled endpoints. The core should have sent the control message
 * to clear the halt condition. However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_dequeue_state deq_state;
	struct xhci_ring *ep_ring;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint()
	 */
	if (!ep->hcpriv)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
	if (!ep_ring->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command. Better hope that last command worked!
	 */
	if (!ret) {
		xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
		/* We need to move the HW's dequeue pointer past this TD,
		 * or it will attempt to resend it on the next doorbell ring.
		 */
		xhci_find_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep_ring->stopped_td, &deq_state);
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, ep_ring,
				udev->slot_id,
				ep_index, &deq_state);
		kfree(ep_ring->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}

/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled. Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;

	if (udev->slot_id == 0)
		return;

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot. XXX Can free sleep?
	 */
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed. Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_KERNEL)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */
	return 1;
}

/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init, so
 * we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	/* If this is a Set Address to an unconfigured device, setup ep 0 */
	if (!udev->config)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, assume the core has the device configured how it wants */
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret) {
		return ret;
	}
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Device address = %d\n", udev->devnum);
	/* XXX Meh, not sure if anyone else but choose_address uses this. */
	set_bit(udev->devnum, udev->bus->devmap.devicemap);

	return 0;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size. Why? */
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
#ifdef CONFIG_PCI
	int retval = 0;

	retval = xhci_register_pci();

	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.");
		return retval;
	}
#endif
	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
#ifdef CONFIG_PCI
	xhci_unregister_pci();
#endif
}
module_exit(xhci_hcd_cleanup);