// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

#include <linux/pci.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include "xhci.h"
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"

#define PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)

/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");

static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");

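/*
 * Usage sketch (an illustration, not taken from this file): loading with
 * "modprobe xhci-hcd quirks=<bitmask>" ORs the given bits into each
 * controller's default quirks; see the XHCI_* bit definitions in xhci.h
 * for the meanings of the individual bits.
 */

/* Check whether the TD's start segment is still linked into the given ring. */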
static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
{
	struct xhci_segment *seg = ring->first_seg;

	if (!td || !td->start_seg)
		return false;
	do {
		if (seg == td->start_seg)
			return true;
		seg = seg->next;
	} while (seg && seg != ring->first_seg);

	return false;
}

/*
 * xhci_handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done). There are two failure modes: the "usec" timeout elapses
 * (major hardware flakeout), or the register reads as all-ones (hardware
 * removed).
 */
int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32 result;
	int ret;

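	/*
	 * Poll every 1 us. Bail out of the poll early if the register reads
	 * as all-ones, so callers see -ENODEV rather than a timeout when the
	 * controller has been removed.
	 */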
	ret = readl_poll_timeout_atomic(ptr, result,
					(result & mask) == done ||
					result == U32_MAX,
					1, usec);
	if (result == U32_MAX)		/* card removed */
		return -ENODEV;

	return ret;
}

/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

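	/*
	 * Always mask off the interrupt-enable bits; additionally clear the
	 * run/stop bit unless the controller has already halted.
	 */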
	mask = ~(XHCI_IRQS);
	halted = readl(&xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = readl(&xhci->op_regs->command);
	cmd &= mask;
	writel(cmd, &xhci->op_regs->command);
}

/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 ms of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	int ret;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
	xhci_quiesce(xhci);

	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
	if (ret) {
		xhci_warn(xhci, "Host halt failed, %d\n", ret);
		return ret;
	}
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	return ret;
}

/*
 * Set the run bit and wait for the host to be running.
 */
int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
			temp);
	writel(temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, waited %u microseconds.\n",
			 XHCI_MAX_HALT_USEC);
	if (!ret)
		/* clear state flags. Including dying, halted or removing */
		xhci->xhc_state = 0;

	return ret;
}

/*
 * Reset a halted HC.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = readl(&xhci->op_regs->status);

	if (state == ~(u32)0) {
		xhci_warn(xhci, "Host not accessible, reset failed.\n");
		return -ENODEV;
	}

	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
	command = readl(&xhci->op_regs->command);
	command |= CMD_RESET;
	writel(command, &xhci->op_regs->command);

	/*
	 * Existing Intel xHCI controllers require a delay of 1 ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may, very rarely, result in a system hang.
	 */
	if (xhci->quirks & XHCI_INTEL_HOST)
		udelay(1000);

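	/* Wait up to 10 seconds (the timeout below is in usec) for CMD_RESET to clear */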
	ret = xhci_handshake(&xhci->op_regs->command,
			CMD_RESET, 0, 10 * 1000 * 1000);
	if (ret)
		return ret;

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller));

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Wait for controller to be ready for doorbell rings");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	ret = xhci_handshake(&xhci->op_regs->status,
			STS_CNR, 0, 10 * 1000 * 1000);

	xhci->usb2_rhub.bus_state.port_c_suspend = 0;
	xhci->usb2_rhub.bus_state.suspended_ports = 0;
	xhci->usb2_rhub.bus_state.resuming_ports = 0;
	xhci->usb3_rhub.bus_state.port_c_suspend = 0;
	xhci->usb3_rhub.bus_state.suspended_ports = 0;
	xhci->usb3_rhub.bus_state.resuming_ports = 0;

	return ret;
}

static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
{
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	int err, i;
	u64 val;
	u32 intrs;

	/*
	 * Some Renesas controllers get into a weird state if they are
	 * reset while programmed with 64bit addresses (they will preserve
	 * the top half of the address in internal, non visible
	 * registers). You end up with half the address coming from the
	 * kernel, and the other half coming from the firmware. Also,
	 * changing the programming leads to extra accesses even if the
	 * controller is supposed to be halted. The controller ends up with
	 * a fatal fault, and is then ripe for being properly reset.
	 *
	 * Special care is taken to only apply this if the device is behind
	 * an iommu. Doing anything when there is no iommu is definitely
	 * unsafe...
	 */
	if (!(xhci->quirks & XHCI_ZERO_64B_REGS) || !device_iommu_mapped(dev))
		return;

	xhci_info(xhci, "Zeroing 64bit base registers, expecting fault\n");

	/* Clear HSEIE so that faults do not get signaled */
	val = readl(&xhci->op_regs->command);
	val &= ~CMD_HSEIE;
	writel(val, &xhci->op_regs->command);

	/* Clear HSE (aka FATAL) */
	val = readl(&xhci->op_regs->status);
	val |= STS_FATAL;
	writel(val, &xhci->op_regs->status);

	/* Now zero the registers, and brace for impact */
	val = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
	val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (upper_32_bits(val))
		xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);

	intrs = min_t(u32, HCS_MAX_INTRS(xhci->hcs_params1),
		      ARRAY_SIZE(xhci->run_regs->ir_set));

	for (i = 0; i < intrs; i++) {
		struct xhci_intr_reg __iomem *ir;

		ir = &xhci->run_regs->ir_set[i];
		val = xhci_read_64(xhci, &ir->erst_base);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_base);
		val = xhci_read_64(xhci, &ir->erst_dequeue);
		if (upper_32_bits(val))
			xhci_write_64(xhci, 0, &ir->erst_dequeue);
	}

	/* Wait for the fault to appear. It will be cleared on reset */
	err = xhci_handshake(&xhci->op_regs->status,
			     STS_FATAL, STS_FATAL,
			     XHCI_MAX_HALT_USEC);
	if (!err)
		xhci_info(xhci, "Fault detected\n");
}

#ifdef CONFIG_USB_PCI
/*
 * Set up MSI
 */
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	/*
	 * TODO: Check with MSI SoC for sysdev
	 */
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"failed to allocate MSI entry");
		return ret;
	}

	ret = request_irq(pdev->irq, xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"disable MSI interrupt");
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

/*
 * Set up MSI-X
 */
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported:
	 * - HCS_MAX_INTRS: the maximum number of interrupters the host can
	 *   handle, based on the xHCI HCSPARAMS1 register.
	 * - num_online_cpus: one MSI-X vector per online CPU core, plus one
	 *   extra vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
			       HCS_MAX_INTRS(xhci->hcs_params1));

	ret = pci_alloc_irq_vectors(pdev, xhci->msix_count, xhci->msix_count,
			PCI_IRQ_MSIX);
	if (ret < 0) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"Failed to enable MSI-X");
		return ret;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(pci_irq_vector(pdev, i), xhci_msi_irq, 0,
				"xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	pci_free_irq_vectors(pdev);
	return ret;
}

/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	if (xhci->quirks & XHCI_PLAT)
		return;

	/* return if using legacy interrupt */
	if (hcd->irq > 0)
		return;

	if (hcd->msix_enabled) {
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			free_irq(pci_irq_vector(pdev, i), xhci_to_hcd(xhci));
	} else {
		free_irq(pci_irq_vector(pdev, 0), xhci_to_hcd(xhci));
	}

	pci_free_irq_vectors(pdev);
	hcd->msix_enabled = 0;
}

static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	if (hcd->msix_enabled) {
		struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
		int i;

		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(pci_irq_vector(pdev, i));
	}
}

static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev;
	int ret;

	/* The xhci platform device has set up IRQs through usb_add_hcd. */
	if (xhci->quirks & XHCI_PLAT)
		return 0;

	pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	/*
	 * Some Fresco Logic host controllers advertise MSI, but fail to
	 * generate interrupts. Don't even try to enable MSI.
	 */
	if (xhci->quirks & XHCI_BROKEN_MSI)
		goto legacy_irq;

	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = 0;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (!ret) {
		hcd->msi_enabled = 1;
		return 0;
	}

	if (!pdev->irq) {
		xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
		return -EINVAL;
	}

 legacy_irq:
	if (!strlen(hcd->irq_descr))
		snprintf(hcd->irq_descr, sizeof(hcd->irq_descr), "%s:usb%d",
			 hcd->driver->description, hcd->self.busnum);

	/* fall back to legacy interrupt */
	ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
			hcd->irq_descr, hcd);
	if (ret) {
		xhci_err(xhci, "request interrupt %d failed\n",
			 pdev->irq);
		return ret;
	}
	hcd->irq = pdev->irq;
	return 0;
}

#else

static inline int xhci_try_enable_msi(struct usb_hcd *hcd)
{
	return 0;
}

static inline void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
}

static inline void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
}

#endif

static void compliance_mode_recovery(struct timer_list *t)
{
	struct xhci_hcd *xhci;
	struct usb_hcd *hcd;
	struct xhci_hub *rhub;
	u32 temp;
	int i;

	xhci = from_timer(xhci, t, comp_mode_recovery_timer);
	rhub = &xhci->usb3_rhub;

	for (i = 0; i < rhub->num_ports; i++) {
		temp = readl(rhub->ports[i]->addr);
		if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) {
			/*
			 * Compliance Mode Detected. Letting USB Core
			 * handle the Warm Reset
			 */
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Compliance mode detected->port %d",
					i + 1);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
					"Attempting compliance mode recovery");
			hcd = xhci->shared_hcd;

			if (hcd->state == HC_STATE_SUSPENDED)
				usb_hcd_resume_root_hub(hcd);

			usb_hcd_poll_rh_status(hcd);
		}
	}

	if (xhci->port_status_u0 != ((1 << rhub->num_ports) - 1))
		mod_timer(&xhci->comp_mode_recovery_timer,
			  jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
}

/*
 * Quirk to work around an issue generated by the SN65LVPE502CP USB3.0
 * re-driver that sometimes causes ports behind that hardware to enter
 * compliance mode. The quirk creates a timer that polls the link state of
 * each host controller port every 2 seconds and recovers a port by issuing
 * a Warm Reset if compliance mode is detected; otherwise the port becomes
 * "dead" (no device connections or disconnections will be detected anymore).
 * Because no status event is generated when entering compliance mode (per
 * the xHCI spec), this quirk is needed on systems that have the failing
 * hardware installed.
 */
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
	xhci->port_status_u0 = 0;
	timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
		    0);
	xhci->comp_mode_recovery_timer.expires = jiffies +
			msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);

	add_timer(&xhci->comp_mode_recovery_timer);
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
			"Compliance mode recovery timer initialized");
}

/*
 * This function identifies the systems that have installed the SN65LVPE502CP
 * USB3.0 re-driver and that need the Compliance Mode Quirk.
 * Systems:
 * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820
 */
static bool xhci_compliance_mode_recovery_timer_quirk_check(void)
{
	const char *dmi_product_name, *dmi_sys_vendor;

	dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
	dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!dmi_product_name || !dmi_sys_vendor)
		return false;

	if (!(strstr(dmi_sys_vendor, "Hewlett-Packard")))
		return false;

	if (strstr(dmi_product_name, "Z420") ||
	    strstr(dmi_product_name, "Z620") ||
	    strstr(dmi_product_name, "Z820") ||
	    strstr(dmi_product_name, "Z1 Workstation"))
		return true;

	return false;
}

static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci)
{
	return (xhci->port_status_u0 == ((1 << xhci->usb3_rhub.num_ports) - 1));
}


/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
static int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
	spin_lock_init(&xhci->lock);
	if (xhci->hci_version == 0x95 && link_quirk) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"QUIRK: Not clearing Link TRB chain bits.");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
				"xHCI doesn't need link TRB QUIRK");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");

	/* Initializing Compliance Mode Recovery Data If Needed */
	if (xhci_compliance_mode_recovery_timer_quirk_check()) {
		xhci->quirks |= XHCI_COMP_MODE_QUIRK;
		compliance_mode_recovery_timer_init(xhci);
	}

	return retval;
}

/*-------------------------------------------------------------------------*/


static int xhci_run_finished(struct xhci_hcd *xhci)
{
	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB3 roothub");
	return 0;
}

/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	/* Start the xHCI host controller running only after the USB 2.0
	 * roothub has been set up.
	 */

	hcd->uses_new_polling = 1;
	if (!usb_hcd_is_primary_hcd(hcd))
		return xhci_run_finished(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");

	ret = xhci_try_enable_msi(hcd);
	if (ret)
		return ret;

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"ERST deq = 64'h%0lx", (unsigned long) temp_64);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Set the interrupt modulation register");
	temp = readl(&xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
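	/* imod_interval is in ns; the hardware IMOD field counts 250 ns ticks */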
	temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
	writel(temp, &xhci->ir_set->irq_control);

	/* Set the HCD state before we enable the irqs */
	temp = readl(&xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enable interrupts, cmd = 0x%x.", temp);
	writel(temp, &xhci->op_regs->command);

	temp = readl(&xhci->ir_set->irq_pending);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);

	if (xhci->quirks & XHCI_NEC_HOST) {
		struct xhci_command *command;

		command = xhci_alloc_command(xhci, false, GFP_KERNEL);
		if (!command)
			return -ENOMEM;

		ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));
		if (ret)
			xhci_free_command(xhci, command);
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"Finished xhci_run for USB2 roothub");

	xhci_dbc_init(xhci);

	xhci_debugfs_init(xhci);

	return 0;
}
EXPORT_SYMBOL_GPL(xhci_run);

/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
static void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	mutex_lock(&xhci->mutex);

	/* Only halt host and free memory after both hcds are removed */
	if (!usb_hcd_is_primary_hcd(hcd)) {
		mutex_unlock(&xhci->mutex);
		return;
	}

	xhci_dbc_exit(xhci);

	spin_lock_irq(&xhci->lock);
	xhci->xhc_state |= XHCI_STATE_HALTED;
	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	/* Deleting Compliance Mode Recovery Timer */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	if (xhci->quirks & XHCI_AMD_PLL_FIX)
		usb_amd_dev_put();

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Disabling event ring interrupts");
	temp = readl(&xhci->op_regs->status);
	writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
	temp = readl(&xhci->ir_set->irq_pending);
	writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
	xhci_mem_cleanup(xhci);
	xhci_debugfs_exit(xhci);
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_stop completed - status = %x",
			readl(&xhci->op_regs->status));
	mutex_unlock(&xhci->mutex);
}

/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting. We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 *
 * This will only ever be called with the main usb_hcd (the USB3 roothub).
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
		usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	/* Workaround for spurious wakeups at shutdown with HSW */
	if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
		xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"xhci_shutdown completed - status = %x",
			readl(&xhci->op_regs->status));
}
EXPORT_SYMBOL_GPL(xhci_shutdown);

#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = readl(&xhci->op_regs->command);
	xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
	xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
	xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	writel(xhci->s3.command, &xhci->op_regs->command);
	writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
	writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
	writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}

static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				       xhci->cmd_ring->dequeue) &
		 (u64) ~CMD_RING_RSVD_BITS) |
		 xhci->cmd_ring->cycle_state;
	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
			"// Setting command ring address to 0x%llx",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}

/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume. Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register. Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
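	/*
	 * Zero every TRB in each segment except the trailing link TRB, and
	 * clear the link TRB's cycle bit so stale entries can't be consumed.
	 */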
	do {
		memset(seg->trbs, 0,
				sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
		seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
				cpu_to_le32(~TRB_CYCLE);
		seg = seg->next;
	} while (seg != ring->deq_seg);

	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}

/*
 * Disable port wake bits if do_wakeup is not set.
 *
 * Also clear a possible internal port wake state left hanging for ports that
 * detected a termination but never successfully enumerated (trained to U0).
 * An internal wake causes an immediate xHCI wake after suspend. The PORT_CSC
 * write done at enumeration clears this wake; force one here as well for
 * unconnected ports.
 */

static void xhci_disable_hub_port_wake(struct xhci_hcd *xhci,
				       struct xhci_hub *rhub,
				       bool do_wakeup)
{
	unsigned long flags;
	u32 t1, t2, portsc;
	int i;

	spin_lock_irqsave(&xhci->lock, flags);

	for (i = 0; i < rhub->num_ports; i++) {
		portsc = readl(rhub->ports[i]->addr);
		t1 = xhci_port_state_to_neutral(portsc);
		t2 = t1;

		/* clear wake bits if do_wake is not set */
		if (!do_wakeup)
			t2 &= ~PORT_WAKE_BITS;

		/* Don't touch csc bit if connected or connect change is set */
		if (!(portsc & (PORT_CSC | PORT_CONNECT)))
			t2 |= PORT_CSC;

		if (t1 != t2) {
			writel(t2, rhub->ports[i]->addr);
			xhci_dbg(xhci, "config port %d-%d wake bits, portsc: 0x%x, write: 0x%x\n",
				 rhub->hcd->self.busnum, i + 1, portsc, t2);
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
}

static bool xhci_pending_portevent(struct xhci_hcd *xhci)
{
	struct xhci_port **ports;
	int port_index;
	u32 status;
	u32 portsc;

	status = readl(&xhci->op_regs->status);
	if (status & STS_EINT)
		return true;
	/*
	 * Checking STS_EINT is not enough as there is a lag between a change
	 * bit being set and the Port Status Change Event that it generated
	 * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
	 */

	port_index = xhci->usb2_rhub.num_ports;
	ports = xhci->usb2_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	port_index = xhci->usb3_rhub.num_ports;
	ports = xhci->usb3_rhub.ports;
	while (port_index--) {
		portsc = readl(ports[port_index]->addr);
		if (portsc & PORT_CHANGE_MASK ||
		    (portsc & PORT_PLS_MASK) == XDEV_RESUME)
			return true;
	}
	return false;
}

/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
{
	int rc = 0;
	unsigned int delay = XHCI_MAX_HALT_USEC * 2;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	u32 res;

	if (!hcd->state)
		return 0;

	if (hcd->state != HC_STATE_SUSPENDED ||
			xhci->shared_hcd->state != HC_STATE_SUSPENDED)
		return -EINVAL;

	/* Clear root port wake on bits if wakeup not allowed. */
	xhci_disable_hub_port_wake(xhci, &xhci->usb3_rhub, do_wakeup);
	xhci_disable_hub_port_wake(xhci, &xhci->usb2_rhub, do_wakeup);

	if (!HCD_HW_ACCESSIBLE(hcd))
		return 0;

	xhci_dbc_suspend(xhci);

	/* Don't poll the roothubs on bus suspend. */
	xhci_dbg(xhci, "%s: stopping usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	del_timer_sync(&hcd->rh_timer);
	clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	del_timer_sync(&xhci->shared_hcd->rh_timer);

	if (xhci->quirks & XHCI_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
	/* step 1: stop endpoint */
	/* skipped assuming that port suspend has done */

	/* step 2: clear Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command &= ~CMD_RUN;
	writel(command, &xhci->op_regs->command);

	/* Some chips from Fresco Logic need an extraordinary delay */
	delay *= (xhci->quirks & XHCI_SLOW_SUSPEND) ? 10 : 1;

	if (xhci_handshake(&xhci->op_regs->status,
			   STS_HALT, STS_HALT, delay)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = readl(&xhci->op_regs->command);
	command |= CMD_CSS;
	writel(command, &xhci->op_regs->command);
	xhci->broken_suspend = 0;
	if (xhci_handshake(&xhci->op_regs->status,
				STS_SAVE, 0, 20 * 1000)) {
		/*
		 * AMD SNPS xHC 3.0 occasionally does not clear the SSS bit of
		 * USBSTS, so the driver's poll for BIT(8) to clear never
		 * succeeds and the suspend times out even though the
		 * controller is fine. To work around this, check that the
		 * SRE and HCE bits are not set (per xHCI Section 5.4.2) and,
		 * if so, bypass the timeout.
		 */
		res = readl(&xhci->op_regs->status);
		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
		    (((res & STS_SRE) == 0) &&
				((res & STS_HCE) == 0))) {
			xhci->broken_suspend = 1;
		} else {
			xhci_warn(xhci, "WARN: xHC save state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
	}
	spin_unlock_irq(&xhci->lock);

	/*
	 * Deleting Compliance Mode Recovery Timer because the xHCI Host
	 * is about to be suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
			(!(xhci_all_ports_seen_u0(xhci)))) {
		del_timer_sync(&xhci->comp_mode_recovery_timer);
		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"%s: compliance mode recovery timer deleted",
				__func__);
	}

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	xhci_msix_sync_irqs(xhci);

	return rc;
}
EXPORT_SYMBOL_GPL(xhci_suspend);

/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions from S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct usb_hcd *secondary_hcd;
	int retval = 0;
	bool comp_timer_running = false;
	bool pending_portevent = false;

	if (!hcd->state)
		return 0;

	/* Wait a bit if either of the roothubs need to settle from the
	 * transition into bus suspend.
	 */

	if (time_before(jiffies, xhci->usb2_rhub.bus_state.next_statechange) ||
	    time_before(jiffies, xhci->usb3_rhub.bus_state.next_statechange))
		msleep(100);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);

	spin_lock_irq(&xhci->lock);
	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
		hibernated = true;

	if (!hibernated) {
		/*
		 * Some controllers might lose power during suspend, so wait
		 * for controller not ready bit to clear, just as in xHC init.
		 */
		retval = xhci_handshake(&xhci->op_regs->status,
					STS_CNR, 0, 10 * 1000 * 1000);
		if (retval) {
			xhci_warn(xhci, "Controller not ready at resume %d\n",
				  retval);
			spin_unlock_irq(&xhci->lock);
			return retval;
		}
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state*/
		/* step 3: set CRS flag */
		command = readl(&xhci->op_regs->command);
		command |= CMD_CRS;
		writel(command, &xhci->op_regs->command);
		/*
		 * Some controllers take up to 55+ ms to complete the
		 * controller restore, so set the timeout to 100 ms; the xHCI
		 * specification doesn't mention any timeout value.
		 */
		if (xhci_handshake(&xhci->op_regs->status,
				   STS_RESTORE, 0, 100 * 1000)) {
			xhci_warn(xhci, "WARN: xHC restore state timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = readl(&xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {

		if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
		    !(xhci_all_ports_seen_u0(xhci))) {
			del_timer_sync(&xhci->comp_mode_recovery_timer);
			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
				"Compliance Mode Recovery Timer deleted!");
		}

		/* Let the USB core know _both_ roothubs lost power. */
		usb_root_hub_lost_power(xhci->main_hcd->self.root_hub);
		usb_root_hub_lost_power(xhci->shared_hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_zero_64b_regs(xhci);
		retval = xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		if (retval)
			return retval;
		xhci_cleanup_msix(xhci);

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = readl(&xhci->op_regs->status);
		writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
		temp = readl(&xhci->ir_set->irq_pending);
		writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_debugfs_exit(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			    readl(&xhci->op_regs->status));

		/* USB core calls the PCI reinit and start functions twice:
		 * first with the primary HCD, and then with the secondary HCD.
		 * If we don't do the same, the host will never be started.
		 */
		if (!usb_hcd_is_primary_hcd(hcd))
			secondary_hcd = hcd;
		else
			secondary_hcd = xhci->shared_hcd;

		xhci_dbg(xhci, "Initialize the xhci_hcd\n");
		retval = xhci_init(hcd->primary_hcd);
		if (retval)
			return retval;
		comp_timer_running = true;

		xhci_dbg(xhci, "Start the primary HCD\n");
		retval = xhci_run(hcd->primary_hcd);
		if (!retval) {
			xhci_dbg(xhci, "Start the secondary HCD\n");
			retval = xhci_run(secondary_hcd);
		}
		hcd->state = HC_STATE_SUSPENDED;
		xhci->shared_hcd->state = HC_STATE_SUSPENDED;
		goto done;
	}

	/* step 4: set Run/Stop bit */
	command = readl(&xhci->op_regs->command);
	command |= CMD_RUN;
	writel(command, &xhci->op_regs->command);
	xhci_handshake(&xhci->op_regs->status, STS_HALT,
		  0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * Running endpoints by ringing their doorbells
	 */

	spin_unlock_irq(&xhci->lock);

	xhci_dbc_resume(xhci);

 done:
	if (retval == 0) {
		/*
		 * Resume roothubs only if there are pending events.
		 * USB 3 devices resend U3 LFPS wake after a 100ms delay if
		 * the first wake signalling failed; give it that chance.
		 */
		pending_portevent = xhci_pending_portevent(xhci);
		if (!pending_portevent) {
			msleep(120);
			pending_portevent = xhci_pending_portevent(xhci);
		}

		if (pending_portevent) {
			usb_hcd_resume_root_hub(xhci->shared_hcd);
			usb_hcd_resume_root_hub(hcd);
		}
	}
	/*
	 * If the system is subject to the quirk, the Compliance Mode Timer
	 * must always be re-initialized after a system resume: ports may
	 * suffer the compliance mode issue again, regardless of whether they
	 * entered U0 before the system was suspended.
	 */
	if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running)
		compliance_mode_recovery_timer_init(xhci);

	if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL)
		usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller));

	/* Re-enable port polling. */
	xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
		 __func__, hcd->self.busnum);
	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
	usb_hcd_poll_rh_status(xhci->shared_hcd);
	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);

	return retval;
}
EXPORT_SYMBOL_GPL(xhci_resume);
#endif /* CONFIG_PM */

/*-------------------------------------------------------------------------*/

static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
{
	void *temp;
	int ret = 0;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf_len = urb->transfer_buffer_length;

	temp = kzalloc_node(buf_len, GFP_ATOMIC,
			    dev_to_node(hcd->self.sysdev));
	if (!temp)
		return -ENOMEM;

	if (usb_urb_dir_out(urb))
		sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
				   temp, buf_len, 0);

	urb->transfer_buffer = temp;
	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   dir);

	if (dma_mapping_error(hcd->self.sysdev,
			      urb->transfer_dma)) {
		ret = -EAGAIN;
		kfree(temp);
	} else {
		urb->transfer_flags |= URB_DMA_MAP_SINGLE;
	}

	return ret;
}

static bool xhci_urb_temp_buffer_required(struct usb_hcd *hcd,
					  struct urb *urb)
{
	bool ret = false;
	unsigned int i;
	unsigned int len = 0;
	unsigned int trb_size;
	unsigned int max_pkt;
	struct scatterlist *sg;
	struct scatterlist *tail_sg;

	tail_sg = urb->sg;
	max_pkt = usb_endpoint_maxp(&urb->ep->desc);

	if (!urb->num_sgs)
		return ret;

	if (urb->dev->speed >= USB_SPEED_SUPER)
		trb_size = TRB_CACHE_SIZE_SS;
	else
		trb_size = TRB_CACHE_SIZE_HS;

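	/*
	 * A sketch of the intent behind XHCI_SG_TRB_CACHE_SIZE_QUIRK (an
	 * interpretation, not a statement from the spec): if the scatterlist
	 * entries beyond what fits in the controller's TRB cache add up to
	 * less than one max packet, the transfer needs a contiguous bounce
	 * buffer instead of being mapped TRB-per-sg-entry.
	 */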
	if (urb->transfer_buffer_length != 0 &&
	    !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			len = len + sg->length;
			if (i > trb_size - 2) {
				len = len - tail_sg->length;
				if (len < max_pkt) {
					ret = true;
					break;
				}

				tail_sg = sg_next(tail_sg);
			}
		}
	}
	return ret;
}

static void xhci_unmap_temp_buf(struct usb_hcd *hcd, struct urb *urb)
{
	unsigned int len;
	unsigned int buf_len;
	enum dma_data_direction dir;

	dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	buf_len = urb->transfer_buffer_length;

	if (IS_ENABLED(CONFIG_HAS_DMA) &&
	    (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		dma_unmap_single(hcd->self.sysdev,
				 urb->transfer_dma,
				 urb->transfer_buffer_length,
				 dir);

	if (usb_urb_dir_in(urb)) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs,
					   urb->transfer_buffer,
					   buf_len,
					   0);
		if (len != buf_len) {
			xhci_dbg(hcd_to_xhci(hcd),
				 "Copy from tmp buf to urb sg list failed\n");
			urb->actual_length = len;
		}
	}
	urb->transfer_flags &= ~URB_DMA_MAP_SINGLE;
	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;
}

/*
 * Bypass the DMA mapping if the URB is suitable for Immediate Transfer (IDT):
 * we copy the actual data into the TRB address register instead. This is
 * limited to transfers up to 8 bytes on output endpoints of any kind with
 * wMaxPacketSize >= 8 bytes. If suitable for IDT, only one Transfer TRB per
 * TD is allowed.
 */
static int xhci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
				gfp_t mem_flags)
{
	struct xhci_hcd *xhci;

	xhci = hcd_to_xhci(hcd);

	if (xhci_urb_suitable_for_idt(urb))
		return 0;

	if (xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) {
		if (xhci_urb_temp_buffer_required(hcd, urb))
			return xhci_map_temp_buffer(hcd, urb);
	}
	return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
}

static void xhci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
	struct xhci_hcd *xhci;
	bool unmap_temp_buf = false;

	xhci = hcd_to_xhci(hcd);

	if (urb->num_sgs && (urb->transfer_flags & URB_DMA_MAP_SINGLE))
		unmap_temp_buf = true;

	if ((xhci->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK) && unmap_temp_buf)
		xhci_unmap_temp_buf(hcd, urb);
	else
		usb_hcd_unmap_urb_for_dma(hcd, urb);
}

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs. Find the index for an endpoint given its descriptor. Use the return
 * value to right shift 1 for the bitmask.
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
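 * e.g. ep 1 OUT => index 1, ep 1 IN => index 2, ep 0 (control) => index 0.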
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_index);

/* The reverse operation to xhci_get_endpoint_index. Calculate the USB endpoint
 * address from the XHCI endpoint index.
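 * e.g. ep_index 2 => 0x81 (ep 1 IN), ep_index 1 => 0x01 (ep 1 OUT).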
 */
unsigned int xhci_get_endpoint_address(unsigned int ep_index)
{
	unsigned int number = DIV_ROUND_UP(ep_index, 2);
	unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;

	return direction | number;
}

/* Find the flag for this endpoint (for use in the control context). Use the
 * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Compute the last valid endpoint context index. Basically, this is the
 * endpoint index plus one. For slot contexts with more than one valid
 * endpoint, we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		pr_debug("xHCI %s called with invalid args\n", func);
		return -EINVAL;
	}
	if (!udev->parent) {
		pr_debug("xHCI %s called for root hub\n", func);
		return 0;
	}

	xhci = hcd_to_xhci(hcd);
	if (check_virt_dev) {
		if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
			xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
				 func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			xhci_dbg(xhci, "xHCI %s called with udev that does not match virt_dev\n",
				 func);
			return -EINVAL;
		}
	}

	if (xhci->xhc_state & XHCI_STATE_HALTED)
		return -ENODEV;

	return 1;
}

static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor. If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_command *command;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
	max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max Packet Size for ep 0 changed.");
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in usb_device = %d",
				max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Max packet size in xHCI HW = %d",
				hw_max_packet_size);
		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
				"Issuing evaluate context command.");

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */

		command = xhci_alloc_command(xhci, true, mem_flags);
		if (!command)
			return -ENOMEM;

		command->in_ctx = xhci->devs[slot_id]->in_ctx;
		ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
		if (!ctrl_ctx) {
			xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
				  __func__);
			ret = -ENOMEM;
			goto command_cleanup;
		}
		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);

		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);	/* must clear */
		ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
		ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));

		ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG);
		ctrl_ctx->drop_flags = 0;

		ret = xhci_configure_endpoint(xhci, urb->dev, command,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG);
command_cleanup:
		kfree(command->completion);
		kfree(command);
	}
	return ret;
}

/*
 * Non-error returns are a promise to giveback() the urb later;
 * we drop ownership so the next owner (or urb unlink) can get it.
 */
static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	unsigned int *ep_state;
	struct urb_priv	*urb_priv;
	int num_tds;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
				    true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep_state = &xhci->devs[slot_id]->eps[ep_index].ep_state;

	if (!HCD_HW_ACCESSIBLE(hcd))
		return -ESHUTDOWN;

	if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR) {
		xhci_dbg(xhci, "Can't queue urb, port error, link inactive\n");
		return -ENODEV;
	}

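	/*
	 * Isoc URBs need one TD per packet. A bulk OUT URB with
	 * URB_ZERO_PACKET set and a length that is an exact multiple of
	 * wMaxPacketSize needs an extra TD for the trailing zero-length
	 * packet. Everything else fits in a single TD.
	 */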
	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		num_tds = urb->number_of_packets;
	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
	    urb->transfer_buffer_length > 0 &&
	    urb->transfer_flags & URB_ZERO_PACKET &&
	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
		num_tds = 2;
	else
		num_tds = 1;

	urb_priv = kzalloc(struct_size(urb_priv, td, num_tds), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	urb_priv->num_tds = num_tds;
	urb_priv->num_tds_done = 0;
	urb->hcpriv = urb_priv;

	trace_xhci_urb_enqueue(urb);

	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb, mem_flags);
			if (ret < 0) {
				xhci_urb_free_priv(urb_priv);
				urb->hcpriv = NULL;
				return ret;
			}
		}
	}

	spin_lock_irqsave(&xhci->lock, flags);

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for non-responsive xHCI host.\n",
			 urb->ep->desc.bEndpointAddress, urb);
		ret = -ESHUTDOWN;
		goto free_priv;
	}
	if (*ep_state & (EP_GETTING_STREAMS | EP_GETTING_NO_STREAMS)) {
		xhci_warn(xhci, "WARN: Can't enqueue URB, ep in streams transition state %x\n",
			  *ep_state);
		ret = -EINVAL;
		goto free_priv;
	}
	if (*ep_state & EP_SOFT_CLEAR_TOGGLE) {
		xhci_warn(xhci, "Can't enqueue URB while manually clearing toggle\n");
		ret = -EINVAL;
		goto free_priv;
	}

	switch (usb_endpoint_type(&urb->ep->desc)) {

	case USB_ENDPOINT_XFER_CONTROL:
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					 slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
	}

	if (ret) {
free_priv:
		xhci_urb_free_priv(urb_priv);
		urb->hcpriv = NULL;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}

/*
 * Remove the URB's TD from the endpoint ring. This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer. The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring. Since the ring is a contiguous structure, they can't be physically
 * removed. Instead, there are three cases to handle:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command. This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs. (No-ops shouldn't be chained.) The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv	*urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_command *command;
	struct xhci_virt_device *vdev;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);

	trace_xhci_urb_dequeue(urb);

	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	/* give back URB now if we can't queue it for cancel */
	vdev = xhci->devs[urb->dev->slot_id];
	urb_priv = urb->hcpriv;
	if (!vdev || !urb_priv)
		goto err_giveback;

	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &vdev->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep || !ep_ring)
		goto err_giveback;

	/* If xHC is dead take it down and return ALL URBs in xhci_hc_died() */
	temp = readl(&xhci->op_regs->status);
	if (temp == ~(u32)0 || xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_hc_died(xhci);
		goto done;
	}

	/*
	 * Check that the ring has not been re-allocated since the URB was
	 * enqueued. If it has, make sure none of the ring-related pointers in
	 * this URB's private data (such as td_list) are touched; otherwise we
	 * would overwrite freed data.
	 */
1785 if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
1786 xhci_err(xhci, "Canceled URB td not found on endpoint ring");
1787 for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
1788 td = &urb_priv->td[i];
1789 if (!list_empty(&td->cancelled_td_list))
1790 list_del_init(&td->cancelled_td_list);
1791 }
1792 goto err_giveback;
1793 }
1794
1795 if (xhci->xhc_state & XHCI_STATE_HALTED) {
1796 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1797 "HC halted, freeing TD manually.");
1798 for (i = urb_priv->num_tds_done;
1799 i < urb_priv->num_tds;
1800 i++) {
1801 td = &urb_priv->td[i];
1802 if (!list_empty(&td->td_list))
1803 list_del_init(&td->td_list);
1804 if (!list_empty(&td->cancelled_td_list))
1805 list_del_init(&td->cancelled_td_list);
1806 }
1807 goto err_giveback;
1808 }
1809
1810 i = urb_priv->num_tds_done;
1811 if (i < urb_priv->num_tds)
1812 xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
1813 "Cancel URB %p, dev %s, ep 0x%x, "
1814 "starting at offset 0x%llx",
1815 urb, urb->dev->devpath,
1816 urb->ep->desc.bEndpointAddress,
1817 (unsigned long long) xhci_trb_virt_to_dma(
1818 urb_priv->td[i].start_seg,
1819 urb_priv->td[i].first_trb));
1820
1821 for (; i < urb_priv->num_tds; i++) {
1822 td = &urb_priv->td[i];
1823 /* TD can already be on cancelled list if ep halted on it */
1824 if (list_empty(&td->cancelled_td_list)) {
1825 td->cancel_status = TD_DIRTY;
1826 list_add_tail(&td->cancelled_td_list,
1827 &ep->cancelled_td_list);
1828 }
1829 }
1830
1831 /* Queue a stop endpoint command, but only if this is
1832 * the first cancellation to be handled.
1833 */
1834 if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
1835 command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
1836 if (!command) {
1837 ret = -ENOMEM;
1838 goto done;
1839 }
1840 ep->ep_state |= EP_STOP_CMD_PENDING;
1841 ep->stop_cmd_timer.expires = jiffies +
1842 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
1843 add_timer(&ep->stop_cmd_timer);
1844 xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
1845 ep_index, 0);
1846 xhci_ring_cmd_db(xhci);
1847 }
1848done:
1849 spin_unlock_irqrestore(&xhci->lock, flags);
1850 return ret;
1851
1852err_giveback:
1853 if (urb_priv)
1854 xhci_urb_free_priv(urb_priv);
1855 usb_hcd_unlink_urb_from_ep(hcd, urb);
1856 spin_unlock_irqrestore(&xhci->lock, flags);
1857 usb_hcd_giveback_urb(hcd, urb, -ESHUTDOWN);
1858 return ret;
1859}
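
/*
 * Illustrative sketch only, not part of this driver: how a class driver
 * typically reaches xhci_urb_dequeue() above. "my_dev" and its URB field
 * are hypothetical; usb_unlink_urb() is the real USB-core entry point
 * that ends up in the HCD's urb_dequeue hook.
 */
struct my_dev {
	struct urb *bulk_urb;
};

static void my_driver_cancel(struct my_dev *dev)
{
	/*
	 * Asynchronous cancel: usb_unlink_urb() normally returns
	 * -EINPROGRESS, and the URB completion handler later runs with
	 * urb->status == -ECONNRESET once the HCD gives the URB back.
	 */
	usb_unlink_urb(dev->bulk_urb);
}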
1860
1861/* Drop an endpoint from a new bandwidth configuration for this device.
1862 * Only one call to this function is allowed per endpoint before
1863 * check_bandwidth() or reset_bandwidth() must be called.
1864 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1865 * add the endpoint to the schedule with possibly new parameters denoted by a
1866 * different endpoint descriptor in usb_host_endpoint.
1867 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1868 * not allowed.
1869 *
1870 * The USB core will not allow URBs to be queued to an endpoint that is being
1871 * disabled, so there's no need for mutual exclusion to protect
1872 * the xhci->devs[slot_id] structure.
1873 */
1874int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1875 struct usb_host_endpoint *ep)
1876{
1877 struct xhci_hcd *xhci;
1878 struct xhci_container_ctx *in_ctx, *out_ctx;
1879 struct xhci_input_control_ctx *ctrl_ctx;
1880 unsigned int ep_index;
1881 struct xhci_ep_ctx *ep_ctx;
1882 u32 drop_flag;
1883 u32 new_add_flags, new_drop_flags;
1884 int ret;
1885
1886 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1887 if (ret <= 0)
1888 return ret;
1889 xhci = hcd_to_xhci(hcd);
1890 if (xhci->xhc_state & XHCI_STATE_DYING)
1891 return -ENODEV;
1892
1893 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
1894 drop_flag = xhci_get_endpoint_flag(&ep->desc);
1895 if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
1896 xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
1897 __func__, drop_flag);
1898 return 0;
1899 }
1900
1901 in_ctx = xhci->devs[udev->slot_id]->in_ctx;
1902 out_ctx = xhci->devs[udev->slot_id]->out_ctx;
1903 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1904 if (!ctrl_ctx) {
1905 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1906 __func__);
1907 return 0;
1908 }
1909
1910 ep_index = xhci_get_endpoint_index(&ep->desc);
1911 ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
1912 /* If the HC already knows the endpoint is disabled,
1913 * or the HCD has noted it is disabled, ignore this request
1914 */
1915 if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) ||
1916 le32_to_cpu(ctrl_ctx->drop_flags) &
1917 xhci_get_endpoint_flag(&ep->desc)) {
1918 /* Do not warn when called after a usb_device_reset */
1919 if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL)
1920 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
1921 __func__, ep);
1922 return 0;
1923 }
1924
1925 ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag);
1926 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
1927
1928 ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag);
1929 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
1930
1931 xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index);
1932
1933 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);
1934
1935 xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
1936 (unsigned int) ep->desc.bEndpointAddress,
1937 udev->slot_id,
1938 (unsigned int) new_drop_flags,
1939 (unsigned int) new_add_flags);
1940 return 0;
1941}
1942EXPORT_SYMBOL_GPL(xhci_drop_endpoint);
1943
1944/* Add an endpoint to a new possible bandwidth configuration for this device.
1945 * Only one call to this function is allowed per endpoint before
1946 * check_bandwidth() or reset_bandwidth() must be called.
1947 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
1948 * add the endpoint to the schedule with possibly new parameters denoted by a
1949 * different endpoint descriptor in usb_host_endpoint.
1950 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
1951 * not allowed.
1952 *
1953 * The USB core will not allow URBs to be queued to an endpoint until the
1954 * configuration or alt setting is installed in the device, so there's no need
1955 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
1956 */
1957int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1958 struct usb_host_endpoint *ep)
1959{
1960 struct xhci_hcd *xhci;
1961 struct xhci_container_ctx *in_ctx;
1962 unsigned int ep_index;
1963 struct xhci_input_control_ctx *ctrl_ctx;
1964 struct xhci_ep_ctx *ep_ctx;
1965 u32 added_ctxs;
1966 u32 new_add_flags, new_drop_flags;
1967 struct xhci_virt_device *virt_dev;
1968 int ret = 0;
1969
1970 ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
1971 if (ret <= 0) {
1972 /* So we won't queue a reset ep command for a root hub */
1973 ep->hcpriv = NULL;
1974 return ret;
1975 }
1976 xhci = hcd_to_xhci(hcd);
1977 if (xhci->xhc_state & XHCI_STATE_DYING)
1978 return -ENODEV;
1979
1980 added_ctxs = xhci_get_endpoint_flag(&ep->desc);
1981 if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
1982 /* FIXME when we have to issue an evaluate endpoint command to
1983 * deal with ep0 max packet size changing once we get the
1984 * descriptors
1985 */
1986 xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
1987 __func__, added_ctxs);
1988 return 0;
1989 }
1990
1991 virt_dev = xhci->devs[udev->slot_id];
1992 in_ctx = virt_dev->in_ctx;
1993 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
1994 if (!ctrl_ctx) {
1995 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
1996 __func__);
1997 return 0;
1998 }
1999
2000 ep_index = xhci_get_endpoint_index(&ep->desc);
2001 /* If this endpoint is already in use, and the upper layers are trying
2002 * to add it again without dropping it, reject the addition.
2003 */
2004 if (virt_dev->eps[ep_index].ring &&
2005 !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) {
2006 xhci_warn(xhci, "Trying to add endpoint 0x%x "
2007 "without dropping it.\n",
2008 (unsigned int) ep->desc.bEndpointAddress);
2009 return -EINVAL;
2010 }
2011
2012 /* If the HCD has already noted the endpoint is enabled,
2013 * ignore this request.
2014 */
2015 if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) {
2016 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
2017 __func__, ep);
2018 return 0;
2019 }
2020
2021 /*
2022 * Configuration and alternate setting changes must be done in
2023	 * process context, not interrupt context (or so the documentation
2024	 * for usb_set_interface() and usb_set_configuration() claims).
2025 */
2026 if (xhci_endpoint_init(xhci, virt_dev, udev, ep, GFP_NOIO) < 0) {
2027 dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
2028 __func__, ep->desc.bEndpointAddress);
2029 return -ENOMEM;
2030 }
2031
2032 ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs);
2033 new_add_flags = le32_to_cpu(ctrl_ctx->add_flags);
2034
2035 /* If xhci_endpoint_disable() was called for this endpoint, but the
2036 * xHC hasn't been notified yet through the check_bandwidth() call,
2037 * this re-adds a new state for the endpoint from the new endpoint
2038 * descriptors. We must drop and re-add this endpoint, so we leave the
2039 * drop flags alone.
2040 */
2041 new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
2042
2043 /* Store the usb_device pointer for later use */
2044 ep->hcpriv = udev;
2045
2046 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
2047 trace_xhci_add_endpoint(ep_ctx);
2048
2049 xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n",
2050 (unsigned int) ep->desc.bEndpointAddress,
2051 udev->slot_id,
2052 (unsigned int) new_drop_flags,
2053 (unsigned int) new_add_flags);
2054 return 0;
2055}
2056EXPORT_SYMBOL_GPL(xhci_add_endpoint);
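
/*
 * Illustrative sketch only: the flag discipline used by
 * xhci_drop_endpoint() above, in isolation. Dropping an endpoint sets
 * its bit in drop_flags and clears it in add_flags; the add path sets
 * add_flags but, as explained above, deliberately leaves drop_flags
 * alone. The input control context fields are little-endian, hence the
 * cpu_to_le32() conversions.
 */
static void example_mark_ep_dropped(struct xhci_input_control_ctx *ctrl_ctx,
				    u32 ep_flag)
{
	ctrl_ctx->drop_flags |= cpu_to_le32(ep_flag);
	ctrl_ctx->add_flags &= cpu_to_le32(~ep_flag);
}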
2057
2058static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
2059{
2060 struct xhci_input_control_ctx *ctrl_ctx;
2061 struct xhci_ep_ctx *ep_ctx;
2062 struct xhci_slot_ctx *slot_ctx;
2063 int i;
2064
2065 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
2066 if (!ctrl_ctx) {
2067 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2068 __func__);
2069 return;
2070 }
2071
2072 /* When a device's add flag and drop flag are zero, any subsequent
2073 * configure endpoint command will leave that endpoint's state
2074 * untouched. Make sure we don't leave any old state in the input
2075 * endpoint contexts.
2076 */
2077 ctrl_ctx->drop_flags = 0;
2078 ctrl_ctx->add_flags = 0;
2079 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
2080 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
2081 /* Endpoint 0 is always valid */
2082 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
2083 for (i = 1; i < 31; i++) {
2084 ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
2085 ep_ctx->ep_info = 0;
2086 ep_ctx->ep_info2 = 0;
2087 ep_ctx->deq = 0;
2088 ep_ctx->tx_info = 0;
2089 }
2090}
2091
2092static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
2093 struct usb_device *udev, u32 *cmd_status)
2094{
2095 int ret;
2096
2097 switch (*cmd_status) {
2098 case COMP_COMMAND_ABORTED:
2099 case COMP_COMMAND_RING_STOPPED:
2100 xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n");
2101 ret = -ETIME;
2102 break;
2103 case COMP_RESOURCE_ERROR:
2104 dev_warn(&udev->dev,
2105 "Not enough host controller resources for new device state.\n");
2106 ret = -ENOMEM;
2107 /* FIXME: can we allocate more resources for the HC? */
2108 break;
2109 case COMP_BANDWIDTH_ERROR:
2110 case COMP_SECONDARY_BANDWIDTH_ERROR:
2111 dev_warn(&udev->dev,
2112 "Not enough bandwidth for new device state.\n");
2113 ret = -ENOSPC;
2114 /* FIXME: can we go back to the old state? */
2115 break;
2116 case COMP_TRB_ERROR:
2117 /* the HCD set up something wrong */
2118 dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
2119 "add flag = 1, "
2120 "and endpoint is not disabled.\n");
2121 ret = -EINVAL;
2122 break;
2123 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2124 dev_warn(&udev->dev,
2125 "ERROR: Incompatible device for endpoint configure command.\n");
2126 ret = -ENODEV;
2127 break;
2128 case COMP_SUCCESS:
2129 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2130 "Successful Endpoint Configure command");
2131 ret = 0;
2132 break;
2133 default:
2134 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2135 *cmd_status);
2136 ret = -EINVAL;
2137 break;
2138 }
2139 return ret;
2140}
2141
2142static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
2143 struct usb_device *udev, u32 *cmd_status)
2144{
2145 int ret;
2146
2147 switch (*cmd_status) {
2148 case COMP_COMMAND_ABORTED:
2149 case COMP_COMMAND_RING_STOPPED:
2150 xhci_warn(xhci, "Timeout while waiting for evaluate context command\n");
2151 ret = -ETIME;
2152 break;
2153 case COMP_PARAMETER_ERROR:
2154 dev_warn(&udev->dev,
2155 "WARN: xHCI driver setup invalid evaluate context command.\n");
2156 ret = -EINVAL;
2157 break;
2158 case COMP_SLOT_NOT_ENABLED_ERROR:
2159 dev_warn(&udev->dev,
2160 "WARN: slot not enabled for evaluate context command.\n");
2161 ret = -EINVAL;
2162 break;
2163 case COMP_CONTEXT_STATE_ERROR:
2164 dev_warn(&udev->dev,
2165 "WARN: invalid context state for evaluate context command.\n");
2166 ret = -EINVAL;
2167 break;
2168 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2169 dev_warn(&udev->dev,
2170 "ERROR: Incompatible device for evaluate context command.\n");
2171 ret = -ENODEV;
2172 break;
2173 case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
2174 /* Max Exit Latency too large error */
2175 dev_warn(&udev->dev, "WARN: Max Exit Latency too large\n");
2176 ret = -EINVAL;
2177 break;
2178 case COMP_SUCCESS:
2179 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2180 "Successful evaluate context command");
2181 ret = 0;
2182 break;
2183 default:
2184 xhci_err(xhci, "ERROR: unexpected command completion code 0x%x.\n",
2185 *cmd_status);
2186 ret = -EINVAL;
2187 break;
2188 }
2189 return ret;
2190}
2191
2192static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci,
2193 struct xhci_input_control_ctx *ctrl_ctx)
2194{
2195 u32 valid_add_flags;
2196 u32 valid_drop_flags;
2197
2198 /* Ignore the slot flag (bit 0), and the default control endpoint flag
2199 * (bit 1). The default control endpoint is added during the Address
2200 * Device command and is never removed until the slot is disabled.
2201 */
2202 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2203 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2204
2205 /* Use hweight32 to count the number of ones in the add flags, or
2206 * number of endpoints added. Don't count endpoints that are changed
2207 * (both added and dropped).
2208 */
2209 return hweight32(valid_add_flags) -
2210 hweight32(valid_add_flags & valid_drop_flags);
2211}
2212
2213static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci,
2214 struct xhci_input_control_ctx *ctrl_ctx)
2215{
2216 u32 valid_add_flags;
2217 u32 valid_drop_flags;
2218
2219 valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2;
2220 valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2;
2221
2222 return hweight32(valid_drop_flags) -
2223 hweight32(valid_add_flags & valid_drop_flags);
2224}
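
/*
 * Worked example for the two counting helpers above, with made-up flag
 * values: an interface change that modifies EP1 OUT and adds EP1 IN has
 * both bits set in the add flags, but only EP1 OUT set in the drop flags
 * (after the slot and ep0 bits are shifted away).
 */
static u32 example_net_new_eps(void)
{
	u32 valid_add_flags = 0x3;	/* EP1 OUT and EP1 IN added */
	u32 valid_drop_flags = 0x1;	/* EP1 OUT also dropped: changed */

	/* 2 added - 1 changed = 1 genuinely new endpoint context */
	return hweight32(valid_add_flags) -
		hweight32(valid_add_flags & valid_drop_flags);
}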
2225
2226/*
2227 * We need to reserve the new number of endpoints before the configure endpoint
2228 * command completes. We can't subtract the dropped endpoints from the number
2229 * of active endpoints until the command completes because we can oversubscribe
2230 * the host in this case:
2231 *
2232 * - the first configure endpoint command drops more endpoints than it adds
2233 * - a second configure endpoint command that adds more endpoints is queued
2234 * - the first configure endpoint command fails, so the config is unchanged
2235 * - the second command may succeed, even though there aren't enough resources
2236 *
2237 * Must be called with xhci->lock held.
2238 */
2239static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
2240 struct xhci_input_control_ctx *ctrl_ctx)
2241{
2242 u32 added_eps;
2243
2244 added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2245 if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
2246 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2247 "Not enough ep ctxs: "
2248 "%u active, need to add %u, limit is %u.",
2249 xhci->num_active_eps, added_eps,
2250 xhci->limit_active_eps);
2251 return -ENOMEM;
2252 }
2253 xhci->num_active_eps += added_eps;
2254 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2255 "Adding %u ep ctxs, %u now active.", added_eps,
2256 xhci->num_active_eps);
2257 return 0;
2258}
2259
2260/*
2261 * The xHC failed the configure endpoint command for some other reason, so we
2262 * need to revert the resources that the failed configuration would have used.
2263 *
2264 * Must be called with xhci->lock held.
2265 */
2266static void xhci_free_host_resources(struct xhci_hcd *xhci,
2267 struct xhci_input_control_ctx *ctrl_ctx)
2268{
2269 u32 num_failed_eps;
2270
2271 num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
2272 xhci->num_active_eps -= num_failed_eps;
2273 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2274 "Removing %u failed ep ctxs, %u now active.",
2275 num_failed_eps,
2276 xhci->num_active_eps);
2277}
2278
2279/*
2280 * Now that the command has completed, clean up the active endpoint count by
2281 * subtracting out the endpoints that were dropped (but not changed).
2282 *
2283 * Must be called with xhci->lock held.
2284 */
2285static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
2286 struct xhci_input_control_ctx *ctrl_ctx)
2287{
2288 u32 num_dropped_eps;
2289
2290 num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
2291 xhci->num_active_eps -= num_dropped_eps;
2292 if (num_dropped_eps)
2293 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2294 "Removing %u dropped ep ctxs, %u now active.",
2295 num_dropped_eps,
2296 xhci->num_active_eps);
2297}
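
/*
 * Worked scenario for the three reservation helpers above (illustrative
 * numbers only): with limit_active_eps = 64 and num_active_eps = 60, a
 * command that adds 3 endpoints and drops 2 reserves eagerly, so
 * num_active_eps becomes 63. If the command fails,
 * xhci_free_host_resources() gives the 3 back; if it succeeds,
 * xhci_finish_resource_reservation() subtracts the 2 dropped endpoints,
 * leaving 61 active.
 */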
2298
2299static unsigned int xhci_get_block_size(struct usb_device *udev)
2300{
2301 switch (udev->speed) {
2302 case USB_SPEED_LOW:
2303 case USB_SPEED_FULL:
2304 return FS_BLOCK;
2305 case USB_SPEED_HIGH:
2306 return HS_BLOCK;
2307 case USB_SPEED_SUPER:
2308 case USB_SPEED_SUPER_PLUS:
2309 return SS_BLOCK;
2310 case USB_SPEED_UNKNOWN:
2311 case USB_SPEED_WIRELESS:
2312 default:
2313 /* Should never happen */
2314 return 1;
2315 }
2316}
2317
2318static unsigned int
2319xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
2320{
2321 if (interval_bw->overhead[LS_OVERHEAD_TYPE])
2322 return LS_OVERHEAD;
2323 if (interval_bw->overhead[FS_OVERHEAD_TYPE])
2324 return FS_OVERHEAD;
2325 return HS_OVERHEAD;
2326}
2327
2328/* If we are changing a LS/FS device under a HS hub,
2329 * make sure (if we are activating a new TT) that the HS bus has enough
2330 * bandwidth for this new TT.
2331 */
2332static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
2333 struct xhci_virt_device *virt_dev,
2334 int old_active_eps)
2335{
2336 struct xhci_interval_bw_table *bw_table;
2337 struct xhci_tt_bw_info *tt_info;
2338
2339 /* Find the bandwidth table for the root port this TT is attached to. */
2340 bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
2341 tt_info = virt_dev->tt_info;
2342 /* If this TT already had active endpoints, the bandwidth for this TT
2343 * has already been added. Removing all periodic endpoints (and thus
2344	 * making the TT inactive) will only decrease the bandwidth used.
2345 */
2346 if (old_active_eps)
2347 return 0;
2348 if (old_active_eps == 0 && tt_info->active_eps != 0) {
2349 if (bw_table->bw_used + TT_HS_OVERHEAD > HS_BW_LIMIT)
2350 return -ENOMEM;
2351 return 0;
2352 }
2353 /* Not sure why we would have no new active endpoints...
2354 *
2355 * Maybe because of an Evaluate Context change for a hub update or a
2356 * control endpoint 0 max packet size change?
2357 * FIXME: skip the bandwidth calculation in that case.
2358 */
2359 return 0;
2360}
2361
2362static int xhci_check_ss_bw(struct xhci_hcd *xhci,
2363 struct xhci_virt_device *virt_dev)
2364{
2365 unsigned int bw_reserved;
2366
2367 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_IN, 100);
2368 if (virt_dev->bw_table->ss_bw_in > (SS_BW_LIMIT_IN - bw_reserved))
2369 return -ENOMEM;
2370
2371 bw_reserved = DIV_ROUND_UP(SS_BW_RESERVED*SS_BW_LIMIT_OUT, 100);
2372 if (virt_dev->bw_table->ss_bw_out > (SS_BW_LIMIT_OUT - bw_reserved))
2373 return -ENOMEM;
2374
2375 return 0;
2376}
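
/*
 * Illustrative arithmetic only: DIV_ROUND_UP() turns the reserved
 * percentage into bandwidth blocks, rounding up so the reservation is
 * never understated. The numbers below are made up and are not the real
 * SS_BW_RESERVED/SS_BW_LIMIT_IN values.
 */
static unsigned int example_reserved_blocks(void)
{
	unsigned int reserved_percent = 10;
	unsigned int limit_blocks = 1000;

	return DIV_ROUND_UP(reserved_percent * limit_blocks, 100); /* 100 */
}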
2377
2378/*
2379 * This algorithm is a very conservative estimate of the worst-case scheduling
2380 * scenario for any one interval. The hardware dynamically schedules the
2381 * packets, so we can't tell which microframe could be the limiting factor in
2382 * the bandwidth scheduling. This only takes into account periodic endpoints.
2383 *
2384 * Obviously, we can't solve an NP-complete problem to find the minimum worst
2385 * case scenario. Instead, we come up with an estimate that is no less than
2386 * the worst case bandwidth used for any one microframe, but may be an
2387 * over-estimate.
2388 *
2389 * We walk the requirements for each endpoint by interval, starting with the
2390 * smallest interval, and place packets in the schedule where there is only one
2391 * possible way to schedule packets for that interval. In order to simplify
2392 * this algorithm, we record the largest max packet size for each interval, and
2393 * assume all packets will be that size.
2394 *
2395 * For interval 0, we obviously must schedule all packets in every scheduling opportunity.
2396 * The bandwidth for interval 0 is just the amount of data to be transmitted
2397 * (the sum of all max ESIT payload sizes, plus any overhead per packet times
2398 * the number of packets).
2399 *
2400 * For interval 1, we have two possible microframes to schedule those packets
2401 * in. For this algorithm, if we can schedule the same number of packets for
2402 * each possible scheduling opportunity (each microframe), we will do so. The
2403 * remaining number of packets will be saved to be transmitted in the gaps in
2404 * the next interval's scheduling sequence.
2405 *
2406 * As we move those remaining packets to be scheduled with interval 2 packets,
2407 * we have to double the number of remaining packets to transmit. This is
2408 * because the intervals are actually powers of 2, and we would be transmitting
2409 * the previous interval's packets twice in this interval. We also have to be
2410 * sure that when we look at the largest max packet size for this interval, we
2411 * also look at the largest max packet size for the remaining packets and take
2412 * the greater of the two.
2413 *
2414 * The algorithm continues to evenly distribute packets in each scheduling
2415 * opportunity, and push the remaining packets out, until we get to the last
2416 * interval. Then those packets and their associated overhead are just added
2417 * to the bandwidth used.
2418 */
2419static int xhci_check_bw_table(struct xhci_hcd *xhci,
2420 struct xhci_virt_device *virt_dev,
2421 int old_active_eps)
2422{
2423 unsigned int bw_reserved;
2424 unsigned int max_bandwidth;
2425 unsigned int bw_used;
2426 unsigned int block_size;
2427 struct xhci_interval_bw_table *bw_table;
2428 unsigned int packet_size = 0;
2429 unsigned int overhead = 0;
2430 unsigned int packets_transmitted = 0;
2431 unsigned int packets_remaining = 0;
2432 unsigned int i;
2433
2434 if (virt_dev->udev->speed >= USB_SPEED_SUPER)
2435 return xhci_check_ss_bw(xhci, virt_dev);
2436
2437 if (virt_dev->udev->speed == USB_SPEED_HIGH) {
2438 max_bandwidth = HS_BW_LIMIT;
2439 /* Convert percent of bus BW reserved to blocks reserved */
2440 bw_reserved = DIV_ROUND_UP(HS_BW_RESERVED * max_bandwidth, 100);
2441 } else {
2442 max_bandwidth = FS_BW_LIMIT;
2443 bw_reserved = DIV_ROUND_UP(FS_BW_RESERVED * max_bandwidth, 100);
2444 }
2445
2446 bw_table = virt_dev->bw_table;
2447 /* We need to translate the max packet size and max ESIT payloads into
2448 * the units the hardware uses.
2449 */
2450 block_size = xhci_get_block_size(virt_dev->udev);
2451
2452 /* If we are manipulating a LS/FS device under a HS hub, double check
2453	 * that the HS bus has enough bandwidth if we are activating a new TT.
2454 */
2455 if (virt_dev->tt_info) {
2456 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2457 "Recalculating BW for rootport %u",
2458 virt_dev->real_port);
2459 if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
2460 xhci_warn(xhci, "Not enough bandwidth on HS bus for "
2461 "newly activated TT.\n");
2462 return -ENOMEM;
2463 }
2464 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2465 "Recalculating BW for TT slot %u port %u",
2466 virt_dev->tt_info->slot_id,
2467 virt_dev->tt_info->ttport);
2468 } else {
2469 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2470 "Recalculating BW for rootport %u",
2471 virt_dev->real_port);
2472 }
2473
2474 /* Add in how much bandwidth will be used for interval zero, or the
2475 * rounded max ESIT payload + number of packets * largest overhead.
2476 */
2477 bw_used = DIV_ROUND_UP(bw_table->interval0_esit_payload, block_size) +
2478 bw_table->interval_bw[0].num_packets *
2479 xhci_get_largest_overhead(&bw_table->interval_bw[0]);
2480
2481 for (i = 1; i < XHCI_MAX_INTERVAL; i++) {
2482 unsigned int bw_added;
2483 unsigned int largest_mps;
2484 unsigned int interval_overhead;
2485
2486 /*
2487 * How many packets could we transmit in this interval?
2488 * If packets didn't fit in the previous interval, we will need
2489 * to transmit that many packets twice within this interval.
2490 */
2491 packets_remaining = 2 * packets_remaining +
2492 bw_table->interval_bw[i].num_packets;
2493
2494 /* Find the largest max packet size of this or the previous
2495 * interval.
2496 */
2497 if (list_empty(&bw_table->interval_bw[i].endpoints))
2498 largest_mps = 0;
2499 else {
2500 struct xhci_virt_ep *virt_ep;
2501 struct list_head *ep_entry;
2502
2503 ep_entry = bw_table->interval_bw[i].endpoints.next;
2504 virt_ep = list_entry(ep_entry,
2505 struct xhci_virt_ep, bw_endpoint_list);
2506 /* Convert to blocks, rounding up */
2507 largest_mps = DIV_ROUND_UP(
2508 virt_ep->bw_info.max_packet_size,
2509 block_size);
2510 }
2511 if (largest_mps > packet_size)
2512 packet_size = largest_mps;
2513
2514 /* Use the larger overhead of this or the previous interval. */
2515 interval_overhead = xhci_get_largest_overhead(
2516 &bw_table->interval_bw[i]);
2517 if (interval_overhead > overhead)
2518 overhead = interval_overhead;
2519
2520 /* How many packets can we evenly distribute across
2521 * (1 << (i + 1)) possible scheduling opportunities?
2522 */
2523 packets_transmitted = packets_remaining >> (i + 1);
2524
2525 /* Add in the bandwidth used for those scheduled packets */
2526 bw_added = packets_transmitted * (overhead + packet_size);
2527
2528 /* How many packets do we have remaining to transmit? */
2529 packets_remaining = packets_remaining % (1 << (i + 1));
2530
2531 /* What largest max packet size should those packets have? */
2532 /* If we've transmitted all packets, don't carry over the
2533 * largest packet size.
2534 */
2535 if (packets_remaining == 0) {
2536 packet_size = 0;
2537 overhead = 0;
2538 } else if (packets_transmitted > 0) {
2539 /* Otherwise if we do have remaining packets, and we've
2540 * scheduled some packets in this interval, take the
2541 * largest max packet size from endpoints with this
2542 * interval.
2543 */
2544 packet_size = largest_mps;
2545 overhead = interval_overhead;
2546 }
2547 /* Otherwise carry over packet_size and overhead from the last
2548 * time we had a remainder.
2549 */
2550 bw_used += bw_added;
2551 if (bw_used > max_bandwidth) {
2552 xhci_warn(xhci, "Not enough bandwidth. "
2553 "Proposed: %u, Max: %u\n",
2554 bw_used, max_bandwidth);
2555 return -ENOMEM;
2556 }
2557 }
2558 /*
2559 * Ok, we know we have some packets left over after even-handedly
2560 * scheduling interval 15. We don't know which microframes they will
2561 * fit into, so we over-schedule and say they will be scheduled every
2562 * microframe.
2563 */
2564 if (packets_remaining > 0)
2565 bw_used += overhead + packet_size;
2566
2567 if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
2568 unsigned int port_index = virt_dev->real_port - 1;
2569
2570 /* OK, we're manipulating a HS device attached to a
2571 * root port bandwidth domain. Include the number of active TTs
2572 * in the bandwidth used.
2573 */
2574 bw_used += TT_HS_OVERHEAD *
2575 xhci->rh_bw[port_index].num_active_tts;
2576 }
2577
2578 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
2579 "Final bandwidth: %u, Limit: %u, Reserved: %u, "
2580			"Available: %u percent",
2581 bw_used, max_bandwidth, bw_reserved,
2582 (max_bandwidth - bw_used - bw_reserved) * 100 /
2583 max_bandwidth);
2584
2585 bw_used += bw_reserved;
2586 if (bw_used > max_bandwidth) {
2587 xhci_warn(xhci, "Not enough bandwidth. Proposed: %u, Max: %u\n",
2588 bw_used, max_bandwidth);
2589 return -ENOMEM;
2590 }
2591
2592 bw_table->bw_used = bw_used;
2593 return 0;
2594}
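
/*
 * Worked pass through the interval loop above, with made-up numbers:
 * suppose interval 1 has 5 packets pending and interval 2 has none.
 *
 *   i = 1: packets_remaining = 2 * 0 + 5 = 5
 *          packets_transmitted = 5 >> 2 = 1 packet in each of the
 *          (1 << 2) = 4 scheduling opportunities
 *          packets_remaining = 5 % 4 = 1 carried forward
 *   i = 2: packets_remaining = 2 * 1 + 0 = 2
 *          packets_transmitted = 2 >> 3 = 0
 *          packets_remaining = 2 % 8 = 2 carried forward
 *
 * Carried-over packets keep doubling until they are either scheduled or,
 * after interval 15, charged as if they were sent in every microframe.
 */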
2595
2596static bool xhci_is_async_ep(unsigned int ep_type)
2597{
2598 return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
2599 ep_type != ISOC_IN_EP &&
2600 ep_type != INT_IN_EP);
2601}
2602
2603static bool xhci_is_sync_in_ep(unsigned int ep_type)
2604{
2605 return (ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
2606}
2607
2608static unsigned int xhci_get_ss_bw_consumed(struct xhci_bw_info *ep_bw)
2609{
2610 unsigned int mps = DIV_ROUND_UP(ep_bw->max_packet_size, SS_BLOCK);
2611
2612 if (ep_bw->ep_interval == 0)
2613 return SS_OVERHEAD_BURST +
2614 (ep_bw->mult * ep_bw->num_packets *
2615 (SS_OVERHEAD + mps));
2616 return DIV_ROUND_UP(ep_bw->mult * ep_bw->num_packets *
2617 (SS_OVERHEAD + mps + SS_OVERHEAD_BURST),
2618 1 << ep_bw->ep_interval);
2620}
2621
2622static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
2623 struct xhci_bw_info *ep_bw,
2624 struct xhci_interval_bw_table *bw_table,
2625 struct usb_device *udev,
2626 struct xhci_virt_ep *virt_ep,
2627 struct xhci_tt_bw_info *tt_info)
2628{
2629 struct xhci_interval_bw *interval_bw;
2630 int normalized_interval;
2631
2632 if (xhci_is_async_ep(ep_bw->type))
2633 return;
2634
2635 if (udev->speed >= USB_SPEED_SUPER) {
2636 if (xhci_is_sync_in_ep(ep_bw->type))
2637 xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
2638 xhci_get_ss_bw_consumed(ep_bw);
2639 else
2640 xhci->devs[udev->slot_id]->bw_table->ss_bw_out -=
2641 xhci_get_ss_bw_consumed(ep_bw);
2642 return;
2643 }
2644
2645 /* SuperSpeed endpoints never get added to intervals in the table, so
2646 * this check is only valid for HS/FS/LS devices.
2647 */
2648 if (list_empty(&virt_ep->bw_endpoint_list))
2649 return;
2650 /* For LS/FS devices, we need to translate the interval expressed in
2651 * microframes to frames.
2652 */
2653 if (udev->speed == USB_SPEED_HIGH)
2654 normalized_interval = ep_bw->ep_interval;
2655 else
2656 normalized_interval = ep_bw->ep_interval - 3;
2657
2658 if (normalized_interval == 0)
2659 bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
2660 interval_bw = &bw_table->interval_bw[normalized_interval];
2661 interval_bw->num_packets -= ep_bw->num_packets;
2662 switch (udev->speed) {
2663 case USB_SPEED_LOW:
2664 interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
2665 break;
2666 case USB_SPEED_FULL:
2667 interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
2668 break;
2669 case USB_SPEED_HIGH:
2670 interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
2671 break;
2672 case USB_SPEED_SUPER:
2673 case USB_SPEED_SUPER_PLUS:
2674 case USB_SPEED_UNKNOWN:
2675 case USB_SPEED_WIRELESS:
2676 /* Should never happen because only LS/FS/HS endpoints will get
2677 * added to the endpoint list.
2678 */
2679 return;
2680 }
2681 if (tt_info)
2682 tt_info->active_eps -= 1;
2683 list_del_init(&virt_ep->bw_endpoint_list);
2684}
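
/*
 * Illustrative only: the interval normalization used above and in
 * xhci_add_ep_to_interval_table() below. LS/FS endpoint contexts encode
 * the interval exponent in microframes while their bandwidth table is
 * kept in frames (1 frame = 8 microframes = 2^3), hence the "- 3".
 */
static int example_normalized_interval(void)
{
	int ep_interval = 5;	/* full speed: 2^5 = 32 microframes */

	return ep_interval - 3;	/* table slot 2, i.e. 2^2 = 4 frames */
}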
2685
2686static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
2687 struct xhci_bw_info *ep_bw,
2688 struct xhci_interval_bw_table *bw_table,
2689 struct usb_device *udev,
2690 struct xhci_virt_ep *virt_ep,
2691 struct xhci_tt_bw_info *tt_info)
2692{
2693 struct xhci_interval_bw *interval_bw;
2694 struct xhci_virt_ep *smaller_ep;
2695 int normalized_interval;
2696
2697 if (xhci_is_async_ep(ep_bw->type))
2698 return;
2699
2700	if (udev->speed >= USB_SPEED_SUPER) {
2701 if (xhci_is_sync_in_ep(ep_bw->type))
2702 xhci->devs[udev->slot_id]->bw_table->ss_bw_in +=
2703 xhci_get_ss_bw_consumed(ep_bw);
2704 else
2705 xhci->devs[udev->slot_id]->bw_table->ss_bw_out +=
2706 xhci_get_ss_bw_consumed(ep_bw);
2707 return;
2708 }
2709
2710 /* For LS/FS devices, we need to translate the interval expressed in
2711 * microframes to frames.
2712 */
2713 if (udev->speed == USB_SPEED_HIGH)
2714 normalized_interval = ep_bw->ep_interval;
2715 else
2716 normalized_interval = ep_bw->ep_interval - 3;
2717
2718 if (normalized_interval == 0)
2719 bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
2720 interval_bw = &bw_table->interval_bw[normalized_interval];
2721 interval_bw->num_packets += ep_bw->num_packets;
2722 switch (udev->speed) {
2723 case USB_SPEED_LOW:
2724 interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
2725 break;
2726 case USB_SPEED_FULL:
2727 interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
2728 break;
2729 case USB_SPEED_HIGH:
2730 interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
2731 break;
2732 case USB_SPEED_SUPER:
2733 case USB_SPEED_SUPER_PLUS:
2734 case USB_SPEED_UNKNOWN:
2735 case USB_SPEED_WIRELESS:
2736 /* Should never happen because only LS/FS/HS endpoints will get
2737 * added to the endpoint list.
2738 */
2739 return;
2740 }
2741
2742 if (tt_info)
2743 tt_info->active_eps += 1;
2744 /* Insert the endpoint into the list, largest max packet size first. */
2745 list_for_each_entry(smaller_ep, &interval_bw->endpoints,
2746 bw_endpoint_list) {
2747 if (ep_bw->max_packet_size >=
2748 smaller_ep->bw_info.max_packet_size) {
2749 /* Add the new ep before the smaller endpoint */
2750 list_add_tail(&virt_ep->bw_endpoint_list,
2751 &smaller_ep->bw_endpoint_list);
2752 return;
2753 }
2754 }
2755 /* Add the new endpoint at the end of the list. */
2756 list_add_tail(&virt_ep->bw_endpoint_list,
2757 &interval_bw->endpoints);
2758}
2759
2760void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
2761 struct xhci_virt_device *virt_dev,
2762 int old_active_eps)
2763{
2764 struct xhci_root_port_bw_info *rh_bw_info;
2765 if (!virt_dev->tt_info)
2766 return;
2767
2768 rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
2769 if (old_active_eps == 0 &&
2770 virt_dev->tt_info->active_eps != 0) {
2771 rh_bw_info->num_active_tts += 1;
2772 rh_bw_info->bw_table.bw_used += TT_HS_OVERHEAD;
2773 } else if (old_active_eps != 0 &&
2774 virt_dev->tt_info->active_eps == 0) {
2775 rh_bw_info->num_active_tts -= 1;
2776 rh_bw_info->bw_table.bw_used -= TT_HS_OVERHEAD;
2777 }
2778}
2779
2780static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
2781 struct xhci_virt_device *virt_dev,
2782 struct xhci_container_ctx *in_ctx)
2783{
2784 struct xhci_bw_info ep_bw_info[31];
2785 int i;
2786 struct xhci_input_control_ctx *ctrl_ctx;
2787 int old_active_eps = 0;
2788
2789 if (virt_dev->tt_info)
2790 old_active_eps = virt_dev->tt_info->active_eps;
2791
2792 ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
2793 if (!ctrl_ctx) {
2794 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2795 __func__);
2796 return -ENOMEM;
2797 }
2798
2799 for (i = 0; i < 31; i++) {
2800 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2801 continue;
2802
2803 /* Make a copy of the BW info in case we need to revert this */
2804 memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
2805 sizeof(ep_bw_info[i]));
2806 /* Drop the endpoint from the interval table if the endpoint is
2807 * being dropped or changed.
2808 */
2809 if (EP_IS_DROPPED(ctrl_ctx, i))
2810 xhci_drop_ep_from_interval_table(xhci,
2811 &virt_dev->eps[i].bw_info,
2812 virt_dev->bw_table,
2813 virt_dev->udev,
2814 &virt_dev->eps[i],
2815 virt_dev->tt_info);
2816 }
2817 /* Overwrite the information stored in the endpoints' bw_info */
2818 xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
2819 for (i = 0; i < 31; i++) {
2820 /* Add any changed or added endpoints to the interval table */
2821 if (EP_IS_ADDED(ctrl_ctx, i))
2822 xhci_add_ep_to_interval_table(xhci,
2823 &virt_dev->eps[i].bw_info,
2824 virt_dev->bw_table,
2825 virt_dev->udev,
2826 &virt_dev->eps[i],
2827 virt_dev->tt_info);
2828 }
2829
2830 if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
2831 /* Ok, this fits in the bandwidth we have.
2832 * Update the number of active TTs.
2833 */
2834 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
2835 return 0;
2836 }
2837
2838 /* We don't have enough bandwidth for this, revert the stored info. */
2839 for (i = 0; i < 31; i++) {
2840 if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
2841 continue;
2842
2843 /* Drop the new copies of any added or changed endpoints from
2844 * the interval table.
2845 */
2846 if (EP_IS_ADDED(ctrl_ctx, i)) {
2847 xhci_drop_ep_from_interval_table(xhci,
2848 &virt_dev->eps[i].bw_info,
2849 virt_dev->bw_table,
2850 virt_dev->udev,
2851 &virt_dev->eps[i],
2852 virt_dev->tt_info);
2853 }
2854 /* Revert the endpoint back to its old information */
2855 memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
2856 sizeof(ep_bw_info[i]));
2857 /* Add any changed or dropped endpoints back into the table */
2858 if (EP_IS_DROPPED(ctrl_ctx, i))
2859 xhci_add_ep_to_interval_table(xhci,
2860 &virt_dev->eps[i].bw_info,
2861 virt_dev->bw_table,
2862 virt_dev->udev,
2863 &virt_dev->eps[i],
2864 virt_dev->tt_info);
2865 }
2866 return -ENOMEM;
2867}
2868
2870/* Issue a configure endpoint command or evaluate context command
2871 * and wait for it to finish.
2872 */
2873static int xhci_configure_endpoint(struct xhci_hcd *xhci,
2874 struct usb_device *udev,
2875 struct xhci_command *command,
2876 bool ctx_change, bool must_succeed)
2877{
2878 int ret;
2879 unsigned long flags;
2880 struct xhci_input_control_ctx *ctrl_ctx;
2881 struct xhci_virt_device *virt_dev;
2882 struct xhci_slot_ctx *slot_ctx;
2883
2884 if (!command)
2885 return -EINVAL;
2886
2887 spin_lock_irqsave(&xhci->lock, flags);
2888
2889 if (xhci->xhc_state & XHCI_STATE_DYING) {
2890 spin_unlock_irqrestore(&xhci->lock, flags);
2891 return -ESHUTDOWN;
2892 }
2893
2894 virt_dev = xhci->devs[udev->slot_id];
2895
2896 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
2897 if (!ctrl_ctx) {
2898 spin_unlock_irqrestore(&xhci->lock, flags);
2899 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2900 __func__);
2901 return -ENOMEM;
2902 }
2903
2904 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK) &&
2905 xhci_reserve_host_resources(xhci, ctrl_ctx)) {
2906 spin_unlock_irqrestore(&xhci->lock, flags);
2907 xhci_warn(xhci, "Not enough host resources, "
2908 "active endpoint contexts = %u\n",
2909 xhci->num_active_eps);
2910 return -ENOMEM;
2911 }
2912 if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
2913 xhci_reserve_bandwidth(xhci, virt_dev, command->in_ctx)) {
2914 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2915 xhci_free_host_resources(xhci, ctrl_ctx);
2916 spin_unlock_irqrestore(&xhci->lock, flags);
2917 xhci_warn(xhci, "Not enough bandwidth\n");
2918 return -ENOMEM;
2919 }
2920
2921 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
2922
2923 trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx);
2924 trace_xhci_configure_endpoint(slot_ctx);
2925
2926 if (!ctx_change)
2927 ret = xhci_queue_configure_endpoint(xhci, command,
2928 command->in_ctx->dma,
2929 udev->slot_id, must_succeed);
2930 else
2931 ret = xhci_queue_evaluate_context(xhci, command,
2932 command->in_ctx->dma,
2933 udev->slot_id, must_succeed);
2934 if (ret < 0) {
2935 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
2936 xhci_free_host_resources(xhci, ctrl_ctx);
2937 spin_unlock_irqrestore(&xhci->lock, flags);
2938 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
2939 "FIXME allocate a new ring segment");
2940 return -ENOMEM;
2941 }
2942 xhci_ring_cmd_db(xhci);
2943 spin_unlock_irqrestore(&xhci->lock, flags);
2944
2945 /* Wait for the configure endpoint command to complete */
2946 wait_for_completion(command->completion);
2947
2948 if (!ctx_change)
2949 ret = xhci_configure_endpoint_result(xhci, udev,
2950 &command->status);
2951 else
2952 ret = xhci_evaluate_context_result(xhci, udev,
2953 &command->status);
2954
2955 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
2956 spin_lock_irqsave(&xhci->lock, flags);
2957 /* If the command failed, remove the reserved resources.
2958 * Otherwise, clean up the estimate to include dropped eps.
2959 */
2960 if (ret)
2961 xhci_free_host_resources(xhci, ctrl_ctx);
2962 else
2963 xhci_finish_resource_reservation(xhci, ctrl_ctx);
2964 spin_unlock_irqrestore(&xhci->lock, flags);
2965 }
2966 return ret;
2967}
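
/*
 * Illustrative sketch only: the command lifecycle that
 * xhci_configure_endpoint() above follows, reduced to a stop endpoint
 * example. A command with a completion is queued under xhci->lock, the
 * command doorbell is rung, and the caller sleeps until the command
 * completion event fires. slot_id and ep_index are assumed valid here.
 */
static int example_stop_ep_sync(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	struct xhci_command *cmd;
	unsigned long flags;
	int ret;

	cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, ep_index, 0);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cmd);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	wait_for_completion(cmd->completion);

	ret = (cmd->status == COMP_SUCCESS) ? 0 : -EIO;
	xhci_free_command(xhci, cmd);
	return ret;
}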
2968
2969static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
2970 struct xhci_virt_device *vdev, int i)
2971{
2972 struct xhci_virt_ep *ep = &vdev->eps[i];
2973
2974 if (ep->ep_state & EP_HAS_STREAMS) {
2975 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on set_interface, freeing streams.\n",
2976 xhci_get_endpoint_address(i));
2977 xhci_free_stream_info(xhci, ep->stream_info);
2978 ep->stream_info = NULL;
2979 ep->ep_state &= ~EP_HAS_STREAMS;
2980 }
2981}
2982
2983/* Called after one or more calls to xhci_add_endpoint() or
2984 * xhci_drop_endpoint(). If this call fails, the USB core is expected
2985 * to call xhci_reset_bandwidth().
2986 *
2987 * Since we are in the middle of changing either configuration or
2988 * installing a new alt setting, the USB core won't allow URBs to be
2989 * enqueued for any endpoint on the old config or interface. Nothing
2990 * else should be touching the xhci->devs[slot_id] structure, so we
2991 * don't need to take the xhci->lock for manipulating that.
2992 */
2993int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
2994{
2995 int i;
2996 int ret = 0;
2997 struct xhci_hcd *xhci;
2998 struct xhci_virt_device *virt_dev;
2999 struct xhci_input_control_ctx *ctrl_ctx;
3000 struct xhci_slot_ctx *slot_ctx;
3001 struct xhci_command *command;
3002
3003 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3004 if (ret <= 0)
3005 return ret;
3006 xhci = hcd_to_xhci(hcd);
3007 if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3008 (xhci->xhc_state & XHCI_STATE_REMOVING))
3009 return -ENODEV;
3010
3011 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3012 virt_dev = xhci->devs[udev->slot_id];
3013
3014 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
3015 if (!command)
3016 return -ENOMEM;
3017
3018 command->in_ctx = virt_dev->in_ctx;
3019
3020 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
3021 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3022 if (!ctrl_ctx) {
3023 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3024 __func__);
3025 ret = -ENOMEM;
3026 goto command_cleanup;
3027 }
3028 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3029 ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
3030 ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
3031
3032	/* Don't issue the command if there are no endpoints to update. */
3033 if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) &&
3034 ctrl_ctx->drop_flags == 0) {
3035 ret = 0;
3036 goto command_cleanup;
3037 }
3038 /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
3039 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
3040 for (i = 31; i >= 1; i--) {
3041 __le32 le32 = cpu_to_le32(BIT(i));
3042
3043 if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32))
3044 || (ctrl_ctx->add_flags & le32) || i == 1) {
3045 slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
3046 slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
3047 break;
3048 }
3049 }
3050
3051 ret = xhci_configure_endpoint(xhci, udev, command,
3052 false, false);
3053 if (ret)
3054		/* Caller should call reset_bandwidth() */
3055 goto command_cleanup;
3056
3057 /* Free any rings that were dropped, but not changed. */
3058 for (i = 1; i < 31; i++) {
3059 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
3060 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
3061 xhci_free_endpoint_ring(xhci, virt_dev, i);
3062 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3063 }
3064 }
3065 xhci_zero_in_ctx(xhci, virt_dev);
3066 /*
3067 * Install any rings for completely new endpoints or changed endpoints,
3068 * and free any old rings from changed endpoints.
3069 */
3070 for (i = 1; i < 31; i++) {
3071 if (!virt_dev->eps[i].new_ring)
3072 continue;
3073 /* Only free the old ring if it exists.
3074 * It may not if this is the first add of an endpoint.
3075 */
3076		if (virt_dev->eps[i].ring)
3077			xhci_free_endpoint_ring(xhci, virt_dev, i);
3079 xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
3080 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
3081 virt_dev->eps[i].new_ring = NULL;
3082 xhci_debugfs_create_endpoint(xhci, virt_dev, i);
3083 }
3084command_cleanup:
3085 kfree(command->completion);
3086 kfree(command);
3087
3088 return ret;
3089}
3090EXPORT_SYMBOL_GPL(xhci_check_bandwidth);
3091
3092void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3093{
3094 struct xhci_hcd *xhci;
3095 struct xhci_virt_device *virt_dev;
3096 int i, ret;
3097
3098 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3099 if (ret <= 0)
3100 return;
3101 xhci = hcd_to_xhci(hcd);
3102
3103 xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3104 virt_dev = xhci->devs[udev->slot_id];
3105 /* Free any rings allocated for added endpoints */
3106 for (i = 0; i < 31; i++) {
3107 if (virt_dev->eps[i].new_ring) {
3108 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3109 xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
3110 virt_dev->eps[i].new_ring = NULL;
3111 }
3112 }
3113 xhci_zero_in_ctx(xhci, virt_dev);
3114}
3115EXPORT_SYMBOL_GPL(xhci_reset_bandwidth);
3116
3117static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
3118 struct xhci_container_ctx *in_ctx,
3119 struct xhci_container_ctx *out_ctx,
3120 struct xhci_input_control_ctx *ctrl_ctx,
3121 u32 add_flags, u32 drop_flags)
3122{
3123 ctrl_ctx->add_flags = cpu_to_le32(add_flags);
3124 ctrl_ctx->drop_flags = cpu_to_le32(drop_flags);
3125 xhci_slot_copy(xhci, in_ctx, out_ctx);
3126 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
3127}
3128
3129static void xhci_endpoint_disable(struct usb_hcd *hcd,
3130 struct usb_host_endpoint *host_ep)
3131{
3132 struct xhci_hcd *xhci;
3133 struct xhci_virt_device *vdev;
3134 struct xhci_virt_ep *ep;
3135 struct usb_device *udev;
3136 unsigned long flags;
3137 unsigned int ep_index;
3138
3139 xhci = hcd_to_xhci(hcd);
3140rescan:
3141 spin_lock_irqsave(&xhci->lock, flags);
3142
3143 udev = (struct usb_device *)host_ep->hcpriv;
3144 if (!udev || !udev->slot_id)
3145 goto done;
3146
3147 vdev = xhci->devs[udev->slot_id];
3148 if (!vdev)
3149 goto done;
3150
3151 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3152 ep = &vdev->eps[ep_index];
3153 if (!ep)
3154 goto done;
3155
3156 /* wait for hub_tt_work to finish clearing hub TT */
3157 if (ep->ep_state & EP_CLEARING_TT) {
3158 spin_unlock_irqrestore(&xhci->lock, flags);
3159 schedule_timeout_uninterruptible(1);
3160 goto rescan;
3161 }
3162
3163 if (ep->ep_state)
3164 xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
3165 ep->ep_state);
3166done:
3167 host_ep->hcpriv = NULL;
3168 spin_unlock_irqrestore(&xhci->lock, flags);
3169}
3170
3171/*
3172 * Called after usb core issues a clear halt control message.
3173 * The host side of the halt should already be cleared by a reset endpoint
3174 * command issued when the STALL event was received.
3175 *
3176 * The reset endpoint command may only be issued to endpoints in the halted
3177 * state. For software that wishes to reset the data toggle or sequence number
3178 * of an endpoint that isn't in the halted state this function will issue a
3179 * configure endpoint command with the Drop and Add bits set for the target
3180 * endpoint. Refer to the additional note in the xHCI specification, section 4.6.8.
3181 */
3182
3183static void xhci_endpoint_reset(struct usb_hcd *hcd,
3184 struct usb_host_endpoint *host_ep)
3185{
3186 struct xhci_hcd *xhci;
3187 struct usb_device *udev;
3188 struct xhci_virt_device *vdev;
3189 struct xhci_virt_ep *ep;
3190 struct xhci_input_control_ctx *ctrl_ctx;
3191 struct xhci_command *stop_cmd, *cfg_cmd;
3192 unsigned int ep_index;
3193 unsigned long flags;
3194 u32 ep_flag;
3195 int err;
3196
3197 xhci = hcd_to_xhci(hcd);
3198 if (!host_ep->hcpriv)
3199 return;
3200 udev = (struct usb_device *) host_ep->hcpriv;
3201 vdev = xhci->devs[udev->slot_id];
3202
3203 /*
3204 * vdev may be lost due to xHC restore error and re-initialization
3205 * during S3/S4 resume. A new vdev will be allocated later by
3206 * xhci_discover_or_reset_device()
3207 */
3208 if (!udev->slot_id || !vdev)
3209 return;
3210 ep_index = xhci_get_endpoint_index(&host_ep->desc);
3211 ep = &vdev->eps[ep_index];
3212 if (!ep)
3213 return;
3214
3215	/* Bail out if toggle is already being cleared by an endpoint reset */
3216 spin_lock_irqsave(&xhci->lock, flags);
3217 if (ep->ep_state & EP_HARD_CLEAR_TOGGLE) {
3218 ep->ep_state &= ~EP_HARD_CLEAR_TOGGLE;
3219 spin_unlock_irqrestore(&xhci->lock, flags);
3220 return;
3221 }
3222 spin_unlock_irqrestore(&xhci->lock, flags);
3223	/* Only interrupt and bulk endpoints use data toggle, see USB 2.0 spec 5.5.4 */
3224 if (usb_endpoint_xfer_control(&host_ep->desc) ||
3225 usb_endpoint_xfer_isoc(&host_ep->desc))
3226 return;
3227
3228 ep_flag = xhci_get_endpoint_flag(&host_ep->desc);
3229
3230 if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
3231 return;
3232
3233 stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
3234 if (!stop_cmd)
3235 return;
3236
3237 cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
3238 if (!cfg_cmd)
3239 goto cleanup;
3240
3241 spin_lock_irqsave(&xhci->lock, flags);
3242
3243 /* block queuing new trbs and ringing ep doorbell */
3244 ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
3245
3246 /*
3247 * Make sure endpoint ring is empty before resetting the toggle/seq.
3248	 * The driver is required to synchronously cancel all transfer requests.
3249	 * Stop the endpoint to force the xHC to update the output context.
3250 */
3251
3252 if (!list_empty(&ep->ring->td_list)) {
3253 dev_err(&udev->dev, "EP not empty, refuse reset\n");
3254 spin_unlock_irqrestore(&xhci->lock, flags);
3255 xhci_free_command(xhci, cfg_cmd);
3256 goto cleanup;
3257 }
3258
3259 err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
3260 ep_index, 0);
3261 if (err < 0) {
3262 spin_unlock_irqrestore(&xhci->lock, flags);
3263 xhci_free_command(xhci, cfg_cmd);
3264		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d\n",
3265 __func__, err);
3266 goto cleanup;
3267 }
3268
3269 xhci_ring_cmd_db(xhci);
3270 spin_unlock_irqrestore(&xhci->lock, flags);
3271
3272 wait_for_completion(stop_cmd->completion);
3273
3274 spin_lock_irqsave(&xhci->lock, flags);
3275
3276 /* config ep command clears toggle if add and drop ep flags are set */
3277 ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
3278 if (!ctrl_ctx) {
3279 spin_unlock_irqrestore(&xhci->lock, flags);
3280 xhci_free_command(xhci, cfg_cmd);
3281 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3282 __func__);
3283 goto cleanup;
3284 }
3285
3286 xhci_setup_input_ctx_for_config_ep(xhci, cfg_cmd->in_ctx, vdev->out_ctx,
3287 ctrl_ctx, ep_flag, ep_flag);
3288 xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
3289
3290 err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
3291 udev->slot_id, false);
3292 if (err < 0) {
3293 spin_unlock_irqrestore(&xhci->lock, flags);
3294 xhci_free_command(xhci, cfg_cmd);
3295		xhci_dbg(xhci, "%s: Failed to queue config ep command, %d\n",
3296 __func__, err);
3297 goto cleanup;
3298 }
3299
3300 xhci_ring_cmd_db(xhci);
3301 spin_unlock_irqrestore(&xhci->lock, flags);
3302
3303 wait_for_completion(cfg_cmd->completion);
3304
3305 xhci_free_command(xhci, cfg_cmd);
3306cleanup:
3307 xhci_free_command(xhci, stop_cmd);
3308 spin_lock_irqsave(&xhci->lock, flags);
3309 if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
3310 ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
3311 spin_unlock_irqrestore(&xhci->lock, flags);
3312}
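
/*
 * Illustrative only: the driver-side call that lands in
 * xhci_endpoint_reset() above. usb_reset_endpoint() is the real USB-core
 * API; the bulk IN address 0x81 is a made-up example.
 */
static void example_reset_toggle(struct usb_device *udev)
{
	usb_reset_endpoint(udev, 0x81);
}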
3313
3314static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
3315 struct usb_device *udev, struct usb_host_endpoint *ep,
3316 unsigned int slot_id)
3317{
3318 int ret;
3319 unsigned int ep_index;
3320 unsigned int ep_state;
3321
3322 if (!ep)
3323 return -EINVAL;
3324 ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
3325 if (ret <= 0)
3326 return -EINVAL;
3327 if (usb_ss_max_streams(&ep->ss_ep_comp) == 0) {
3328 xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
3329 " descriptor for ep 0x%x does not support streams\n",
3330 ep->desc.bEndpointAddress);
3331 return -EINVAL;
3332 }
3333
3334 ep_index = xhci_get_endpoint_index(&ep->desc);
3335 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3336 if (ep_state & EP_HAS_STREAMS ||
3337 ep_state & EP_GETTING_STREAMS) {
3338 xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
3339 "already has streams set up.\n",
3340 ep->desc.bEndpointAddress);
3341 xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
3342 "dynamic stream context array reallocation.\n");
3343 return -EINVAL;
3344 }
3345 if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
3346 xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
3347 "endpoint 0x%x; URBs are pending.\n",
3348 ep->desc.bEndpointAddress);
3349 return -EINVAL;
3350 }
3351 return 0;
3352}
3353
3354static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
3355 unsigned int *num_streams, unsigned int *num_stream_ctxs)
3356{
3357 unsigned int max_streams;
3358
3359 /* The stream context array size must be a power of two */
3360 *num_stream_ctxs = roundup_pow_of_two(*num_streams);
3361 /*
3362 * Find out how many primary stream array entries the host controller
3363 * supports. Later we may use secondary stream arrays (similar to 2nd
3364 * level page entries), but that's an optional feature for xHCI host
3365 * controllers. xHCs must support at least 4 stream IDs.
3366 */
3367 max_streams = HCC_MAX_PSA(xhci->hcc_params);
3368 if (*num_stream_ctxs > max_streams) {
3369 xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
3370 max_streams);
3371 *num_stream_ctxs = max_streams;
3372 *num_streams = max_streams;
3373 }
3374}
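
/*
 * Illustrative only: roundup_pow_of_two() (from <linux/log2.h>) sizes
 * the stream context array. Asking for 23 stream IDs allocates 32
 * entries, which may then be clamped to the controller's MaxPSASize.
 */
static unsigned int example_stream_ctx_entries(void)
{
	return roundup_pow_of_two(23);	/* 32 */
}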
3375
3376/* Returns an error code if one of the endpoints already has streams.
3377 * This does not change any data structures, it only checks and gathers
3378 * information.
3379 */
3380static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
3381 struct usb_device *udev,
3382 struct usb_host_endpoint **eps, unsigned int num_eps,
3383 unsigned int *num_streams, u32 *changed_ep_bitmask)
3384{
3385 unsigned int max_streams;
3386 unsigned int endpoint_flag;
3387 int i;
3388 int ret;
3389
3390 for (i = 0; i < num_eps; i++) {
3391 ret = xhci_check_streams_endpoint(xhci, udev,
3392 eps[i], udev->slot_id);
3393 if (ret < 0)
3394 return ret;
3395
3396 max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
3397 if (max_streams < (*num_streams - 1)) {
3398 xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
3399 eps[i]->desc.bEndpointAddress,
3400 max_streams);
3401			*num_streams = max_streams + 1;
3402 }
3403
3404 endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
3405 if (*changed_ep_bitmask & endpoint_flag)
3406 return -EINVAL;
3407 *changed_ep_bitmask |= endpoint_flag;
3408 }
3409 return 0;
3410}
3411
3412static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
3413 struct usb_device *udev,
3414 struct usb_host_endpoint **eps, unsigned int num_eps)
3415{
3416 u32 changed_ep_bitmask = 0;
3417 unsigned int slot_id;
3418 unsigned int ep_index;
3419 unsigned int ep_state;
3420 int i;
3421
3422 slot_id = udev->slot_id;
3423 if (!xhci->devs[slot_id])
3424 return 0;
3425
3426 for (i = 0; i < num_eps; i++) {
3427 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3428 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
3429 /* Are streams already being freed for the endpoint? */
3430 if (ep_state & EP_GETTING_NO_STREAMS) {
3431 xhci_warn(xhci, "WARN Can't disable streams for "
3432 "endpoint 0x%x, "
3433 "streams are being disabled already\n",
3434 eps[i]->desc.bEndpointAddress);
3435 return 0;
3436 }
3437 /* Are there actually any streams to free? */
3438 if (!(ep_state & EP_HAS_STREAMS) &&
3439 !(ep_state & EP_GETTING_STREAMS)) {
3440 xhci_warn(xhci, "WARN Can't disable streams for "
3441 "endpoint 0x%x, "
3442 "streams are already disabled!\n",
3443 eps[i]->desc.bEndpointAddress);
3444 xhci_warn(xhci, "WARN xhci_free_streams() called "
3445 "with non-streams endpoint\n");
3446 return 0;
3447 }
3448 changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
3449 }
3450 return changed_ep_bitmask;
3451}
3452
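/*
 * Illustrative only: the driver-facing entry point for the allocation
 * path documented below. usb_alloc_streams() is the real USB-core API
 * (used, for example, by the uas driver); the interface and endpoint
 * setup is assumed to have happened elsewhere.
 */
static int example_request_streams(struct usb_interface *intf,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	/* Returns the number of streams granted, or a negative errno. */
	return usb_alloc_streams(intf, eps, num_eps, 16, GFP_KERNEL);
}
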
3453/*
3454 * The USB device drivers use this function (through the HCD interface in USB
3455 * core) to prepare a set of bulk endpoints to use streams. Streams are used to
3456 * coordinate mass storage command queueing across multiple endpoints (basically
3457 * a stream ID == a task ID).
3458 *
3459 * Setting up streams involves allocating the same size stream context array
3460 * for each endpoint and issuing a configure endpoint command for all endpoints.
3461 *
3462 * Don't allow the call to succeed if one endpoint only supports one stream
3463 * (which means it doesn't support streams at all).
3464 *
3465 * Drivers may get less stream IDs than they asked for, if the host controller
3466 * hardware or endpoints claim they can't support the number of requested
3467 * stream IDs.
3468 */
3469static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
3470 struct usb_host_endpoint **eps, unsigned int num_eps,
3471 unsigned int num_streams, gfp_t mem_flags)
3472{
3473 int i, ret;
3474 struct xhci_hcd *xhci;
3475 struct xhci_virt_device *vdev;
3476 struct xhci_command *config_cmd;
3477 struct xhci_input_control_ctx *ctrl_ctx;
3478 unsigned int ep_index;
3479 unsigned int num_stream_ctxs;
3480 unsigned int max_packet;
3481 unsigned long flags;
3482 u32 changed_ep_bitmask = 0;
3483
3484 if (!eps)
3485 return -EINVAL;
3486
3487 /* Add one to the number of streams requested to account for
3488 * stream 0 that is reserved for xHCI usage.
3489 */
3490 num_streams += 1;
3491 xhci = hcd_to_xhci(hcd);
3492 xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
3493 num_streams);
3494
3495 /* MaxPSASize value 0 (2 streams) means streams are not supported */
3496 if ((xhci->quirks & XHCI_BROKEN_STREAMS) ||
3497 HCC_MAX_PSA(xhci->hcc_params) < 4) {
3498 xhci_dbg(xhci, "xHCI controller does not support streams.\n");
3499 return -ENOSYS;
3500 }
3501
3502 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
3503 if (!config_cmd)
3504 return -ENOMEM;
3505
3506 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
3507 if (!ctrl_ctx) {
3508 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3509 __func__);
3510 xhci_free_command(xhci, config_cmd);
3511 return -ENOMEM;
3512 }
3513
3514 /* Check to make sure all endpoints are not already configured for
3515 * streams. While we're at it, find the maximum number of streams that
3516 * all the endpoints will support and check for duplicate endpoints.
3517 */
3518 spin_lock_irqsave(&xhci->lock, flags);
3519 ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
3520 num_eps, &num_streams, &changed_ep_bitmask);
3521 if (ret < 0) {
3522 xhci_free_command(xhci, config_cmd);
3523 spin_unlock_irqrestore(&xhci->lock, flags);
3524 return ret;
3525 }
3526 if (num_streams <= 1) {
3527 xhci_warn(xhci, "WARN: endpoints can't handle "
3528 "more than one stream.\n");
3529 xhci_free_command(xhci, config_cmd);
3530 spin_unlock_irqrestore(&xhci->lock, flags);
3531 return -EINVAL;
3532 }
3533 vdev = xhci->devs[udev->slot_id];
3534 /* Mark each endpoint as being in transition, so
3535 * xhci_urb_enqueue() will reject all URBs.
3536 */
3537 for (i = 0; i < num_eps; i++) {
3538 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3539 vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
3540 }
3541 spin_unlock_irqrestore(&xhci->lock, flags);
3542
3543 /* Setup internal data structures and allocate HW data structures for
3544 * streams (but don't install the HW structures in the input context
3545 * until we're sure all memory allocation succeeded).
3546 */
3547 xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
3548 xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
3549 num_stream_ctxs, num_streams);
3550
3551 for (i = 0; i < num_eps; i++) {
3552 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3553 max_packet = usb_endpoint_maxp(&eps[i]->desc);
3554 vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
3555 num_stream_ctxs,
3556 num_streams,
3557 max_packet, mem_flags);
3558 if (!vdev->eps[ep_index].stream_info)
3559 goto cleanup;
3560 /* Set maxPstreams in endpoint context and update deq ptr to
3561 * point to stream context array. FIXME
3562 */
3563 }
3564
3565 /* Set up the input context for a configure endpoint command. */
3566 for (i = 0; i < num_eps; i++) {
3567 struct xhci_ep_ctx *ep_ctx;
3568
3569 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3570 ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);
3571
3572 xhci_endpoint_copy(xhci, config_cmd->in_ctx,
3573 vdev->out_ctx, ep_index);
3574 xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
3575 vdev->eps[ep_index].stream_info);
3576 }
3577 /* Tell the HW to drop its old copy of the endpoint context info
3578 * and add the updated copy from the input context.
3579 */
3580 xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
3581 vdev->out_ctx, ctrl_ctx,
3582 changed_ep_bitmask, changed_ep_bitmask);
3583
3584 /* Issue and wait for the configure endpoint command */
3585 ret = xhci_configure_endpoint(xhci, udev, config_cmd,
3586 false, false);
3587
3588 /* xHC rejected the configure endpoint command for some reason, so we
3589 * leave the old ring intact and free our internal streams data
3590 * structure.
3591 */
3592 if (ret < 0)
3593 goto cleanup;
3594
3595 spin_lock_irqsave(&xhci->lock, flags);
3596 for (i = 0; i < num_eps; i++) {
3597 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3598 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3599 xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
3600 udev->slot_id, ep_index);
3601 vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
3602 }
3603 xhci_free_command(xhci, config_cmd);
3604 spin_unlock_irqrestore(&xhci->lock, flags);
3605
3606 for (i = 0; i < num_eps; i++) {
3607 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3608 xhci_debugfs_create_stream_files(xhci, vdev, ep_index);
3609 }
3610 /* Subtract 1 for stream 0, which drivers can't use */
3611 return num_streams - 1;
3612
3613cleanup:
3614 /* If it didn't work, free the streams! */
3615 for (i = 0; i < num_eps; i++) {
3616 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3617 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3618 vdev->eps[ep_index].stream_info = NULL;
3619 /* FIXME Unset maxPstreams in endpoint context and
3620		 * update deq ptr to point to normal stream ring.
3621 */
3622 vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
3623 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3624 xhci_endpoint_zero(xhci, vdev, eps[i]);
3625 }
3626 xhci_free_command(xhci, config_cmd);
3627 return -ENOMEM;
3628}
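/*
 * Minimal usage sketch (editor's illustration; example_enable_streams() is
 * hypothetical, usb_alloc_streams() is the real USB core entry point that
 * reaches xhci_alloc_streams() through the HCD interface):
 *
 *	static int example_enable_streams(struct usb_interface *intf,
 *			struct usb_host_endpoint **eps, unsigned int num_eps)
 *	{
 *		int streams;
 *
 *		// Ask for 16 stream IDs; the core may grant fewer.
 *		streams = usb_alloc_streams(intf, eps, num_eps, 16, GFP_NOIO);
 *		if (streams < 0)
 *			return streams;
 *		// streams is the usable count, stream 0 already excluded.
 *		return 0;
 *	}
 */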
3629
3630/* Transition the endpoint from using streams to being a "normal" endpoint
3631 * without streams.
3632 *
3633 * Modify the endpoint context state, submit a configure endpoint command,
3634 * and free all endpoint rings for streams if that completes successfully.
3635 */
3636static int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
3637 struct usb_host_endpoint **eps, unsigned int num_eps,
3638 gfp_t mem_flags)
3639{
3640 int i, ret;
3641 struct xhci_hcd *xhci;
3642 struct xhci_virt_device *vdev;
3643 struct xhci_command *command;
3644 struct xhci_input_control_ctx *ctrl_ctx;
3645 unsigned int ep_index;
3646 unsigned long flags;
3647 u32 changed_ep_bitmask;
3648
3649 xhci = hcd_to_xhci(hcd);
3650 vdev = xhci->devs[udev->slot_id];
3651
3652 /* Set up a configure endpoint command to remove the streams rings */
3653 spin_lock_irqsave(&xhci->lock, flags);
3654 changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
3655 udev, eps, num_eps);
3656 if (changed_ep_bitmask == 0) {
3657 spin_unlock_irqrestore(&xhci->lock, flags);
3658 return -EINVAL;
3659 }
3660
3661 /* Use the xhci_command structure from the first endpoint. We may have
3662 * allocated too many, but the driver may call xhci_free_streams() for
3663 * each endpoint it grouped into one call to xhci_alloc_streams().
3664 */
3665 ep_index = xhci_get_endpoint_index(&eps[0]->desc);
3666 command = vdev->eps[ep_index].stream_info->free_streams_command;
3667 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
3668 if (!ctrl_ctx) {
3669 spin_unlock_irqrestore(&xhci->lock, flags);
3670 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
3671 __func__);
3672 return -EINVAL;
3673 }
3674
3675 for (i = 0; i < num_eps; i++) {
3676 struct xhci_ep_ctx *ep_ctx;
3677
3678 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3679 ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
3680 xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
3681 EP_GETTING_NO_STREAMS;
3682
3683 xhci_endpoint_copy(xhci, command->in_ctx,
3684 vdev->out_ctx, ep_index);
3685 xhci_setup_no_streams_ep_input_ctx(ep_ctx,
3686 &vdev->eps[ep_index]);
3687 }
3688 xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
3689 vdev->out_ctx, ctrl_ctx,
3690 changed_ep_bitmask, changed_ep_bitmask);
3691 spin_unlock_irqrestore(&xhci->lock, flags);
3692
3693 /* Issue and wait for the configure endpoint command,
3694 * which must succeed.
3695 */
3696 ret = xhci_configure_endpoint(xhci, udev, command,
3697 false, true);
3698
3699 /* xHC rejected the configure endpoint command for some reason, so we
3700 * leave the streams rings intact.
3701 */
3702 if (ret < 0)
3703 return ret;
3704
3705 spin_lock_irqsave(&xhci->lock, flags);
3706 for (i = 0; i < num_eps; i++) {
3707 ep_index = xhci_get_endpoint_index(&eps[i]->desc);
3708 xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
3709 vdev->eps[ep_index].stream_info = NULL;
3710 /* FIXME Unset maxPstreams in endpoint context and
3711		 * update deq ptr to point to normal stream ring.
3712 */
3713 vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
3714 vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
3715 }
3716 spin_unlock_irqrestore(&xhci->lock, flags);
3717
3718 return 0;
3719}
3720
3721/*
3722 * Deletes endpoint resources for endpoints that were active before a Reset
3723 * Device command, or a Disable Slot command. The Reset Device command leaves
3724 * the control endpoint intact, whereas the Disable Slot command deletes it.
3725 *
3726 * Must be called with xhci->lock held.
3727 */
3728void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
3729 struct xhci_virt_device *virt_dev, bool drop_control_ep)
3730{
3731 int i;
3732 unsigned int num_dropped_eps = 0;
3733 unsigned int drop_flags = 0;
3734
3735 for (i = (drop_control_ep ? 0 : 1); i < 31; i++) {
3736 if (virt_dev->eps[i].ring) {
3737 drop_flags |= 1 << i;
3738 num_dropped_eps++;
3739 }
3740 }
3741 xhci->num_active_eps -= num_dropped_eps;
3742 if (num_dropped_eps)
3743 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
3744 "Dropped %u ep ctxs, flags = 0x%x, "
3745 "%u now active.",
3746 num_dropped_eps, drop_flags,
3747 xhci->num_active_eps);
3748}
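/*
 * Worked example (illustrative numbers, editor's note): a device with
 * rings on endpoint context indices 2 and 3, with drop_control_ep ==
 * false, yields drop_flags = 0xc and num_dropped_eps = 2.
 */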
3749
3750/*
3751 * This submits a Reset Device Command, which will set the device state to 0,
3752 * set the device address to 0, and disable all the endpoints except the default
3753 * control endpoint. The USB core should come back and call
3754 * xhci_address_device(), and then re-set up the configuration. If this is
3755 * called because of a usb_reset_and_verify_device(), then the old alternate
3756 * settings will be re-installed through the normal bandwidth allocation
3757 * functions.
3758 *
3759 * Wait for the Reset Device command to finish. Remove all structures
3760 * associated with the endpoints that were disabled. Clear the input device
3761 * structure? Reset the control endpoint 0 max packet size?
3762 *
3763 * If the virt_dev to be reset does not exist or does not match the udev,
3764 * it means the device is lost, possibly due to an xHC restore error and
3765 * re-initialization during S3/S4. In this case, call xhci_alloc_dev() to
3766 * re-allocate the device.
3767 */
3768static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
3769 struct usb_device *udev)
3770{
3771 int ret, i;
3772 unsigned long flags;
3773 struct xhci_hcd *xhci;
3774 unsigned int slot_id;
3775 struct xhci_virt_device *virt_dev;
3776 struct xhci_command *reset_device_cmd;
3777 struct xhci_slot_ctx *slot_ctx;
3778 int old_active_eps = 0;
3779
3780 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
3781 if (ret <= 0)
3782 return ret;
3783 xhci = hcd_to_xhci(hcd);
3784 slot_id = udev->slot_id;
3785 virt_dev = xhci->devs[slot_id];
3786 if (!virt_dev) {
3787 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3788 "not exist. Re-allocate the device\n", slot_id);
3789 ret = xhci_alloc_dev(hcd, udev);
3790 if (ret == 1)
3791 return 0;
3792 else
3793 return -EINVAL;
3794 }
3795
3796 if (virt_dev->tt_info)
3797 old_active_eps = virt_dev->tt_info->active_eps;
3798
3799 if (virt_dev->udev != udev) {
3800		/* If the virt_dev and the udev do not match, this virt_dev
3801 * may belong to another udev.
3802 * Re-allocate the device.
3803 */
3804 xhci_dbg(xhci, "The device to be reset with slot ID %u does "
3805 "not match the udev. Re-allocate the device\n",
3806 slot_id);
3807 ret = xhci_alloc_dev(hcd, udev);
3808 if (ret == 1)
3809 return 0;
3810 else
3811 return -EINVAL;
3812 }
3813
3814 /* If device is not setup, there is no point in resetting it */
3815 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3816 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
3817 SLOT_STATE_DISABLED)
3818 return 0;
3819
3820 trace_xhci_discover_or_reset_device(slot_ctx);
3821
3822 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
3823 /* Allocate the command structure that holds the struct completion.
3824 * Assume we're in process context, since the normal device reset
3825 * process has to wait for the device anyway. Storage devices are
3826 * reset as part of error handling, so use GFP_NOIO instead of
3827 * GFP_KERNEL.
3828 */
3829 reset_device_cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
3830 if (!reset_device_cmd) {
3831 xhci_dbg(xhci, "Couldn't allocate command structure.\n");
3832 return -ENOMEM;
3833 }
3834
3835 /* Attempt to submit the Reset Device command to the command ring */
3836 spin_lock_irqsave(&xhci->lock, flags);
3837
3838 ret = xhci_queue_reset_device(xhci, reset_device_cmd, slot_id);
3839 if (ret) {
3840 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
3841 spin_unlock_irqrestore(&xhci->lock, flags);
3842 goto command_cleanup;
3843 }
3844 xhci_ring_cmd_db(xhci);
3845 spin_unlock_irqrestore(&xhci->lock, flags);
3846
3847 /* Wait for the Reset Device command to finish */
3848 wait_for_completion(reset_device_cmd->completion);
3849
3850 /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
3851 * unless we tried to reset a slot ID that wasn't enabled,
3852 * or the device wasn't in the addressed or configured state.
3853 */
3854 ret = reset_device_cmd->status;
3855 switch (ret) {
3856 case COMP_COMMAND_ABORTED:
3857 case COMP_COMMAND_RING_STOPPED:
3858 xhci_warn(xhci, "Timeout waiting for reset device command\n");
3859 ret = -ETIME;
3860 goto command_cleanup;
3861 case COMP_SLOT_NOT_ENABLED_ERROR: /* 0.95 completion for bad slot ID */
3862 case COMP_CONTEXT_STATE_ERROR: /* 0.96 completion code for same thing */
3863 xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
3864 slot_id,
3865 xhci_get_slot_state(xhci, virt_dev->out_ctx));
3866 xhci_dbg(xhci, "Not freeing device rings.\n");
3867 /* Don't treat this as an error. May change my mind later. */
3868 ret = 0;
3869 goto command_cleanup;
3870 case COMP_SUCCESS:
3871 xhci_dbg(xhci, "Successful reset device command.\n");
3872 break;
3873 default:
3874 if (xhci_is_vendor_info_code(xhci, ret))
3875 break;
3876 xhci_warn(xhci, "Unknown completion code %u for "
3877 "reset device command.\n", ret);
3878 ret = -EINVAL;
3879 goto command_cleanup;
3880 }
3881
3882 /* Free up host controller endpoint resources */
3883 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
3884 spin_lock_irqsave(&xhci->lock, flags);
3885 /* Don't delete the default control endpoint resources */
3886 xhci_free_device_endpoint_resources(xhci, virt_dev, false);
3887 spin_unlock_irqrestore(&xhci->lock, flags);
3888 }
3889
3890 /* Everything but endpoint 0 is disabled, so free the rings. */
3891 for (i = 1; i < 31; i++) {
3892 struct xhci_virt_ep *ep = &virt_dev->eps[i];
3893
3894 if (ep->ep_state & EP_HAS_STREAMS) {
3895 xhci_warn(xhci, "WARN: endpoint 0x%02x has streams on device reset, freeing streams.\n",
3896 xhci_get_endpoint_address(i));
3897 xhci_free_stream_info(xhci, ep->stream_info);
3898 ep->stream_info = NULL;
3899 ep->ep_state &= ~EP_HAS_STREAMS;
3900 }
3901
3902 if (ep->ring) {
3903 xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
3904 xhci_free_endpoint_ring(xhci, virt_dev, i);
3905 }
3906 if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
3907 xhci_drop_ep_from_interval_table(xhci,
3908 &virt_dev->eps[i].bw_info,
3909 virt_dev->bw_table,
3910 udev,
3911 &virt_dev->eps[i],
3912 virt_dev->tt_info);
3913 xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
3914 }
3915 /* If necessary, update the number of active TTs on this root port */
3916 xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
3917 virt_dev->flags = 0;
3918 ret = 0;
3919
3920command_cleanup:
3921 xhci_free_command(xhci, reset_device_cmd);
3922 return ret;
3923}
3924
3925/*
3926 * At this point, the struct usb_device is about to go away, the device has
3927 * disconnected, and all traffic has been stopped and the endpoints have been
3928 * disabled. Free any HC data structures associated with that device.
3929 */
3930static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3931{
3932 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3933 struct xhci_virt_device *virt_dev;
3934 struct xhci_slot_ctx *slot_ctx;
3935 int i, ret;
3936
3937#ifndef CONFIG_USB_DEFAULT_PERSIST
3938 /*
3939 * We called pm_runtime_get_noresume when the device was attached.
3940 * Decrement the counter here to allow controller to runtime suspend
3941 * if no devices remain.
3942 */
3943 if (xhci->quirks & XHCI_RESET_ON_RESUME)
3944 pm_runtime_put_noidle(hcd->self.controller);
3945#endif
3946
3947 ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3948 /* If the host is halted due to driver unload, we still need to free the
3949 * device.
3950 */
3951 if (ret <= 0 && ret != -ENODEV)
3952 return;
3953
3954 virt_dev = xhci->devs[udev->slot_id];
3955 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
3956 trace_xhci_free_dev(slot_ctx);
3957
3958 /* Stop any wayward timer functions (which may grab the lock) */
3959 for (i = 0; i < 31; i++) {
3960 virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
3961 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
3962 }
3963 virt_dev->udev = NULL;
3964 ret = xhci_disable_slot(xhci, udev->slot_id);
3965 if (ret)
3966 xhci_free_virt_device(xhci, udev->slot_id);
3967}
3968
3969int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
3970{
3971 struct xhci_command *command;
3972 unsigned long flags;
3973 u32 state;
3974 int ret = 0;
3975
3976 command = xhci_alloc_command(xhci, false, GFP_KERNEL);
3977 if (!command)
3978 return -ENOMEM;
3979
3980 xhci_debugfs_remove_slot(xhci, slot_id);
3981
3982 spin_lock_irqsave(&xhci->lock, flags);
3983 /* Don't disable the slot if the host controller is dead. */
3984 state = readl(&xhci->op_regs->status);
3985 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
3986 (xhci->xhc_state & XHCI_STATE_HALTED)) {
3987 spin_unlock_irqrestore(&xhci->lock, flags);
3988 kfree(command);
3989 return -ENODEV;
3990 }
3991
3992 ret = xhci_queue_slot_control(xhci, command, TRB_DISABLE_SLOT,
3993 slot_id);
3994 if (ret) {
3995 spin_unlock_irqrestore(&xhci->lock, flags);
3996 kfree(command);
3997 return ret;
3998 }
3999 xhci_ring_cmd_db(xhci);
4000 spin_unlock_irqrestore(&xhci->lock, flags);
4001 return ret;
4002}
4003
4004/*
4005 * Checks if we have enough host controller resources for the default control
4006 * endpoint.
4007 *
4008 * Must be called with xhci->lock held.
4009 */
4010static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
4011{
4012 if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
4013 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4014 "Not enough ep ctxs: "
4015 "%u active, need to add 1, limit is %u.",
4016 xhci->num_active_eps, xhci->limit_active_eps);
4017 return -ENOMEM;
4018 }
4019 xhci->num_active_eps += 1;
4020 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
4021 "Adding 1 ep ctx, %u now active.",
4022 xhci->num_active_eps);
4023 return 0;
4024}
4025
4026
4027/*
4028 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
4029 * timed out, or allocating memory failed. Returns 1 on success.
4030 */
4031int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
4032{
4033 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4034 struct xhci_virt_device *vdev;
4035 struct xhci_slot_ctx *slot_ctx;
4036 unsigned long flags;
4037 int ret, slot_id;
4038 struct xhci_command *command;
4039
4040 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4041 if (!command)
4042 return 0;
4043
4044 spin_lock_irqsave(&xhci->lock, flags);
4045 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
4046 if (ret) {
4047 spin_unlock_irqrestore(&xhci->lock, flags);
4048 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
4049 xhci_free_command(xhci, command);
4050 return 0;
4051 }
4052 xhci_ring_cmd_db(xhci);
4053 spin_unlock_irqrestore(&xhci->lock, flags);
4054
4055 wait_for_completion(command->completion);
4056 slot_id = command->slot_id;
4057
4058 if (!slot_id || command->status != COMP_SUCCESS) {
4059 xhci_err(xhci, "Error while assigning device slot ID\n");
4060 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
4061 HCS_MAX_SLOTS(
4062 readl(&xhci->cap_regs->hcs_params1)));
4063 xhci_free_command(xhci, command);
4064 return 0;
4065 }
4066
4067 xhci_free_command(xhci, command);
4068
4069 if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK)) {
4070 spin_lock_irqsave(&xhci->lock, flags);
4071 ret = xhci_reserve_host_control_ep_resources(xhci);
4072 if (ret) {
4073 spin_unlock_irqrestore(&xhci->lock, flags);
4074 xhci_warn(xhci, "Not enough host resources, "
4075 "active endpoint contexts = %u\n",
4076 xhci->num_active_eps);
4077 goto disable_slot;
4078 }
4079 spin_unlock_irqrestore(&xhci->lock, flags);
4080 }
4081 /* Use GFP_NOIO, since this function can be called from
4082 * xhci_discover_or_reset_device(), which may be called as part of
4083 * mass storage driver error handling.
4084 */
4085 if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
4086 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
4087 goto disable_slot;
4088 }
4089 vdev = xhci->devs[slot_id];
4090 slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
4091 trace_xhci_alloc_dev(slot_ctx);
4092
4093 udev->slot_id = slot_id;
4094
4095 xhci_debugfs_create_slot(xhci, slot_id);
4096
4097#ifndef CONFIG_USB_DEFAULT_PERSIST
4098 /*
4099 * If resetting upon resume, we can't put the controller into runtime
4100 * suspend if there is a device attached.
4101 */
4102 if (xhci->quirks & XHCI_RESET_ON_RESUME)
4103 pm_runtime_get_noresume(hcd->self.controller);
4104#endif
4105
4106 /* Is this a LS or FS device under a HS hub? */
4107	/* Hub or peripheral? */
4108 return 1;
4109
4110disable_slot:
4111 ret = xhci_disable_slot(xhci, udev->slot_id);
4112 if (ret)
4113 xhci_free_virt_device(xhci, udev->slot_id);
4114
4115 return 0;
4116}
4117
4118/*
4119 * Issue an Address Device command and optionally send a corresponding
4120 * SetAddress request to the device.
4121 */
4122static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4123 enum xhci_setup_dev setup)
4124{
4125 const char *act = setup == SETUP_CONTEXT_ONLY ? "context" : "address";
4126 unsigned long flags;
4127 struct xhci_virt_device *virt_dev;
4128 int ret = 0;
4129 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4130 struct xhci_slot_ctx *slot_ctx;
4131 struct xhci_input_control_ctx *ctrl_ctx;
4132 u64 temp_64;
4133 struct xhci_command *command = NULL;
4134
4135 mutex_lock(&xhci->mutex);
4136
4137 if (xhci->xhc_state) { /* dying, removing or halted */
4138 ret = -ESHUTDOWN;
4139 goto out;
4140 }
4141
4142 if (!udev->slot_id) {
4143 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4144 "Bad Slot ID %d", udev->slot_id);
4145 ret = -EINVAL;
4146 goto out;
4147 }
4148
4149 virt_dev = xhci->devs[udev->slot_id];
4150
4151 if (WARN_ON(!virt_dev)) {
4152 /*
4153 * In plug/unplug torture test with an NEC controller,
4154		 * a NULL pointer dereference was observed once due to virt_dev = 0.
4155 * Print useful debug rather than crash if it is observed again!
4156 */
4157 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
4158 udev->slot_id);
4159 ret = -EINVAL;
4160 goto out;
4161 }
4162 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4163 trace_xhci_setup_device_slot(slot_ctx);
4164
4165 if (setup == SETUP_CONTEXT_ONLY) {
4166 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
4167 SLOT_STATE_DEFAULT) {
4168 xhci_dbg(xhci, "Slot already in default state\n");
4169 goto out;
4170 }
4171 }
4172
4173 command = xhci_alloc_command(xhci, true, GFP_KERNEL);
4174 if (!command) {
4175 ret = -ENOMEM;
4176 goto out;
4177 }
4178
4179 command->in_ctx = virt_dev->in_ctx;
4180
4181 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
4182 ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
4183 if (!ctrl_ctx) {
4184 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4185 __func__);
4186 ret = -EINVAL;
4187 goto out;
4188 }
4189 /*
4190 * If this is the first Set Address since device plug-in or
4191	 * virt_device reallocation after a resume with an xHCI power loss,
4192 * then set up the slot context.
4193 */
4194 if (!slot_ctx->dev_info)
4195 xhci_setup_addressable_virt_dev(xhci, udev);
4196 /* Otherwise, update the control endpoint ring enqueue pointer. */
4197 else
4198 xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
4199 ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
4200 ctrl_ctx->drop_flags = 0;
4201
4202 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4203 le32_to_cpu(slot_ctx->dev_info) >> 27);
4204
4205 trace_xhci_address_ctrl_ctx(ctrl_ctx);
4206 spin_lock_irqsave(&xhci->lock, flags);
4207 trace_xhci_setup_device(virt_dev);
4208 ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma,
4209 udev->slot_id, setup);
4210 if (ret) {
4211 spin_unlock_irqrestore(&xhci->lock, flags);
4212 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4213 "FIXME: allocate a command ring segment");
4214 goto out;
4215 }
4216 xhci_ring_cmd_db(xhci);
4217 spin_unlock_irqrestore(&xhci->lock, flags);
4218
4219 /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
4220 wait_for_completion(command->completion);
4221
4222 /* FIXME: From section 4.3.4: "Software shall be responsible for timing
4223 * the SetAddress() "recovery interval" required by USB and aborting the
4224	 * command on a timeout."
4225 */
4226 switch (command->status) {
4227 case COMP_COMMAND_ABORTED:
4228 case COMP_COMMAND_RING_STOPPED:
4229 xhci_warn(xhci, "Timeout while waiting for setup device command\n");
4230 ret = -ETIME;
4231 break;
4232 case COMP_CONTEXT_STATE_ERROR:
4233 case COMP_SLOT_NOT_ENABLED_ERROR:
4234 xhci_err(xhci, "Setup ERROR: setup %s command for slot %d.\n",
4235 act, udev->slot_id);
4236 ret = -EINVAL;
4237 break;
4238 case COMP_USB_TRANSACTION_ERROR:
4239 dev_warn(&udev->dev, "Device not responding to setup %s.\n", act);
4240
4241 mutex_unlock(&xhci->mutex);
4242 ret = xhci_disable_slot(xhci, udev->slot_id);
4243 if (!ret)
4244 xhci_alloc_dev(hcd, udev);
4245 kfree(command->completion);
4246 kfree(command);
4247 return -EPROTO;
4248 case COMP_INCOMPATIBLE_DEVICE_ERROR:
4249 dev_warn(&udev->dev,
4250 "ERROR: Incompatible device for setup %s command\n", act);
4251 ret = -ENODEV;
4252 break;
4253 case COMP_SUCCESS:
4254 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4255 "Successful setup %s command", act);
4256 break;
4257 default:
4258 xhci_err(xhci,
4259 "ERROR: unexpected setup %s command completion code 0x%x.\n",
4260 act, command->status);
4261 trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
4262 ret = -EINVAL;
4263 break;
4264 }
4265 if (ret)
4266 goto out;
4267 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
4268 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4269 "Op regs DCBAA ptr = %#016llx", temp_64);
4270 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4271 "Slot ID %d dcbaa entry @%p = %#016llx",
4272 udev->slot_id,
4273 &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
4274 (unsigned long long)
4275 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
4276 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4277 "Output Context DMA address = %#08llx",
4278 (unsigned long long)virt_dev->out_ctx->dma);
4279 trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
4280 le32_to_cpu(slot_ctx->dev_info) >> 27);
4281 /*
4282 * USB core uses address 1 for the roothubs, so we add one to the
4283 * address given back to us by the HC.
4284 */
4285 trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
4286 le32_to_cpu(slot_ctx->dev_info) >> 27);
4287 /* Zero the input context control for later use */
4288 ctrl_ctx->add_flags = 0;
4289 ctrl_ctx->drop_flags = 0;
4290 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
4291 udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4292
4293 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4294 "Internal device address = %d",
4295 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
4296out:
4297 mutex_unlock(&xhci->mutex);
4298 if (command) {
4299 kfree(command->completion);
4300 kfree(command);
4301 }
4302 return ret;
4303}
4304
4305static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
4306{
4307 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
4308}
4309
4310static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
4311{
4312 return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ONLY);
4313}
4314
4315/*
4316 * Translate the port index into the real index in the HW port status
4317 * registers. Calculate the offset between the port's PORTSC register
4318 * and the port status base, then divide by the number of registers per
4319 * port to get the real index. Raw port numbers are 1-based.
4320 */
4321int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
4322{
4323 struct xhci_hub *rhub;
4324
4325 rhub = xhci_get_rhub(hcd);
4326 return rhub->ports[port1 - 1]->hw_portnum + 1;
4327}
4328
4329/*
4330 * Issue an Evaluate Context command to change the Maximum Exit Latency in the
4331 * slot context. If that succeeds, store the new MEL in the xhci_virt_device.
4332 */
4333static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
4334 struct usb_device *udev, u16 max_exit_latency)
4335{
4336 struct xhci_virt_device *virt_dev;
4337 struct xhci_command *command;
4338 struct xhci_input_control_ctx *ctrl_ctx;
4339 struct xhci_slot_ctx *slot_ctx;
4340 unsigned long flags;
4341 int ret;
4342
4343 spin_lock_irqsave(&xhci->lock, flags);
4344
4345 virt_dev = xhci->devs[udev->slot_id];
4346
4347 /*
4348	 * virt_dev might not exist yet if xHC resumed from hibernate (S4) and
4349 * xHC was re-initialized. Exit latency will be set later after
4350 * hub_port_finish_reset() is done and xhci->devs[] are re-allocated
4351 */
4352
4353 if (!virt_dev || max_exit_latency == virt_dev->current_mel) {
4354 spin_unlock_irqrestore(&xhci->lock, flags);
4355 return 0;
4356 }
4357
4358 /* Attempt to issue an Evaluate Context command to change the MEL. */
4359 command = xhci->lpm_command;
4360 ctrl_ctx = xhci_get_input_control_ctx(command->in_ctx);
4361 if (!ctrl_ctx) {
4362 spin_unlock_irqrestore(&xhci->lock, flags);
4363 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
4364 __func__);
4365 return -ENOMEM;
4366 }
4367
4368 xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
4369 spin_unlock_irqrestore(&xhci->lock, flags);
4370
4371 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
4372 slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx);
4373 slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
4374 slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
4375 slot_ctx->dev_state = 0;
4376
4377 xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
4378 "Set up evaluate context for LPM MEL change.");
4379
4380 /* Issue and wait for the evaluate context command. */
4381 ret = xhci_configure_endpoint(xhci, udev, command,
4382 true, true);
4383
4384 if (!ret) {
4385 spin_lock_irqsave(&xhci->lock, flags);
4386 virt_dev->current_mel = max_exit_latency;
4387 spin_unlock_irqrestore(&xhci->lock, flags);
4388 }
4389 return ret;
4390}
4391
4392#ifdef CONFIG_PM
4393
4394/* BESL to HIRD Encoding array for USB2 LPM */
4395static int xhci_besl_encoding[16] = {125, 150, 200, 300, 400, 500, 1000, 2000,
4396 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000};
4397
4398/* Calculate HIRD/BESL for USB2 PORTPMSC */
4399static int xhci_calculate_hird_besl(struct xhci_hcd *xhci,
4400 struct usb_device *udev)
4401{
4402 int u2del, besl, besl_host;
4403 int besl_device = 0;
4404 u32 field;
4405
4406 u2del = HCS_U2_LATENCY(xhci->hcs_params3);
4407 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4408
4409 if (field & USB_BESL_SUPPORT) {
4410 for (besl_host = 0; besl_host < 16; besl_host++) {
4411 if (xhci_besl_encoding[besl_host] >= u2del)
4412 break;
4413 }
4414 /* Use baseline BESL value as default */
4415 if (field & USB_BESL_BASELINE_VALID)
4416 besl_device = USB_GET_BESL_BASELINE(field);
4417 else if (field & USB_BESL_DEEP_VALID)
4418 besl_device = USB_GET_BESL_DEEP(field);
4419 } else {
4420 if (u2del <= 50)
4421 besl_host = 0;
4422 else
4423 besl_host = (u2del - 51) / 75 + 1;
4424 }
4425
4426 besl = besl_host + besl_device;
4427 if (besl > 15)
4428 besl = 15;
4429
4430 return besl;
4431}
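/*
 * Worked example (illustrative numbers, editor's note): with a host U2DEL
 * of 400 us and a device that sets USB_BESL_SUPPORT, the loop picks
 * besl_host = 4 (xhci_besl_encoding[4] = 400).  Without BESL support,
 * the fallback gives besl_host = (400 - 51) / 75 + 1 = 5.
 */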
4432
4433/* Calculate BESLD, L1 timeout and HIRDM for USB2 PORTHLPMC */
4434static int xhci_calculate_usb2_hw_lpm_params(struct usb_device *udev)
4435{
4436 u32 field;
4437 int l1;
4438 int besld = 0;
4439 int hirdm = 0;
4440
4441 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4442
4443 /* xHCI l1 is set in steps of 256us, xHCI 1.0 section 5.4.11.2 */
4444 l1 = udev->l1_params.timeout / 256;
4445
4446 /* device has preferred BESLD */
4447 if (field & USB_BESL_DEEP_VALID) {
4448 besld = USB_GET_BESL_DEEP(field);
4449 hirdm = 1;
4450 }
4451
4452 return PORT_BESLD(besld) | PORT_L1_TIMEOUT(l1) | PORT_HIRDM(hirdm);
4453}
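/*
 * Worked example (illustrative numbers, editor's note): an L1 timeout of
 * 2048 us gives l1 = 2048 / 256 = 8; a device advertising a deep BESL
 * value of 4 yields besld = 4 and hirdm = 1 in the PORTHLPMC value.
 */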
4454
4455static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
4456 struct usb_device *udev, int enable)
4457{
4458 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4459 struct xhci_port **ports;
4460 __le32 __iomem *pm_addr, *hlpm_addr;
4461 u32 pm_val, hlpm_val, field;
4462 unsigned int port_num;
4463 unsigned long flags;
4464 int hird, exit_latency;
4465 int ret;
4466
4467 if (xhci->quirks & XHCI_HW_LPM_DISABLE)
4468 return -EPERM;
4469
4470 if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
4471 !udev->lpm_capable)
4472 return -EPERM;
4473
4474 if (!udev->parent || udev->parent->parent ||
4475 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4476 return -EPERM;
4477
4478 if (udev->usb2_hw_lpm_capable != 1)
4479 return -EPERM;
4480
4481 spin_lock_irqsave(&xhci->lock, flags);
4482
4483 ports = xhci->usb2_rhub.ports;
4484 port_num = udev->portnum - 1;
4485 pm_addr = ports[port_num]->addr + PORTPMSC;
4486 pm_val = readl(pm_addr);
4487 hlpm_addr = ports[port_num]->addr + PORTHLPMC;
4488
4489 xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
4490 enable ? "enable" : "disable", port_num + 1);
4491
4492 if (enable) {
4493 /* Host supports BESL timeout instead of HIRD */
4494 if (udev->usb2_hw_lpm_besl_capable) {
4495 /* if device doesn't have a preferred BESL value use a
4496 * default one which works with mixed HIRD and BESL
4497 * systems. See XHCI_DEFAULT_BESL definition in xhci.h
4498 */
4499 field = le32_to_cpu(udev->bos->ext_cap->bmAttributes);
4500 if ((field & USB_BESL_SUPPORT) &&
4501 (field & USB_BESL_BASELINE_VALID))
4502 hird = USB_GET_BESL_BASELINE(field);
4503 else
4504 hird = udev->l1_params.besl;
4505
4506 exit_latency = xhci_besl_encoding[hird];
4507 spin_unlock_irqrestore(&xhci->lock, flags);
4508
4509			/* USB 3.0 code dedicates one xhci->lpm_command->in_ctx
4510			 * input context for link power management evaluate
4511 * context commands. It is protected by hcd->bandwidth
4512 * mutex and is shared by all devices. We need to set
4513 * the max ext latency in USB 2 BESL LPM as well, so
4514 * use the same mutex and xhci_change_max_exit_latency()
4515 */
4516 mutex_lock(hcd->bandwidth_mutex);
4517 ret = xhci_change_max_exit_latency(xhci, udev,
4518 exit_latency);
4519 mutex_unlock(hcd->bandwidth_mutex);
4520
4521 if (ret < 0)
4522 return ret;
4523 spin_lock_irqsave(&xhci->lock, flags);
4524
4525 hlpm_val = xhci_calculate_usb2_hw_lpm_params(udev);
4526 writel(hlpm_val, hlpm_addr);
4527 /* flush write */
4528 readl(hlpm_addr);
4529 } else {
4530 hird = xhci_calculate_hird_besl(xhci, udev);
4531 }
4532
4533 pm_val &= ~PORT_HIRD_MASK;
4534 pm_val |= PORT_HIRD(hird) | PORT_RWE | PORT_L1DS(udev->slot_id);
4535 writel(pm_val, pm_addr);
4536 pm_val = readl(pm_addr);
4537 pm_val |= PORT_HLE;
4538 writel(pm_val, pm_addr);
4539 /* flush write */
4540 readl(pm_addr);
4541 } else {
4542 pm_val &= ~(PORT_HLE | PORT_RWE | PORT_HIRD_MASK | PORT_L1DS_MASK);
4543 writel(pm_val, pm_addr);
4544 /* flush write */
4545 readl(pm_addr);
4546 if (udev->usb2_hw_lpm_besl_capable) {
4547 spin_unlock_irqrestore(&xhci->lock, flags);
4548 mutex_lock(hcd->bandwidth_mutex);
4549 xhci_change_max_exit_latency(xhci, udev, 0);
4550 mutex_unlock(hcd->bandwidth_mutex);
4551 readl_poll_timeout(ports[port_num]->addr, pm_val,
4552 (pm_val & PORT_PLS_MASK) == XDEV_U0,
4553 100, 10000);
4554 return 0;
4555 }
4556 }
4557
4558 spin_unlock_irqrestore(&xhci->lock, flags);
4559 return 0;
4560}
4561
4562/* Check if a USB2 port supports a given extended capability protocol.
4563 * Only USB2 ports' extended protocol capability values are cached.
4564 * Return 1 if the capability is supported.
4565 */
4566static int xhci_check_usb2_port_capability(struct xhci_hcd *xhci, int port,
4567 unsigned capability)
4568{
4569 u32 port_offset, port_count;
4570 int i;
4571
4572 for (i = 0; i < xhci->num_ext_caps; i++) {
4573 if (xhci->ext_caps[i] & capability) {
4574			/* port offsets start at 1 */
4575 port_offset = XHCI_EXT_PORT_OFF(xhci->ext_caps[i]) - 1;
4576 port_count = XHCI_EXT_PORT_COUNT(xhci->ext_caps[i]);
4577 if (port >= port_offset &&
4578 port < port_offset + port_count)
4579 return 1;
4580 }
4581 }
4582 return 0;
4583}
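/*
 * Worked example (illustrative numbers, editor's note): an extended
 * capability entry with XHCI_EXT_PORT_OFF = 3 and XHCI_EXT_PORT_COUNT = 4
 * covers the 0-based ports 2..5, so port 4 matches and 1 is returned.
 */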
4584
4585static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
4586{
4587 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4588 int portnum = udev->portnum - 1;
4589
4590 if (hcd->speed >= HCD_USB3 || !udev->lpm_capable)
4591 return 0;
4592
4593	/* so far we only support LPM for non-hub devices connected to the root hub */
4594 if (!udev->parent || udev->parent->parent ||
4595 udev->descriptor.bDeviceClass == USB_CLASS_HUB)
4596 return 0;
4597
4598 if (xhci->hw_lpm_support == 1 &&
4599 xhci_check_usb2_port_capability(
4600 xhci, portnum, XHCI_HLC)) {
4601 udev->usb2_hw_lpm_capable = 1;
4602 udev->l1_params.timeout = XHCI_L1_TIMEOUT;
4603 udev->l1_params.besl = XHCI_DEFAULT_BESL;
4604 if (xhci_check_usb2_port_capability(xhci, portnum,
4605 XHCI_BLC))
4606 udev->usb2_hw_lpm_besl_capable = 1;
4607 }
4608
4609 return 0;
4610}
4611
4612/*---------------------- USB 3.0 Link PM functions ------------------------*/
4613
4614/* Service interval in nanoseconds = 2^(bInterval - 1) * 125us * 1000ns / 1us */
4615static unsigned long long xhci_service_interval_to_ns(
4616 struct usb_endpoint_descriptor *desc)
4617{
4618 return (1ULL << (desc->bInterval - 1)) * 125 * 1000;
4619}
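/*
 * Worked example (editor's note): bInterval = 4 gives
 * 2^(4 - 1) * 125 us = 1 ms, i.e. this function returns 1000000 ns.
 */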
4620
4621static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
4622 enum usb3_link_state state)
4623{
4624 unsigned long long sel;
4625 unsigned long long pel;
4626 unsigned int max_sel_pel;
4627 char *state_name;
4628
4629 switch (state) {
4630 case USB3_LPM_U1:
4631 /* Convert SEL and PEL stored in nanoseconds to microseconds */
4632 sel = DIV_ROUND_UP(udev->u1_params.sel, 1000);
4633 pel = DIV_ROUND_UP(udev->u1_params.pel, 1000);
4634 max_sel_pel = USB3_LPM_MAX_U1_SEL_PEL;
4635 state_name = "U1";
4636 break;
4637 case USB3_LPM_U2:
4638 sel = DIV_ROUND_UP(udev->u2_params.sel, 1000);
4639 pel = DIV_ROUND_UP(udev->u2_params.pel, 1000);
4640 max_sel_pel = USB3_LPM_MAX_U2_SEL_PEL;
4641 state_name = "U2";
4642 break;
4643 default:
4644 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
4645 __func__);
4646 return USB3_LPM_DISABLED;
4647 }
4648
4649 if (sel <= max_sel_pel && pel <= max_sel_pel)
4650 return USB3_LPM_DEVICE_INITIATED;
4651
4652 if (sel > max_sel_pel)
4653 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4654				"due to long SEL %llu us\n",
4655 state_name, sel);
4656 else
4657 dev_dbg(&udev->dev, "Device-initiated %s disabled "
4658				"due to long PEL %llu us\n",
4659 state_name, pel);
4660 return USB3_LPM_DISABLED;
4661}
4662
4663/* The U1 timeout should be the maximum of the following values:
4664 * - For control endpoints, U1 system exit latency (SEL) * 3
4665 * - For bulk endpoints, U1 SEL * 5
4666 * - For interrupt endpoints:
4667 * - Notification EPs, U1 SEL * 3
4668 * - Periodic EPs, max(105% of bInterval, U1 SEL * 2)
4669 * - For isochronous endpoints, max(105% of bInterval, U1 SEL * 2)
4670 */
4671static unsigned long long xhci_calculate_intel_u1_timeout(
4672 struct usb_device *udev,
4673 struct usb_endpoint_descriptor *desc)
4674{
4675 unsigned long long timeout_ns;
4676 int ep_type;
4677 int intr_type;
4678
4679 ep_type = usb_endpoint_type(desc);
4680 switch (ep_type) {
4681 case USB_ENDPOINT_XFER_CONTROL:
4682 timeout_ns = udev->u1_params.sel * 3;
4683 break;
4684 case USB_ENDPOINT_XFER_BULK:
4685 timeout_ns = udev->u1_params.sel * 5;
4686 break;
4687 case USB_ENDPOINT_XFER_INT:
4688 intr_type = usb_endpoint_interrupt_type(desc);
4689 if (intr_type == USB_ENDPOINT_INTR_NOTIFICATION) {
4690 timeout_ns = udev->u1_params.sel * 3;
4691 break;
4692 }
4693 /* Otherwise the calculation is the same as isoc eps */
4694 fallthrough;
4695 case USB_ENDPOINT_XFER_ISOC:
4696 timeout_ns = xhci_service_interval_to_ns(desc);
4697 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns * 105, 100);
4698 if (timeout_ns < udev->u1_params.sel * 2)
4699 timeout_ns = udev->u1_params.sel * 2;
4700 break;
4701 default:
4702 return 0;
4703 }
4704
4705 return timeout_ns;
4706}
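/*
 * Worked example (illustrative numbers, editor's note): a bulk endpoint
 * with U1 SEL = 20000 ns returns 5 * 20000 = 100000 ns; an isoc endpoint
 * with bInterval = 1 (125000 ns service interval) returns
 * max(125000 * 105 / 100, 2 * 20000) = 131250 ns.
 */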
4707
4708/* Returns the hub-encoded U1 timeout value. */
4709static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
4710 struct usb_device *udev,
4711 struct usb_endpoint_descriptor *desc)
4712{
4713 unsigned long long timeout_ns;
4714
4715 /* Prevent U1 if service interval is shorter than U1 exit latency */
4716 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4717 if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
4718 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
4719 return USB3_LPM_DISABLED;
4720 }
4721 }
4722
4723 if (xhci->quirks & XHCI_INTEL_HOST)
4724 timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
4725 else
4726 timeout_ns = udev->u1_params.sel;
4727
4728 /* The U1 timeout is encoded in 1us intervals.
4729 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
4730 */
4731 if (timeout_ns == USB3_LPM_DISABLED)
4732 timeout_ns = 1;
4733 else
4734 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 1000);
4735
4736 /* If the necessary timeout value is bigger than what we can set in the
4737 * USB 3.0 hub, we have to disable hub-initiated U1.
4738 */
4739 if (timeout_ns <= USB3_LPM_U1_MAX_TIMEOUT)
4740 return timeout_ns;
4741 dev_dbg(&udev->dev, "Hub-initiated U1 disabled "
4742			"due to long timeout %llu us\n", timeout_ns);
4743 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U1);
4744}
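/*
 * Worked example (illustrative numbers, editor's note): a 100000 ns
 * timeout encodes to DIV_ROUND_UP(100000, 1000) = 100, which fits in the
 * hub's U1 timeout field; 131250 ns encodes to 132, which exceeds
 * USB3_LPM_U1_MAX_TIMEOUT (127 us) and falls back to
 * xhci_get_timeout_no_hub_lpm().
 */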
4745
4746/* The U2 timeout should be the maximum of:
4747 * - 10 ms (to avoid the bandwidth impact on the scheduler)
4748 * - largest bInterval of any active periodic endpoint (to avoid going
4749 * into lower power link states between intervals).
4750 * - the U2 Exit Latency of the device
4751 */
4752static unsigned long long xhci_calculate_intel_u2_timeout(
4753 struct usb_device *udev,
4754 struct usb_endpoint_descriptor *desc)
4755{
4756 unsigned long long timeout_ns;
4757 unsigned long long u2_del_ns;
4758
4759 timeout_ns = 10 * 1000 * 1000;
4760
4761 if ((usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) &&
4762 (xhci_service_interval_to_ns(desc) > timeout_ns))
4763 timeout_ns = xhci_service_interval_to_ns(desc);
4764
4765 u2_del_ns = le16_to_cpu(udev->bos->ss_cap->bU2DevExitLat) * 1000ULL;
4766 if (u2_del_ns > timeout_ns)
4767 timeout_ns = u2_del_ns;
4768
4769 return timeout_ns;
4770}
4771
4772/* Returns the hub-encoded U2 timeout value. */
4773static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
4774 struct usb_device *udev,
4775 struct usb_endpoint_descriptor *desc)
4776{
4777 unsigned long long timeout_ns;
4778
4779 /* Prevent U2 if service interval is shorter than U2 exit latency */
4780 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
4781 if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
4782 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
4783 return USB3_LPM_DISABLED;
4784 }
4785 }
4786
4787 if (xhci->quirks & XHCI_INTEL_HOST)
4788 timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
4789 else
4790 timeout_ns = udev->u2_params.sel;
4791
4792 /* The U2 timeout is encoded in 256us intervals */
4793 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
4794 /* If the necessary timeout value is bigger than what we can set in the
4795 * USB 3.0 hub, we have to disable hub-initiated U2.
4796 */
4797 if (timeout_ns <= USB3_LPM_U2_MAX_TIMEOUT)
4798 return timeout_ns;
4799 dev_dbg(&udev->dev, "Hub-initiated U2 disabled "
4800			"due to long timeout %llu * 256 us\n", timeout_ns);
4801 return xhci_get_timeout_no_hub_lpm(udev, USB3_LPM_U2);
4802}
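/*
 * Worked example (illustrative numbers, editor's note): the 10 ms Intel
 * U2 floor encodes to DIV_ROUND_UP(10000000, 256000) = 40 (units of
 * 256 us), well under USB3_LPM_U2_MAX_TIMEOUT (254), so 40 is returned
 * to the hub.
 */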
4803
4804static u16 xhci_call_host_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4805 struct usb_device *udev,
4806 struct usb_endpoint_descriptor *desc,
4807 enum usb3_link_state state,
4808 u16 *timeout)
4809{
4810 if (state == USB3_LPM_U1)
4811 return xhci_calculate_u1_timeout(xhci, udev, desc);
4812 else if (state == USB3_LPM_U2)
4813 return xhci_calculate_u2_timeout(xhci, udev, desc);
4814
4815 return USB3_LPM_DISABLED;
4816}
4817
4818static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
4819 struct usb_device *udev,
4820 struct usb_endpoint_descriptor *desc,
4821 enum usb3_link_state state,
4822 u16 *timeout)
4823{
4824 u16 alt_timeout;
4825
4826 alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
4827 desc, state, timeout);
4828
4829 /* If we found we can't enable hub-initiated LPM, and
4830 * the U1 or U2 exit latency was too high to allow
4831 * device-initiated LPM as well, then we will disable LPM
4832 * for this device, so stop searching any further.
4833 */
4834 if (alt_timeout == USB3_LPM_DISABLED) {
4835 *timeout = alt_timeout;
4836 return -E2BIG;
4837 }
4838 if (alt_timeout > *timeout)
4839 *timeout = alt_timeout;
4840 return 0;
4841}
4842
4843static int xhci_update_timeout_for_interface(struct xhci_hcd *xhci,
4844 struct usb_device *udev,
4845 struct usb_host_interface *alt,
4846 enum usb3_link_state state,
4847 u16 *timeout)
4848{
4849 int j;
4850
4851 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
4852 if (xhci_update_timeout_for_endpoint(xhci, udev,
4853 &alt->endpoint[j].desc, state, timeout))
4854 return -E2BIG;
4855 }
4856 return 0;
4857}
4858
4859static int xhci_check_intel_tier_policy(struct usb_device *udev,
4860 enum usb3_link_state state)
4861{
4862 struct usb_device *parent;
4863 unsigned int num_hubs;
4864
4865 if (state == USB3_LPM_U2)
4866 return 0;
4867
4868 /* Don't enable U1 if the device is on a 2nd tier hub or lower. */
4869 for (parent = udev->parent, num_hubs = 0; parent->parent;
4870 parent = parent->parent)
4871 num_hubs++;
4872
4873 if (num_hubs < 2)
4874 return 0;
4875
4876 dev_dbg(&udev->dev, "Disabling U1 link state for device"
4877 " below second-tier hub.\n");
4878 dev_dbg(&udev->dev, "Plug device into first-tier hub "
4879 "to decrease power consumption.\n");
4880 return -E2BIG;
4881}
4882
4883static int xhci_check_tier_policy(struct xhci_hcd *xhci,
4884 struct usb_device *udev,
4885 enum usb3_link_state state)
4886{
4887 if (xhci->quirks & XHCI_INTEL_HOST)
4888 return xhci_check_intel_tier_policy(udev, state);
4889 else
4890 return 0;
4891}
4892
4893/* Returns the U1 or U2 timeout that should be enabled.
4894 * If the tier check or timeout setting functions return with a non-zero exit
4895 * code, that means the timeout value has been finalized and we shouldn't look
4896 * at any more endpoints.
4897 */
4898static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
4899 struct usb_device *udev, enum usb3_link_state state)
4900{
4901 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4902 struct usb_host_config *config;
4903 char *state_name;
4904 int i;
4905 u16 timeout = USB3_LPM_DISABLED;
4906
4907 if (state == USB3_LPM_U1)
4908 state_name = "U1";
4909 else if (state == USB3_LPM_U2)
4910 state_name = "U2";
4911 else {
4912 dev_warn(&udev->dev, "Can't enable unknown link state %i\n",
4913 state);
4914 return timeout;
4915 }
4916
4917 if (xhci_check_tier_policy(xhci, udev, state) < 0)
4918 return timeout;
4919
4920 /* Gather some information about the currently installed configuration
4921 * and alternate interface settings.
4922 */
4923 if (xhci_update_timeout_for_endpoint(xhci, udev, &udev->ep0.desc,
4924 state, &timeout))
4925 return timeout;
4926
4927 config = udev->actconfig;
4928 if (!config)
4929 return timeout;
4930
4931 for (i = 0; i < config->desc.bNumInterfaces; i++) {
4932 struct usb_driver *driver;
4933 struct usb_interface *intf = config->interface[i];
4934
4935 if (!intf)
4936 continue;
4937
4938 /* Check if any currently bound drivers want hub-initiated LPM
4939 * disabled.
4940 */
4941 if (intf->dev.driver) {
4942 driver = to_usb_driver(intf->dev.driver);
4943 if (driver && driver->disable_hub_initiated_lpm) {
4944 dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
4945 state_name, driver->name);
4946 timeout = xhci_get_timeout_no_hub_lpm(udev,
4947 state);
4948 if (timeout == USB3_LPM_DISABLED)
4949 return timeout;
4950 }
4951 }
4952
4953 /* Not sure how this could happen... */
4954 if (!intf->cur_altsetting)
4955 continue;
4956
4957 if (xhci_update_timeout_for_interface(xhci, udev,
4958 intf->cur_altsetting,
4959 state, &timeout))
4960 return timeout;
4961 }
4962 return timeout;
4963}
4964
4965static int calculate_max_exit_latency(struct usb_device *udev,
4966 enum usb3_link_state state_changed,
4967 u16 hub_encoded_timeout)
4968{
4969 unsigned long long u1_mel_us = 0;
4970 unsigned long long u2_mel_us = 0;
4971 unsigned long long mel_us = 0;
4972 bool disabling_u1;
4973 bool disabling_u2;
4974 bool enabling_u1;
4975 bool enabling_u2;
4976
4977 disabling_u1 = (state_changed == USB3_LPM_U1 &&
4978 hub_encoded_timeout == USB3_LPM_DISABLED);
4979 disabling_u2 = (state_changed == USB3_LPM_U2 &&
4980 hub_encoded_timeout == USB3_LPM_DISABLED);
4981
4982 enabling_u1 = (state_changed == USB3_LPM_U1 &&
4983 hub_encoded_timeout != USB3_LPM_DISABLED);
4984 enabling_u2 = (state_changed == USB3_LPM_U2 &&
4985 hub_encoded_timeout != USB3_LPM_DISABLED);
4986
4987 /* If U1 was already enabled and we're not disabling it,
4988 * or we're going to enable U1, account for the U1 max exit latency.
4989 */
4990 if ((udev->u1_params.timeout != USB3_LPM_DISABLED && !disabling_u1) ||
4991 enabling_u1)
4992 u1_mel_us = DIV_ROUND_UP(udev->u1_params.mel, 1000);
4993 if ((udev->u2_params.timeout != USB3_LPM_DISABLED && !disabling_u2) ||
4994 enabling_u2)
4995 u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
4996
4997 if (u1_mel_us > u2_mel_us)
4998 mel_us = u1_mel_us;
4999 else
5000 mel_us = u2_mel_us;
5001 /* xHCI host controller max exit latency field is only 16 bits wide. */
5002 if (mel_us > MAX_EXIT) {
5003 dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
5004 "is too big.\n", mel_us);
5005 return -E2BIG;
5006 }
5007 return mel_us;
5008}
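/*
 * Worked example (illustrative numbers, editor's note): enabling U2 on a
 * device whose U2 MEL is 2300 ns while U1 stays enabled with a 1200 ns
 * MEL gives u1_mel_us = 2 and u2_mel_us = 3, so 3 us becomes the slot's
 * max exit latency.
 */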
5009
5010/* Returns the USB3 hub-encoded value for the U1/U2 timeout. */
5011static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5012 struct usb_device *udev, enum usb3_link_state state)
5013{
5014 struct xhci_hcd *xhci;
5015 u16 hub_encoded_timeout;
5016 int mel;
5017 int ret;
5018
5019 xhci = hcd_to_xhci(hcd);
5020 /* The LPM timeout values are pretty host-controller specific, so don't
5021 * enable hub-initiated timeouts unless the vendor has provided
5022 * information about their timeout algorithm.
5023 */
5024 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5025 !xhci->devs[udev->slot_id])
5026 return USB3_LPM_DISABLED;
5027
5028 hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
5029 mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
5030 if (mel < 0) {
5031 /* Max Exit Latency is too big, disable LPM. */
5032 hub_encoded_timeout = USB3_LPM_DISABLED;
5033 mel = 0;
5034 }
5035
5036 ret = xhci_change_max_exit_latency(xhci, udev, mel);
5037 if (ret)
5038 return ret;
5039 return hub_encoded_timeout;
5040}
5041
5042static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5043 struct usb_device *udev, enum usb3_link_state state)
5044{
5045 struct xhci_hcd *xhci;
5046 u16 mel;
5047
5048 xhci = hcd_to_xhci(hcd);
5049 if (!xhci || !(xhci->quirks & XHCI_LPM_SUPPORT) ||
5050 !xhci->devs[udev->slot_id])
5051 return 0;
5052
5053 mel = calculate_max_exit_latency(udev, state, USB3_LPM_DISABLED);
5054 return xhci_change_max_exit_latency(xhci, udev, mel);
5055}
5056#else /* CONFIG_PM */
5057
5058static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
5059 struct usb_device *udev, int enable)
5060{
5061 return 0;
5062}
5063
5064static int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev)
5065{
5066 return 0;
5067}
5068
5069static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
5070 struct usb_device *udev, enum usb3_link_state state)
5071{
5072 return USB3_LPM_DISABLED;
5073}
5074
5075static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
5076 struct usb_device *udev, enum usb3_link_state state)
5077{
5078 return 0;
5079}
5080#endif /* CONFIG_PM */
5081
5082/*-------------------------------------------------------------------------*/
5083
5084/* Once a hub descriptor is fetched for a device, we need to update the xHC's
5085 * internal data structures for the device.
5086 */
5087static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
5088 struct usb_tt *tt, gfp_t mem_flags)
5089{
5090 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5091 struct xhci_virt_device *vdev;
5092 struct xhci_command *config_cmd;
5093 struct xhci_input_control_ctx *ctrl_ctx;
5094 struct xhci_slot_ctx *slot_ctx;
5095 unsigned long flags;
5096 unsigned think_time;
5097 int ret;
5098
5099 /* Ignore root hubs */
5100 if (!hdev->parent)
5101 return 0;
5102
5103 vdev = xhci->devs[hdev->slot_id];
5104 if (!vdev) {
5105 xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
5106 return -EINVAL;
5107 }
5108
5109 config_cmd = xhci_alloc_command_with_ctx(xhci, true, mem_flags);
5110 if (!config_cmd)
5111 return -ENOMEM;
5112
5113 ctrl_ctx = xhci_get_input_control_ctx(config_cmd->in_ctx);
5114 if (!ctrl_ctx) {
5115 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
5116 __func__);
5117 xhci_free_command(xhci, config_cmd);
5118 return -ENOMEM;
5119 }
5120
5121 spin_lock_irqsave(&xhci->lock, flags);
5122 if (hdev->speed == USB_SPEED_HIGH &&
5123 xhci_alloc_tt_info(xhci, vdev, hdev, tt, GFP_ATOMIC)) {
5124 xhci_dbg(xhci, "Could not allocate xHCI TT structure.\n");
5125 xhci_free_command(xhci, config_cmd);
5126 spin_unlock_irqrestore(&xhci->lock, flags);
5127 return -ENOMEM;
5128 }
5129
5130 xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
5131 ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
5132 slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
5133 slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
5134 /*
5135	 * refer to section 6.2.2: MTT should be 0 for a full speed hub,
5136	 * but it may already be set to 1 when setting up an xHCI virtual
5137 * device, so clear it anyway.
5138 */
5139 if (tt->multi)
5140 slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
5141 else if (hdev->speed == USB_SPEED_FULL)
5142 slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
5143
5144 if (xhci->hci_version > 0x95) {
5145 xhci_dbg(xhci, "xHCI version %x needs hub "
5146 "TT think time and number of ports\n",
5147 (unsigned int) xhci->hci_version);
5148 slot_ctx->dev_info2 |= cpu_to_le32(XHCI_MAX_PORTS(hdev->maxchild));
5149 /* Set TT think time - convert from ns to FS bit times.
5150 * 0 = 8 FS bit times, 1 = 16 FS bit times,
5151 * 2 = 24 FS bit times, 3 = 32 FS bit times.
5152 *
5153 * xHCI 1.0: this field shall be 0 if the device is not a
5154		 * High-speed hub.
5155 */
5156 think_time = tt->think_time;
5157 if (think_time != 0)
5158 think_time = (think_time / 666) - 1;
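		/*
		 * Worked example: the USB core stores tt->think_time in ns,
		 * with 666 ns per 8 FS bit times, so a hub reporting 16 FS
		 * bit times has think_time = 1332 and (1332 / 666) - 1 = 1,
		 * the encoding for 16 FS bit times.
		 */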
5159 if (xhci->hci_version < 0x100 || hdev->speed == USB_SPEED_HIGH)
5160 slot_ctx->tt_info |=
5161 cpu_to_le32(TT_THINK_TIME(think_time));
5162 } else {
5163		xhci_dbg(xhci,
5164			 "xHCI version %x doesn't need hub TT think time or number of ports\n",
5165			 (unsigned int) xhci->hci_version);
5166 }
5167 slot_ctx->dev_state = 0;
5168 spin_unlock_irqrestore(&xhci->lock, flags);
5169
5170 xhci_dbg(xhci, "Set up %s for hub device.\n",
5171 (xhci->hci_version > 0x95) ?
5172 "configure endpoint" : "evaluate context");
5173
5174 /* Issue and wait for the configure endpoint or
5175 * evaluate context command.
5176 */
5177 if (xhci->hci_version > 0x95)
5178 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5179 false, false);
5180 else
5181 ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
5182 true, false);
5183
5184 xhci_free_command(xhci, config_cmd);
5185 return ret;
5186}
5187
5188static int xhci_get_frame(struct usb_hcd *hcd)
5189{
5190 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5191 /* EHCI mods by the periodic size. Why? */
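	/* MFINDEX counts 125 us microframes; 8 per 1 ms frame, hence >> 3. */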
5192 return readl(&xhci->run_regs->microframe_index) >> 3;
5193}
5194
5195int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
5196{
5197 struct xhci_hcd *xhci;
5198 /*
5199 * TODO: Check with DWC3 clients for sysdev according to
5200 * quirks
5201 */
5202 struct device *dev = hcd->self.sysdev;
5203 unsigned int minor_rev;
5204 int retval;
5205
5206 /* Accept arbitrarily long scatter-gather lists */
5207 hcd->self.sg_tablesize = ~0;
5208
5209	/* Support building a packet from discontinuous buffers */
5210 hcd->self.no_sg_constraint = 1;
5211
5212 /* XHCI controllers don't stop the ep queue on short packets :| */
5213 hcd->self.no_stop_on_short = 1;
5214
5215 xhci = hcd_to_xhci(hcd);
5216
5217 if (usb_hcd_is_primary_hcd(hcd)) {
5218 xhci->main_hcd = hcd;
5219 xhci->usb2_rhub.hcd = hcd;
5220 /* Mark the first roothub as being USB 2.0.
5221 * The xHCI driver will register the USB 3.0 roothub.
5222 */
5223 hcd->speed = HCD_USB2;
5224 hcd->self.root_hub->speed = USB_SPEED_HIGH;
5225 /*
5226		 * The USB 2.0 roothub under xHCI has an integrated TT
5227		 * (rate matching hub), as opposed to having an OHCI/UHCI
5228 * companion controller.
5229 */
5230 hcd->has_tt = 1;
5231 } else {
5232 /*
5233		 * The early xHCI 1.1 spec did not mention that USB 3.1 capable
5234		 * hosts should return 0x31 for sbrn, or that the minor revision
5235		 * is a two-digit BCD containing minor and sub-minor numbers.
5236 * This was later clarified in xHCI 1.2.
5237 *
5238 * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
5239 * minor revision set to 0x1 instead of 0x10.
5240 */
5241 if (xhci->usb3_rhub.min_rev == 0x1)
5242 minor_rev = 1;
5243 else
5244 minor_rev = xhci->usb3_rhub.min_rev / 0x10;
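		/*
		 * e.g. a spec-conforming min_rev of 0x10 yields minor_rev 1
		 * (USB 3.1) and 0x20 yields 2 (USB 3.2), while quirky hosts
		 * report min_rev 0x1 directly.
		 */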
5245
5246 switch (minor_rev) {
5247 case 2:
5248 hcd->speed = HCD_USB32;
5249 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5250 hcd->self.root_hub->rx_lanes = 2;
5251 hcd->self.root_hub->tx_lanes = 2;
5252 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x2;
5253 break;
5254 case 1:
5255 hcd->speed = HCD_USB31;
5256 hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
5257 hcd->self.root_hub->ssp_rate = USB_SSP_GEN_2x1;
5258 break;
5259 }
5260 xhci_info(xhci, "Host supports USB 3.%x %sSuperSpeed\n",
5261 minor_rev,
5262 minor_rev ? "Enhanced " : "");
5263
5264 xhci->usb3_rhub.hcd = hcd;
5265 /* xHCI private pointer was set in xhci_pci_probe for the second
5266 * registered roothub.
5267 */
5268 return 0;
5269 }
5270
5271 mutex_init(&xhci->mutex);
5272 xhci->cap_regs = hcd->regs;
5273 xhci->op_regs = hcd->regs +
5274 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
5275 xhci->run_regs = hcd->regs +
5276 (readl(&xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
5277 /* Cache read-only capability registers */
5278 xhci->hcs_params1 = readl(&xhci->cap_regs->hcs_params1);
5279 xhci->hcs_params2 = readl(&xhci->cap_regs->hcs_params2);
5280 xhci->hcs_params3 = readl(&xhci->cap_regs->hcs_params3);
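	/* hc_capbase carries HCIVERSION in its upper 16 bits; read it into
	 * hcc_params as scratch to extract the version before caching the
	 * real HCCPARAMS value.
	 */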
5281 xhci->hcc_params = readl(&xhci->cap_regs->hc_capbase);
5282 xhci->hci_version = HC_VERSION(xhci->hcc_params);
5283 xhci->hcc_params = readl(&xhci->cap_regs->hcc_params);
5284 if (xhci->hci_version > 0x100)
5285 xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
5286
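	/*
	 * Quirk bits passed via the "quirks" module parameter are OR'ed in
	 * first; the get_quirks() callback (PCI or platform glue) may then
	 * add device-specific quirk bits on top.
	 */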
5287 xhci->quirks |= quirks;
5288
5289 get_quirks(dev, xhci);
5290
5291	/* xHCI controllers that follow the xHCI 1.0 spec generate a spurious
5292	 * success event after a short transfer. This quirk makes the driver
5293	 * ignore such spurious events.
5294 */
5295 if (xhci->hci_version > 0x96)
5296 xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
5297
5298 /* Make sure the HC is halted. */
5299 retval = xhci_halt(xhci);
5300 if (retval)
5301 return retval;
5302
5303 xhci_zero_64b_regs(xhci);
5304
5305 xhci_dbg(xhci, "Resetting HCD\n");
5306 /* Reset the internal HC memory state and registers. */
5307 retval = xhci_reset(xhci);
5308 if (retval)
5309 return retval;
5310 xhci_dbg(xhci, "Reset complete\n");
5311
5312 /*
5313 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
5314	 * of HCCPARAMS1 is set to 1 even though the xHC does not actually
5315	 * support 64-bit memory pointers. So this driver clears the AC64
5316	 * bit of xhci->hcc_params so that dma_set_coherent_mask(dev,
5317	 * DMA_BIT_MASK(32)) is called below in this xhci_gen_setup().
5318 */
5319 if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
5320 xhci->hcc_params &= ~BIT(0);
5321
5322	/* Set dma_mask and coherent_dma_mask to 64 bits,
5323	 * if the xHC supports 64-bit addressing */
5324 if (HCC_64BIT_ADDR(xhci->hcc_params) &&
5325 !dma_set_mask(dev, DMA_BIT_MASK(64))) {
5326 xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
5327 dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
5328 } else {
5329 /*
5330 * This is to avoid error in cases where a 32-bit USB
5331 * controller is used on a 64-bit capable system.
5332 */
5333 retval = dma_set_mask(dev, DMA_BIT_MASK(32));
5334 if (retval)
5335 return retval;
5336 xhci_dbg(xhci, "Enabling 32-bit DMA addresses.\n");
5337 dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
5338 }
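	/*
	 * Net effect: a host advertising AC64 on a 64-bit capable bus gets
	 * DMA_BIT_MASK(64) for both masks; everything else (including hosts
	 * hit by XHCI_NO_64BIT_SUPPORT above) falls back to 32-bit DMA.
	 */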
5339
5340 xhci_dbg(xhci, "Calling HCD init\n");
5341 /* Initialize HCD and host controller data structures. */
5342 retval = xhci_init(hcd);
5343 if (retval)
5344 return retval;
5345 xhci_dbg(xhci, "Called HCD init\n");
5346
5347 xhci_info(xhci, "hcc params 0x%08x hci version 0x%x quirks 0x%016llx\n",
5348 xhci->hcc_params, xhci->hci_version, xhci->quirks);
5349
5350 return 0;
5351}
5352EXPORT_SYMBOL_GPL(xhci_gen_setup);
5353
5354static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
5355 struct usb_host_endpoint *ep)
5356{
5357 struct xhci_hcd *xhci;
5358 struct usb_device *udev;
5359 unsigned int slot_id;
5360 unsigned int ep_index;
5361 unsigned long flags;
5362
5363 xhci = hcd_to_xhci(hcd);
5364
5365 spin_lock_irqsave(&xhci->lock, flags);
5366 udev = (struct usb_device *)ep->hcpriv;
5367 slot_id = udev->slot_id;
5368 ep_index = xhci_get_endpoint_index(&ep->desc);
5369
5370 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
5371 xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
5372 spin_unlock_irqrestore(&xhci->lock, flags);
5373}
5374
5375static const struct hc_driver xhci_hc_driver = {
5376 .description = "xhci-hcd",
5377 .product_desc = "xHCI Host Controller",
5378 .hcd_priv_size = sizeof(struct xhci_hcd),
5379
5380 /*
5381 * generic hardware linkage
5382 */
5383 .irq = xhci_irq,
5384 .flags = HCD_MEMORY | HCD_DMA | HCD_USB3 | HCD_SHARED |
5385 HCD_BH,
5386
5387 /*
5388 * basic lifecycle operations
5389 */
5390 .reset = NULL, /* set in xhci_init_driver() */
5391 .start = xhci_run,
5392 .stop = xhci_stop,
5393 .shutdown = xhci_shutdown,
5394
5395 /*
5396 * managing i/o requests and associated device resources
5397 */
5398 .map_urb_for_dma = xhci_map_urb_for_dma,
5399 .unmap_urb_for_dma = xhci_unmap_urb_for_dma,
5400 .urb_enqueue = xhci_urb_enqueue,
5401 .urb_dequeue = xhci_urb_dequeue,
5402 .alloc_dev = xhci_alloc_dev,
5403 .free_dev = xhci_free_dev,
5404 .alloc_streams = xhci_alloc_streams,
5405 .free_streams = xhci_free_streams,
5406 .add_endpoint = xhci_add_endpoint,
5407 .drop_endpoint = xhci_drop_endpoint,
5408 .endpoint_disable = xhci_endpoint_disable,
5409 .endpoint_reset = xhci_endpoint_reset,
5410 .check_bandwidth = xhci_check_bandwidth,
5411 .reset_bandwidth = xhci_reset_bandwidth,
5412 .address_device = xhci_address_device,
5413 .enable_device = xhci_enable_device,
5414 .update_hub_device = xhci_update_hub_device,
5415 .reset_device = xhci_discover_or_reset_device,
5416
5417 /*
5418 * scheduling support
5419 */
5420 .get_frame_number = xhci_get_frame,
5421
5422 /*
5423 * root hub support
5424 */
5425 .hub_control = xhci_hub_control,
5426 .hub_status_data = xhci_hub_status_data,
5427 .bus_suspend = xhci_bus_suspend,
5428 .bus_resume = xhci_bus_resume,
5429 .get_resuming_ports = xhci_get_resuming_ports,
5430
5431 /*
5432 * call back when device connected and addressed
5433 */
5434 .update_device = xhci_update_device,
5435 .set_usb2_hw_lpm = xhci_set_usb2_hardware_lpm,
5436 .enable_usb3_lpm_timeout = xhci_enable_usb3_lpm_timeout,
5437 .disable_usb3_lpm_timeout = xhci_disable_usb3_lpm_timeout,
5438 .find_raw_port_number = xhci_find_raw_port_number,
5439 .clear_tt_buffer_complete = xhci_clear_tt_buffer_complete,
5440};
5441
5442void xhci_init_driver(struct hc_driver *drv,
5443 const struct xhci_driver_overrides *over)
5444{
5445 BUG_ON(!over);
5446
5447 /* Copy the generic table to drv then apply the overrides */
5448 *drv = xhci_hc_driver;
5449
5450 if (over) {
5451 drv->hcd_priv_size += over->extra_priv_size;
5452 if (over->reset)
5453 drv->reset = over->reset;
5454 if (over->start)
5455 drv->start = over->start;
5456 if (over->add_endpoint)
5457 drv->add_endpoint = over->add_endpoint;
5458 if (over->drop_endpoint)
5459 drv->drop_endpoint = over->drop_endpoint;
5460 if (over->check_bandwidth)
5461 drv->check_bandwidth = over->check_bandwidth;
5462 if (over->reset_bandwidth)
5463 drv->reset_bandwidth = over->reset_bandwidth;
5464 }
5465}
5466EXPORT_SYMBOL_GPL(xhci_init_driver);
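/*
 * Usage sketch: a glue driver copies the generic table and layers its
 * overrides on top, roughly as the platform glue (xhci-plat.c) does.
 * The names below are illustrative of that pattern, not part of this
 * file:
 *
 *	static struct hc_driver __read_mostly xhci_plat_hc_driver;
 *
 *	static const struct xhci_driver_overrides xhci_plat_overrides __initconst = {
 *		.extra_priv_size = sizeof(struct xhci_plat_priv),
 *		.reset = xhci_plat_setup,
 *		.start = xhci_plat_start,
 *	};
 *
 *	xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
 */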
5467
5468MODULE_DESCRIPTION(DRIVER_DESC);
5469MODULE_AUTHOR(DRIVER_AUTHOR);
5470MODULE_LICENSE("GPL");
5471
5472static int __init xhci_hcd_init(void)
5473{
5474 /*
5475 * Check the compiler generated sizes of structures that must be laid
5476 * out in specific ways for hardware access.
5477 */
5478 BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
5479 BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
5480 BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
5481 /* xhci_device_control has eight fields, and also
5482 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
5483 */
5484 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
5485 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
5486 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
5487 BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 8*32/8);
5488 BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
5489 /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
5490 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
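	/*
	 * Each expression above is (number of 32-bit registers) * 32 / 8
	 * bytes, e.g. the doorbell array is 256 registers * 4 bytes = 1024.
	 */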
5491
5492 if (usb_disabled())
5493 return -ENODEV;
5494
5495 xhci_debugfs_create_root();
5496
5497 return 0;
5498}
5499
5500/*
5501 * If an init function is provided, an exit function must also be provided
5502 * to allow module unload.
5503 */
5504static void __exit xhci_hcd_fini(void)
5505{
5506 xhci_debugfs_remove_root();
5507}
5508
5509module_init(xhci_hcd_init);
5510module_exit(xhci_hcd_fini);