// SPDX-License-Identifier: GPL-2.0
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "xhci.h"
#include "xhci-trace.h"

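/*
 * Illustrative sketch only (not used by the driver): the consumer-side
 * ownership test from the "Consumer rules" above. A TRB belongs to the
 * consumer when its cycle bit matches the ring cycle state. The helper
 * name is ours; the real event handlers open-code this comparison.
 */
static inline bool trb_owned_by_consumer(struct xhci_ring *ring,
					 union xhci_trb *trb)
{
	u32 flags = le32_to_cpu(trb->event_cmd.flags);

	/* ring->cycle_state is 0 or 1; TRB_CYCLE is bit 0 of the control word */
	return (flags & TRB_CYCLE) == ring->cycle_state;
}
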
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}

static bool trb_is_noop(union xhci_trb *trb)
{
	return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
}

static bool trb_is_link(union xhci_trb *trb)
{
	return TRB_TYPE_LINK_LE32(trb->link.control);
}

static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
{
	return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
}

static bool last_trb_on_ring(struct xhci_ring *ring,
			struct xhci_segment *seg, union xhci_trb *trb)
{
	return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
}

static bool link_trb_toggles_cycle(union xhci_trb *trb)
{
	return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

static bool last_td_in_urb(struct xhci_td *td)
{
	struct urb_priv *urb_priv = td->urb->hcpriv;

	return urb_priv->num_tds_done == urb_priv->num_tds;
}

static void inc_td_cnt(struct urb *urb)
{
	struct urb_priv *urb_priv = urb->hcpriv;

	urb_priv->num_tds_done++;
}

static void trb_to_noop(union xhci_trb *trb, u32 noop_type)
{
	if (trb_is_link(trb)) {
		/* unchain chained link TRBs */
		trb->link.control &= cpu_to_le32(~TRB_CHAIN);
	} else {
		trb->generic.field[0] = 0;
		trb->generic.field[1] = 0;
		trb->generic.field[2] = 0;
		/* Preserve only the cycle bit of this TRB */
		trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
		trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
	}
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (trb_is_link(*trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 */
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	unsigned int link_trb_count = 0;

	/* event ring doesn't have link trbs, check for last trb */
	if (ring->type == TYPE_EVENT) {
		if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			ring->dequeue++;
			goto out;
		}
		if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
			ring->cycle_state ^= 1;
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		goto out;
	}

	/* All other rings have link trbs */
	if (!trb_is_link(ring->dequeue)) {
		if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
			xhci_warn(xhci, "Missing link TRB at end of segment\n");
		} else {
			ring->dequeue++;
			ring->num_trbs_free++;
		}
	}

	while (trb_is_link(ring->dequeue)) {
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "Ring is an endless link TRB loop\n");
			break;
		}
	}
out:
	trace_xhci_inc_deq(ring);

	return;
}

/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *		prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		    bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned int link_trb_count = 0;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	/* If this is not the event ring, there is one less usable TRB */
	if (!trb_is_link(ring->enqueue))
		ring->num_trbs_free--;

	if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
		xhci_err(xhci, "Tried to move enqueue past ring segment\n");
		return;
	}

	next = ++(ring->enqueue);

	/* Update the enqueue pointer further if that was a link TRB */
	while (trb_is_link(next)) {

		/*
		 * If the caller doesn't plan on enqueueing more TDs before
		 * ringing the doorbell, then we don't want to give the link TRB
		 * to the hardware just yet. We'll give the link TRB back in
		 * prepare_ring() just before we enqueue the TD at the top of
		 * the ring.
		 */
		if (!chain && !more_trbs_coming)
			break;

		/* If we're not dealing with 0.95 hardware or isoc rings on
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
		if (!(ring->type == TYPE_ISOC &&
		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
		    !xhci_link_trb_quirk(xhci)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
		/* Give this link TRB to the hardware */
		wmb();
		next->link.control ^= cpu_to_le32(TRB_CYCLE);

		/* Toggle the cycle bit after the last ring segment. */
		if (link_trb_toggles_cycle(next))
			ring->cycle_state ^= 1;

		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;

		if (link_trb_count++ > ring->num_segs) {
			xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
			break;
		}
	}

	trace_xhci_inc_enq(ring);
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
 */
static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int num_trbs_in_deq_seg;

	if (ring->num_trbs_free < num_trbs)
		return 0;

	if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
		num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
		if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
			return 0;
	}

	return 1;
}
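
/*
 * Minimal caller sketch (assumed; loosely modeled on prepare_ring() later in
 * this driver): a producer checks for space and tries to grow the ring before
 * enqueuing. The helper name is hypothetical.
 */
static inline int ensure_room_on_ring(struct xhci_hcd *xhci,
				      struct xhci_ring *ring,
				      unsigned int num_trbs)
{
	if (room_on_ring(xhci, ring, num_trbs))
		return 0;
	/* Grow the ring; GFP_ATOMIC since callers may hold xhci->lock */
	return xhci_ring_expansion(xhci, ring, num_trbs, GFP_ATOMIC);
}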

/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
		return;

	xhci_dbg(xhci, "// Ding dong!\n");

	trace_xhci_ring_host_doorbell(0, DB_VALUE_HOST);

	writel(DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	readl(&xhci->dba->doorbell[0]);
}

static bool xhci_mod_cmd_timer(struct xhci_hcd *xhci, unsigned long delay)
{
	return mod_delayed_work(system_wq, &xhci->cmd_timer, delay);
}

static struct xhci_command *xhci_next_queued_cmd(struct xhci_hcd *xhci)
{
	return list_first_entry_or_null(&xhci->cmd_list, struct xhci_command,
					cmd_list);
}

/*
 * Turn all commands on command ring with status set to "aborted" to no-op trbs.
 * If there are other commands waiting then restart the ring and kick the timer.
 * This must be called with command ring stopped and xhci->lock held.
 */
static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
					 struct xhci_command *cur_cmd)
{
	struct xhci_command *i_cmd;

	/* Turn all aborted commands in list to no-ops, then restart */
	list_for_each_entry(i_cmd, &xhci->cmd_list, cmd_list) {

		if (i_cmd->status != COMP_COMMAND_ABORTED)
			continue;

		i_cmd->status = COMP_COMMAND_RING_STOPPED;

		xhci_dbg(xhci, "Turn aborted command %p to no-op\n",
			 i_cmd->command_trb);

		trb_to_noop(i_cmd->command_trb, TRB_CMD_NOOP);

		/*
		 * caller waiting for completion is called when command
		 * completion event is received for these no-op commands
		 */
	}

	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	/* ring command ring doorbell to restart the command ring */
	if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) &&
	    !(xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci->current_cmd = cur_cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
		xhci_ring_cmd_db(xhci);
	}
}

/* Must be called with xhci->lock held; releases and re-acquires the lock */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
	struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
	union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
	u64 crcr;
	int ret;

	xhci_dbg(xhci, "Abort command ring\n");

	reinit_completion(&xhci->cmd_ring_stop_completion);

	/*
	 * The control bits like command stop, abort are located in lower
	 * dword of the command ring control register.
	 * Some controllers require all 64 bits to be written to abort the ring.
	 * Make sure the upper dword is valid, pointing to the next command,
	 * avoiding corrupting the command ring pointer in case the command ring
	 * is stopped by the time the upper dword is written.
	 */
	next_trb(xhci, NULL, &new_seg, &new_deq);
	if (trb_is_link(new_deq))
		next_trb(xhci, NULL, &new_seg, &new_deq);

	crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
	xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

	/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
	 * completion of the Command Abort operation. If CRR is not negated in 5
	 * seconds then driver handles it as if host died (-ENODEV).
	 * In the future we should distinguish between -ENODEV and -ETIMEDOUT
	 * and try to recover a -ETIMEDOUT with a host controller reset.
	 */
	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
			     CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
	if (ret < 0) {
		xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
		xhci_halt(xhci);
		xhci_hc_died(xhci);
		return ret;
	}
	/*
	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
	 * but the completion event is never sent. Wait 2 secs (arbitrary
	 * number) to handle those cases after negation of CMD_RING_RUNNING.
	 */
	spin_unlock_irqrestore(&xhci->lock, flags);
	ret = wait_for_completion_timeout(&xhci->cmd_ring_stop_completion,
					  msecs_to_jiffies(2000));
	spin_lock_irqsave(&xhci->lock, flags);
	if (!ret) {
		xhci_dbg(xhci, "No stop event for abort, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);
	} else {
		xhci_handle_stopped_cmd_ring(xhci, xhci_next_queued_cmd(xhci));
	}
	return 0;
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 */
	if ((ep_state & EP_STOP_CMD_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED) || (ep_state & EP_CLEARING_TT))
		return;

	trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));

	writel(DB_VALUE(ep_index, stream_id), db_addr);
	/* flush the write */
	readl(db_addr);
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (ep->ring && !(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
	     stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					      stream_id);
	}
}

void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
					     unsigned int slot_id,
					     unsigned int ep_index)
{
	if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
		return NULL;
	}
	if (ep_index >= EP_CTX_PER_DEV) {
		xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
		return NULL;
	}
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
		return NULL;
	}

	return &xhci->devs[slot_id]->eps[ep_index];
}

static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
					      struct xhci_virt_ep *ep,
					      unsigned int stream_id)
{
	/* common case, no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (!ep->stream_info)
		return NULL;

	if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
		xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
			  stream_id, ep->vdev->slot_id, ep->ep_index);
		return NULL;
	}

	return ep->stream_info->stream_rings[stream_id];
}
/* Get the right ring for the given slot_id, ep_index and stream_id.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return NULL;

	return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}

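/*
 * Illustrative sketch (assumed): resolving the transfer ring for an URB.
 * The driver's real helper, xhci_urb_to_transfer_ring() in xhci.c, has this
 * general shape; the name below is ours, not the driver's.
 */
static inline struct xhci_ring *urb_to_ring_sketch(struct xhci_hcd *xhci,
						   struct urb *urb)
{
	/* slot, endpoint index and stream ID together name exactly one ring */
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
					   xhci_get_endpoint_index(&urb->ep->desc),
					   urb->stream_id);
}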

/*
 * Get the hw dequeue pointer xHC stopped on, either directly from the
 * endpoint context, or if streams are in use from the stream context.
 * The returned hw_dequeue contains the lowest four bits with cycle state
 * and possible stream context type.
 */
static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
			   unsigned int ep_index, unsigned int stream_id)
{
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_stream_ctx *st_ctx;
	struct xhci_virt_ep *ep;

	ep = &vdev->eps[ep_index];

	if (ep->ep_state & EP_HAS_STREAMS) {
		st_ctx = &ep->stream_info->stream_ctx_array[stream_id];
		return le64_to_cpu(st_ctx->stream_ring);
	}
	ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
	return le64_to_cpu(ep_ctx->deq);
}
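
/*
 * Illustrative sketch only: splitting the value returned by xhci_get_hw_deq()
 * into the 16-byte aligned dequeue DMA address and the cycle state, as the
 * cancellation paths below do inline. The helper name is hypothetical.
 */
static inline void split_hw_deq(u64 hw_deq, dma_addr_t *deq_dma, int *dcs)
{
	*deq_dma = hw_deq & ~0xfULL;	/* TRB pointers are 16-byte aligned */
	*dcs = hw_deq & 0x1;		/* DCS; low nibble may also carry SCT */
}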

static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *td)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_virt_ep *ep = &dev->eps[ep_index];
	struct xhci_ring *ep_ring;
	struct xhci_command *cmd;
	struct xhci_segment *new_seg;
	struct xhci_segment *halted_seg = NULL;
	union xhci_trb *new_deq;
	int new_cycle;
	union xhci_trb *halted_trb;
	int index = 0;
	dma_addr_t addr;
	u64 hw_dequeue;
	bool cycle_found = false;
	bool td_last_trb_found = false;
	u32 trb_sct = 0;
	int ret;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
			  stream_id);
		return -ENODEV;
	}
	/*
	 * A cancelled TD can complete with a stall if HW cached the TRB.
	 * In this case the driver can't find the TD, but if the ring is empty
	 * we can move the dequeue pointer to the current enqueue position.
	 * We shouldn't hit this anymore as cached cancelled TRBs are given back
	 * after clearing the cache, but be on the safe side and keep it anyway.
	 */
	if (!td) {
		if (list_empty(&ep_ring->td_list)) {
			new_seg = ep_ring->enq_seg;
			new_deq = ep_ring->enqueue;
			new_cycle = ep_ring->cycle_state;
			xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
			goto deq_found;
		} else {
			xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
			return -EINVAL;
		}
	}

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
	new_deq = ep_ring->dequeue;

	/*
	 * Quirk: xHC write-back of the DCS field in the hardware dequeue
	 * pointer is wrong - use the cycle state of the TRB pointed to by
	 * the dequeue pointer.
	 */
	if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS &&
	    !(ep->ep_state & EP_HAS_STREAMS))
		halted_seg = trb_in_td(xhci, td->start_seg,
				       td->first_trb, td->last_trb,
				       hw_dequeue & ~0xf, false);
	if (halted_seg) {
		index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) /
			 sizeof(*halted_trb);
		halted_trb = &halted_seg->trbs[index];
		new_cycle = halted_trb->generic.field[3] & 0x1;
		xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n",
			 (u8)(hw_dequeue & 0x1), index, new_cycle);
	} else {
		new_cycle = hw_dequeue & 0x1;
	}

	/*
	 * We want to find the pointer, segment and cycle state of the new trb
	 * (the one after current TD's last_trb). We know the cycle state at
	 * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
	 * found.
	 */
	do {
		if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq)
		    == (dma_addr_t)(hw_dequeue & ~0xf)) {
			cycle_found = true;
			if (td_last_trb_found)
				break;
		}
		if (new_deq == td->last_trb)
			td_last_trb_found = true;

		if (cycle_found && trb_is_link(new_deq) &&
		    link_trb_toggles_cycle(new_deq))
			new_cycle ^= 0x1;

		next_trb(xhci, ep_ring, &new_seg, &new_deq);

		/* Search wrapped around, bail out */
		if (new_deq == ep->ring->dequeue) {
			xhci_err(xhci, "Error: Failed finding new dequeue state\n");
			return -EINVAL;
		}

	} while (!cycle_found || !td_last_trb_found);

deq_found:

	/* Don't update the ring cycle state for the producer (us). */
	addr = xhci_trb_virt_to_dma(new_seg, new_deq);
	if (addr == 0) {
		xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
		xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
		return -EINVAL;
	}

	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
			  &addr);
		return -EBUSY;
	}

	/* This function gets called from contexts where it cannot sleep */
	cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!cmd) {
		xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
		return -ENOMEM;
	}

	if (stream_id)
		trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
	ret = queue_command(xhci, cmd,
		lower_32_bits(addr) | trb_sct | new_cycle,
		upper_32_bits(addr),
		STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
		EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return ret;
	}
	ep->queued_deq_seg = new_seg;
	ep->queued_deq_ptr = new_deq;

	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
		       "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);

	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
	xhci_ring_cmd_db(xhci);
	return 0;
}

/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		       struct xhci_td *td, bool flip_cycle)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;

	while (1) {
		trb_to_noop(trb, TRB_TR_NOOP);

		/* flip cycle if asked to */
		if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
			trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);

		if (trb == td->last_trb)
			break;

		next_trb(xhci, ep_ring, &seg, &trb);
	}
}

/*
 * Must be called with xhci->lock held in interrupt context,
 * releases and re-acquires xhci->lock
 */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
{
	struct urb *urb = cur_td->urb;
	struct urb_priv *urb_priv = urb->hcpriv;
	struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);

	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
		xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
		if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
			if (xhci->quirks & XHCI_AMD_PLL_FIX)
				usb_amd_quirk_pll_enable();
		}
	}
	xhci_urb_free_priv(urb_priv);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	trace_xhci_urb_giveback(urb);
	usb_hcd_giveback_urb(hcd, urb, status);
}

static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
					struct xhci_ring *ring, struct xhci_td *td)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	struct xhci_segment *seg = td->bounce_seg;
	struct urb *urb = td->urb;
	size_t len;

	if (!ring || !seg || !urb)
		return;

	if (usb_urb_dir_out(urb)) {
		dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
				 DMA_TO_DEVICE);
		return;
	}

	dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
			 DMA_FROM_DEVICE);
	/* for IN transfers we need to copy the data from bounce to sg */
	if (urb->num_sgs) {
		len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
					   seg->bounce_len, seg->bounce_offs);
		if (len != seg->bounce_len)
			xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
				  len, seg->bounce_len);
	} else {
		memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
		       seg->bounce_len);
	}
	seg->bounce_len = 0;
	seg->bounce_offs = 0;
}

static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
			   struct xhci_ring *ep_ring, int status)
{
	struct urb *urb = NULL;

	/* Clean up the endpoint's TD list */
	urb = td->urb;

	/* if a bounce buffer was used to align this td then unmap it */
	xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);

	/* Do one last check of the actual transfer length.
	 * If the host controller said we transferred more data than the buffer
	 * length, urb->actual_length will be a very big number (since it's
	 * unsigned). Play it safe and say we didn't transfer anything.
	 */
	if (urb->actual_length > urb->transfer_buffer_length) {
		xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
			  urb->transfer_buffer_length, urb->actual_length);
		urb->actual_length = 0;
		status = 0;
	}
	/* TD might be removed from td_list if we are giving back a cancelled URB */
	if (!list_empty(&td->td_list))
		list_del_init(&td->td_list);
	/* Giving back a cancelled URB, or a TD slated for cancellation that completed anyway */
	if (!list_empty(&td->cancelled_td_list))
		list_del_init(&td->cancelled_td_list);

	inc_td_cnt(urb);
	/* Giveback the urb when all the tds are completed */
	if (last_td_in_urb(td)) {
		if ((urb->actual_length != urb->transfer_buffer_length &&
		     (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
		    (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
			xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
				 urb, urb->actual_length,
				 urb->transfer_buffer_length, status);

		/* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			status = 0;
		xhci_giveback_urb_in_irq(xhci, td, status);
	}

	return 0;
}


/* Complete the cancelled URBs we unlinked from td_list. */
static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
{
	struct xhci_ring *ring;
	struct xhci_td *td, *tmp_td;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {

		ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);

		if (td->cancel_status == TD_CLEARED) {
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
		if (ep->xhci->xhc_state & XHCI_STATE_DYING)
			return;
	}
}

static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
				unsigned int ep_index, enum xhci_ep_reset_type reset_type)
{
	struct xhci_command *command;
	int ret = 0;

	command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
	if (!command) {
		ret = -ENOMEM;
		goto done;
	}

	xhci_dbg(xhci, "%s-reset ep %u, slot %u\n",
		 (reset_type == EP_HARD_RESET) ? "Hard" : "Soft",
		 ep_index, slot_id);

	ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
done:
	if (ret)
		xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
			 slot_id, ep_index, ret);
	return ret;
}

static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep,
				       struct xhci_td *td,
				       enum xhci_ep_reset_type reset_type)
{
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	/*
	 * Avoid resetting endpoint if link is inactive. Can cause host hang.
	 * Device will be reset soon to recover the link so don't do anything
	 */
	if (ep->vdev->flags & VDEV_PORT_ERROR)
		return -ENODEV;

	/* add td to cancelled list and let reset ep handler take care of it */
	if (reset_type == EP_HARD_RESET) {
		ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
		if (td && list_empty(&td->cancelled_td_list)) {
			list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
			td->cancel_status = TD_HALTED;
		}
	}

	if (ep->ep_state & EP_HALTED) {
		xhci_dbg(xhci, "Reset ep command for ep_index %d already pending\n",
			 ep->ep_index);
		return 0;
	}

	err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
	if (err)
		return err;

	ep->ep_state |= EP_HALTED;

	xhci_ring_cmd_db(xhci);

	return 0;
}

/*
 * Fix up the ep ring first, so HW stops executing cancelled TDs.
 * We have the xHCI lock, so nothing can modify this list until we drop it.
 * We're also in the event handler, so we can't get re-interrupted if another
 * Stop Endpoint command completes.
 *
 * only call this when ring is not in a running state
 */

static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_td *td = NULL;
	struct xhci_td *tmp_td = NULL;
	struct xhci_td *cached_td = NULL;
	struct xhci_ring *ring;
	u64 hw_deq;
	unsigned int slot_id = ep->vdev->slot_id;
	int err;

	xhci = ep->xhci;

	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Removing canceled TD starting at 0x%llx (dma) in stream %u URB %p",
			       (unsigned long long)xhci_trb_virt_to_dma(
				       td->start_seg, td->first_trb),
			       td->urb->stream_id, td->urb);
		list_del_init(&td->td_list);
		ring = xhci_urb_to_transfer_ring(xhci, td->urb);
		if (!ring) {
			xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
				  td->urb, td->urb->stream_id);
			continue;
		}
		/*
		 * If a ring stopped on the TD we need to cancel then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 * Rings halted due to STALL may show hw_deq is past the stalled
		 * TD, but still require a set TR Deq command to flush xHC cache.
		 */
		hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
					 td->urb->stream_id);
		hw_deq &= ~0xf;

		if (td->cancel_status == TD_HALTED ||
		    trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) {
			switch (td->cancel_status) {
			case TD_CLEARED: /* TD is already no-op */
			case TD_CLEARING_CACHE: /* set TR deq command already queued */
				break;
			case TD_DIRTY: /* TD is cached, clear it */
			case TD_HALTED:
				td->cancel_status = TD_CLEARING_CACHE;
				if (cached_td)
					/* FIXME stream case, several stopped rings */
					xhci_dbg(xhci,
						 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
						 td->urb->stream_id, td->urb,
						 cached_td->urb->stream_id, cached_td->urb);
				cached_td = td;
				break;
			}
		} else {
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}

	/* If there's no need to move the dequeue pointer then we're done */
	if (!cached_td)
		return 0;

	err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
					cached_td->urb->stream_id,
					cached_td);
	if (err) {
		/* Failed to move past cached td, just set cached TDs to no-op */
		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
			if (td->cancel_status != TD_CLEARING_CACHE)
				continue;
			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
				 td->urb);
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
	}
	return 0;
}

/*
 * Returns the TD the endpoint ring halted on.
 * Only call for non-running rings without streams.
 */
static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
{
	struct xhci_td *td;
	u64 hw_deq;

	if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
		hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
		hw_deq &= ~0xf;
		td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
		if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
			      td->last_trb, hw_deq, false))
			return td;
	}
	return NULL;
}

/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 * 1. If the HW was in the middle of processing the TD that needs to be
 *    cancelled, then we must move the ring's dequeue pointer past the last TRB
 *    in the TD with a Set Dequeue Pointer Command.
 * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *    bit cleared) so that the HW will skip over them.
 */
static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
				    union xhci_trb *trb, u32 comp_code)
{
	unsigned int ep_index;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_td *td = NULL;
	enum xhci_ep_reset_type reset_type;
	struct xhci_command *command;
	int err;

	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
		if (!xhci->devs[slot_id])
			xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
				  slot_id);
		return;
	}

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);

	trace_xhci_handle_cmd_stop_ep(ep_ctx);

	if (comp_code == COMP_CONTEXT_STATE_ERROR) {
		/*
		 * If the stop endpoint command raced with a halting endpoint,
		 * we need to reset the host side endpoint first.
		 * If the TD we halted on isn't cancelled, the TD should be
		 * given back with a proper error code, and the ring dequeue
		 * moved past the TD.
		 * In the streams case we can't find hw_deq, or the TD we halted
		 * on, so do a soft reset.
		 *
		 * The proper error code is unknown here: it would be -EPIPE if
		 * the device side of the endpoint halted (aka STALL), and
		 * -EPROTO if not (transaction error). We use -EPROTO; if the
		 * device is stalled it should return a stall error on the next
		 * transfer, which then will return -EPIPE, and the device side
		 * stall is noted and cleared by the class driver.
		 */
		switch (GET_EP_CTX_STATE(ep_ctx)) {
		case EP_STATE_HALTED:
			xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
			if (ep->ep_state & EP_HAS_STREAMS) {
				reset_type = EP_SOFT_RESET;
			} else {
				reset_type = EP_HARD_RESET;
				td = find_halted_td(ep);
				if (td)
					td->status = -EPROTO;
			}
			/* reset ep, reset handler cleans up cancelled tds */
			err = xhci_handle_halted_endpoint(xhci, ep, td, reset_type);
			if (err)
				break;
			ep->ep_state &= ~EP_STOP_CMD_PENDING;
			return;
		case EP_STATE_RUNNING:
			/* Race, HW handled stop ep cmd before ep was running */
			xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");

			command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
			if (!command) {
				ep->ep_state &= ~EP_STOP_CMD_PENDING;
				return;
			}
			xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
			xhci_ring_cmd_db(xhci);

			return;
		default:
			break;
		}
	}

	/* will queue a set TR deq if stopped on a cancelled, uncleared TD */
	xhci_invalidate_cancelled_tds(ep);
	ep->ep_state &= ~EP_STOP_CMD_PENDING;

	/* Otherwise ring the doorbell(s) to restart queued transfers */
	xhci_giveback_invalidated_tds(ep);
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;

	list_for_each_entry_safe(cur_td, tmp, &ring->td_list, td_list) {
		list_del_init(&cur_td->td_list);

		if (!list_empty(&cur_td->cancelled_td_list))
			list_del_init(&cur_td->cancelled_td_list);

		xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);

		inc_td_cnt(cur_td->urb);
		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
				    int slot_id, int ep_index)
{
	struct xhci_td *cur_td;
	struct xhci_td *tmp;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ring;

	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	if ((ep->ep_state & EP_HAS_STREAMS) ||
	    (ep->ep_state & EP_GETTING_NO_STREAMS)) {
		int stream_id;

		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
		     stream_id++) {
			ring = ep->stream_info->stream_rings[stream_id];
			if (!ring)
				continue;

			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				       "Killing URBs for slot ID %u, ep index %u, stream %u",
				       slot_id, ep_index, stream_id);
			xhci_kill_ring_urbs(xhci, ring);
		}
	} else {
		ring = ep->ring;
		if (!ring)
			return;
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Killing URBs for slot ID %u, ep index %u",
			       slot_id, ep_index);
		xhci_kill_ring_urbs(xhci, ring);
	}

	list_for_each_entry_safe(cur_td, tmp, &ep->cancelled_td_list,
				 cancelled_td_list) {
		list_del_init(&cur_td->cancelled_td_list);
		inc_td_cnt(cur_td->urb);

		if (last_td_in_urb(cur_td))
			xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
	}
}

/*
 * Host controller died; register reads return 0xffffffff.
 * Complete pending commands, mark them ABORTED.
 * URBs need to be given back as usb core might be waiting with device locks
 * held for the URBs to finish during device disconnect, blocking host remove.
 *
 * Call with xhci->lock held.
 * Lock is released and re-acquired while giving back urb.
 */
void xhci_hc_died(struct xhci_hcd *xhci)
{
	int i, j;

	if (xhci->xhc_state & XHCI_STATE_DYING)
		return;

	xhci_err(xhci, "xHCI host controller not responding, assume dead\n");
	xhci->xhc_state |= XHCI_STATE_DYING;

	xhci_cleanup_command_queue(xhci);

	/* return any pending urbs, remove may be waiting for them */
	for (i = 0; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++)
			xhci_kill_endpoint_urbs(xhci, i, j);
	}

	/* inform usb core hc died if PCI remove isn't already handling it */
	if (!(xhci->xhc_state & XHCI_STATE_REMOVING))
		usb_hc_died(xhci_to_hcd(xhci));
}

static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_virt_device *dev,
		struct xhci_ring *ep_ring,
		unsigned int ep_index)
{
	union xhci_trb *dequeue_temp;
	int num_trbs_free_temp;
	bool revert = false;

	num_trbs_free_temp = ep_ring->num_trbs_free;
	dequeue_temp = ep_ring->dequeue;

	/* If we get two back-to-back stalls, and the first stalled transfer
	 * ends just before a link TRB, the dequeue pointer will be left on
	 * the link TRB by the code in the while loop. So we have to update
	 * the dequeue pointer one segment further, or we'll jump off
	 * the segment into la-la-land.
	 */
	if (trb_is_link(ep_ring->dequeue)) {
		ep_ring->deq_seg = ep_ring->deq_seg->next;
		ep_ring->dequeue = ep_ring->deq_seg->trbs;
	}

	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
		/* We have more usable TRBs */
		ep_ring->num_trbs_free++;
		ep_ring->dequeue++;
		if (trb_is_link(ep_ring->dequeue)) {
			if (ep_ring->dequeue ==
			    dev->eps[ep_index].queued_deq_ptr)
				break;
			ep_ring->deq_seg = ep_ring->deq_seg->next;
			ep_ring->dequeue = ep_ring->deq_seg->trbs;
		}
		if (ep_ring->dequeue == dequeue_temp) {
			revert = true;
			break;
		}
	}

	if (revert) {
		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
		ep_ring->num_trbs_free = num_trbs_free_temp;
	}
}

/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again. We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
				    union xhci_trb *trb, u32 cmd_comp_code)
{
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_td *td, *tmp_td;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
			  stream_id);
		/* XXX: Harmless??? */
		goto cleanup;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
	trace_xhci_handle_cmd_set_deq(slot_ctx);
	trace_xhci_handle_cmd_set_deq_ep(ep_ctx);

	if (cmd_comp_code != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (cmd_comp_code) {
		case COMP_TRB_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because of stream ID configuration\n");
			break;
		case COMP_CONTEXT_STATE_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due to incorrect slot or ep state.\n");
			ep_state = GET_EP_CTX_STATE(ep_ctx);
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
				       "Slot state = %u, EP state = %u",
				       slot_state, ep_state);
			break;
		case COMP_SLOT_NOT_ENABLED_ERROR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because slot %u was not enabled.\n",
				  slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown completion code of %u.\n",
				  cmd_comp_code);
			break;
		}
		/* OK what do we do now? The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing, and endpoint state are correct. This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		u64 deq;
		/* 4.6.10 deq ptr is written to the stream ctx for streams */
		if (ep->ep_state & EP_HAS_STREAMS) {
			struct xhci_stream_ctx *ctx =
				&ep->stream_info->stream_ctx_array[stream_id];
			deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK;
		} else {
			deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK;
		}
		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
			       "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq);
		if (xhci_trb_virt_to_dma(ep->queued_deq_seg,
					 ep->queued_deq_ptr) == deq) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			update_ring_for_set_deq_completion(xhci, ep->vdev,
							   ep_ring, ep_index);
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
				  ep->queued_deq_seg, ep->queued_deq_ptr);
		}
	}
	/* HW cached TDs cleared from cache, give them back */
	list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
				 cancelled_td_list) {
		ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
		if (td->cancel_status == TD_CLEARING_CACHE) {
			td->cancel_status = TD_CLEARED;
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
		}
	}
cleanup:
	ep->ep_state &= ~SET_DEQ_PENDING;
	ep->queued_deq_seg = NULL;
	ep->queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
				     union xhci_trb *trb, u32 cmd_comp_code)
{
	struct xhci_virt_ep *ep;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
	if (!ep)
		return;

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
	trace_xhci_handle_cmd_reset_ep(ep_ctx);

	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
		       "Ignoring reset ep completion code of %u", cmd_comp_code);

	/* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
	xhci_invalidate_cancelled_tds(ep);

	/* Clear our internal halted state */
	ep->ep_state &= ~EP_HALTED;

	xhci_giveback_invalidated_tds(ep);

	/* if this was a soft reset, then restart */
	if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}

static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
					struct xhci_command *command, u32 cmd_comp_code)
{
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
	else
		command->slot_id = 0;
}

static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_slot_ctx *slot_ctx;

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	trace_xhci_handle_cmd_disable_slot(slot_ctx);

	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
		/* Delete default control endpoint resources */
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
}

static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
				      u32 cmd_comp_code)
{
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	unsigned int ep_index;
	u32 add_flags;

	/*
	 * Configure endpoint commands can come from the USB core configuration
	 * or alt setting changes, or when streams were being configured.
	 */

	virt_dev = xhci->devs[slot_id];
	if (!virt_dev)
		return;
	ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
	if (!ctrl_ctx) {
		xhci_warn(xhci, "Could not get input context, bad type.\n");
		return;
	}

	add_flags = le32_to_cpu(ctrl_ctx->add_flags);

	/* Input ctx add_flags are the endpoint index plus one */
	ep_index = xhci_last_valid_endpoint(add_flags) - 1;

	ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index);
	trace_xhci_handle_cmd_config_ep(ep_ctx);

	return;
}

static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	if (!vdev)
		return;
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_addr_dev(slot_ctx);
}

static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
	struct xhci_virt_device *vdev;
	struct xhci_slot_ctx *slot_ctx;

	vdev = xhci->devs[slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
			  slot_id);
		return;
	}
	slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
	trace_xhci_handle_cmd_reset_dev(slot_ctx);

	xhci_dbg(xhci, "Completed reset device command.\n");
}

static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
				       struct xhci_event_cmd *event)
{
	if (!(xhci->quirks & XHCI_NEC_HOST)) {
		xhci_warn(xhci, "WARN NEC_GET_FW command on non-NEC host\n");
		return;
	}
	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
		       "NEC firmware version %2x.%02x",
		       NEC_FW_MAJOR(le32_to_cpu(event->status)),
		       NEC_FW_MINOR(le32_to_cpu(event->status)));
}

static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
{
	list_del(&cmd->cmd_list);

	if (cmd->completion) {
		cmd->status = status;
		complete(cmd->completion);
	} else {
		kfree(cmd);
	}
}

void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
	struct xhci_command *cur_cmd, *tmp_cmd;

	xhci->current_cmd = NULL;
	list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
		xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}

void xhci_handle_command_timeout(struct work_struct *work)
{
	struct xhci_hcd *xhci;
	unsigned long flags;
	char str[XHCI_MSG_MAX];
	u64 hw_ring_state;
	u32 cmd_field3;
	u32 usbsts;

	xhci = container_of(to_delayed_work(work), struct xhci_hcd, cmd_timer);

	spin_lock_irqsave(&xhci->lock, flags);

	/*
	 * If timeout work is pending, or current_cmd is NULL, it means we
	 * raced with command completion. Command is handled so just return.
	 */
	if (!xhci->current_cmd || delayed_work_pending(&xhci->cmd_timer)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	cmd_field3 = le32_to_cpu(xhci->current_cmd->command_trb->generic.field[3]);
	usbsts = readl(&xhci->op_regs->status);
	xhci_dbg(xhci, "Command timeout, USBSTS:%s\n", xhci_decode_usbsts(str, usbsts));

	/* Bail out and tear down xhci if a stop endpoint command failed */
	if (TRB_FIELD_TO_TYPE(cmd_field3) == TRB_STOP_RING) {
		struct xhci_virt_ep *ep;

		xhci_warn(xhci, "xHCI host not responding to stop endpoint command\n");

		ep = xhci_get_virt_ep(xhci, TRB_TO_SLOT_ID(cmd_field3),
				      TRB_TO_EP_INDEX(cmd_field3));
		if (ep)
			ep->ep_state &= ~EP_STOP_CMD_PENDING;

		xhci_halt(xhci);
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	/* mark this command to be cancelled */
	xhci->current_cmd->status = COMP_COMMAND_ABORTED;

	/* Make sure command ring is running before aborting it */
	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	if (hw_ring_state == ~(u64)0) {
		xhci_hc_died(xhci);
		goto time_out_completed;
	}

	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
	    (hw_ring_state & CMD_RING_RUNNING)) {
		/* Prevent new doorbell, and start command abort */
		xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
		xhci_dbg(xhci, "Command timeout\n");
		xhci_abort_cmd_ring(xhci, flags);
		goto time_out_completed;
	}

	/* host removed. Bail out */
	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
		xhci_dbg(xhci, "host removed, ring start fail?\n");
		xhci_cleanup_command_queue(xhci);

		goto time_out_completed;
	}

	/* command timeout on stopped ring, ring can't be aborted */
	xhci_dbg(xhci, "Command timeout on stopped ring\n");
	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);

time_out_completed:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return;
}

static void handle_cmd_completion(struct xhci_hcd *xhci,
				  struct xhci_event_cmd *event)
{
	unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	u32 cmd_comp_code;
	union xhci_trb *cmd_trb;
	struct xhci_command *cmd;
	u32 cmd_type;

	if (slot_id >= MAX_HC_SLOTS) {
		xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
		return;
	}

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_trb = xhci->cmd_ring->dequeue;

	trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic);

	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
					       cmd_trb);
	/*
	 * Check whether the completion event is for our internal kept
	 * command.
	 */
	if (!cmd_dequeue_dma || cmd_dma != (u64)cmd_dequeue_dma) {
		xhci_warn(xhci,
			  "ERROR mismatched command completion event\n");
		return;
	}

	cmd = list_first_entry(&xhci->cmd_list, struct xhci_command, cmd_list);

	cancel_delayed_work(&xhci->cmd_timer);

	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));

	/* If CMD ring stopped we own the trbs between enqueue and dequeue */
	if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) {
		complete_all(&xhci->cmd_ring_stop_completion);
		return;
	}

	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
		xhci_err(xhci,
			 "Command completion event does not match command\n");
		return;
	}

	/*
	 * Host aborted the command ring, check if the current command was
	 * supposed to be aborted, otherwise continue normally.
	 * The command ring is stopped now, but the xHC will issue a Command
	 * Ring Stopped event which will cause us to restart it.
	 */
	if (cmd_comp_code == COMP_COMMAND_ABORTED) {
		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
		if (cmd->status == COMP_COMMAND_ABORTED) {
			if (xhci->current_cmd == cmd)
				xhci->current_cmd = NULL;
			goto event_handled;
		}
	}

	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
			xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
		break;
	case TRB_EVAL_CONTEXT:
		break;
	case TRB_ADDR_DEV:
		xhci_handle_cmd_addr_dev(xhci, slot_id);
		break;
	case TRB_STOP_RING:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		if (!cmd->completion)
			xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
						cmd_comp_code);
		break;
	case TRB_SET_DEQ:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_CMD_NOOP:
		/* Is this an aborted command turned to NO-OP? */
		if (cmd->status == COMP_COMMAND_RING_STOPPED)
			cmd_comp_code = COMP_COMMAND_RING_STOPPED;
		break;
	case TRB_RESET_EP:
		WARN_ON(slot_id != TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3])));
		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
		break;
	case TRB_RESET_DEV:
		/* SLOT_ID field in reset device cmd completion event TRB is 0.
		 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
		 */
		slot_id = TRB_TO_SLOT_ID(
				le32_to_cpu(cmd_trb->generic.field[3]));
		xhci_handle_cmd_reset_dev(xhci, slot_id);
		break;
	case TRB_NEC_GET_FW:
		xhci_handle_cmd_nec_get_fw(xhci, event);
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci_info(xhci, "INFO unknown command type %d\n", cmd_type);
		break;
	}

	/* restart timer if this wasn't the last command */
	if (!list_is_singular(&xhci->cmd_list)) {
		xhci->current_cmd = list_first_entry(&cmd->cmd_list,
						     struct xhci_command, cmd_list);
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	} else if (xhci->current_cmd == cmd) {
		xhci->current_cmd = NULL;
	}

event_handled:
	xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);

	inc_deq(xhci, xhci->cmd_ring);
}

static void handle_vendor_event(struct xhci_hcd *xhci,
				union xhci_trb *event, u32 trb_type)
{
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_device_notification(struct xhci_hcd *xhci,
				       union xhci_trb *event)
{
	u32 slot_id;
	struct usb_device *udev;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3]));
	if (!xhci->devs[slot_id]) {
		xhci_warn(xhci, "Device Notification event for unused slot %u\n",
			  slot_id);
		return;
	}

	xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
		 slot_id);
	udev = xhci->devs[slot_id]->udev;
	if (udev && udev->parent)
		usb_wakeup_notification(udev->parent, udev->portnum);
}

1806/*
1807 * Quirk hanlder for errata seen on Cavium ThunderX2 processor XHCI
1808 * Controller.
1809 * As per ThunderX2errata-129 USB 2 device may come up as USB 1
1810 * If a connection to a USB 1 device is followed by another connection
1811 * to a USB 2 device.
1812 *
1813 * Reset the PHY after the USB device is disconnected if device speed
1814 * is less than HCD_USB3.
1815 * Retry the reset sequence max of 4 times checking the PLL lock status.
1816 *
1817 */
1818static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
1819{
1820 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1821 u32 pll_lock_check;
1822 u32 retry_count = 4;
1823
1824 do {
1825 /* Assert PHY reset */
1826 writel(0x6F, hcd->regs + 0x1048);
1827 udelay(10);
1828 /* De-assert the PHY reset */
1829 writel(0x7F, hcd->regs + 0x1048);
1830 udelay(200);
1831 pll_lock_check = readl(hcd->regs + 0x1070);
1832 } while (!(pll_lock_check & 0x1) && --retry_count);
1833}
1834
1835static void handle_port_status(struct xhci_hcd *xhci,
1836 struct xhci_interrupter *ir,
1837 union xhci_trb *event)
1838{
1839 struct usb_hcd *hcd;
1840 u32 port_id;
1841 u32 portsc, cmd_reg;
1842 int max_ports;
1843 int slot_id;
1844 unsigned int hcd_portnum;
1845 struct xhci_bus_state *bus_state;
1846 bool bogus_port_status = false;
1847 struct xhci_port *port;
1848
1849 /* Port status change events always have a successful completion code */
1850 if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
1851 xhci_warn(xhci,
1852 "WARN: xHC returned failed port status event\n");
1853
1854 port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
1855 max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1856
1857 if ((port_id <= 0) || (port_id > max_ports)) {
1858 xhci_warn(xhci, "Port change event with invalid port ID %d\n",
1859 port_id);
1860 inc_deq(xhci, ir->event_ring);
1861 return;
1862 }
1863
1864 port = &xhci->hw_ports[port_id - 1];
1865 if (!port || !port->rhub || port->hcd_portnum == DUPLICATE_ENTRY) {
1866 xhci_warn(xhci, "Port change event, no port for port ID %u\n",
1867 port_id);
1868 bogus_port_status = true;
1869 goto cleanup;
1870 }
1871
1872 /* We might get interrupts after shared_hcd is removed */
1873 if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
1874 xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
1875 bogus_port_status = true;
1876 goto cleanup;
1877 }
1878
1879 hcd = port->rhub->hcd;
1880 bus_state = &port->rhub->bus_state;
1881 hcd_portnum = port->hcd_portnum;
1882 portsc = readl(port->addr);
1883
1884 xhci_dbg(xhci, "Port change event, %d-%d, id %d, portsc: 0x%x\n",
1885 hcd->self.busnum, hcd_portnum + 1, port_id, portsc);
1886
1887 trace_xhci_handle_port_status(hcd_portnum, portsc);
1888
1889 if (hcd->state == HC_STATE_SUSPENDED) {
1890 xhci_dbg(xhci, "resume root hub\n");
1891 usb_hcd_resume_root_hub(hcd);
1892 }
1893
1894 if (hcd->speed >= HCD_USB3 &&
1895 (portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
1896 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1897 if (slot_id && xhci->devs[slot_id])
1898 xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
1899 }
1900
1901 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
1902 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1903
1904 cmd_reg = readl(&xhci->op_regs->command);
1905 if (!(cmd_reg & CMD_RUN)) {
1906 xhci_warn(xhci, "xHC is not running.\n");
1907 goto cleanup;
1908 }
1909
1910 if (DEV_SUPERSPEED_ANY(portsc)) {
1911 xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
1912 /* Set a flag to say the port signaled remote wakeup,
1913 * so we can tell the difference between the end of a
1914 * device-initiated and a host-initiated resume.
1915 */
1916 bus_state->port_remote_wakeup |= 1 << hcd_portnum;
1917 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1918 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1919 xhci_set_link_state(xhci, port, XDEV_U0);
1920 /* Need to wait until the next link state change
1921 * indicates the device is actually in U0.
1922 */
1923 bogus_port_status = true;
1924 goto cleanup;
1925 } else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
1926 xhci_dbg(xhci, "resume HS port %d\n", port_id);
1927 port->resume_timestamp = jiffies +
1928 msecs_to_jiffies(USB_RESUME_TIMEOUT);
1929 set_bit(hcd_portnum, &bus_state->resuming_ports);
1930 /* Do the rest in GetPortStatus after the resume time delay.
1931 * Avoid polling the roothub status before that, so that a
1932 * USB device sees an auto-resume latency of about 40ms.
1933 */
1934 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1935 mod_timer(&hcd->rh_timer,
1936 port->resume_timestamp);
1937 usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
1938 bogus_port_status = true;
1939 }
1940 }
1941
1942 if ((portsc & PORT_PLC) &&
1943 DEV_SUPERSPEED_ANY(portsc) &&
1944 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1945 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1946 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1947 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1948 complete(&port->u3exit_done);
1949 /* We've just brought the device into U0/1/2 through either the
1950 * Resume state after a device remote wakeup, or through the
1951 * U3Exit state after a host-initiated resume. If it's a device
1952 * initiated remote wake, don't pass up the link state change,
1953 * so the roothub behavior is consistent with external
1954 * USB 3.0 hub behavior.
1955 */
1956 slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
1957 if (slot_id && xhci->devs[slot_id])
1958 xhci_ring_device(xhci, slot_id);
1959 if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
1960 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1961 usb_wakeup_notification(hcd->self.root_hub,
1962 hcd_portnum + 1);
1963 bogus_port_status = true;
1964 goto cleanup;
1965 }
1966 }
1967
1968 /*
1969 * Check to see if xhci-hub.c is waiting on RExit to U0 transition (or
1970 * RExit to a disconnect state). If so, let the driver know it's
1971 * out of the RExit state.
1972 */
1973 if (hcd->speed < HCD_USB3 && port->rexit_active) {
1974 complete(&port->rexit_done);
1975 port->rexit_active = false;
1976 bogus_port_status = true;
1977 goto cleanup;
1978 }
1979
1980 if (hcd->speed < HCD_USB3) {
1981 xhci_test_and_clear_bit(xhci, port, PORT_PLC);
1982 if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
1983 (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
1984 xhci_cavium_reset_phy_quirk(xhci);
1985 }
1986
1987cleanup:
1988 /* Update event ring dequeue pointer before dropping the lock */
1989 inc_deq(xhci, ir->event_ring);
1990
1991 /* Don't make the USB core poll the roothub if we got a bad port status
1992 * change event. Besides, at that point we can't tell which roothub
1993 * (USB 2.0 or USB 3.0) to kick.
1994 */
1995 if (bogus_port_status)
1996 return;
1997
1998 /*
1999 * xHCI port-status-change events occur when the "or" of all the
2000 * status-change bits in the portsc register changes from 0 to 1.
2001 * New status changes won't cause an event if any other change
2002 * bits are still set. When an event occurs, switch over to
2003 * polling to avoid losing status changes.
2004 */
2005 xhci_dbg(xhci, "%s: starting usb%d port polling.\n",
2006 __func__, hcd->self.busnum);
2007 set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
2008 spin_unlock(&xhci->lock);
2009 /* Pass this up to the core */
2010 usb_hcd_poll_rh_status(hcd);
2011 spin_lock(&xhci->lock);
2012}
2013
2014/*
2015 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
2016 * at end_trb, which may be in another segment. If the suspect DMA address is a
2017 * TRB in this TD, this function returns that TRB's segment. Otherwise it
2018 * returns NULL.
2019 */
2020struct xhci_segment *trb_in_td(struct xhci_hcd *xhci,
2021 struct xhci_segment *start_seg,
2022 union xhci_trb *start_trb,
2023 union xhci_trb *end_trb,
2024 dma_addr_t suspect_dma,
2025 bool debug)
2026{
2027 dma_addr_t start_dma;
2028 dma_addr_t end_seg_dma;
2029 dma_addr_t end_trb_dma;
2030 struct xhci_segment *cur_seg;
2031
2032 start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
2033 cur_seg = start_seg;
2034
2035 do {
2036 if (start_dma == 0)
2037 return NULL;
2038 /* We may get an event for a Link TRB in the middle of a TD */
2039 end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
2040 &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
2041 /* If the end TRB isn't in this segment, this is set to 0 */
2042 end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
2043
2044 if (debug)
2045 xhci_warn(xhci,
2046 "Looking for event-dma %016llx trb-start %016llx trb-end %016llx seg-start %016llx seg-end %016llx\n",
2047 (unsigned long long)suspect_dma,
2048 (unsigned long long)start_dma,
2049 (unsigned long long)end_trb_dma,
2050 (unsigned long long)cur_seg->dma,
2051 (unsigned long long)end_seg_dma);
2052
2053 if (end_trb_dma > 0) {
2054 /* The end TRB is in this segment, so suspect should be here */
2055 if (start_dma <= end_trb_dma) {
2056 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
2057 return cur_seg;
2058 } else {
2059 /* Case for one segment with
2060 * a TD wrapped around to the top
2061 */
2062 if ((suspect_dma >= start_dma &&
2063 suspect_dma <= end_seg_dma) ||
2064 (suspect_dma >= cur_seg->dma &&
2065 suspect_dma <= end_trb_dma))
2066 return cur_seg;
2067 }
2068 return NULL;
2069 } else {
2070 /* Might still be somewhere in this segment */
2071 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
2072 return cur_seg;
2073 }
2074 cur_seg = cur_seg->next;
2075 start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
2076 } while (cur_seg != start_seg);
2077
2078 return NULL;
2079}
2080
2081static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
2082 struct xhci_virt_ep *ep)
2083{
2084 /*
2085 * As part of low/full-speed endpoint-halt processing
2086 * we must clear the TT buffer (USB 2.0 specification 11.17.5).
2087 */
2088 if (td->urb->dev->tt && !usb_pipeint(td->urb->pipe) &&
2089 (td->urb->dev->tt->hub != xhci_to_hcd(xhci)->self.root_hub) &&
2090 !(ep->ep_state & EP_CLEARING_TT)) {
2091 ep->ep_state |= EP_CLEARING_TT;
2092 td->urb->ep->hcpriv = td->urb->dev;
2093 if (usb_hub_clear_tt_buffer(td->urb))
2094 ep->ep_state &= ~EP_CLEARING_TT;
2095 }
2096}
2097
2098 /* Check if an error has halted the endpoint ring. The class driver will
2099 * clean up the halt for a non-default control endpoint if we indicate a stall.
2100 * However, babble and other errors also halt the endpoint ring, and the class
2101 * driver won't clear the halt in that case, so we need to issue a Set Transfer
2102 * Ring Dequeue Pointer command manually.
2103 */
2104static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
2105 struct xhci_ep_ctx *ep_ctx,
2106 unsigned int trb_comp_code)
2107{
2108 /* TRB completion codes that may require a manual halt cleanup */
2109 if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
2110 trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
2111 trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
2112 /* The 0.95 spec says a babbling control endpoint
2113 * is not halted. The 0.96 spec says it is. Some HW
2114 * claims to be 0.95 compliant, but it halts the control
2115 * endpoint anyway. Check if a babble halted the
2116 * endpoint.
2117 */
2118 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
2119 return 1;
2120
2121 return 0;
2122}
2123
2124int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
2125{
2126 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
2127 /* Vendor defined "informational" completion code,
2128 * treat as not-an-error.
2129 */
2130 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
2131 trb_comp_code);
2132 xhci_dbg(xhci, "Treating code as success.\n");
2133 return 1;
2134 }
2135 return 0;
2136}
2137
2138static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2139 struct xhci_ring *ep_ring, struct xhci_td *td,
2140 u32 trb_comp_code)
2141{
2142 struct xhci_ep_ctx *ep_ctx;
2143
2144 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2145
2146 switch (trb_comp_code) {
2147 case COMP_STOPPED_LENGTH_INVALID:
2148 case COMP_STOPPED_SHORT_PACKET:
2149 case COMP_STOPPED:
2150 /*
2151 * The "Stop Endpoint" completion will take care of any
2152 * stopped TDs. A stopped TD may be restarted, so don't update
2153 * the ring dequeue pointer or take this TD off any lists yet.
2154 */
2155 return 0;
2156 case COMP_USB_TRANSACTION_ERROR:
2157 case COMP_BABBLE_DETECTED_ERROR:
2158 case COMP_SPLIT_TRANSACTION_ERROR:
2159 /*
2160 * If the endpoint context state is not halted, we might be
2161 * racing with a reset endpoint command issued by an unsuccessful
2162 * stop endpoint completion (context error). In that case the
2163 * td should be on the cancelled list, and the EP_HALTED flag set.
2164 *
2165 * Alternatively, the endpoint is not halted because the 0.95 spec
2166 * states that a babbling control endpoint should not halt, while
2167 * the 0.96 spec says it should. Some HW claims to be 0.95
2168 * compliant, but halts the control endpoint anyway.
2169 */
2170 if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
2171 /*
2172 * If EP_HALTED is set and TD is on the cancelled list
2173 * the TD and dequeue pointer will be handled by reset
2174 * ep command completion
2175 */
2176 if ((ep->ep_state & EP_HALTED) &&
2177 !list_empty(&td->cancelled_td_list)) {
2178 xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
2179 (unsigned long long)xhci_trb_virt_to_dma(
2180 td->start_seg, td->first_trb));
2181 return 0;
2182 }
2183 /* endpoint not halted, don't reset it */
2184 break;
2185 }
2186 /* Almost same procedure as for STALL_ERROR below */
2187 xhci_clear_hub_tt_buffer(xhci, td, ep);
2188 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2189 return 0;
2190 case COMP_STALL_ERROR:
2191 /*
2192 * xhci internal endpoint state will go to a "halt" state for
2193 * any stall, including default control pipe protocol stall.
2194 * To clear the host side halt we need to issue a reset endpoint
2195 * command, followed by a set dequeue command to move past the
2196 * TD.
2197 * Class drivers clear the device side halt from a functional
2198 * stall later. Hub TT buffer should only be cleared for FS/LS
2199 * devices behind HS hubs for functional stalls.
2200 */
2201 if (ep->ep_index != 0)
2202 xhci_clear_hub_tt_buffer(xhci, td, ep);
2203
2204 xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
2205
2206 return 0; /* xhci_handle_halted_endpoint marked td cancelled */
2207 default:
2208 break;
2209 }
2210
2211 /* Update ring dequeue pointer */
2212 ep_ring->dequeue = td->last_trb;
2213 ep_ring->deq_seg = td->last_trb_seg;
2214 ep_ring->num_trbs_free += td->num_trbs - 1;
2215 inc_deq(xhci, ep_ring);
2216
2217 return xhci_td_cleanup(xhci, td, ep_ring, td->status);
2218}
2219
2220/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
2221static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
2222 union xhci_trb *stop_trb)
2223{
2224 u32 sum;
2225 union xhci_trb *trb = ring->dequeue;
2226 struct xhci_segment *seg = ring->deq_seg;
2227
2228 for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) {
2229 if (!trb_is_noop(trb) && !trb_is_link(trb))
2230 sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
2231 }
2232 return sum;
2233}
2234
2235/*
2236 * Process control tds, update urb status and actual_length.
2237 */
2238static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2239 struct xhci_ring *ep_ring, struct xhci_td *td,
2240 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2241{
2242 struct xhci_ep_ctx *ep_ctx;
2243 u32 trb_comp_code;
2244 u32 remaining, requested;
2245 u32 trb_type;
2246
2247 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
2248 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
2249 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2250 requested = td->urb->transfer_buffer_length;
2251 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2252
2253 switch (trb_comp_code) {
2254 case COMP_SUCCESS:
2255 if (trb_type != TRB_STATUS) {
2256 xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
2257 (trb_type == TRB_DATA) ? "data" : "setup");
2258 td->status = -ESHUTDOWN;
2259 break;
2260 }
2261 td->status = 0;
2262 break;
2263 case COMP_SHORT_PACKET:
2264 td->status = 0;
2265 break;
2266 case COMP_STOPPED_SHORT_PACKET:
2267 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2268 td->urb->actual_length = remaining;
2269 else
2270 xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
2271 goto finish_td;
2272 case COMP_STOPPED:
2273 switch (trb_type) {
2274 case TRB_SETUP:
2275 td->urb->actual_length = 0;
2276 goto finish_td;
2277 case TRB_DATA:
2278 case TRB_NORMAL:
2279 td->urb->actual_length = requested - remaining;
2280 goto finish_td;
2281 case TRB_STATUS:
2282 td->urb->actual_length = requested;
2283 goto finish_td;
2284 default:
2285 xhci_warn(xhci, "WARN: unexpected TRB Type %d\n",
2286 trb_type);
2287 goto finish_td;
2288 }
2289 case COMP_STOPPED_LENGTH_INVALID:
2290 goto finish_td;
2291 default:
2292 if (!xhci_requires_manual_halt_cleanup(xhci,
2293 ep_ctx, trb_comp_code))
2294 break;
2295 xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
2296 trb_comp_code, ep->ep_index);
2297 fallthrough;
2298 case COMP_STALL_ERROR:
2299 /* Did we transfer part of the data (middle) phase? */
2300 if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
2301 td->urb->actual_length = requested - remaining;
2302 else if (!td->urb_length_set)
2303 td->urb->actual_length = 0;
2304 goto finish_td;
2305 }
2306
2307 /* stopped at setup stage, no data transferred */
2308 if (trb_type == TRB_SETUP)
2309 goto finish_td;
2310
2311 /*
2312 * If on the data stage, update the actual_length of the URB and flag it
2313 * as set, so it won't be overwritten in the event for the last TRB.
2314 */
2315 if (trb_type == TRB_DATA ||
2316 trb_type == TRB_NORMAL) {
2317 td->urb_length_set = true;
2318 td->urb->actual_length = requested - remaining;
2319 xhci_dbg(xhci, "Waiting for status stage event\n");
2320 return 0;
2321 }
2322
2323 /* at status stage */
2324 if (!td->urb_length_set)
2325 td->urb->actual_length = requested;
2326
2327finish_td:
2328 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2329}
2330
2331/*
2332 * Process isochronous tds, update urb packet status and actual_length.
2333 */
2334static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2335 struct xhci_ring *ep_ring, struct xhci_td *td,
2336 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2337{
2338 struct urb_priv *urb_priv;
2339 int idx;
2340 struct usb_iso_packet_descriptor *frame;
2341 u32 trb_comp_code;
2342 bool sum_trbs_for_length = false;
2343 u32 remaining, requested, ep_trb_len;
2344 int short_framestatus;
2345
2346 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2347 urb_priv = td->urb->hcpriv;
2348 idx = urb_priv->num_tds_done;
2349 frame = &td->urb->iso_frame_desc[idx];
2350 requested = frame->length;
2351 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2352 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2353 short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
2354 -EREMOTEIO : 0;
2355
2356 /* handle completion code */
2357 switch (trb_comp_code) {
2358 case COMP_SUCCESS:
2359 if (remaining) {
2360 frame->status = short_framestatus;
2361 if (xhci->quirks & XHCI_TRUST_TX_LENGTH)
2362 sum_trbs_for_length = true;
2363 break;
2364 }
2365 frame->status = 0;
2366 break;
2367 case COMP_SHORT_PACKET:
2368 frame->status = short_framestatus;
2369 sum_trbs_for_length = true;
2370 break;
2371 case COMP_BANDWIDTH_OVERRUN_ERROR:
2372 frame->status = -ECOMM;
2373 break;
2374 case COMP_ISOCH_BUFFER_OVERRUN:
2375 case COMP_BABBLE_DETECTED_ERROR:
2376 frame->status = -EOVERFLOW;
2377 break;
2378 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2379 case COMP_STALL_ERROR:
2380 frame->status = -EPROTO;
2381 break;
2382 case COMP_USB_TRANSACTION_ERROR:
2383 frame->status = -EPROTO;
2384 if (ep_trb != td->last_trb)
2385 return 0;
2386 break;
2387 case COMP_STOPPED:
2388 sum_trbs_for_length = true;
2389 break;
2390 case COMP_STOPPED_SHORT_PACKET:
2391 /* field normally containing residue now contains transferred */
2392 frame->status = short_framestatus;
2393 requested = remaining;
2394 break;
2395 case COMP_STOPPED_LENGTH_INVALID:
2396 requested = 0;
2397 remaining = 0;
2398 break;
2399 default:
2400 sum_trbs_for_length = true;
2401 frame->status = -1;
2402 break;
2403 }
2404
2405 if (sum_trbs_for_length)
2406 frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
2407 ep_trb_len - remaining;
2408 else
2409 frame->actual_length = requested;
2410
2411 td->urb->actual_length += frame->actual_length;
2412
2413 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2414}
2415
2416static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
2417 struct xhci_virt_ep *ep, int status)
2418{
2419 struct urb_priv *urb_priv;
2420 struct usb_iso_packet_descriptor *frame;
2421 int idx;
2422
2423 urb_priv = td->urb->hcpriv;
2424 idx = urb_priv->num_tds_done;
2425 frame = &td->urb->iso_frame_desc[idx];
2426
2427 /* The transfer is partly done. */
2428 frame->status = -EXDEV;
2429
2430 /* calc actual length */
2431 frame->actual_length = 0;
2432
2433 /* Update ring dequeue pointer */
2434 ep->ring->dequeue = td->last_trb;
2435 ep->ring->deq_seg = td->last_trb_seg;
2436 ep->ring->num_trbs_free += td->num_trbs - 1;
2437 inc_deq(xhci, ep->ring);
2438
2439 return xhci_td_cleanup(xhci, td, ep->ring, status);
2440}
2441
2442/*
2443 * Process bulk and interrupt tds, update urb status and actual_length.
2444 */
2445static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
2446 struct xhci_ring *ep_ring, struct xhci_td *td,
2447 union xhci_trb *ep_trb, struct xhci_transfer_event *event)
2448{
2449 struct xhci_slot_ctx *slot_ctx;
2450 u32 trb_comp_code;
2451 u32 remaining, requested, ep_trb_len;
2452
2453 slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
2454 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2455 remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
2456 ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
2457 requested = td->urb->transfer_buffer_length;
2458
2459 switch (trb_comp_code) {
2460 case COMP_SUCCESS:
2461 ep->err_count = 0;
2462 /* handle success with untransferred data as short packet */
2463 if (ep_trb != td->last_trb || remaining) {
2464 xhci_warn(xhci, "WARN Successful completion on short TX\n");
2465 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2466 td->urb->ep->desc.bEndpointAddress,
2467 requested, remaining);
2468 }
2469 td->status = 0;
2470 break;
2471 case COMP_SHORT_PACKET:
2472 xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
2473 td->urb->ep->desc.bEndpointAddress,
2474 requested, remaining);
2475 td->status = 0;
2476 break;
2477 case COMP_STOPPED_SHORT_PACKET:
2478 td->urb->actual_length = remaining;
2479 goto finish_td;
2480 case COMP_STOPPED_LENGTH_INVALID:
2481 /* stopped on ep trb with invalid length, exclude it */
2482 ep_trb_len = 0;
2483 remaining = 0;
2484 break;
2485 case COMP_USB_TRANSACTION_ERROR:
2486 if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
2487 (ep->err_count++ > MAX_SOFT_RETRY) ||
2488 le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
2489 break;
2490
2491 td->status = 0;
2492
2493 xhci_handle_halted_endpoint(xhci, ep, td, EP_SOFT_RESET);
2494 return 0;
2495 default:
2496 /* do nothing */
2497 break;
2498 }
2499
2500 if (ep_trb == td->last_trb)
2501 td->urb->actual_length = requested - remaining;
2502 else
2503 td->urb->actual_length =
2504 sum_trb_lengths(xhci, ep_ring, ep_trb) +
2505 ep_trb_len - remaining;
2506finish_td:
2507 if (remaining > requested) {
2508 xhci_warn(xhci, "bad transfer trb length %d in event trb\n",
2509 remaining);
2510 td->urb->actual_length = 0;
2511 }
2512
2513 return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
2514}
2515
2516/*
2517 * If this function returns an error condition, it means it got a Transfer
2518 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
2519 * At this point, the host controller is probably hosed and should be reset.
2520 */
2521static int handle_tx_event(struct xhci_hcd *xhci,
2522 struct xhci_interrupter *ir,
2523 struct xhci_transfer_event *event)
2524{
2525 struct xhci_virt_ep *ep;
2526 struct xhci_ring *ep_ring;
2527 unsigned int slot_id;
2528 int ep_index;
2529 struct xhci_td *td = NULL;
2530 dma_addr_t ep_trb_dma;
2531 struct xhci_segment *ep_seg;
2532 union xhci_trb *ep_trb;
2533 int status = -EINPROGRESS;
2534 struct xhci_ep_ctx *ep_ctx;
2535 u32 trb_comp_code;
2536 int td_num = 0;
2537 bool handling_skipped_tds = false;
2538
2539 slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
2540 ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
2541 trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
2542 ep_trb_dma = le64_to_cpu(event->buffer);
2543
2544 ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
2545 if (!ep) {
2546 xhci_err(xhci, "ERROR Invalid Transfer event\n");
2547 goto err_out;
2548 }
2549
2550 ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
2551 ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
2552
2553 if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
2554 xhci_err(xhci,
2555 "ERROR Transfer event for disabled endpoint slot %u ep %u\n",
2556 slot_id, ep_index);
2557 goto err_out;
2558 }
2559
2560 /* Some transfer events don't always point to a trb, see xhci 4.17.4 */
2561 if (!ep_ring) {
2562 switch (trb_comp_code) {
2563 case COMP_STALL_ERROR:
2564 case COMP_USB_TRANSACTION_ERROR:
2565 case COMP_INVALID_STREAM_TYPE_ERROR:
2566 case COMP_INVALID_STREAM_ID_ERROR:
2567 xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
2568 ep_index);
2569 if (ep->err_count++ > MAX_SOFT_RETRY)
2570 xhci_handle_halted_endpoint(xhci, ep, NULL,
2571 EP_HARD_RESET);
2572 else
2573 xhci_handle_halted_endpoint(xhci, ep, NULL,
2574 EP_SOFT_RESET);
2575 goto cleanup;
2576 case COMP_RING_UNDERRUN:
2577 case COMP_RING_OVERRUN:
2578 case COMP_STOPPED_LENGTH_INVALID:
2579 goto cleanup;
2580 default:
2581 xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
2582 slot_id, ep_index);
2583 goto err_out;
2584 }
2585 }
2586
2587 /* Count the current number of TDs if ep->skip is set */
2588 if (ep->skip)
2589 td_num += list_count_nodes(&ep_ring->td_list);
2590
2591 /* Look for common error cases */
2592 switch (trb_comp_code) {
2593 /* Skip codes that require special handling depending on
2594 * transfer type
2595 */
2596 case COMP_SUCCESS:
2597 if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0)
2598 break;
2599 if (xhci->quirks & XHCI_TRUST_TX_LENGTH ||
2600 ep_ring->last_td_was_short)
2601 trb_comp_code = COMP_SHORT_PACKET;
2602 else
2603 xhci_warn_ratelimited(xhci,
2604 "WARN Successful completion on short TX for slot %u ep %u: needs XHCI_TRUST_TX_LENGTH quirk?\n",
2605 slot_id, ep_index);
2606 break;
2607 case COMP_SHORT_PACKET:
2608 break;
2609 /* Completion codes for endpoint stopped state */
2610 case COMP_STOPPED:
2611 xhci_dbg(xhci, "Stopped on Transfer TRB for slot %u ep %u\n",
2612 slot_id, ep_index);
2613 break;
2614 case COMP_STOPPED_LENGTH_INVALID:
2615 xhci_dbg(xhci,
2616 "Stopped on No-op or Link TRB for slot %u ep %u\n",
2617 slot_id, ep_index);
2618 break;
2619 case COMP_STOPPED_SHORT_PACKET:
2620 xhci_dbg(xhci,
2621 "Stopped with short packet transfer detected for slot %u ep %u\n",
2622 slot_id, ep_index);
2623 break;
2624 /* Completion codes for endpoint halted state */
2625 case COMP_STALL_ERROR:
2626 xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
2627 ep_index);
2628 status = -EPIPE;
2629 break;
2630 case COMP_SPLIT_TRANSACTION_ERROR:
2631 xhci_dbg(xhci, "Split transaction error for slot %u ep %u\n",
2632 slot_id, ep_index);
2633 status = -EPROTO;
2634 break;
2635 case COMP_USB_TRANSACTION_ERROR:
2636 xhci_dbg(xhci, "Transfer error for slot %u ep %u on endpoint\n",
2637 slot_id, ep_index);
2638 status = -EPROTO;
2639 break;
2640 case COMP_BABBLE_DETECTED_ERROR:
2641 xhci_dbg(xhci, "Babble error for slot %u ep %u on endpoint\n",
2642 slot_id, ep_index);
2643 status = -EOVERFLOW;
2644 break;
2645 /* Completion codes for endpoint error state */
2646 case COMP_TRB_ERROR:
2647 xhci_warn(xhci,
2648 "WARN: TRB error for slot %u ep %u on endpoint\n",
2649 slot_id, ep_index);
2650 status = -EILSEQ;
2651 break;
2652 /* completion codes not indicating endpoint state change */
2653 case COMP_DATA_BUFFER_ERROR:
2654 xhci_warn(xhci,
2655 "WARN: HC couldn't access mem fast enough for slot %u ep %u\n",
2656 slot_id, ep_index);
2657 status = -ENOSR;
2658 break;
2659 case COMP_BANDWIDTH_OVERRUN_ERROR:
2660 xhci_warn(xhci,
2661 "WARN: bandwidth overrun event for slot %u ep %u on endpoint\n",
2662 slot_id, ep_index);
2663 break;
2664 case COMP_ISOCH_BUFFER_OVERRUN:
2665 xhci_warn(xhci,
2666 "WARN: buffer overrun event for slot %u ep %u on endpoint\n",
2667 slot_id, ep_index);
2668 break;
2669 case COMP_RING_UNDERRUN:
2670 /*
2671 * When the Isoch ring is empty, the xHC will generate
2672 * a Ring Overrun Event for an IN Isoch endpoint or a Ring
2673 * Underrun Event for an OUT Isoch endpoint.
2674 */
2675 xhci_dbg(xhci, "underrun event on endpoint\n");
2676 if (!list_empty(&ep_ring->td_list))
2677 xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
2678 "still with TDs queued?\n",
2679 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2680 ep_index);
2681 goto cleanup;
2682 case COMP_RING_OVERRUN:
2683 xhci_dbg(xhci, "overrun event on endpoint\n");
2684 if (!list_empty(&ep_ring->td_list))
2685 xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
2686 "still with TDs queued?\n",
2687 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2688 ep_index);
2689 goto cleanup;
2690 case COMP_MISSED_SERVICE_ERROR:
2691 /*
2692 * When a missed service error is encountered, one or more isoc
2693 * TDs may have been missed by the xHC.
2694 * Set the skip flag of the endpoint; complete the missed TDs as
2695 * short transfers the next time the ep_ring is processed.
2696 */
2697 ep->skip = true;
2698 xhci_dbg(xhci,
2699 "Miss service interval error for slot %u ep %u, set skip flag\n",
2700 slot_id, ep_index);
2701 goto cleanup;
2702 case COMP_NO_PING_RESPONSE_ERROR:
2703 ep->skip = true;
2704 xhci_dbg(xhci,
2705 "No Ping response error for slot %u ep %u, Skip one Isoc TD\n",
2706 slot_id, ep_index);
2707 goto cleanup;
2708
2709 case COMP_INCOMPATIBLE_DEVICE_ERROR:
2710 /* needs disable slot command to recover */
2711 xhci_warn(xhci,
2712 "WARN: detected an incompatible device for slot %u ep %u\n",
2713 slot_id, ep_index);
2714 status = -EPROTO;
2715 break;
2716 default:
2717 if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
2718 status = 0;
2719 break;
2720 }
2721 xhci_warn(xhci,
2722 "ERROR Unknown event condition %u for slot %u ep %u, HC probably busted\n",
2723 trb_comp_code, slot_id, ep_index);
2724 goto cleanup;
2725 }
2726
2727 do {
2728 /* This TRB should be in the TD at the head of this ring's
2729 * TD list.
2730 */
2731 if (list_empty(&ep_ring->td_list)) {
2732 /*
2733 * Don't print warnings if this is due to a stopped endpoint
2734 * generating an extra completion event while the device
2735 * was suspended, or an event for the last TRB of a
2736 * short TD we already got a short event for.
2737 * The short TD has already been removed from the TD list.
2738 */
2739
2740 if (!(trb_comp_code == COMP_STOPPED ||
2741 trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
2742 ep_ring->last_td_was_short)) {
2743 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2744 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2745 ep_index);
2746 }
2747 if (ep->skip) {
2748 ep->skip = false;
2749 xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
2750 slot_id, ep_index);
2751 }
2752 if (trb_comp_code == COMP_STALL_ERROR ||
2753 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2754 trb_comp_code)) {
2755 xhci_handle_halted_endpoint(xhci, ep, NULL,
2756 EP_HARD_RESET);
2757 }
2758 goto cleanup;
2759 }
2760
2761 /* We've skipped all the TDs on the ep ring when ep->skip is set */
2762 if (ep->skip && td_num == 0) {
2763 ep->skip = false;
2764 xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
2765 slot_id, ep_index);
2766 goto cleanup;
2767 }
2768
2769 td = list_first_entry(&ep_ring->td_list, struct xhci_td,
2770 td_list);
2771 if (ep->skip)
2772 td_num--;
2773
2774 /* Is this a TRB in the currently executing TD? */
2775 ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
2776 td->last_trb, ep_trb_dma, false);
2777
2778 /*
2779 * Skip the Force Stopped Event. The event_trb (event_dma) of an
2780 * FSE is not in the current TD pointed to by ep_ring->dequeue,
2781 * because the hardware dequeue pointer is still at the TRB
2782 * preceding the current TD. That TRB may be a Link TRB or the
2783 * last TRB of the previous TD. The command completion handler
2784 * will take care of the rest.
2785 */
2786 if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
2787 trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
2788 goto cleanup;
2789 }
2790
2791 if (!ep_seg) {
2792 if (!ep->skip ||
2793 !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
2794 /* Some host controllers give a spurious
2795 * successful event after a short transfer.
2796 * Ignore it.
2797 */
2798 if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
2799 ep_ring->last_td_was_short) {
2800 ep_ring->last_td_was_short = false;
2801 goto cleanup;
2802 }
2803 /* HC is busted, give up! */
2804 xhci_err(xhci,
2805 "ERROR Transfer event TRB DMA ptr not "
2806 "part of current TD ep_index %d "
2807 "comp_code %u\n", ep_index,
2808 trb_comp_code);
2809 trb_in_td(xhci, ep_ring->deq_seg,
2810 ep_ring->dequeue, td->last_trb,
2811 ep_trb_dma, true);
2812 return -ESHUTDOWN;
2813 }
2814
2815 skip_isoc_td(xhci, td, ep, status);
2816 goto cleanup;
2817 }
2818 if (trb_comp_code == COMP_SHORT_PACKET)
2819 ep_ring->last_td_was_short = true;
2820 else
2821 ep_ring->last_td_was_short = false;
2822
2823 if (ep->skip) {
2824 xhci_dbg(xhci,
2825 "Found td. Clear skip flag for slot %u ep %u.\n",
2826 slot_id, ep_index);
2827 ep->skip = false;
2828 }
2829
2830 ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
2831 sizeof(*ep_trb)];
2832
2833 trace_xhci_handle_transfer(ep_ring,
2834 (struct xhci_generic_trb *) ep_trb);
2835
2836 /*
2837 * A No-op TRB could trigger an interrupt in a case where
2838 * a URB was killed and a STALL_ERROR happens right
2839 * after the endpoint ring stopped. Reset the halted
2840 * endpoint. Otherwise, the endpoint remains stalled
2841 * indefinitely.
2842 */
2843
2844 if (trb_is_noop(ep_trb)) {
2845 if (trb_comp_code == COMP_STALL_ERROR ||
2846 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
2847 trb_comp_code))
2848 xhci_handle_halted_endpoint(xhci, ep, td,
2849 EP_HARD_RESET);
2850 goto cleanup;
2851 }
2852
2853 td->status = status;
2854
2855 /* update the urb's actual_length and give back to the core */
2856 if (usb_endpoint_xfer_control(&td->urb->ep->desc))
2857 process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
2858 else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
2859 process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
2860 else
2861 process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
2862cleanup:
2863 handling_skipped_tds = ep->skip &&
2864 trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
2865 trb_comp_code != COMP_NO_PING_RESPONSE_ERROR;
2866
2867 /*
2868 * Do not update event ring dequeue pointer if we're in a loop
2869 * processing missed tds.
2870 */
2871 if (!handling_skipped_tds)
2872 inc_deq(xhci, ir->event_ring);
2873
2874 /*
2875 * If ep->skip is set, there are missed TDs on the
2876 * endpoint ring that need to be taken care of.
2877 * Process them as short transfers until we reach the TD
2878 * pointed to by the event.
2879 */
2880 } while (handling_skipped_tds);
2881
2882 return 0;
2883
2884err_out:
2885 xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
2886 (unsigned long long) xhci_trb_virt_to_dma(
2887 ir->event_ring->deq_seg,
2888 ir->event_ring->dequeue),
2889 lower_32_bits(le64_to_cpu(event->buffer)),
2890 upper_32_bits(le64_to_cpu(event->buffer)),
2891 le32_to_cpu(event->transfer_len),
2892 le32_to_cpu(event->flags));
2893 return -ENODEV;
2894}
2895
2896/*
2897 * This function handles all OS-owned events on the event ring. It may drop
2898 * xhci->lock between event processing (e.g. to pass up port status changes).
2899 * Returns >0 for "possibly more events to process" (caller should call again),
2900 * otherwise 0 if done. In the future, <0 returns should indicate an error code.
2901 */
2902static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
2903{
2904 union xhci_trb *event;
2905 int update_ptrs = 1;
2906 u32 trb_type;
2907 int ret;
2908
2909 /* Event ring hasn't been allocated yet. */
2910 if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
2911 xhci_err(xhci, "ERROR interrupter not ready\n");
2912 return -ENOMEM;
2913 }
2914
2915 event = ir->event_ring->dequeue;
2916 /* Does the HC or OS own the TRB? */
2917 if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
2918 ir->event_ring->cycle_state)
2919 return 0;
2920
2921 trace_xhci_handle_event(ir->event_ring, &event->generic);
2922
2923 /*
2924 * Barrier between reading the TRB_CYCLE (valid) flag above and any
2925 * speculative reads of the event's flags/data below.
2926 */
2927 rmb();
2928 trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
2929 /* FIXME: Handle more event types. */
2930
2931 switch (trb_type) {
2932 case TRB_COMPLETION:
2933 handle_cmd_completion(xhci, &event->event_cmd);
2934 break;
2935 case TRB_PORT_STATUS:
2936 handle_port_status(xhci, ir, event);
2937 update_ptrs = 0;
2938 break;
2939 case TRB_TRANSFER:
2940 ret = handle_tx_event(xhci, ir, &event->trans_event);
2941 if (ret >= 0)
2942 update_ptrs = 0;
2943 break;
2944 case TRB_DEV_NOTE:
2945 handle_device_notification(xhci, event);
2946 break;
2947 default:
2948 if (trb_type >= TRB_VENDOR_DEFINED_LOW)
2949 handle_vendor_event(xhci, event, trb_type);
2950 else
2951 xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
2952 }
2953 /* Any of the above functions may drop and re-acquire the lock, so check
2954 * to make sure a watchdog timer didn't mark the host as non-responsive.
2955 */
2956 if (xhci->xhc_state & XHCI_STATE_DYING) {
2957 xhci_dbg(xhci, "xHCI host dying, returning from "
2958 "event handler.\n");
2959 return 0;
2960 }
2961
2962 if (update_ptrs)
2963 /* Update SW event ring dequeue pointer */
2964 inc_deq(xhci, ir->event_ring);
2965
2966 /* Are there more items on the event ring? Caller will call us again to
2967 * check.
2968 */
2969 return 1;
2970}
2971
2972/*
2973 * Update Event Ring Dequeue Pointer:
2974 * - When all events have finished
2975 * - To avoid "Event Ring Full Error" condition
2976 */
2977static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
2978 struct xhci_interrupter *ir,
2979 union xhci_trb *event_ring_deq)
2980{
2981 u64 temp_64;
2982 dma_addr_t deq;
2983
2984 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
2985 /* If necessary, update the HW's version of the event ring deq ptr. */
2986 if (event_ring_deq != ir->event_ring->dequeue) {
2987 deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
2988 ir->event_ring->dequeue);
2989 if (deq == 0)
2990 xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
2991 /*
2992 * Per 4.9.4, Software writes to the ERDP register shall
2993 * always advance the Event Ring Dequeue Pointer value.
2994 */
2995 if ((temp_64 & (u64) ~ERST_PTR_MASK) ==
2996 ((u64) deq & (u64) ~ERST_PTR_MASK))
2997 return;
2998
2999 /* Update HC event ring dequeue pointer */
3000 temp_64 &= ERST_PTR_MASK;
3001 temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
3002 }
3003
3004 /* Clear the event handler busy flag (RW1C) */
3005 temp_64 |= ERST_EHB;
3006 xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
3007}
3008
3009/*
3010 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
3011 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
3012 * indicators of an event TRB error, but we check the status *first* to be safe.
3013 */
3014irqreturn_t xhci_irq(struct usb_hcd *hcd)
3015{
3016 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3017 union xhci_trb *event_ring_deq;
3018 struct xhci_interrupter *ir;
3019 irqreturn_t ret = IRQ_NONE;
3020 u64 temp_64;
3021 u32 status;
3022 int event_loop = 0;
3023
3024 spin_lock(&xhci->lock);
3025 /* Check if the xHC generated the interrupt, or the irq is shared */
3026 status = readl(&xhci->op_regs->status);
3027 if (status == ~(u32)0) {
3028 xhci_hc_died(xhci);
3029 ret = IRQ_HANDLED;
3030 goto out;
3031 }
3032
3033 if (!(status & STS_EINT))
3034 goto out;
3035
3036 if (status & STS_HCE) {
3037 xhci_warn(xhci, "WARNING: Host Controller Error\n");
3038 goto out;
3039 }
3040
3041 if (status & STS_FATAL) {
3042 xhci_warn(xhci, "WARNING: Host System Error\n");
3043 xhci_halt(xhci);
3044 ret = IRQ_HANDLED;
3045 goto out;
3046 }
3047
3048 /*
3049 * Clear the op reg interrupt status first,
3050 * so we can receive interrupts from other MSI-X interrupters.
3051 * Write 1 to clear the interrupt status.
3052 */
3053 status |= STS_EINT;
3054 writel(status, &xhci->op_regs->status);
3055
3056 /* This is the handler of the primary interrupter */
3057 ir = xhci->interrupter;
3058 if (!hcd->msi_enabled) {
3059 u32 irq_pending;
3060 irq_pending = readl(&ir->ir_set->irq_pending);
3061 irq_pending |= IMAN_IP;
3062 writel(irq_pending, &ir->ir_set->irq_pending);
3063 }
3064
3065 if (xhci->xhc_state & XHCI_STATE_DYING ||
3066 xhci->xhc_state & XHCI_STATE_HALTED) {
3067 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
3068 "Shouldn't IRQs be disabled?\n");
3069 /* Clear the event handler busy flag (RW1C);
3070 * the event ring should be empty.
3071 */
3072 temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
3073 xhci_write_64(xhci, temp_64 | ERST_EHB,
3074 &ir->ir_set->erst_dequeue);
3075 ret = IRQ_HANDLED;
3076 goto out;
3077 }
3078
3079 event_ring_deq = ir->event_ring->dequeue;
3080 /* FIXME this should be a delayed service routine
3081 * that clears the EHB.
3082 */
3083 while (xhci_handle_event(xhci, ir) > 0) {
3084 if (event_loop++ < TRBS_PER_SEGMENT / 2)
3085 continue;
3086 xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
3087 event_ring_deq = ir->event_ring->dequeue;
3088
3089 /* ring is half-full, force isoc trbs to interrupt more often */
3090 if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
3091 xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
3092
3093 event_loop = 0;
3094 }
3095
3096 xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
3097 ret = IRQ_HANDLED;
3098
3099out:
3100 spin_unlock(&xhci->lock);
3101
3102 return ret;
3103}
3104
3105irqreturn_t xhci_msi_irq(int irq, void *hcd)
3106{
3107 return xhci_irq(hcd);
3108}
3109EXPORT_SYMBOL_GPL(xhci_msi_irq);
3110
3111/**** Endpoint Ring Operations ****/
3112
3113/*
3114 * Generic function for queueing a TRB on a ring.
3115 * The caller must have checked to make sure there's room on the ring.
3116 *
3117 * @more_trbs_coming: Will you enqueue more TRBs before calling
3118 * prepare_transfer()?
3119 */
3120static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
3121 bool more_trbs_coming,
3122 u32 field1, u32 field2, u32 field3, u32 field4)
3123{
3124 struct xhci_generic_trb *trb;
3125
3126 trb = &ring->enqueue->generic;
3127 trb->field[0] = cpu_to_le32(field1);
3128 trb->field[1] = cpu_to_le32(field2);
3129 trb->field[2] = cpu_to_le32(field3);
3130 /* make sure TRB is fully written before giving it to the controller */
3131 wmb();
3132 trb->field[3] = cpu_to_le32(field4);
3133
3134 trace_xhci_queue_trb(ring, trb);
3135
3136 inc_enq(xhci, ring, more_trbs_coming);
3137}
3138
3139/*
3140 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
3141 * FIXME allocate segments if the ring is full.
3142 */
3143static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
3144 u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
3145{
3146 unsigned int num_trbs_needed;
3147 unsigned int link_trb_count = 0;
3148
3149 /* Make sure the endpoint has been added to xHC schedule */
3150 switch (ep_state) {
3151 case EP_STATE_DISABLED:
3152 /*
3153 * USB core changed config/interfaces without notifying us,
3154 * or hardware is reporting the wrong state.
3155 */
3156 xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
3157 return -ENOENT;
3158 case EP_STATE_ERROR:
3159 xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
3160 /* FIXME event handling code for error needs to clear it */
3161 /* XXX not sure if this should be -ENOENT or not */
3162 return -EINVAL;
3163 case EP_STATE_HALTED:
3164 xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
3165 break;
3166 case EP_STATE_STOPPED:
3167 case EP_STATE_RUNNING:
3168 break;
3169 default:
3170 xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
3171 /*
3172 * FIXME issue Configure Endpoint command to try to get the HC
3173 * back into a known state.
3174 */
3175 return -EINVAL;
3176 }
3177
3178 while (1) {
3179 if (room_on_ring(xhci, ep_ring, num_trbs))
3180 break;
3181
3182 if (ep_ring == xhci->cmd_ring) {
3183 xhci_err(xhci, "Command ring expansion is not supported\n");
3184 return -ENOMEM;
3185 }
3186
3187 xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
3188 "ERROR no room on ep ring, try ring expansion");
3189 num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
3190 if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
3191 mem_flags)) {
3192 xhci_err(xhci, "Ring expansion failed\n");
3193 return -ENOMEM;
3194 }
3195 }
3196
3197 while (trb_is_link(ep_ring->enqueue)) {
3198 /* If we're not dealing with 0.95 hardware or isoc rings
3199 * on AMD 0.96 host, clear the chain bit.
3200 */
3201 if (!xhci_link_trb_quirk(xhci) &&
3202 !(ep_ring->type == TYPE_ISOC &&
3203 (xhci->quirks & XHCI_AMD_0x96_HOST)))
3204 ep_ring->enqueue->link.control &=
3205 cpu_to_le32(~TRB_CHAIN);
3206 else
3207 ep_ring->enqueue->link.control |=
3208 cpu_to_le32(TRB_CHAIN);
3209
3210 wmb();
3211 ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
3212
3213 /* Toggle the cycle bit after the last ring segment. */
3214 if (link_trb_toggles_cycle(ep_ring->enqueue))
3215 ep_ring->cycle_state ^= 1;
3216
3217 ep_ring->enq_seg = ep_ring->enq_seg->next;
3218 ep_ring->enqueue = ep_ring->enq_seg->trbs;
3219
3220 /* prevent infinite loop if all first trbs are link trbs */
3221 if (link_trb_count++ > ep_ring->num_segs) {
3222 xhci_warn(xhci, "Ring is an endless link TRB loop\n");
3223 return -EINVAL;
3224 }
3225 }
3226
3227 if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
3228 xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
3229 return -EINVAL;
3230 }
3231
3232 return 0;
3233}
3234
3235static int prepare_transfer(struct xhci_hcd *xhci,
3236 struct xhci_virt_device *xdev,
3237 unsigned int ep_index,
3238 unsigned int stream_id,
3239 unsigned int num_trbs,
3240 struct urb *urb,
3241 unsigned int td_index,
3242 gfp_t mem_flags)
3243{
3244 int ret;
3245 struct urb_priv *urb_priv;
3246 struct xhci_td *td;
3247 struct xhci_ring *ep_ring;
3248 struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
3249
3250 ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
3251 stream_id);
3252 if (!ep_ring) {
3253 xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
3254 stream_id);
3255 return -EINVAL;
3256 }
3257
3258 ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
3259 num_trbs, mem_flags);
3260 if (ret)
3261 return ret;
3262
3263 urb_priv = urb->hcpriv;
3264 td = &urb_priv->td[td_index];
3265
3266 INIT_LIST_HEAD(&td->td_list);
3267 INIT_LIST_HEAD(&td->cancelled_td_list);
3268
3269 if (td_index == 0) {
3270 ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
3271 if (unlikely(ret))
3272 return ret;
3273 }
3274
3275 td->urb = urb;
3276 /* Add this TD to the tail of the endpoint ring's TD list */
3277 list_add_tail(&td->td_list, &ep_ring->td_list);
3278 td->start_seg = ep_ring->enq_seg;
3279 td->first_trb = ep_ring->enqueue;
3280
3281 return 0;
3282}
3283
3284unsigned int count_trbs(u64 addr, u64 len)
3285{
3286 unsigned int num_trbs;
3287
3288 num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
3289 TRB_MAX_BUFF_SIZE);
3290 if (num_trbs == 0)
3291 num_trbs++;
3292
3293 return num_trbs;
3294}
3295
3296static inline unsigned int count_trbs_needed(struct urb *urb)
3297{
3298 return count_trbs(urb->transfer_dma, urb->transfer_buffer_length);
3299}
3300
3301static unsigned int count_sg_trbs_needed(struct urb *urb)
3302{
3303 struct scatterlist *sg;
3304 unsigned int i, len, full_len, num_trbs = 0;
3305
3306 full_len = urb->transfer_buffer_length;
3307
3308 for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
3309 len = sg_dma_len(sg);
3310 num_trbs += count_trbs(sg_dma_address(sg), len);
3311 len = min_t(unsigned int, len, full_len);
3312 full_len -= len;
3313 if (full_len == 0)
3314 break;
3315 }
3316
3317 return num_trbs;
3318}
3319
3320static unsigned int count_isoc_trbs_needed(struct urb *urb, int i)
3321{
3322 u64 addr, len;
3323
3324 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
3325 len = urb->iso_frame_desc[i].length;
3326
3327 return count_trbs(addr, len);
3328}
3329
3330static void check_trb_math(struct urb *urb, int running_total)
3331{
3332 if (unlikely(running_total != urb->transfer_buffer_length))
3333 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
3334 "queued %#x (%d), asked for %#x (%d)\n",
3335 __func__,
3336 urb->ep->desc.bEndpointAddress,
3337 running_total, running_total,
3338 urb->transfer_buffer_length,
3339 urb->transfer_buffer_length);
3340}
3341
3342static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
3343 unsigned int ep_index, unsigned int stream_id, int start_cycle,
3344 struct xhci_generic_trb *start_trb)
3345{
3346 /*
3347 * Pass all the TRBs to the hardware at once and make sure this write
3348 * isn't reordered.
3349 */
3350 wmb();
3351 if (start_cycle)
3352 start_trb->field[3] |= cpu_to_le32(start_cycle);
3353 else
3354 start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
3355 xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
3356}
3357
3358static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
3359 struct xhci_ep_ctx *ep_ctx)
3360{
3361 int xhci_interval;
3362 int ep_interval;
3363
3364 xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
3365 ep_interval = urb->interval;
3366
3367 /* Convert to microframes */
3368 if (urb->dev->speed == USB_SPEED_LOW ||
3369 urb->dev->speed == USB_SPEED_FULL)
3370 ep_interval *= 8;
3371
3372 /* FIXME change this to a warning and a suggestion to use the new API
3373 * to set the polling interval (once the API is added).
3374 */
3375 if (xhci_interval != ep_interval) {
3376 dev_dbg_ratelimited(&urb->dev->dev,
3377 "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
3378 ep_interval, ep_interval == 1 ? "" : "s",
3379 xhci_interval, xhci_interval == 1 ? "" : "s");
3380 urb->interval = xhci_interval;
3381 /* Convert back to frames for LS/FS devices */
3382 if (urb->dev->speed == USB_SPEED_LOW ||
3383 urb->dev->speed == USB_SPEED_FULL)
3384 urb->interval /= 8;
3385 }
3386}
3387
3388/*
3389 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
3390 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
3391 * (comprised of sg list entries) can take several service intervals to
3392 * transmit.
3393 */
3394int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
3395 struct urb *urb, int slot_id, unsigned int ep_index)
3396{
3397 struct xhci_ep_ctx *ep_ctx;
3398
3399 ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
3400 check_interval(xhci, urb, ep_ctx);
3401
3402 return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
3403}
3404
3405/*
3406 * For xHCI 1.0 host controllers, TD size is the number of max packet sized
3407 * packets remaining in the TD (*not* including this TRB).
3408 *
3409 * Total TD packet count = total_packet_count =
3410 * DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
3411 *
3412 * Packets transferred up to and including this TRB = packets_transferred =
3413 * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
3414 *
3415 * TD size = total_packet_count - packets_transferred
3416 *
3417 * For xHCI 0.96 and older, TD size field should be the remaining bytes
3418 * including this TRB, right shifted by 10
3419 *
3420 * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
3421 * This is taken care of in the TRB_TD_SIZE() macro
3422 *
3423 * The last TRB in a TD must have the TD size set to zero.
3424 */
3425static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
3426 int trb_buff_len, unsigned int td_total_len,
3427 struct urb *urb, bool more_trbs_coming)
3428{
3429 u32 maxp, total_packet_count;
3430
3431 /* MTK xHCI 0.96 contains some features from 1.0 */
3432 if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST))
3433 return ((td_total_len - transferred) >> 10);
3434
3435 /* One TRB with a zero-length data packet. */
3436 if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
3437 trb_buff_len == td_total_len)
3438 return 0;
3439
3440 /* for MTK xHCI 0.96, TD size includes this TRB, but not in 1.x */
3441 if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100))
3442 trb_buff_len = 0;
3443
3444 maxp = usb_endpoint_maxp(&urb->ep->desc);
3445 total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
3446
3447 /* Queueing functions don't count the current TRB into transferred */
3448 return (total_packet_count - ((transferred + trb_buff_len) / maxp));
3449}
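
/*
 * Worked example for the 1.0+ path above (editor's illustration): a 3000
 * byte TD on an endpoint with wMaxPacketSize = 512 gives total_packet_count
 * = DIV_ROUND_UP(3000, 512) = 6. For a middle TRB with transferred = 1024
 * and trb_buff_len = 512, the TD size field is 6 - ((1024 + 512) / 512) =
 * 6 - 3 = 3 packets remaining. On a pre-1.0 host, the same TRB would carry
 * the remaining bytes right shifted by 10: (3000 - 1024) >> 10 = 1.
 */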


static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
			 u32 *trb_buff_len, struct xhci_segment *seg)
{
	struct device *dev = xhci_to_hcd(xhci)->self.controller;
	unsigned int unalign;
	unsigned int max_pkt;
	u32 new_buff_len;
	size_t len;

	max_pkt = usb_endpoint_maxp(&urb->ep->desc);
	unalign = (enqd_len + *trb_buff_len) % max_pkt;

	/* we got lucky, last normal TRB data on segment is packet aligned */
	if (unalign == 0)
		return 0;

	xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
		 unalign, *trb_buff_len);

	/* is the last normal TRB alignable by splitting it? */
	if (*trb_buff_len > unalign) {
		*trb_buff_len -= unalign;
		xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
		return 0;
	}

	/*
	 * We want enqd_len + trb_buff_len to sum up to a number aligned to
	 * the endpoint's wMaxPacketSize. IOW:
	 * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
	 */
	new_buff_len = max_pkt - (enqd_len % max_pkt);

	if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
		new_buff_len = (urb->transfer_buffer_length - enqd_len);

	/* create a bounce buffer of at most max_pkt bytes, pointed to by the last TRB */
	if (usb_urb_dir_out(urb)) {
		if (urb->num_sgs) {
			len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
						 seg->bounce_buf, new_buff_len, enqd_len);
			if (len != new_buff_len)
				xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
					  len, new_buff_len);
		} else {
			memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
		}

		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_TO_DEVICE);
	} else {
		seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
						 max_pkt, DMA_FROM_DEVICE);
	}

	if (dma_mapping_error(dev, seg->bounce_dma)) {
		/* try without aligning. Some host controllers survive */
		xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
		return 0;
	}
	*trb_buff_len = new_buff_len;
	seg->bounce_len = new_buff_len;
	seg->bounce_offs = enqd_len;

	xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);

	return 1;
}
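
/*
 * Worked example (editor's illustration): with wMaxPacketSize = 1024,
 * enqd_len = 1536 and a final trb_buff_len of 256, unalign is
 * (1536 + 256) % 1024 = 768. The 256 byte TRB cannot be split to fix that,
 * so a bounce buffer of new_buff_len = 1024 - (1536 % 1024) = 512 bytes is
 * used instead, making the enqueued total 1536 + 512 = 2048, an exact
 * multiple of the packet size.
 */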

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_generic_trb *start_trb;
	struct scatterlist *sg = NULL;
	bool more_trbs_coming = true;
	bool need_zero_pkt = false;
	bool first_trb = true;
	unsigned int num_trbs;
	unsigned int start_cycle, num_sgs = 0;
	unsigned int enqd_len, block_len, trb_buff_len, full_len;
	int sent_len, ret;
	u32 field, length_field, remainder;
	u64 addr, send_addr;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;

	full_len = urb->transfer_buffer_length;
	/* If we have a scatter/gather list, we use it. */
	if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
		num_sgs = urb->num_mapped_sgs;
		sg = urb->sg;
		addr = (u64) sg_dma_address(sg);
		block_len = sg_dma_len(sg);
		num_trbs = count_sg_trbs_needed(urb);
	} else {
		num_trbs = count_trbs_needed(urb);
		addr = (u64) urb->transfer_dma;
		block_len = full_len;
	}
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
	if (unlikely(ret < 0))
		return ret;

	urb_priv = urb->hcpriv;

	/* Deal with URB_ZERO_PACKET - need one more td/trb */
	if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->num_tds > 1)
		need_zero_pkt = true;

	td = &urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ring->enqueue->generic;
	start_cycle = ring->cycle_state;
	send_addr = addr;

	/* Queue the TRBs, even if they are zero-length */
	for (enqd_len = 0; first_trb || enqd_len < full_len;
	     enqd_len += trb_buff_len) {
		field = TRB_TYPE(TRB_NORMAL);

		/* TRB buffer should not cross 64KB boundaries */
		trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
		trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);

		if (enqd_len + trb_buff_len > full_len)
			trb_buff_len = full_len - enqd_len;
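
		/*
		 * Editor's note: TRB_BUFF_LEN_UP_TO_BOUNDARY() yields the
		 * byte count left before the next 64KB boundary. E.g. for
		 * addr = 0x2ff00 it returns 0x10000 - 0xff00 = 0x100 bytes,
		 * so a larger block is split there and the next TRB
		 * continues at 0x30000.
		 */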

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= TRB_CYCLE;
		} else
			field |= ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (enqd_len + trb_buff_len < full_len) {
			field |= TRB_CHAIN;
			if (trb_is_link(ring->enqueue + 1)) {
				if (xhci_align_td(xhci, urb, enqd_len,
						  &trb_buff_len,
						  ring->enq_seg)) {
					send_addr = ring->enq_seg->bounce_dma;
					/* assuming TD won't span 2 segs */
					td->bounce_seg = ring->enq_seg;
				}
			}
		}
		if (enqd_len + trb_buff_len >= full_len) {
			field &= ~TRB_CHAIN;
			field |= TRB_IOC;
			more_trbs_coming = false;
			td->last_trb = ring->enqueue;
			td->last_trb_seg = ring->enq_seg;
			if (xhci_urb_suitable_for_idt(urb)) {
				memcpy(&send_addr, urb->transfer_buffer,
				       trb_buff_len);
				le64_to_cpus(&send_addr);
				field |= TRB_IDT;
			}
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
					      full_len, urb, more_trbs_coming);

		length_field = TRB_LEN(trb_buff_len) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);

		queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
				lower_32_bits(send_addr),
				upper_32_bits(send_addr),
				length_field,
				field);
		td->num_trbs++;
		addr += trb_buff_len;
		sent_len = trb_buff_len;

		while (sg && sent_len >= block_len) {
			/* New sg entry */
			--num_sgs;
			sent_len -= block_len;
			sg = sg_next(sg);
			if (num_sgs != 0 && sg) {
				block_len = sg_dma_len(sg);
				addr = (u64) sg_dma_address(sg);
				addr += sent_len;
			}
		}
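		/*
		 * Editor's note: sent_len can exceed block_len when a bounce
		 * buffer gathered data from more than one sg entry, so the
		 * loop above may skip several entries. E.g. with 300 bytes
		 * left in the current entry and a 512 byte bounce TRB, it
		 * skips that entry, reduces sent_len to 212, and the next
		 * TRB resumes 212 bytes into the following entry.
		 */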
		block_len -= sent_len;
		send_addr = addr;
	}

	if (need_zero_pkt) {
		ret = prepare_transfer(xhci, xhci->devs[slot_id],
				       ep_index, urb->stream_id,
				       1, urb, 1, mem_flags);
		urb_priv->td[1].last_trb = ring->enqueue;
		urb_priv->td[1].last_trb_seg = ring->enq_seg;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
		urb_priv->td[1].num_trbs++;
	}

	check_trb_math(urb, enqd_len);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * The setup packet is copied into the setup TRB as immediate data,
	 * so we can't use a DMA address for it.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB.
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			       ep_index, urb->stream_id,
			       num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];
	td->num_trbs = num_trbs;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
	if ((xhci->hci_version >= 0x100) || (xhci->quirks & XHCI_MTK_HOST)) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, true,
		  setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
		  le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
		  TRB_LEN(8) | TRB_INTR_TARGET(0),
		  /* Immediate data in pointer */
		  field);
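
	/*
	 * Worked example (editor's illustration): a GET_DESCRIPTOR(DEVICE)
	 * setup packet with bRequestType = 0x80, bRequest = 0x06 and
	 * wValue = 0x0100 packs into field1 = 0x80 | 0x06 << 8 |
	 * 0x0100 << 16 = 0x01000680; wIndex and wLength pack the same way
	 * into the second field.
	 */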

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	if (urb->transfer_buffer_length > 0) {
		u32 length_field, remainder;
		u64 addr;

		if (xhci_urb_suitable_for_idt(urb)) {
			memcpy(&addr, urb->transfer_buffer,
			       urb->transfer_buffer_length);
			le64_to_cpus(&addr);
			field |= TRB_IDT;
		} else {
			addr = (u64) urb->transfer_dma;
		}

		remainder = xhci_td_remainder(xhci, 0,
					      urb->transfer_buffer_length,
					      urb->transfer_buffer_length,
					      urb, 1);
		length_field = TRB_LEN(urb->transfer_buffer_length) |
			TRB_TD_SIZE(remainder) |
			TRB_INTR_TARGET(0);
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, true,
			  lower_32_bits(addr),
			  upper_32_bits(addr),
			  length_field,
			  field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;
	td->last_trb_seg = ep_ring->enq_seg;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
		  0,
		  0,
		  TRB_INTR_TARGET(0),
		  /* Event on completion */
		  field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			   start_cycle, start_trb);
	return 0;
}

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD. Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst. Basically, for everything but SuperSpeed devices, this field will be
 * zero. Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || urb->dev->speed < USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
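
/*
 * Worked example (editor's illustration): a SuperSpeed endpoint with
 * bMaxBurst = 3 moves up to 4 packets per burst. A 10 packet TD then needs
 * DIV_ROUND_UP(10, 4) = 3 bursts, encoded zero-based as 2.
 */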

/*
 * Returns the number of packets in the last "burst" of packets. This field is
 * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	if (urb->dev->speed >= USB_SPEED_SUPER) {
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	}
	if (total_packet_count == 0)
		return 0;
	return total_packet_count - 1;
}
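
/*
 * Worked example (editor's illustration): for the 10 packet TD above with
 * bMaxBurst = 3, residue = 10 % 4 = 2, so the last burst carries 2 packets
 * and TLBPC is encoded zero-based as 1. Had the residue been 0, the last
 * burst would be full and TLBPC would be max_burst = 3.
 */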

/*
 * Calculate the Frame ID field of an isochronous TRB, which identifies the
 * target frame that the interval associated with this Isochronous Transfer
 * Descriptor will start on. Refer to section 4.11.2.5 in the xHCI 1.1 spec.
 *
 * Returns the actual frame id on success, negative value on error.
 */
static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
		struct urb *urb, int index)
{
	int start_frame, ist, ret = 0;
	int start_frame_id, end_frame_id, current_frame_id;

	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL)
		start_frame = urb->start_frame + index * urb->interval;
	else
		start_frame = (urb->start_frame + index * urb->interval) >> 3;

	/* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
	 *
	 * If bit [3] of IST is cleared to '0', software can add a TRB no
	 * later than IST[2:0] Microframes before that TRB is scheduled to
	 * be executed.
	 * If bit [3] of IST is set to '1', software can add a TRB no later
	 * than IST[2:0] Frames before that TRB is scheduled to be executed.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
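	/*
	 * Editor's note: e.g. an IST field of 0xA has bit [3] set and
	 * IST[2:0] = 2, so ist = 2 << 3 = 16 microframes (two frames).
	 */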

	/* Software shall not schedule an Isoch TD with a Frame ID value that
	 * is less than the Start Frame ID or greater than the End Frame ID,
	 * where:
	 *
	 * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
	 * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
	 *
	 * Both the End Frame ID and Start Frame ID values are calculated
	 * in microframes. When software determines the valid Frame ID value,
	 * the End Frame ID value should be rounded down to the nearest Frame
	 * boundary, and the Start Frame ID value should be rounded up to the
	 * nearest Frame boundary.
	 */
	current_frame_id = readl(&xhci->run_regs->microframe_index);
	start_frame_id = roundup(current_frame_id + ist + 1, 8);
	end_frame_id = rounddown(current_frame_id + 895 * 8, 8);

	start_frame &= 0x7ff;
	start_frame_id = (start_frame_id >> 3) & 0x7ff;
	end_frame_id = (end_frame_id >> 3) & 0x7ff;

	xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
		 __func__, index, readl(&xhci->run_regs->microframe_index),
		 start_frame_id, end_frame_id, start_frame);

	if (start_frame_id < end_frame_id) {
		if (start_frame > end_frame_id ||
		    start_frame < start_frame_id)
			ret = -EINVAL;
	} else if (start_frame_id > end_frame_id) {
		if ((start_frame > end_frame_id &&
		     start_frame < start_frame_id))
			ret = -EINVAL;
	} else {
		ret = -EINVAL;
	}

	if (index == 0) {
		if (ret == -EINVAL || start_frame == start_frame_id) {
			start_frame = start_frame_id + 1;
			if (urb->dev->speed == USB_SPEED_LOW ||
			    urb->dev->speed == USB_SPEED_FULL)
				urb->start_frame = start_frame;
			else
				urb->start_frame = start_frame << 3;
			ret = 0;
		}
	}

	if (ret) {
		xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
			  start_frame, current_frame_id, index,
			  start_frame_id, end_frame_id);
		xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
		return ret;
	}

	return start_frame;
}

/* Check if we should generate event interrupt for a TD in an isoc URB */
static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
{
	if (xhci->hci_version < 0x100)
		return false;
	/* always generate an event interrupt for the last TD */
	if (i == num_tds - 1)
		return false;
	/*
	 * If AVOID_BEI is set, the host handles full event rings poorly;
	 * generate an event at least every isoc_bei_interval TDs to keep
	 * the event ring from filling up.
	 */
	if (i && xhci->quirks & XHCI_AVOID_BEI)
		return !!(i % xhci->isoc_bei_interval);

	return true;
}
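
/*
 * Worked example (editor's illustration): with XHCI_AVOID_BEI set and
 * isoc_bei_interval = 8, a 20 TD URB generates transfer events at TDs 8 and
 * 16 (nonzero multiples of 8) and at the final TD 19; every other TD,
 * including TD 0, gets TRB_BEI set and completes without an interrupt.
 */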

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;
	struct xhci_virt_ep *xep;
	int frame_id;

	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}
	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the TRBs for each TD, even if they are zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_pkt_count, max_pkt;
		unsigned int burst_count, last_burst_pkt_count;
		u32 sia_frame_id;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		max_pkt = usb_endpoint_maxp(&urb->ep->desc);
		total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);

		/* A zero-length transfer still involves at least one packet. */
		if (total_pkt_count == 0)
			total_pkt_count++;
		burst_count = xhci_get_burst_count(xhci, urb, total_pkt_count);
		last_burst_pkt_count = xhci_get_last_burst_packet_count(xhci,
							urb, total_pkt_count);

		trbs_per_td = count_isoc_trbs_needed(urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}
		td = &urb_priv->td[i];
		td->num_trbs = trbs_per_td;
		/* use SIA by default; overwrite it if a frame ID is used */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
		    HCC_CFC(xhci->hcc_params)) {
			frame_id = xhci_get_isoc_frame_id(xhci, urb, i);
			if (frame_id >= 0)
				sia_frame_id = TRB_FRAME_ID(frame_id);
		}
		/*
		 * Set isoc specific data for the first TRB in a TD.
		 * Prevent HW from getting the TRBs by keeping the cycle state
		 * inverted in the first TD's isoc TRB.
		 */
		field = TRB_TYPE(TRB_ISOC) |
			TRB_TLBPC(last_burst_pkt_count) |
			sia_frame_id |
			(i ? ep_ring->cycle_state : !start_cycle);

		/* xhci 1.1 with ETE uses TD_Size field for TBC, old is RsvdZ */
		if (!xep->use_extended_tbc)
			field |= TRB_TBC(burst_count);

		/* fill the rest of the TRB fields, and remaining normal TRBs */
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;

			/* only the first TRB is isoc; use normal TRBs for the rest */
			if (!first_trb)
				field = TRB_TYPE(TRB_NORMAL) |
					ep_ring->cycle_state;

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Set the chain bit for all except the last TRB */
			if (j < trbs_per_td - 1) {
				more_trbs_coming = true;
				field |= TRB_CHAIN;
			} else {
				more_trbs_coming = false;
				td->last_trb = ep_ring->enqueue;
				td->last_trb_seg = ep_ring->enq_seg;
				field |= TRB_IOC;
				if (trb_block_event_intr(xhci, num_tds, i))
					field |= TRB_BEI;
			}
			/* Calculate TRB length */
			trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			remainder = xhci_td_remainder(xhci, running_total,
						      trb_buff_len, td_len,
						      urb, more_trbs_coming);

			length_field = TRB_LEN(trb_buff_len) |
				TRB_INTR_TARGET(0);

			/* xhci 1.1 with ETE uses TD Size field for TBC */
			if (first_trb && xep->use_extended_tbc)
				length_field |= TRB_TD_SIZE_TBC(burst_count);
			else
				length_field |= TRB_TD_SIZE(remainder);
			first_trb = false;

			queue_trb(xhci, ep_ring, more_trbs_coming,
				  lower_32_bits(addr),
				  upper_32_bits(addr),
				  length_field,
				  field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			ret = -EINVAL;
			goto cleanup;
		}
	}

	/* store the next frame id */
	if (HCC_CFC(xhci->hcc_params))
		xep->next_frame_id = urb->start_frame + num_tds * urb->interval;

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			   start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */

	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i].td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit. That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them. td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0].last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, &urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0].first_trb;
	ep_ring->enq_seg = urb_priv->td[0].start_seg;
	ep_ring->cycle_state = start_cycle;
	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does. Use the xhci frame_index to
 * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or if
 * Contiguous Frame ID is not supported by the HC.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int num_tds, num_trbs, i;
	int ret;
	struct xhci_virt_ep *xep;
	int ist;

	xdev = xhci->devs[slot_id];
	xep = &xhci->devs[slot_id]->eps[ep_index];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any TD of the urb into the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx),
			   num_trbs, mem_flags);
	if (ret)
		return ret;

	/*
	 * Check the interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
	check_interval(xhci, urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) {
			urb->start_frame = xep->next_frame_id;
			goto skip_start_over;
		}
	}

	start_frame = readl(&xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;
	/*
	 * Round up to the next frame and account for the time before the TRB
	 * really gets scheduled by the hardware.
	 */
	ist = HCS_IST(xhci->hcs_params2) & 0x7;
	if (HCS_IST(xhci->hcs_params2) & (1 << 3))
		ist <<= 3;
	start_frame += ist + XHCI_CFC_DELAY;
	start_frame = roundup(start_frame, 8);

	/*
	 * Round up to the next ESIT (Endpoint Service Interval Time) if the
	 * ESIT is greater than 8 microframes.
	 */
	if (urb->dev->speed == USB_SPEED_LOW ||
	    urb->dev->speed == USB_SPEED_FULL) {
		start_frame = roundup(start_frame, urb->interval << 3);
		urb->start_frame = start_frame >> 3;
	} else {
		start_frame = roundup(start_frame, urb->interval);
		urb->start_frame = start_frame;
	}

skip_start_over:
	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;

	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
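
/*
 * Worked example (editor's illustration): suppose MFINDEX reads 1000
 * microframes and ist decodes to 2, with XHCI_CFC_DELAY adding its slack of
 * N microframes. start_frame becomes roundup(1002 + N, 8); for a high-speed
 * endpoint with urb->interval = 16 microframes it is then rounded up to the
 * next multiple of 16, while for a full-speed endpoint with urb->interval =
 * 2 frames it is rounded up to a multiple of 16 microframes and shifted
 * down by 3 to express urb->start_frame in frames.
 */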

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
			 u32 field1, u32 field2,
			 u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
	    (xhci->xhc_state & XHCI_STATE_HALTED)) {
		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
		return -ESHUTDOWN;
	}

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			   reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for unfailable commands failed.\n");
		return ret;
	}

	cmd->command_trb = xhci->cmd_ring->enqueue;

	/* if there are no other commands queued we start the timeout timer */
	if (list_empty(&xhci->cmd_list)) {
		xhci->current_cmd = cmd;
		xhci_mod_cmd_timer(xhci, XHCI_CMD_DEFAULT_TIMEOUT);
	}

	list_add_tail(&cmd->cmd_list, &xhci->cmd_list);

	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
		  field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
			| (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, cmd, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
		u32 slot_id)
{
	return queue_command(xhci, cmd, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
		struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
		dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			int slot_id, unsigned int ep_index,
			enum xhci_ep_reset_type reset_type)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
		type |= TRB_TSP;

	return queue_command(xhci, cmd, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}