Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

USB: EHCI: split ehci_qh into hw and sw parts

The ehci_qh structure merged hw and sw together which is not good:
1. More and more items are being added into ehci_qh; the ehci_qh software
part does not need to be allocated in the DMA qh_pool.
2. If the HCD has local SRAM, the sw part will consume it too, and this
brings no benefit.
3. On non-cache-coherent systems, the entire ehci_qh is uncacheable, although
we only need the hw part to be uncacheable. Splitting them lets the sw
part be cacheable.

Signed-off-by: Alek Du <alek.du@intel.com>
Cc: David Brownell <dbrownell@users.sourceforge.net>
CC: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Alek Du and committed by
Greg Kroah-Hartman
3807e26d 403dbd36

+127 -81
+24 -19
drivers/usb/host/ehci-dbg.c
··· 134 134 static void __maybe_unused 135 135 dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) 136 136 { 137 + struct ehci_qh_hw *hw = qh->hw; 138 + 137 139 ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label, 138 - qh, qh->hw_next, qh->hw_info1, qh->hw_info2, 139 - qh->hw_current); 140 - dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next); 140 + qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); 141 + dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next); 141 142 } 142 143 143 144 static void __maybe_unused ··· 401 400 char *next = *nextp; 402 401 char mark; 403 402 __le32 list_end = EHCI_LIST_END(ehci); 403 + struct ehci_qh_hw *hw = qh->hw; 404 404 405 - if (qh->hw_qtd_next == list_end) /* NEC does this */ 405 + if (hw->hw_qtd_next == list_end) /* NEC does this */ 406 406 mark = '@'; 407 407 else 408 - mark = token_mark(ehci, qh->hw_token); 408 + mark = token_mark(ehci, hw->hw_token); 409 409 if (mark == '/') { /* qh_alt_next controls qh advance? */ 410 - if ((qh->hw_alt_next & QTD_MASK(ehci)) 411 - == ehci->async->hw_alt_next) 410 + if ((hw->hw_alt_next & QTD_MASK(ehci)) 411 + == ehci->async->hw->hw_alt_next) 412 412 mark = '#'; /* blocked */ 413 - else if (qh->hw_alt_next == list_end) 413 + else if (hw->hw_alt_next == list_end) 414 414 mark = '.'; /* use hw_qtd_next */ 415 415 /* else alt_next points to some other qtd */ 416 416 } 417 - scratch = hc32_to_cpup(ehci, &qh->hw_info1); 418 - hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0; 417 + scratch = hc32_to_cpup(ehci, &hw->hw_info1); 418 + hw_curr = (mark == '*') ? 
hc32_to_cpup(ehci, &hw->hw_current) : 0; 419 419 temp = scnprintf (next, size, 420 420 "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", 421 421 qh, scratch & 0x007f, 422 422 speed_char (scratch), 423 423 (scratch >> 8) & 0x000f, 424 - scratch, hc32_to_cpup(ehci, &qh->hw_info2), 425 - hc32_to_cpup(ehci, &qh->hw_token), mark, 426 - (cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token) 424 + scratch, hc32_to_cpup(ehci, &hw->hw_info2), 425 + hc32_to_cpup(ehci, &hw->hw_token), mark, 426 + (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token) 427 427 ? "data1" : "data0", 428 - (hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f); 428 + (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f); 429 429 size -= temp; 430 430 next += temp; 431 431 ··· 437 435 mark = ' '; 438 436 if (hw_curr == td->qtd_dma) 439 437 mark = '*'; 440 - else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) 438 + else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) 441 439 mark = '+'; 442 440 else if (QTD_LENGTH (scratch)) { 443 - if (td->hw_alt_next == ehci->async->hw_alt_next) 441 + if (td->hw_alt_next == ehci->async->hw->hw_alt_next) 444 442 mark = '#'; 445 443 else if (td->hw_alt_next != list_end) 446 444 mark = '/'; ··· 552 550 next += temp; 553 551 554 552 do { 553 + struct ehci_qh_hw *hw; 554 + 555 555 switch (hc32_to_cpu(ehci, tag)) { 556 556 case Q_TYPE_QH: 557 + hw = p.qh->hw; 557 558 temp = scnprintf (next, size, " qh%d-%04x/%p", 558 559 p.qh->period, 559 560 hc32_to_cpup(ehci, 560 - &p.qh->hw_info2) 561 + &hw->hw_info2) 561 562 /* uframe masks */ 562 563 & (QH_CMASK | QH_SMASK), 563 564 p.qh); ··· 581 576 /* show more info the first time around */ 582 577 if (temp == seen_count) { 583 578 u32 scratch = hc32_to_cpup(ehci, 584 - &p.qh->hw_info1); 579 + &hw->hw_info1); 585 580 struct ehci_qtd *qtd; 586 581 char *type = ""; 587 582 ··· 614 609 } else 615 610 temp = 0; 616 611 if (p.qh) { 617 - tag = Q_NEXT_TYPE(ehci, p.qh->hw_next); 612 + tag = Q_NEXT_TYPE(ehci, hw->hw_next); 618 613 p = 
p.qh->qh_next; 619 614 } 620 615 break;
+8 -6
drivers/usb/host/ehci-hcd.c
··· 507 507 u32 temp; 508 508 int retval; 509 509 u32 hcc_params; 510 + struct ehci_qh_hw *hw; 510 511 511 512 spin_lock_init(&ehci->lock); 512 513 ··· 551 550 * from automatically advancing to the next td after short reads. 552 551 */ 553 552 ehci->async->qh_next.qh = NULL; 554 - ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); 555 - ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); 556 - ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); 557 - ehci->async->hw_qtd_next = EHCI_LIST_END(ehci); 553 + hw = ehci->async->hw; 554 + hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); 555 + hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); 556 + hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); 557 + hw->hw_qtd_next = EHCI_LIST_END(ehci); 558 558 ehci->async->qh_state = QH_STATE_LINKED; 559 - ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); 559 + hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); 560 560 561 561 /* clear interrupt enables, set irq latency */ 562 562 if (log2_irq_thresh < 0 || log2_irq_thresh > 6) ··· 987 985 /* endpoints can be iso streams. for now, we don't 988 986 * accelerate iso completions ... so spin a while. 989 987 */ 990 - if (qh->hw_info1 == 0) { 988 + if (qh->hw->hw_info1 == 0) { 991 989 ehci_vdbg (ehci, "iso delay\n"); 992 990 goto idle_timeout; 993 991 }
+17 -9
drivers/usb/host/ehci-mem.c
··· 75 75 } 76 76 if (qh->dummy) 77 77 ehci_qtd_free (ehci, qh->dummy); 78 - dma_pool_free (ehci->qh_pool, qh, qh->qh_dma); 78 + dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); 79 + kfree(qh); 79 80 } 80 81 81 82 static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) ··· 84 83 struct ehci_qh *qh; 85 84 dma_addr_t dma; 86 85 87 - qh = (struct ehci_qh *) 88 - dma_pool_alloc (ehci->qh_pool, flags, &dma); 86 + qh = kzalloc(sizeof *qh, GFP_ATOMIC); 89 87 if (!qh) 90 - return qh; 91 - 92 - memset (qh, 0, sizeof *qh); 88 + goto done; 89 + qh->hw = (struct ehci_qh_hw *) 90 + dma_pool_alloc(ehci->qh_pool, flags, &dma); 91 + if (!qh->hw) 92 + goto fail; 93 + memset(qh->hw, 0, sizeof *qh->hw); 93 94 qh->refcount = 1; 94 95 qh->ehci = ehci; 95 96 qh->qh_dma = dma; ··· 102 99 qh->dummy = ehci_qtd_alloc (ehci, flags); 103 100 if (qh->dummy == NULL) { 104 101 ehci_dbg (ehci, "no dummy td\n"); 105 - dma_pool_free (ehci->qh_pool, qh, qh->qh_dma); 106 - qh = NULL; 102 + goto fail1; 107 103 } 104 + done: 108 105 return qh; 106 + fail1: 107 + dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); 108 + fail: 109 + kfree(qh); 110 + return NULL; 109 111 } 110 112 111 113 /* to share a qh (cpu threads, or hc) */ ··· 188 180 /* QHs for control/bulk/intr transfers */ 189 181 ehci->qh_pool = dma_pool_create ("ehci_qh", 190 182 ehci_to_hcd(ehci)->self.controller, 191 - sizeof (struct ehci_qh), 183 + sizeof(struct ehci_qh_hw), 192 184 32 /* byte alignment (for hw parts) */, 193 185 4096 /* can't cross 4K */); 194 186 if (!ehci->qh_pool) {
+28 -22
drivers/usb/host/ehci-q.c
··· 87 87 static inline void 88 88 qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) 89 89 { 90 + struct ehci_qh_hw *hw = qh->hw; 91 + 90 92 /* writes to an active overlay are unsafe */ 91 93 BUG_ON(qh->qh_state != QH_STATE_IDLE); 92 94 93 - qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); 94 - qh->hw_alt_next = EHCI_LIST_END(ehci); 95 + hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); 96 + hw->hw_alt_next = EHCI_LIST_END(ehci); 95 97 96 98 /* Except for control endpoints, we make hardware maintain data 97 99 * toggle (like OHCI) ... here (re)initialize the toggle in the QH, 98 100 * and set the pseudo-toggle in udev. Only usb_clear_halt() will 99 101 * ever clear it. 100 102 */ 101 - if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { 103 + if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) { 102 104 unsigned is_out, epnum; 103 105 104 106 is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8)); 105 - epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f; 107 + epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; 106 108 if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { 107 - qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); 109 + hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); 108 110 usb_settoggle (qh->dev, epnum, is_out, 1); 109 111 } 110 112 } 111 113 112 114 /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ 113 115 wmb (); 114 - qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); 116 + hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); 115 117 } 116 118 117 119 /* if it weren't for a common silicon quirk (writing the dummy into the qh ··· 131 129 qtd = list_entry (qh->qtd_list.next, 132 130 struct ehci_qtd, qtd_list); 133 131 /* first qtd may already be partially processed */ 134 - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current) 132 + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) 135 133 qtd = NULL; 136 134 } 137 135 ··· 262 260 struct ehci_qh *qh = (struct ehci_qh *) 
urb->hcpriv; 263 261 264 262 /* S-mask in a QH means it's an interrupt urb */ 265 - if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { 263 + if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) { 266 264 267 265 /* ... update hc-wide periodic stats (for usbfs) */ 268 266 ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; ··· 317 315 unsigned count = 0; 318 316 u8 state; 319 317 __le32 halt = HALT_BIT(ehci); 318 + struct ehci_qh_hw *hw = qh->hw; 320 319 321 320 if (unlikely (list_empty (&qh->qtd_list))) 322 321 return count; ··· 395 392 qtd->hw_token = cpu_to_hc32(ehci, 396 393 token); 397 394 wmb(); 398 - qh->hw_token = cpu_to_hc32(ehci, token); 395 + hw->hw_token = cpu_to_hc32(ehci, 396 + token); 399 397 goto retry_xacterr; 400 398 } 401 399 stopped = 1; ··· 439 435 /* qh unlinked; token in overlay may be most current */ 440 436 if (state == QH_STATE_IDLE 441 437 && cpu_to_hc32(ehci, qtd->qtd_dma) 442 - == qh->hw_current) { 443 - token = hc32_to_cpu(ehci, qh->hw_token); 438 + == hw->hw_current) { 439 + token = hc32_to_cpu(ehci, hw->hw_token); 444 440 445 441 /* An unlink may leave an incomplete 446 442 * async transaction in the TT buffer. ··· 453 449 * patch the qh later and so that completions can't 454 450 * activate it while we "know" it's stopped. 455 451 */ 456 - if ((halt & qh->hw_token) == 0) { 452 + if ((halt & hw->hw_token) == 0) { 457 453 halt: 458 - qh->hw_token |= halt; 454 + hw->hw_token |= halt; 459 455 wmb (); 460 456 } 461 457 } ··· 514 510 * it after fault cleanup, or recovering from silicon wrongly 515 511 * overlaying the dummy qtd (which reduces DMA chatter). 516 512 */ 517 - if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) { 513 + if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) { 518 514 switch (state) { 519 515 case QH_STATE_IDLE: 520 516 qh_refresh(ehci, qh); ··· 532 528 * except maybe high bandwidth ... 
533 529 */ 534 530 if ((cpu_to_hc32(ehci, QH_SMASK) 535 - & qh->hw_info2) != 0) { 531 + & hw->hw_info2) != 0) { 536 532 intr_deschedule (ehci, qh); 537 533 (void) qh_schedule (ehci, qh); 538 534 } else ··· 653 649 * (this will usually be overridden later.) 654 650 */ 655 651 if (is_input) 656 - qtd->hw_alt_next = ehci->async->hw_alt_next; 652 + qtd->hw_alt_next = ehci->async->hw->hw_alt_next; 657 653 658 654 /* qh makes control packets use qtd toggle; maybe switch it */ 659 655 if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) ··· 748 744 int is_input, type; 749 745 int maxp = 0; 750 746 struct usb_tt *tt = urb->dev->tt; 747 + struct ehci_qh_hw *hw; 751 748 752 749 if (!qh) 753 750 return qh; ··· 895 890 896 891 /* init as live, toggle clear, advance to dummy */ 897 892 qh->qh_state = QH_STATE_IDLE; 898 - qh->hw_info1 = cpu_to_hc32(ehci, info1); 899 - qh->hw_info2 = cpu_to_hc32(ehci, info2); 893 + hw = qh->hw; 894 + hw->hw_info1 = cpu_to_hc32(ehci, info1); 895 + hw->hw_info2 = cpu_to_hc32(ehci, info2); 900 896 usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); 901 897 qh_refresh (ehci, qh); 902 898 return qh; ··· 939 933 940 934 /* splice right after start */ 941 935 qh->qh_next = head->qh_next; 942 - qh->hw_next = head->hw_next; 936 + qh->hw->hw_next = head->hw->hw_next; 943 937 wmb (); 944 938 945 939 head->qh_next.qh = qh; 946 - head->hw_next = dma; 940 + head->hw->hw_next = dma; 947 941 948 942 qh_get(qh); 949 943 qh->xacterrs = 0; ··· 990 984 991 985 /* usb_reset_device() briefly reverts to address 0 */ 992 986 if (usb_pipedevice (urb->pipe) == 0) 993 - qh->hw_info1 &= ~qh_addr_mask; 987 + qh->hw->hw_info1 &= ~qh_addr_mask; 994 988 } 995 989 996 990 /* just one way to queue requests: swap with the dummy qtd. 
··· 1175 1169 while (prev->qh_next.qh != qh) 1176 1170 prev = prev->qh_next.qh; 1177 1171 1178 - prev->hw_next = qh->hw_next; 1172 + prev->hw->hw_next = qh->hw->hw_next; 1179 1173 prev->qh_next = qh->qh_next; 1180 1174 wmb (); 1181 1175
+44 -22
drivers/usb/host/ehci-sched.c
··· 60 60 } 61 61 } 62 62 63 + static __hc32 * 64 + shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic, 65 + __hc32 tag) 66 + { 67 + switch (hc32_to_cpu(ehci, tag)) { 68 + /* our ehci_shadow.qh is actually software part */ 69 + case Q_TYPE_QH: 70 + return &periodic->qh->hw->hw_next; 71 + /* others are hw parts */ 72 + default: 73 + return periodic->hw_next; 74 + } 75 + } 76 + 63 77 /* caller must hold ehci->lock */ 64 78 static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) 65 79 { ··· 85 71 while (here.ptr && here.ptr != ptr) { 86 72 prev_p = periodic_next_shadow(ehci, prev_p, 87 73 Q_NEXT_TYPE(ehci, *hw_p)); 88 - hw_p = here.hw_next; 74 + hw_p = shadow_next_periodic(ehci, &here, 75 + Q_NEXT_TYPE(ehci, *hw_p)); 89 76 here = *prev_p; 90 77 } 91 78 /* an interrupt entry (at list end) could have been shared */ ··· 98 83 */ 99 84 *prev_p = *periodic_next_shadow(ehci, &here, 100 85 Q_NEXT_TYPE(ehci, *hw_p)); 101 - *hw_p = *here.hw_next; 86 + *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); 102 87 } 103 88 104 89 /* how many of the uframe's 125 usecs are allocated? */ ··· 108 93 __hc32 *hw_p = &ehci->periodic [frame]; 109 94 union ehci_shadow *q = &ehci->pshadow [frame]; 110 95 unsigned usecs = 0; 96 + struct ehci_qh_hw *hw; 111 97 112 98 while (q->ptr) { 113 99 switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { 114 100 case Q_TYPE_QH: 101 + hw = q->qh->hw; 115 102 /* is it in the S-mask? */ 116 - if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) 103 + if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) 117 104 usecs += q->qh->usecs; 118 105 /* ... or C-mask? 
*/ 119 - if (q->qh->hw_info2 & cpu_to_hc32(ehci, 106 + if (hw->hw_info2 & cpu_to_hc32(ehci, 120 107 1 << (8 + uframe))) 121 108 usecs += q->qh->c_usecs; 122 - hw_p = &q->qh->hw_next; 109 + hw_p = &hw->hw_next; 123 110 q = &q->qh->qh_next; 124 111 break; 125 112 // case Q_TYPE_FSTN: ··· 254 237 continue; 255 238 case Q_TYPE_QH: 256 239 if (same_tt(dev, q->qh->dev)) { 257 - uf = tt_start_uframe(ehci, q->qh->hw_info2); 240 + uf = tt_start_uframe(ehci, q->qh->hw->hw_info2); 258 241 tt_usecs[uf] += q->qh->tt_usecs; 259 242 } 260 - hw_p = &q->qh->hw_next; 243 + hw_p = &q->qh->hw->hw_next; 261 244 q = &q->qh->qh_next; 262 245 continue; 263 246 case Q_TYPE_SITD: ··· 392 375 for (; frame < ehci->periodic_size; frame += period) { 393 376 union ehci_shadow here; 394 377 __hc32 type; 378 + struct ehci_qh_hw *hw; 395 379 396 380 here = ehci->pshadow [frame]; 397 381 type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); ··· 403 385 here = here.itd->itd_next; 404 386 continue; 405 387 case Q_TYPE_QH: 388 + hw = here.qh->hw; 406 389 if (same_tt (dev, here.qh->dev)) { 407 390 u32 mask; 408 391 409 392 mask = hc32_to_cpu(ehci, 410 - here.qh->hw_info2); 393 + hw->hw_info2); 411 394 /* "knows" no gap is needed */ 412 395 mask |= mask >> 8; 413 396 if (mask & uf_mask) 414 397 break; 415 398 } 416 - type = Q_NEXT_TYPE(ehci, here.qh->hw_next); 399 + type = Q_NEXT_TYPE(ehci, hw->hw_next); 417 400 here = here.qh->qh_next; 418 401 continue; 419 402 case Q_TYPE_SITD: ··· 517 498 518 499 dev_dbg (&qh->dev->dev, 519 500 "link qh%d-%04x/%p start %d [%d/%d us]\n", 520 - period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK), 501 + period, hc32_to_cpup(ehci, &qh->hw->hw_info2) 502 + & (QH_CMASK | QH_SMASK), 521 503 qh, qh->start, qh->usecs, qh->c_usecs); 522 504 523 505 /* high bandwidth, or otherwise every microframe */ ··· 537 517 if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) 538 518 break; 539 519 prev = periodic_next_shadow(ehci, prev, type); 540 - hw_p = &here.qh->hw_next; 520 + hw_p = 
shadow_next_periodic(ehci, &here, type); 541 521 here = *prev; 542 522 } 543 523 ··· 548 528 if (qh->period > here.qh->period) 549 529 break; 550 530 prev = &here.qh->qh_next; 551 - hw_p = &here.qh->hw_next; 531 + hw_p = &here.qh->hw->hw_next; 552 532 here = *prev; 553 533 } 554 534 /* link in this qh, unless some earlier pass did that */ 555 535 if (qh != here.qh) { 556 536 qh->qh_next = here; 557 537 if (here.qh) 558 - qh->hw_next = *hw_p; 538 + qh->hw->hw_next = *hw_p; 559 539 wmb (); 560 540 prev->qh = qh; 561 541 *hw_p = QH_NEXT (ehci, qh->qh_dma); ··· 601 581 dev_dbg (&qh->dev->dev, 602 582 "unlink qh%d-%04x/%p start %d [%d/%d us]\n", 603 583 qh->period, 604 - hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK), 584 + hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), 605 585 qh, qh->start, qh->usecs, qh->c_usecs); 606 586 607 587 /* qh->qh_next still "live" to HC */ ··· 616 596 static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) 617 597 { 618 598 unsigned wait; 599 + struct ehci_qh_hw *hw = qh->hw; 619 600 620 601 qh_unlink_periodic (ehci, qh); 621 602 ··· 627 606 */ 628 607 if (list_empty (&qh->qtd_list) 629 608 || (cpu_to_hc32(ehci, QH_CMASK) 630 - & qh->hw_info2) != 0) 609 + & hw->hw_info2) != 0) 631 610 wait = 2; 632 611 else 633 612 wait = 55; /* worst case: 3 * 1024 */ 634 613 635 614 udelay (wait); 636 615 qh->qh_state = QH_STATE_IDLE; 637 - qh->hw_next = EHCI_LIST_END(ehci); 616 + hw->hw_next = EHCI_LIST_END(ehci); 638 617 wmb (); 639 618 } 640 619 ··· 760 739 unsigned uframe; 761 740 __hc32 c_mask; 762 741 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ 742 + struct ehci_qh_hw *hw = qh->hw; 763 743 764 744 qh_refresh(ehci, qh); 765 - qh->hw_next = EHCI_LIST_END(ehci); 745 + hw->hw_next = EHCI_LIST_END(ehci); 766 746 frame = qh->start; 767 747 768 748 /* reuse the previous schedule slots, if we can */ 769 749 if (frame < qh->period) { 770 - uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK); 750 + 
uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK); 771 751 status = check_intr_schedule (ehci, frame, --uframe, 772 752 qh, &c_mask); 773 753 } else { ··· 806 784 qh->start = frame; 807 785 808 786 /* reset S-frame and (maybe) C-frame masks */ 809 - qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); 810 - qh->hw_info2 |= qh->period 787 + hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); 788 + hw->hw_info2 |= qh->period 811 789 ? cpu_to_hc32(ehci, 1 << uframe) 812 790 : cpu_to_hc32(ehci, QH_SMASK); 813 - qh->hw_info2 |= c_mask; 791 + hw->hw_info2 |= c_mask; 814 792 } else 815 793 ehci_dbg (ehci, "reused qh %p schedule\n", qh); 816 794 ··· 2210 2188 case Q_TYPE_QH: 2211 2189 /* handle any completions */ 2212 2190 temp.qh = qh_get (q.qh); 2213 - type = Q_NEXT_TYPE(ehci, q.qh->hw_next); 2191 + type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next); 2214 2192 q = q.qh->qh_next; 2215 2193 modified = qh_completions (ehci, temp.qh); 2216 2194 if (unlikely (list_empty (&temp.qh->qtd_list)))
+6 -3
drivers/usb/host/ehci.h
··· 299 299 * These appear in both the async and (for interrupt) periodic schedules. 300 300 */ 301 301 302 - struct ehci_qh { 303 - /* first part defined by EHCI spec */ 302 + /* first part defined by EHCI spec */ 303 + struct ehci_qh_hw { 304 304 __hc32 hw_next; /* see EHCI 3.6.1 */ 305 305 __hc32 hw_info1; /* see EHCI 3.6.2 */ 306 306 #define QH_HEAD 0x00008000 ··· 318 318 __hc32 hw_token; 319 319 __hc32 hw_buf [5]; 320 320 __hc32 hw_buf_hi [5]; 321 + } __attribute__ ((aligned(32))); 321 322 323 + struct ehci_qh { 324 + struct ehci_qh_hw *hw; 322 325 /* the rest is HCD-private */ 323 326 dma_addr_t qh_dma; /* address of qh */ 324 327 union ehci_shadow qh_next; /* ptr to qh; or periodic */ ··· 361 358 362 359 struct usb_device *dev; /* access to TT */ 363 360 unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ 364 - } __attribute__ ((aligned (32))); 361 + }; 365 362 366 363 /*-------------------------------------------------------------------------*/ 367 364