Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

UHCI: Eliminate asynchronous skeleton Queue Headers

This patch (as856) attempts to improve the performance of uhci-hcd by
removing the asynchronous skeleton Queue Headers. They don't contain
any useful information but the controller has to read through them at
least once every millisecond, incurring a non-zero DMA overhead.

Now all the asynchronous queues are combined, along with the period-1
interrupt queue, into a single list with a single skeleton QH. The
start of the low-speed control, full-speed control, and bulk sublists
is determined by linear search. Since there should rarely be more
than a couple of QHs in the list, the searches should incur a much
smaller total load than keeping the skeleton QHs.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

Alan Stern and committed by
Greg Kroah-Hartman
17230acd 28b9325e

+255 -112
+28 -22
drivers/usb/host/uhci-debug.c
··· 220 220 return out - buf; 221 221 } 222 222 223 - static const char * const qh_names[] = { 224 - "skel_unlink_qh", "skel_iso_qh", 225 - "skel_int128_qh", "skel_int64_qh", 226 - "skel_int32_qh", "skel_int16_qh", 227 - "skel_int8_qh", "skel_int4_qh", 228 - "skel_int2_qh", "skel_int1_qh", 229 - "skel_ls_control_qh", "skel_fs_control_qh", 230 - "skel_bulk_qh", "skel_term_qh" 231 - }; 232 - 233 223 static int uhci_show_sc(int port, unsigned short status, char *buf, int len) 234 224 { 235 225 char *out = buf; ··· 342 352 struct uhci_td *td; 343 353 struct list_head *tmp, *head; 344 354 int nframes, nerrs; 355 + __le32 link; 356 + 357 + static const char * const qh_names[] = { 358 + "unlink", "iso", "int128", "int64", "int32", "int16", 359 + "int8", "int4", "int2", "async", "term" 360 + }; 345 361 346 362 out += uhci_show_root_hub_state(uhci, out, len - (out - buf)); 347 363 out += sprintf(out, "HC status\n"); ··· 370 374 nframes = 10; 371 375 nerrs = 0; 372 376 for (i = 0; i < UHCI_NUMFRAMES; ++i) { 373 - __le32 link, qh_dma; 377 + __le32 qh_dma; 374 378 375 379 j = 0; 376 380 td = uhci->frame_cpu[i]; ··· 426 430 427 431 for (i = 0; i < UHCI_NUM_SKELQH; ++i) { 428 432 int cnt = 0; 433 + __le32 fsbr_link = 0; 429 434 430 435 qh = uhci->skelqh[i]; 431 - out += sprintf(out, "- %s\n", qh_names[i]); \ 436 + out += sprintf(out, "- skel_%s_qh\n", qh_names[i]); \ 432 437 out += uhci_show_qh(qh, out, len - (out - buf), 4); 433 438 434 439 /* Last QH is the Terminating QH, it's different */ 435 - if (i == UHCI_NUM_SKELQH - 1) { 436 - if (qh->link != UHCI_PTR_TERM) 437 - out += sprintf(out, " bandwidth reclamation on!\n"); 438 - 440 + if (i == SKEL_TERM) { 439 441 if (qh_element(qh) != LINK_TO_TD(uhci->term_td)) 440 442 out += sprintf(out, " skel_term_qh element is not set to term_td!\n"); 441 - 443 + if (link == LINK_TO_QH(uhci->skel_term_qh)) 444 + goto check_qh_link; 442 445 continue; 443 446 } 444 447 445 - j = (i < 9) ? 
9 : i+1; /* Next skeleton */ 446 448 head = &qh->node; 447 449 tmp = head->next; 448 450 ··· 450 456 if (++cnt <= 10) 451 457 out += uhci_show_qh(qh, out, 452 458 len - (out - buf), 4); 459 + if (!fsbr_link && qh->skel >= SKEL_FSBR) 460 + fsbr_link = LINK_TO_QH(qh); 453 461 } 454 462 if ((cnt -= 10) > 0) 455 463 out += sprintf(out, " Skipped %d QHs\n", cnt); 456 464 457 - if (i > 1 && i < UHCI_NUM_SKELQH - 1) { 458 - if (qh->link != LINK_TO_QH(uhci->skelqh[j])) 459 - out += sprintf(out, " last QH not linked to next skeleton!\n"); 460 - } 465 + link = UHCI_PTR_TERM; 466 + if (i <= SKEL_ISO) 467 + ; 468 + else if (i < SKEL_ASYNC) 469 + link = LINK_TO_QH(uhci->skel_async_qh); 470 + else if (!uhci->fsbr_is_on) 471 + ; 472 + else if (fsbr_link) 473 + link = fsbr_link; 474 + else 475 + link = LINK_TO_QH(uhci->skel_term_qh); 476 + check_qh_link: 477 + if (qh->link != link) 478 + out += sprintf(out, " last QH not linked to next skeleton!\n"); 461 479 } 462 480 463 481 return out - buf;
+21 -31
drivers/usb/host/uhci-hcd.c
··· 13 13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface 14 14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). 15 15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) 16 - * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu 16 + * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu 17 17 * 18 18 * Intel documents this fairly well, and as far as I know there 19 19 * are no royalties or anything like that, but even so there are ··· 107 107 * interrupt QHs, which will help spread out bandwidth utilization. 108 108 * 109 109 * ffs (Find First bit Set) does exactly what we need: 110 - * 1,3,5,... => ffs = 0 => use skel_int2_qh = skelqh[8], 111 - * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc. 110 + * 1,3,5,... => ffs = 0 => use period-2 QH = skelqh[8], 111 + * 2,6,10,... => ffs = 1 => use period-4 QH = skelqh[7], etc. 112 112 * ffs >= 7 => not on any high-period queue, so use 113 - * skel_int1_qh = skelqh[9]. 113 + * period-1 QH = skelqh[9]. 114 114 * Add in UHCI_NUMFRAMES to insure at least one bit is set. 115 115 */ 116 116 skelnum = 8 - (int) __ffs(frame | UHCI_NUMFRAMES); ··· 540 540 * 541 541 * The hardware doesn't really know any difference 542 542 * in the queues, but the order does matter for the 543 - * protocols higher up. The order is: 543 + * protocols higher up. The order in which the queues 544 + * are encountered by the hardware is: 544 545 * 545 - * - any isochronous events handled before any 546 + * - All isochronous events are handled before any 546 547 * of the queues. We don't do that here, because 547 548 * we'll create the actual TD entries on demand. 548 - * - The first queue is the interrupt queue. 549 - * - The second queue is the control queue, split into low- and full-speed 550 - * - The third queue is bulk queue. 551 - * - The fourth queue is the bandwidth reclamation queue, which loops back 552 - * to the full-speed control queue. 
549 + * - The first queue is the high-period interrupt queue. 550 + * - The second queue is the period-1 interrupt and async 551 + * (low-speed control, full-speed control, then bulk) queue. 552 + * - The third queue is the terminating bandwidth reclamation queue, 553 + * which contains no members, loops back to itself, and is present 554 + * only when FSBR is on and there are no full-speed control or bulk QHs. 553 555 */ 554 556 static int uhci_start(struct usb_hcd *hcd) 555 557 { ··· 628 626 } 629 627 630 628 /* 631 - * 8 Interrupt queues; link all higher int queues to int1, 632 - * then link int1 to control and control to bulk 629 + * 8 Interrupt queues; link all higher int queues to int1 = async 633 630 */ 634 - uhci->skel_int128_qh->link = 635 - uhci->skel_int64_qh->link = 636 - uhci->skel_int32_qh->link = 637 - uhci->skel_int16_qh->link = 638 - uhci->skel_int8_qh->link = 639 - uhci->skel_int4_qh->link = 640 - uhci->skel_int2_qh->link = LINK_TO_QH( 641 - uhci->skel_int1_qh); 642 - 643 - uhci->skel_int1_qh->link = LINK_TO_QH(uhci->skel_ls_control_qh); 644 - uhci->skel_ls_control_qh->link = LINK_TO_QH(uhci->skel_fs_control_qh); 645 - uhci->skel_fs_control_qh->link = LINK_TO_QH(uhci->skel_bulk_qh); 646 - uhci->skel_bulk_qh->link = LINK_TO_QH(uhci->skel_term_qh); 631 + for (i = SKEL_ISO + 1; i < SKEL_ASYNC; ++i) 632 + uhci->skelqh[i]->link = LINK_TO_QH(uhci->skel_async_qh); 633 + uhci->skel_async_qh->link = uhci->skel_term_qh->link = UHCI_PTR_TERM; 647 634 648 635 /* This dummy TD is to work around a bug in Intel PIIX controllers */ 649 636 uhci_fill_td(uhci->term_td, 0, uhci_explen(0) | 650 - (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); 651 - uhci->term_td->link = LINK_TO_TD(uhci->term_td); 652 - 653 - uhci->skel_term_qh->link = UHCI_PTR_TERM; 654 - uhci->skel_term_qh->element = LINK_TO_TD(uhci->term_td); 637 + (0x7f << TD_TOKEN_DEVADDR_SHIFT) | USB_PID_IN, 0); 638 + uhci->term_td->link = UHCI_PTR_TERM; 639 + uhci->skel_async_qh->element = 
uhci->skel_term_qh->element = 640 + LINK_TO_TD(uhci->term_td); 655 641 656 642 /* 657 643 * Fill the frame list: make all entries point to the proper
+35 -37
drivers/usb/host/uhci-hcd.h
··· 135 135 struct usb_host_endpoint *hep; /* Endpoint information */ 136 136 struct usb_device *udev; 137 137 struct list_head queue; /* Queue of urbps for this QH */ 138 - struct uhci_qh *skel; /* Skeleton for this QH */ 139 138 struct uhci_td *dummy_td; /* Dummy TD to end the queue */ 140 139 struct uhci_td *post_td; /* Last TD completed */ 141 140 ··· 150 151 151 152 int state; /* QH_STATE_xxx; see above */ 152 153 int type; /* Queue type (control, bulk, etc) */ 154 + int skel; /* Skeleton queue number */ 153 155 154 156 unsigned int initial_toggle:1; /* Endpoint's current toggle value */ 155 157 unsigned int needs_fixup:1; /* Must fix the TD toggle values */ ··· 276 276 /* 277 277 * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for 278 278 * automatic queuing. To make it easy to insert entries into the schedule, 279 - * we have a skeleton of QHs for each predefined Interrupt latency, 280 - * low-speed control, full-speed control, bulk, and terminating QH 281 - * (see explanation for the terminating QH below). 279 + * we have a skeleton of QHs for each predefined Interrupt latency. 280 + * Asynchronous QHs (low-speed control, full-speed control, and bulk) 281 + * go onto the period-1 interrupt list, since they all get accessed on 282 + * every frame. 282 283 * 283 - * When we want to add a new QH, we add it to the end of the list for the 284 - * skeleton QH. For instance, the schedule list can look like this: 284 + * When we want to add a new QH, we add it to the list starting from the 285 + * appropriate skeleton QH. For instance, the schedule can look like this: 285 286 * 286 287 * skel int128 QH 287 288 * dev 1 interrupt QH ··· 290 289 * skel int64 QH 291 290 * skel int32 QH 292 291 * ... 
293 - * skel int1 QH 294 - * skel low-speed control QH 295 - * dev 5 control QH 296 - * skel full-speed control QH 297 - * skel bulk QH 292 + * skel int1 + async QH 293 + * dev 5 low-speed control QH 298 294 * dev 1 bulk QH 299 295 * dev 2 bulk QH 300 - * skel terminating QH 301 296 * 302 - * The terminating QH is used for 2 reasons: 303 - * - To place a terminating TD which is used to workaround a PIIX bug 304 - * (see Intel errata for explanation), and 305 - * - To loop back to the full-speed control queue for full-speed bandwidth 306 - * reclamation. 297 + * There is a special terminating QH used to keep full-speed bandwidth 298 + * reclamation active when no full-speed control or bulk QHs are linked 299 + * into the schedule. It has an inactive TD (to work around a PIIX bug, 300 + * see the Intel errata) and it points back to itself. 307 301 * 308 - * There's a special skeleton QH for Isochronous QHs. It never appears 309 - * on the schedule, and Isochronous TDs go on the schedule before the 302 + * There's a special skeleton QH for Isochronous QHs which never appears 303 + * on the schedule. Isochronous TDs go on the schedule before the 310 304 * the skeleton QHs. The hardware accesses them directly rather than 311 305 * through their QH, which is used only for bookkeeping purposes. 312 306 * While the UHCI spec doesn't forbid the use of QHs for Isochronous, 313 307 * it doesn't use them either. And the spec says that queues never 314 308 * advance on an error completion status, which makes them totally 315 309 * unsuitable for Isochronous transfers. 310 + * 311 + * There's also a special skeleton QH used for QHs which are in the process 312 + * of unlinking and so may still be in use by the hardware. It too never 313 + * appears on the schedule. 
316 314 */ 317 315 318 - #define UHCI_NUM_SKELQH 14 319 - #define skel_unlink_qh skelqh[0] 320 - #define skel_iso_qh skelqh[1] 321 - #define skel_int128_qh skelqh[2] 322 - #define skel_int64_qh skelqh[3] 323 - #define skel_int32_qh skelqh[4] 324 - #define skel_int16_qh skelqh[5] 325 - #define skel_int8_qh skelqh[6] 326 - #define skel_int4_qh skelqh[7] 327 - #define skel_int2_qh skelqh[8] 328 - #define skel_int1_qh skelqh[9] 329 - #define skel_ls_control_qh skelqh[10] 330 - #define skel_fs_control_qh skelqh[11] 331 - #define skel_bulk_qh skelqh[12] 332 - #define skel_term_qh skelqh[13] 316 + #define UHCI_NUM_SKELQH 11 317 + #define SKEL_UNLINK 0 318 + #define skel_unlink_qh skelqh[SKEL_UNLINK] 319 + #define SKEL_ISO 1 320 + #define skel_iso_qh skelqh[SKEL_ISO] 321 + /* int128, int64, ..., int1 = 2, 3, ..., 9 */ 322 + #define SKEL_INDEX(exponent) (9 - exponent) 323 + #define SKEL_ASYNC 9 324 + #define skel_async_qh skelqh[SKEL_ASYNC] 325 + #define SKEL_TERM 10 326 + #define skel_term_qh skelqh[SKEL_TERM] 333 327 334 - /* Find the skelqh entry corresponding to an interval exponent */ 335 - #define UHCI_SKEL_INDEX(exponent) (9 - exponent) 336 - 328 + /* The following entries refer to sublists of skel_async_qh */ 329 + #define SKEL_LS_CONTROL 20 330 + #define SKEL_FS_CONTROL 21 331 + #define SKEL_FSBR SKEL_FS_CONTROL 332 + #define SKEL_BULK 22 337 333 338 334 /* 339 335 * The UHCI controller and root hub
+171 -22
drivers/usb/host/uhci-q.c
··· 13 13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface 14 14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com). 15 15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c) 16 - * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu 16 + * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu 17 17 */ 18 18 19 19 ··· 45 45 */ 46 46 static void uhci_fsbr_on(struct uhci_hcd *uhci) 47 47 { 48 + struct uhci_qh *fsbr_qh, *lqh, *tqh; 49 + 48 50 uhci->fsbr_is_on = 1; 49 - uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_fs_control_qh); 51 + lqh = list_entry(uhci->skel_async_qh->node.prev, 52 + struct uhci_qh, node); 53 + 54 + /* Find the first FSBR QH. Linear search through the list is 55 + * acceptable because normally FSBR gets turned on as soon as 56 + * one QH needs it. */ 57 + fsbr_qh = NULL; 58 + list_for_each_entry_reverse(tqh, &uhci->skel_async_qh->node, node) { 59 + if (tqh->skel < SKEL_FSBR) 60 + break; 61 + fsbr_qh = tqh; 62 + } 63 + 64 + /* No FSBR QH means we must insert the terminating skeleton QH */ 65 + if (!fsbr_qh) { 66 + uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh); 67 + wmb(); 68 + lqh->link = uhci->skel_term_qh->link; 69 + 70 + /* Otherwise loop the last QH to the first FSBR QH */ 71 + } else 72 + lqh->link = LINK_TO_QH(fsbr_qh); 50 73 } 51 74 52 75 static void uhci_fsbr_off(struct uhci_hcd *uhci) 53 76 { 77 + struct uhci_qh *lqh; 78 + 54 79 uhci->fsbr_is_on = 0; 55 - uhci->skel_term_qh->link = UHCI_PTR_TERM; 80 + lqh = list_entry(uhci->skel_async_qh->node.prev, 81 + struct uhci_qh, node); 82 + 83 + /* End the async list normally and unlink the terminating QH */ 84 + lqh->link = uhci->skel_term_qh->link = UHCI_PTR_TERM; 56 85 } 57 86 58 87 static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) ··· 433 404 } 434 405 435 406 /* 407 + * Link an Isochronous QH into its skeleton's list 408 + */ 409 + static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh) 410 
+ { 411 + list_add_tail(&qh->node, &uhci->skel_iso_qh->node); 412 + 413 + /* Isochronous QHs aren't linked by the hardware */ 414 + } 415 + 416 + /* 417 + * Link a high-period interrupt QH into the schedule at the end of its 418 + * skeleton's list 419 + */ 420 + static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) 421 + { 422 + struct uhci_qh *pqh; 423 + 424 + list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node); 425 + 426 + pqh = list_entry(qh->node.prev, struct uhci_qh, node); 427 + qh->link = pqh->link; 428 + wmb(); 429 + pqh->link = LINK_TO_QH(qh); 430 + } 431 + 432 + /* 433 + * Link a period-1 interrupt or async QH into the schedule at the 434 + * correct spot in the async skeleton's list, and update the FSBR link 435 + */ 436 + static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh) 437 + { 438 + struct uhci_qh *pqh, *lqh; 439 + __le32 link_to_new_qh; 440 + __le32 *extra_link = &link_to_new_qh; 441 + 442 + /* Find the predecessor QH for our new one and insert it in the list. 443 + * The list of QHs is expected to be short, so linear search won't 444 + * take too long. */ 445 + list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) { 446 + if (pqh->skel <= qh->skel) 447 + break; 448 + } 449 + list_add(&qh->node, &pqh->node); 450 + qh->link = pqh->link; 451 + 452 + link_to_new_qh = LINK_TO_QH(qh); 453 + 454 + /* If this is now the first FSBR QH, take special action */ 455 + if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR && 456 + qh->skel >= SKEL_FSBR) { 457 + lqh = list_entry(uhci->skel_async_qh->node.prev, 458 + struct uhci_qh, node); 459 + 460 + /* If the new QH is also the last one, we must unlink 461 + * the terminating skeleton QH and make the new QH point 462 + * back to itself. 
*/ 463 + if (qh == lqh) { 464 + qh->link = link_to_new_qh; 465 + extra_link = &uhci->skel_term_qh->link; 466 + 467 + /* Otherwise the last QH must point to the new QH */ 468 + } else 469 + extra_link = &lqh->link; 470 + } 471 + 472 + /* Link it into the schedule */ 473 + wmb(); 474 + *extra_link = pqh->link = link_to_new_qh; 475 + } 476 + 477 + /* 436 478 * Put a QH on the schedule in both hardware and software 437 479 */ 438 480 static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) 439 481 { 440 - struct uhci_qh *pqh; 441 - 442 482 WARN_ON(list_empty(&qh->queue)); 443 483 444 484 /* Set the element pointer if it isn't set already. ··· 529 431 return; 530 432 qh->state = QH_STATE_ACTIVE; 531 433 532 - /* Move the QH from its old list to the end of the appropriate 434 + /* Move the QH from its old list to the correct spot in the appropriate 533 435 * skeleton's list */ 534 436 if (qh == uhci->next_qh) 535 437 uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, 536 438 node); 537 - list_move_tail(&qh->node, &qh->skel->node); 439 + list_del(&qh->node); 538 440 539 - /* Link it into the schedule */ 441 + if (qh->skel == SKEL_ISO) 442 + link_iso(uhci, qh); 443 + else if (qh->skel < SKEL_ASYNC) 444 + link_interrupt(uhci, qh); 445 + else 446 + link_async(uhci, qh); 447 + } 448 + 449 + /* 450 + * Unlink a high-period interrupt QH from the schedule 451 + */ 452 + static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) 453 + { 454 + struct uhci_qh *pqh; 455 + 540 456 pqh = list_entry(qh->node.prev, struct uhci_qh, node); 541 - qh->link = pqh->link; 542 - wmb(); 543 - pqh->link = LINK_TO_QH(qh); 457 + pqh->link = qh->link; 458 + mb(); 459 + } 460 + 461 + /* 462 + * Unlink a period-1 interrupt or async QH from the schedule 463 + */ 464 + static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh) 465 + { 466 + struct uhci_qh *pqh, *lqh; 467 + __le32 link_to_next_qh = qh->link; 468 + 469 + pqh = list_entry(qh->node.prev, struct 
uhci_qh, node); 470 + 471 + /* If this is the first FSBR QH, take special action */ 472 + if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR && 473 + qh->skel >= SKEL_FSBR) { 474 + lqh = list_entry(uhci->skel_async_qh->node.prev, 475 + struct uhci_qh, node); 476 + 477 + /* If this QH is also the last one, we must link in 478 + * the terminating skeleton QH. */ 479 + if (qh == lqh) { 480 + link_to_next_qh = LINK_TO_QH(uhci->skel_term_qh); 481 + uhci->skel_term_qh->link = link_to_next_qh; 482 + wmb(); 483 + qh->link = link_to_next_qh; 484 + 485 + /* Otherwise the last QH must point to the new first FSBR QH */ 486 + } else 487 + lqh->link = link_to_next_qh; 488 + } 489 + 490 + pqh->link = link_to_next_qh; 491 + mb(); 544 492 } 545 493 546 494 /*
*/ 943 797 if (urb->dev->speed == USB_SPEED_LOW || 944 798 urb->dev->state != USB_STATE_CONFIGURED) 945 - qh->skel = uhci->skel_ls_control_qh; 799 + skel = SKEL_LS_CONTROL; 946 800 else { 947 - qh->skel = uhci->skel_fs_control_qh; 801 + skel = SKEL_FS_CONTROL; 948 802 uhci_add_fsbr(uhci, urb); 949 803 } 804 + if (qh->state != QH_STATE_ACTIVE) 805 + qh->skel = skel; 950 806 951 807 urb->actual_length = -8; /* Account for the SETUP packet */ 952 808 return 0; ··· 1078 930 return -ENOMEM; 1079 931 } 1080 932 1081 - static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, 933 + static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, 1082 934 struct uhci_qh *qh) 1083 935 { 1084 936 int ret; ··· 1087 939 if (urb->dev->speed == USB_SPEED_LOW) 1088 940 return -EINVAL; 1089 941 1090 - qh->skel = uhci->skel_bulk_qh; 942 + if (qh->state != QH_STATE_ACTIVE) 943 + qh->skel = SKEL_BULK; 1091 944 ret = uhci_submit_common(uhci, urb, qh); 1092 945 if (ret == 0) 1093 946 uhci_add_fsbr(uhci, urb); ··· 1116 967 if (exponent < 0) 1117 968 return -EINVAL; 1118 969 qh->period = 1 << exponent; 1119 - qh->skel = uhci->skelqh[UHCI_SKEL_INDEX(exponent)]; 970 + qh->skel = SKEL_INDEX(exponent); 1120 971 1121 972 /* For now, interrupt phase is fixed by the layout 1122 973 * of the QH lists. */ ··· 1364 1215 qh->iso_status = 0; 1365 1216 } 1366 1217 1367 - qh->skel = uhci->skel_iso_qh; 1218 + qh->skel = SKEL_ISO; 1368 1219 if (!qh->bandwidth_reserved) 1369 1220 uhci_reserve_bandwidth(uhci, qh); 1370 1221 return 0;