Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

USB: xhci: Support for 64-byte contexts

Adds support for controllers that use 64-byte contexts. The following context
data structures are affected by this: Device, Input, Input Control, Endpoint,
and Slot. To accommodate the use of either 32 or 64-byte contexts, a Device or
Input context can only be accessed through functions which look up and return
pointers to their contained contexts.

Signed-off-by: John Youn <johnyoun@synopsys.com>
Acked-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

authored by

John Youn and committed by
Greg Kroah-Hartman
d115b048 28c2d2ef

+287 -163
+80 -45
drivers/usb/host/xhci-dbg.c
··· 393 393 upper_32_bits(val)); 394 394 } 395 395 396 - dma_addr_t xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_slot_ctx *slot, dma_addr_t dma) 396 + /* Print the last 32 bytes for 64-byte contexts */ 397 + static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) 398 + { 399 + int i; 400 + for (i = 0; i < 4; ++i) { 401 + xhci_dbg(xhci, "@%p (virt) @%08llx " 402 + "(dma) %#08llx - rsvd64[%d]\n", 403 + &ctx[4 + i], (unsigned long long)dma, 404 + ctx[4 + i], i); 405 + dma += 8; 406 + } 407 + } 408 + 409 + void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) 397 410 { 398 411 /* Fields are 32 bits wide, DMA addresses are in bytes */ 399 412 int field_size = 32 / 8; 400 413 int i; 401 414 415 + struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); 416 + dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx); 417 + int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); 418 + 402 419 xhci_dbg(xhci, "Slot Context:\n"); 403 420 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info\n", 404 - &slot->dev_info, 405 - (unsigned long long)dma, slot->dev_info); 421 + &slot_ctx->dev_info, 422 + (unsigned long long)dma, slot_ctx->dev_info); 406 423 dma += field_size; 407 424 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_info2\n", 408 - &slot->dev_info2, 409 - (unsigned long long)dma, slot->dev_info2); 425 + &slot_ctx->dev_info2, 426 + (unsigned long long)dma, slot_ctx->dev_info2); 410 427 dma += field_size; 411 428 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tt_info\n", 412 - &slot->tt_info, 413 - (unsigned long long)dma, slot->tt_info); 429 + &slot_ctx->tt_info, 430 + (unsigned long long)dma, slot_ctx->tt_info); 414 431 dma += field_size; 415 432 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - dev_state\n", 416 - &slot->dev_state, 417 - (unsigned long long)dma, slot->dev_state); 433 + &slot_ctx->dev_state, 434 + (unsigned long long)dma, slot_ctx->dev_state); 418 435 dma += field_size; 419 436 
for (i = 0; i < 4; ++i) { 420 437 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 421 - &slot->reserved[i], (unsigned long long)dma, 422 - slot->reserved[i], i); 438 + &slot_ctx->reserved[i], (unsigned long long)dma, 439 + slot_ctx->reserved[i], i); 423 440 dma += field_size; 424 441 } 425 442 426 - return dma; 443 + if (csz) 444 + dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); 427 445 } 428 446 429 - dma_addr_t xhci_dbg_ep_ctx(struct xhci_hcd *xhci, struct xhci_ep_ctx *ep, dma_addr_t dma, unsigned int last_ep) 447 + void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, 448 + struct xhci_container_ctx *ctx, 449 + unsigned int last_ep) 430 450 { 431 451 int i, j; 432 452 int last_ep_ctx = 31; 433 453 /* Fields are 32 bits wide, DMA addresses are in bytes */ 434 454 int field_size = 32 / 8; 455 + int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); 435 456 436 457 if (last_ep < 31) 437 458 last_ep_ctx = last_ep + 1; 438 459 for (i = 0; i < last_ep_ctx; ++i) { 460 + struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i); 461 + dma_addr_t dma = ctx->dma + 462 + ((unsigned long)ep_ctx - (unsigned long)ctx); 463 + 439 464 xhci_dbg(xhci, "Endpoint %02d Context:\n", i); 440 465 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n", 441 - &ep[i].ep_info, 442 - (unsigned long long)dma, ep[i].ep_info); 466 + &ep_ctx->ep_info, 467 + (unsigned long long)dma, ep_ctx->ep_info); 443 468 dma += field_size; 444 469 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info2\n", 445 - &ep[i].ep_info2, 446 - (unsigned long long)dma, ep[i].ep_info2); 470 + &ep_ctx->ep_info2, 471 + (unsigned long long)dma, ep_ctx->ep_info2); 447 472 dma += field_size; 448 473 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08llx - deq\n", 449 - &ep[i].deq, 450 - (unsigned long long)dma, ep[i].deq); 474 + &ep_ctx->deq, 475 + (unsigned long long)dma, ep_ctx->deq); 451 476 dma += 2*field_size; 452 477 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - tx_info\n", 453 - &ep[i].tx_info, 454 - (unsigned long 
long)dma, ep[i].tx_info); 478 + &ep_ctx->tx_info, 479 + (unsigned long long)dma, ep_ctx->tx_info); 455 480 dma += field_size; 456 481 for (j = 0; j < 3; ++j) { 457 482 xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 458 - &ep[i].reserved[j], 483 + &ep_ctx->reserved[j], 459 484 (unsigned long long)dma, 460 - ep[i].reserved[j], j); 485 + ep_ctx->reserved[j], j); 461 486 dma += field_size; 462 487 } 488 + 489 + if (csz) 490 + dbg_rsvd64(xhci, (u64 *)ep_ctx, dma); 463 491 } 464 - return dma; 465 492 } 466 493 467 - void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep) 494 + void xhci_dbg_ctx(struct xhci_hcd *xhci, 495 + struct xhci_container_ctx *ctx, 496 + unsigned int last_ep) 468 497 { 469 498 int i; 470 499 /* Fields are 32 bits wide, DMA addresses are in bytes */ 471 500 int field_size = 32 / 8; 501 + struct xhci_slot_ctx *slot_ctx; 502 + dma_addr_t dma = ctx->dma; 503 + int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params); 472 504 473 - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", 474 - &ctx->drop_flags, (unsigned long long)dma, 475 - ctx->drop_flags); 476 - dma += field_size; 477 - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - add flags\n", 478 - &ctx->add_flags, (unsigned long long)dma, 479 - ctx->add_flags); 480 - dma += field_size; 481 - for (i = 0; i < 6; ++i) { 482 - xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd[%d]\n", 483 - &ctx->rsvd[i], (unsigned long long)dma, 484 - ctx->rsvd[i], i); 505 + if (ctx->type == XHCI_CTX_TYPE_INPUT) { 506 + struct xhci_input_control_ctx *ctrl_ctx = 507 + xhci_get_input_control_ctx(xhci, ctx); 508 + xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - drop flags\n", 509 + &ctrl_ctx->drop_flags, (unsigned long long)dma, 510 + ctrl_ctx->drop_flags); 485 511 dma += field_size; 486 - } 487 - dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma); 488 - dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep); 489 - } 512 + xhci_dbg(xhci, "@%p (virt) 
@%08llx (dma) %#08x - add flags\n", 513 + &ctrl_ctx->add_flags, (unsigned long long)dma, 514 + ctrl_ctx->add_flags); 515 + dma += field_size; 516 + for (i = 0; i < 6; ++i) { 517 + xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - rsvd2[%d]\n", 518 + &ctrl_ctx->rsvd2[i], (unsigned long long)dma, 519 + ctrl_ctx->rsvd2[i], i); 520 + dma += field_size; 521 + } 490 522 491 - void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep) 492 - { 493 - dma = xhci_dbg_slot_ctx(xhci, &ctx->slot, dma); 494 - dma = xhci_dbg_ep_ctx(xhci, ctx->ep, dma, last_ep); 523 + if (csz) 524 + dbg_rsvd64(xhci, (u64 *)ctrl_ctx, dma); 525 + } 526 + 527 + slot_ctx = xhci_get_slot_ctx(xhci, ctx); 528 + xhci_dbg_slot_ctx(xhci, ctx); 529 + xhci_dbg_ep_ctx(xhci, ctx, last_ep); 495 530 }
+71 -50
drivers/usb/host/xhci-hcd.c
··· 722 722 struct usb_host_endpoint *ep) 723 723 { 724 724 struct xhci_hcd *xhci; 725 - struct xhci_device_control *in_ctx; 725 + struct xhci_container_ctx *in_ctx, *out_ctx; 726 + struct xhci_input_control_ctx *ctrl_ctx; 727 + struct xhci_slot_ctx *slot_ctx; 726 728 unsigned int last_ctx; 727 729 unsigned int ep_index; 728 730 struct xhci_ep_ctx *ep_ctx; ··· 752 750 } 753 751 754 752 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 753 + out_ctx = xhci->devs[udev->slot_id]->out_ctx; 754 + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 755 755 ep_index = xhci_get_endpoint_index(&ep->desc); 756 - ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; 756 + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 757 757 /* If the HC already knows the endpoint is disabled, 758 758 * or the HCD has noted it is disabled, ignore this request 759 759 */ 760 760 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED || 761 - in_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { 761 + ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) { 762 762 xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", 763 763 __func__, ep); 764 764 return 0; 765 765 } 766 766 767 - in_ctx->drop_flags |= drop_flag; 768 - new_drop_flags = in_ctx->drop_flags; 767 + ctrl_ctx->drop_flags |= drop_flag; 768 + new_drop_flags = ctrl_ctx->drop_flags; 769 769 770 - in_ctx->add_flags = ~drop_flag; 771 - new_add_flags = in_ctx->add_flags; 770 + ctrl_ctx->add_flags = ~drop_flag; 771 + new_add_flags = ctrl_ctx->add_flags; 772 772 773 - last_ctx = xhci_last_valid_endpoint(in_ctx->add_flags); 773 + last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags); 774 + slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 774 775 /* Update the last valid endpoint context, if we deleted the last one */ 775 - if ((in_ctx->slot.dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) { 776 - in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 777 - in_ctx->slot.dev_info |= LAST_CTX(last_ctx); 776 + if ((slot_ctx->dev_info & 
LAST_CTX_MASK) > LAST_CTX(last_ctx)) { 777 + slot_ctx->dev_info &= ~LAST_CTX_MASK; 778 + slot_ctx->dev_info |= LAST_CTX(last_ctx); 778 779 } 779 - new_slot_info = in_ctx->slot.dev_info; 780 + new_slot_info = slot_ctx->dev_info; 780 781 781 782 xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); 782 783 ··· 809 804 struct usb_host_endpoint *ep) 810 805 { 811 806 struct xhci_hcd *xhci; 812 - struct xhci_device_control *in_ctx; 807 + struct xhci_container_ctx *in_ctx, *out_ctx; 813 808 unsigned int ep_index; 814 809 struct xhci_ep_ctx *ep_ctx; 810 + struct xhci_slot_ctx *slot_ctx; 811 + struct xhci_input_control_ctx *ctrl_ctx; 815 812 u32 added_ctxs; 816 813 unsigned int last_ctx; 817 814 u32 new_add_flags, new_drop_flags, new_slot_info; ··· 846 839 } 847 840 848 841 in_ctx = xhci->devs[udev->slot_id]->in_ctx; 842 + out_ctx = xhci->devs[udev->slot_id]->out_ctx; 843 + ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx); 849 844 ep_index = xhci_get_endpoint_index(&ep->desc); 850 - ep_ctx = &xhci->devs[udev->slot_id]->out_ctx->ep[ep_index]; 845 + ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); 851 846 /* If the HCD has already noted the endpoint is enabled, 852 847 * ignore this request. 853 848 */ 854 - if (in_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { 849 + if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) { 855 850 xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", 856 851 __func__, ep); 857 852 return 0; ··· 871 862 return -ENOMEM; 872 863 } 873 864 874 - in_ctx->add_flags |= added_ctxs; 875 - new_add_flags = in_ctx->add_flags; 865 + ctrl_ctx->add_flags |= added_ctxs; 866 + new_add_flags = ctrl_ctx->add_flags; 876 867 877 868 /* If xhci_endpoint_disable() was called for this endpoint, but the 878 869 * xHC hasn't been notified yet through the check_bandwidth() call, ··· 880 871 * descriptors. We must drop and re-add this endpoint, so we leave the 881 872 * drop flags alone. 
882 873 */ 883 - new_drop_flags = in_ctx->drop_flags; 874 + new_drop_flags = ctrl_ctx->drop_flags; 884 875 876 + slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); 885 877 /* Update the last valid endpoint context, if we just added one past */ 886 - if ((in_ctx->slot.dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { 887 - in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 888 - in_ctx->slot.dev_info |= LAST_CTX(last_ctx); 878 + if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) { 879 + slot_ctx->dev_info &= ~LAST_CTX_MASK; 880 + slot_ctx->dev_info |= LAST_CTX(last_ctx); 889 881 } 890 - new_slot_info = in_ctx->slot.dev_info; 882 + new_slot_info = slot_ctx->dev_info; 891 883 892 884 /* Store the usb_device pointer for later use */ 893 885 ep->hcpriv = udev; ··· 902 892 return 0; 903 893 } 904 894 905 - static void xhci_zero_in_ctx(struct xhci_virt_device *virt_dev) 895 + static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev) 906 896 { 897 + struct xhci_input_control_ctx *ctrl_ctx; 907 898 struct xhci_ep_ctx *ep_ctx; 899 + struct xhci_slot_ctx *slot_ctx; 908 900 int i; 909 901 910 902 /* When a device's add flag and drop flag are zero, any subsequent ··· 914 902 * untouched. Make sure we don't leave any old state in the input 915 903 * endpoint contexts. 
916 904 */ 917 - virt_dev->in_ctx->drop_flags = 0; 918 - virt_dev->in_ctx->add_flags = 0; 919 - virt_dev->in_ctx->slot.dev_info &= ~LAST_CTX_MASK; 905 + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 906 + ctrl_ctx->drop_flags = 0; 907 + ctrl_ctx->add_flags = 0; 908 + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 909 + slot_ctx->dev_info &= ~LAST_CTX_MASK; 920 910 /* Endpoint 0 is always valid */ 921 - virt_dev->in_ctx->slot.dev_info |= LAST_CTX(1); 911 + slot_ctx->dev_info |= LAST_CTX(1); 922 912 for (i = 1; i < 31; ++i) { 923 - ep_ctx = &virt_dev->in_ctx->ep[i]; 913 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); 924 914 ep_ctx->ep_info = 0; 925 915 ep_ctx->ep_info2 = 0; 926 916 ep_ctx->deq = 0; ··· 948 934 unsigned long flags; 949 935 struct xhci_hcd *xhci; 950 936 struct xhci_virt_device *virt_dev; 937 + struct xhci_input_control_ctx *ctrl_ctx; 938 + struct xhci_slot_ctx *slot_ctx; 951 939 952 940 ret = xhci_check_args(hcd, udev, NULL, 0, __func__); 953 941 if (ret <= 0) ··· 965 949 virt_dev = xhci->devs[udev->slot_id]; 966 950 967 951 /* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */ 968 - virt_dev->in_ctx->add_flags |= SLOT_FLAG; 969 - virt_dev->in_ctx->add_flags &= ~EP0_FLAG; 970 - virt_dev->in_ctx->drop_flags &= ~SLOT_FLAG; 971 - virt_dev->in_ctx->drop_flags &= ~EP0_FLAG; 952 + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 953 + ctrl_ctx->add_flags |= SLOT_FLAG; 954 + ctrl_ctx->add_flags &= ~EP0_FLAG; 955 + ctrl_ctx->drop_flags &= ~SLOT_FLAG; 956 + ctrl_ctx->drop_flags &= ~EP0_FLAG; 972 957 xhci_dbg(xhci, "New Input Control Context:\n"); 973 - xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 974 - LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); 958 + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); 959 + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 960 + LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); 975 961 976 962 spin_lock_irqsave(&xhci->lock, flags); 977 - ret = xhci_queue_configure_endpoint(xhci, 
virt_dev->in_ctx_dma, 963 + ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma, 978 964 udev->slot_id); 979 965 if (ret < 0) { 980 966 spin_unlock_irqrestore(&xhci->lock, flags); ··· 1031 1013 } 1032 1014 1033 1015 xhci_dbg(xhci, "Output context after successful config ep cmd:\n"); 1034 - xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 1035 - LAST_CTX_TO_EP_NUM(virt_dev->in_ctx->slot.dev_info)); 1016 + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1017 + LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); 1036 1018 1037 - xhci_zero_in_ctx(virt_dev); 1019 + xhci_zero_in_ctx(xhci, virt_dev); 1038 1020 /* Free any old rings */ 1039 1021 for (i = 1; i < 31; ++i) { 1040 1022 if (virt_dev->new_ep_rings[i]) { ··· 1072 1054 virt_dev->new_ep_rings[i] = NULL; 1073 1055 } 1074 1056 } 1075 - xhci_zero_in_ctx(virt_dev); 1057 + xhci_zero_in_ctx(xhci, virt_dev); 1076 1058 } 1077 1059 1078 1060 /* Deal with stalled endpoints. The core should have sent the control message ··· 1205 1187 struct xhci_virt_device *virt_dev; 1206 1188 int ret = 0; 1207 1189 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1190 + struct xhci_slot_ctx *slot_ctx; 1191 + struct xhci_input_control_ctx *ctrl_ctx; 1208 1192 u64 temp_64; 1209 1193 1210 1194 if (!udev->slot_id) { ··· 1221 1201 xhci_setup_addressable_virt_dev(xhci, udev); 1222 1202 /* Otherwise, assume the core has the device configured how it wants */ 1223 1203 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 1224 - xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); 1204 + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 1225 1205 1226 1206 spin_lock_irqsave(&xhci->lock, flags); 1227 - ret = xhci_queue_address_device(xhci, virt_dev->in_ctx_dma, 1228 - udev->slot_id); 1207 + ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, 1208 + udev->slot_id); 1229 1209 if (ret) { 1230 1210 spin_unlock_irqrestore(&xhci->lock, flags); 1231 1211 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); ··· 1266 1246 
xhci_err(xhci, "ERROR: unexpected command completion " 1267 1247 "code 0x%x.\n", virt_dev->cmd_status); 1268 1248 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 1269 - xhci_dbg_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); 1249 + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 1270 1250 ret = -EINVAL; 1271 1251 break; 1272 1252 } ··· 1281 1261 (unsigned long long) 1282 1262 xhci->dcbaa->dev_context_ptrs[udev->slot_id]); 1283 1263 xhci_dbg(xhci, "Output Context DMA address = %#08llx\n", 1284 - (unsigned long long)virt_dev->out_ctx_dma); 1264 + (unsigned long long)virt_dev->out_ctx->dma); 1285 1265 xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id); 1286 - xhci_dbg_ctx(xhci, virt_dev->in_ctx, virt_dev->in_ctx_dma, 2); 1266 + xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); 1287 1267 xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id); 1288 - xhci_dbg_device_ctx(xhci, virt_dev->out_ctx, virt_dev->out_ctx_dma, 2); 1268 + xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2); 1289 1269 /* 1290 1270 * USB core uses address 1 for the roothubs, so we add one to the 1291 1271 * address given back to us by the HC. 1292 1272 */ 1293 - udev->devnum = (virt_dev->out_ctx->slot.dev_state & DEV_ADDR_MASK) + 1; 1273 + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); 1274 + udev->devnum = (slot_ctx->dev_state & DEV_ADDR_MASK) + 1; 1294 1275 /* Zero the input context control for later use */ 1295 - virt_dev->in_ctx->add_flags = 0; 1296 - virt_dev->in_ctx->drop_flags = 0; 1276 + ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx); 1277 + ctrl_ctx->add_flags = 0; 1278 + ctrl_ctx->drop_flags = 0; 1297 1279 1298 1280 xhci_dbg(xhci, "Device address = %d\n", udev->devnum); 1299 1281 /* XXX Meh, not sure if anyone else but choose_address uses this. 
*/ ··· 1337 1315 /* xhci_device_control has eight fields, and also 1338 1316 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx 1339 1317 */ 1340 - BUILD_BUG_ON(sizeof(struct xhci_device_control) != (8+8+8*31)*32/8); 1341 1318 BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8); 1342 1319 BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8); 1343 1320 BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
+87 -34
drivers/usb/host/xhci-mem.c
··· 189 189 return 0; 190 190 } 191 191 192 + #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 193 + 194 + struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 195 + int type, gfp_t flags) 196 + { 197 + struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); 198 + if (!ctx) 199 + return NULL; 200 + 201 + BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); 202 + ctx->type = type; 203 + ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; 204 + if (type == XHCI_CTX_TYPE_INPUT) 205 + ctx->size += CTX_SIZE(xhci->hcc_params); 206 + 207 + ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); 208 + memset(ctx->bytes, 0, ctx->size); 209 + return ctx; 210 + } 211 + 212 + void xhci_free_container_ctx(struct xhci_hcd *xhci, 213 + struct xhci_container_ctx *ctx) 214 + { 215 + dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); 216 + kfree(ctx); 217 + } 218 + 219 + struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, 220 + struct xhci_container_ctx *ctx) 221 + { 222 + BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); 223 + return (struct xhci_input_control_ctx *)ctx->bytes; 224 + } 225 + 226 + struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, 227 + struct xhci_container_ctx *ctx) 228 + { 229 + if (ctx->type == XHCI_CTX_TYPE_DEVICE) 230 + return (struct xhci_slot_ctx *)ctx->bytes; 231 + 232 + return (struct xhci_slot_ctx *) 233 + (ctx->bytes + CTX_SIZE(xhci->hcc_params)); 234 + } 235 + 236 + struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, 237 + struct xhci_container_ctx *ctx, 238 + unsigned int ep_index) 239 + { 240 + /* increment ep index by offset of start of ep ctx array */ 241 + ep_index++; 242 + if (ctx->type == XHCI_CTX_TYPE_INPUT) 243 + ep_index++; 244 + 245 + return (struct xhci_ep_ctx *) 246 + (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 247 + } 248 + 192 249 /* All the xhci_tds in the ring's TD list should be freed at this point 
*/ 193 250 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 194 251 { ··· 266 209 xhci_ring_free(xhci, dev->ep_rings[i]); 267 210 268 211 if (dev->in_ctx) 269 - dma_pool_free(xhci->device_pool, 270 - dev->in_ctx, dev->in_ctx_dma); 212 + xhci_free_container_ctx(xhci, dev->in_ctx); 271 213 if (dev->out_ctx) 272 - dma_pool_free(xhci->device_pool, 273 - dev->out_ctx, dev->out_ctx_dma); 214 + xhci_free_container_ctx(xhci, dev->out_ctx); 215 + 274 216 kfree(xhci->devs[slot_id]); 275 217 xhci->devs[slot_id] = 0; 276 218 } ··· 277 221 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, 278 222 struct usb_device *udev, gfp_t flags) 279 223 { 280 - dma_addr_t dma; 281 224 struct xhci_virt_device *dev; 282 225 283 226 /* Slot ID 0 is reserved */ ··· 290 235 return 0; 291 236 dev = xhci->devs[slot_id]; 292 237 293 - /* Allocate the (output) device context that will be used in the HC. 294 - * The structure is 32 bytes smaller than the input context, but that's 295 - * fine. 296 - */ 297 - dev->out_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 238 + /* Allocate the (output) device context that will be used in the HC. 
*/ 239 + dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); 298 240 if (!dev->out_ctx) 299 241 goto fail; 300 - dev->out_ctx_dma = dma; 242 + 301 243 xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, 302 - (unsigned long long)dma); 303 - memset(dev->out_ctx, 0, sizeof(*dev->out_ctx)); 244 + (unsigned long long)dev->out_ctx->dma); 304 245 305 246 /* Allocate the (input) device context for address device command */ 306 - dev->in_ctx = dma_pool_alloc(xhci->device_pool, flags, &dma); 247 + dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); 307 248 if (!dev->in_ctx) 308 249 goto fail; 309 - dev->in_ctx_dma = dma; 250 + 310 251 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 311 - (unsigned long long)dma); 312 - memset(dev->in_ctx, 0, sizeof(*dev->in_ctx)); 252 + (unsigned long long)dev->in_ctx->dma); 313 253 314 254 /* Allocate endpoint 0 ring */ 315 255 dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags); ··· 314 264 init_completion(&dev->cmd_completion); 315 265 316 266 /* Point to output device context in dcbaa. 
*/ 317 - xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx_dma; 267 + xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; 318 268 xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", 319 269 slot_id, 320 270 &xhci->dcbaa->dev_context_ptrs[slot_id], ··· 332 282 struct xhci_virt_device *dev; 333 283 struct xhci_ep_ctx *ep0_ctx; 334 284 struct usb_device *top_dev; 285 + struct xhci_slot_ctx *slot_ctx; 286 + struct xhci_input_control_ctx *ctrl_ctx; 335 287 336 288 dev = xhci->devs[udev->slot_id]; 337 289 /* Slot ID 0 is reserved */ ··· 342 290 udev->slot_id); 343 291 return -EINVAL; 344 292 } 345 - ep0_ctx = &dev->in_ctx->ep[0]; 293 + ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); 294 + ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); 295 + slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); 346 296 347 297 /* 2) New slot context and endpoint 0 context are valid*/ 348 - dev->in_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 298 + ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; 349 299 350 300 /* 3) Only the control endpoint is valid - one endpoint context */ 351 - dev->in_ctx->slot.dev_info |= LAST_CTX(1); 301 + slot_ctx->dev_info |= LAST_CTX(1); 352 302 353 303 switch (udev->speed) { 354 304 case USB_SPEED_SUPER: 355 - dev->in_ctx->slot.dev_info |= (u32) udev->route; 356 - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_SS; 305 + slot_ctx->dev_info |= (u32) udev->route; 306 + slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; 357 307 break; 358 308 case USB_SPEED_HIGH: 359 - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_HS; 309 + slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; 360 310 break; 361 311 case USB_SPEED_FULL: 362 - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_FS; 312 + slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; 363 313 break; 364 314 case USB_SPEED_LOW: 365 - dev->in_ctx->slot.dev_info |= (u32) SLOT_SPEED_LS; 315 + slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; 366 316 break; 367 317 case USB_SPEED_VARIABLE: 368 318 xhci_dbg(xhci, "FIXME xHCI doesn't 
support wireless speeds\n"); ··· 378 324 for (top_dev = udev; top_dev->parent && top_dev->parent->parent; 379 325 top_dev = top_dev->parent) 380 326 /* Found device below root hub */; 381 - dev->in_ctx->slot.dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 327 + slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); 382 328 xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); 383 329 384 330 /* Is this a LS/FS device under a HS hub? */ ··· 388 334 */ 389 335 if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && 390 336 udev->tt) { 391 - dev->in_ctx->slot.tt_info = udev->tt->hub->slot_id; 392 - dev->in_ctx->slot.tt_info |= udev->ttport << 8; 337 + slot_ctx->tt_info = udev->tt->hub->slot_id; 338 + slot_ctx->tt_info |= udev->ttport << 8; 393 339 } 394 340 xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); 395 341 xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); ··· 520 466 unsigned int max_burst; 521 467 522 468 ep_index = xhci_get_endpoint_index(&ep->desc); 523 - ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 469 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 524 470 525 471 /* Set up the endpoint ring */ 526 472 virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags); ··· 587 533 struct xhci_ep_ctx *ep_ctx; 588 534 589 535 ep_index = xhci_get_endpoint_index(&ep->desc); 590 - ep_ctx = &virt_dev->in_ctx->ep[ep_index]; 536 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); 591 537 592 538 ep_ctx->ep_info = 0; 593 539 ep_ctx->ep_info2 = 0; ··· 807 753 */ 808 754 xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, 809 755 SEGMENT_SIZE, 64, xhci->page_size); 756 + 810 757 /* See Table 46 and Note on Figure 55 */ 811 - /* FIXME support 64-byte contexts */ 812 758 xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, 813 - sizeof(struct xhci_device_control), 814 - 64, xhci->page_size); 759 + 2112, 64, xhci->page_size); 815 760 if (!xhci->segment_pool || 
!xhci->device_pool) 816 761 goto fail; 817 762
+15 -7
drivers/usb/host/xhci-ring.c
··· 362 362 struct xhci_virt_device *dev = xhci->devs[slot_id]; 363 363 struct xhci_ring *ep_ring = dev->ep_rings[ep_index]; 364 364 struct xhci_generic_trb *trb; 365 + struct xhci_ep_ctx *ep_ctx; 365 366 366 367 state->new_cycle_state = 0; 367 368 state->new_deq_seg = find_trb_seg(cur_td->start_seg, ··· 371 370 if (!state->new_deq_seg) 372 371 BUG(); 373 372 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 374 - state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq; 373 + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 374 + state->new_cycle_state = 0x1 & ep_ctx->deq; 375 375 376 376 state->new_deq_ptr = cur_td->last_trb; 377 377 state->new_deq_seg = find_trb_seg(state->new_deq_seg, ··· 572 570 unsigned int ep_index; 573 571 struct xhci_ring *ep_ring; 574 572 struct xhci_virt_device *dev; 573 + struct xhci_ep_ctx *ep_ctx; 574 + struct xhci_slot_ctx *slot_ctx; 575 575 576 576 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 577 577 ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]); 578 578 dev = xhci->devs[slot_id]; 579 579 ep_ring = dev->ep_rings[ep_index]; 580 + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 581 + slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx); 580 582 581 583 if (GET_COMP_CODE(event->status) != COMP_SUCCESS) { 582 584 unsigned int ep_state; ··· 594 588 case COMP_CTX_STATE: 595 589 xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due " 596 590 "to incorrect slot or ep state.\n"); 597 - ep_state = dev->out_ctx->ep[ep_index].ep_info; 591 + ep_state = ep_ctx->ep_info; 598 592 ep_state &= EP_STATE_MASK; 599 - slot_state = dev->out_ctx->slot.dev_state; 593 + slot_state = slot_ctx->dev_state; 600 594 slot_state = GET_SLOT_STATE(slot_state); 601 595 xhci_dbg(xhci, "Slot state = %u, EP state = %u\n", 602 596 slot_state, ep_state); ··· 619 613 */ 620 614 } else { 621 615 xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n", 622 - dev->out_ctx->ep[ep_index].deq); 616 + ep_ctx->deq); 623 617 } 624 
618 625 619 ep_ring->state &= ~SET_DEQ_PENDING; ··· 801 795 union xhci_trb *event_trb; 802 796 struct urb *urb = 0; 803 797 int status = -EINPROGRESS; 798 + struct xhci_ep_ctx *ep_ctx; 804 799 805 800 xhci_dbg(xhci, "In %s\n", __func__); 806 801 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)]; ··· 814 807 ep_index = TRB_TO_EP_ID(event->flags) - 1; 815 808 xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index); 816 809 ep_ring = xdev->ep_rings[ep_index]; 817 - if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 810 + ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 811 + if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) { 818 812 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n"); 819 813 return -ENODEV; 820 814 } ··· 1201 1193 gfp_t mem_flags) 1202 1194 { 1203 1195 int ret; 1204 - 1196 + struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); 1205 1197 ret = prepare_ring(xhci, xdev->ep_rings[ep_index], 1206 - xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK, 1198 + ep_ctx->ep_info & EP_STATE_MASK, 1207 1199 num_trbs, mem_flags); 1208 1200 if (ret) 1209 1201 return ret;
+34 -27
drivers/usb/host/xhci.h
··· 447 447 448 448 449 449 /** 450 + * struct xhci_container_ctx 451 + * @type: Type of context. Used to calculated offsets to contained contexts. 452 + * @size: Size of the context data 453 + * @bytes: The raw context data given to HW 454 + * @dma: dma address of the bytes 455 + * 456 + * Represents either a Device or Input context. Holds a pointer to the raw 457 + * memory used for the context (bytes) and dma address of it (dma). 458 + */ 459 + struct xhci_container_ctx { 460 + unsigned type; 461 + #define XHCI_CTX_TYPE_DEVICE 0x1 462 + #define XHCI_CTX_TYPE_INPUT 0x2 463 + 464 + int size; 465 + 466 + u8 *bytes; 467 + dma_addr_t dma; 468 + }; 469 + 470 + /** 450 471 * struct xhci_slot_ctx 451 472 * @dev_info: Route string, device speed, hub info, and last valid endpoint 452 473 * @dev_info2: Max exit latency for device number, root hub port number ··· 604 583 605 584 606 585 /** 607 - * struct xhci_device_control 608 - * Input context; see section 6.2.5. 586 + * struct xhci_input_control_context 587 + * Input control context; see section 6.2.5. 609 588 * 610 589 * @drop_context: set the bit of the endpoint context you want to disable 611 590 * @add_context: set the bit of the endpoint context you want to enable 612 591 */ 613 - struct xhci_device_control { 614 - /* Input control context */ 592 + struct xhci_input_control_ctx { 615 593 u32 drop_flags; 616 594 u32 add_flags; 617 - u32 rsvd[6]; 618 - /* Copy of device context */ 619 - struct xhci_slot_ctx slot; 620 - struct xhci_ep_ctx ep[31]; 621 - }; 622 - 623 - /** 624 - * struct xhci_device_ctx 625 - * Device context; see section 6.2.1. 626 - * 627 - * @slot: slot context for the device. 628 - * @ep: array of endpoint contexts for the device. 
629 - */ 630 - struct xhci_device_ctx { 631 - struct xhci_slot_ctx slot; 632 - struct xhci_ep_ctx ep[31]; 595 + u32 rsvd2[6]; 633 596 }; 634 597 635 598 /* drop context bitmasks */ 636 599 #define DROP_EP(x) (0x1 << x) 637 600 /* add context bitmasks */ 638 601 #define ADD_EP(x) (0x1 << x) 639 - 640 602 641 603 struct xhci_virt_device { 642 604 /* ··· 630 626 * track of input and output contexts separately because 631 627 * these commands might fail and we don't trust the hardware. 632 628 */ 633 - struct xhci_device_ctx *out_ctx; 634 - dma_addr_t out_ctx_dma; 629 + struct xhci_container_ctx *out_ctx; 635 630 /* Used for addressing devices and configuration changes */ 636 - struct xhci_device_control *in_ctx; 637 - dma_addr_t in_ctx_dma; 631 + struct xhci_container_ctx *in_ctx; 632 + 638 633 /* FIXME when stream support is added */ 639 634 struct xhci_ring *ep_rings[31]; 640 635 /* Temporary storage in case the configure endpoint command fails and we ··· 1142 1139 void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); 1143 1140 void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); 1144 1141 void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); 1145 - void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_device_control *ctx, dma_addr_t dma, unsigned int last_ep); 1146 - void xhci_dbg_device_ctx(struct xhci_hcd *xhci, struct xhci_device_ctx *ctx, dma_addr_t dma, unsigned int last_ep); 1142 + void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); 1147 1143 1148 1144 /* xHCI memory managment */ 1149 1145 void xhci_mem_cleanup(struct xhci_hcd *xhci); ··· 1208 1206 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, 1209 1207 char *buf, u16 wLength); 1210 1208 int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); 1209 + 1210 + /* xHCI contexts */ 1211 + struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); 1212 + struct 
xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx); 1213 + struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index); 1211 1214 1212 1215 #endif /* __LINUX_XHCI_HCD_H */