Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'usb-for-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb into usb-next

Felipe writes:

USB: changes for v4.11

Here's the big pull request for the Gadget
API. Again, the majority of changes sit in the dwc2
driver. The most important changes include a workaround
for GOTGCTL being wrong, a sleep-inside-spinlock fix,
and the big series of cleanups on dwc2.

One important thing on dwc3 is that gadget drivers
no longer need to cope with unaligned OUT transfers
themselves: dwc3 now supports appending one extra
chained TRB to align the transfer on its own.

Apart from these, the usual set of typos,
non-critical fixes, etc.

+2316 -2233
+2 -2
Documentation/devicetree/bindings/usb/dwc3-st.txt
··· 20 20 with 'reg' property 21 21 22 22 - pinctl-names : A pinctrl state named "default" must be defined 23 - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt 23 + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 24 24 25 25 - pinctrl-0 : Pin control group 26 - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt 26 + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 27 27 28 28 - ranges : allows valid 1:1 translation between child's address space and 29 29 parent's address space
+4
Documentation/devicetree/bindings/usb/dwc3.txt
··· 56 56 57 57 - <DEPRECATED> tx-fifo-resize: determines if the FIFO *has* to be reallocated. 58 58 59 + - in addition all properties from usb-xhci.txt from the current directory are 60 + supported as well 61 + 62 + 59 63 This is usually a subnode to DWC3 glue to which it is connected. 60 64 61 65 dwc3@4a030000 {
+1 -1
Documentation/devicetree/bindings/usb/ehci-st.txt
··· 7 7 - interrupts : one EHCI interrupt should be described here 8 8 - pinctrl-names : a pinctrl state named "default" must be defined 9 9 - pinctrl-0 : phandle referencing pin configuration of the USB controller 10 - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt 10 + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 11 11 - clocks : phandle list of usb clocks 12 12 - clock-names : should be "ic" for interconnect clock and "clk48" 13 13 See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+1 -1
Documentation/devicetree/bindings/usb/mt8173-mtu3.txt
··· 30 30 "id_float" and "id_ground" are optinal which depends on 31 31 "mediatek,enable-manual-drd" 32 32 - pinctrl-0 : pin control group 33 - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt 33 + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 34 34 35 35 - maximum-speed : valid arguments are "super-speed", "high-speed" and 36 36 "full-speed"; refer to usb/generic.txt
+2 -2
Documentation/devicetree/bindings/usb/mt8173-xhci.txt
··· 38 38 - usb3-lpm-capable : supports USB3.0 LPM 39 39 - pinctrl-names : a pinctrl state named "default" must be defined 40 40 - pinctrl-0 : pin control group 41 - See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt 41 + See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt 42 42 43 43 Example: 44 44 usb30: usb@11270000 { ··· 68 68 69 69 In the case, xhci is added as subnode to mtu3. An example and the DT binding 70 70 details of mtu3 can be found in: 71 - Documentation/devicetree/bindings/usb/mtu3.txt 71 + Documentation/devicetree/bindings/usb/mt8173-mtu3.txt 72 72 73 73 Required properties: 74 74 - compatible : should contain "mediatek,mt8173-xhci"
+1 -1
Documentation/devicetree/bindings/usb/qcom,dwc3.txt
··· 18 18 the node is not important. The content of the node is defined in dwc3.txt. 19 19 20 20 Phy documentation is provided in the following places: 21 - Documentation/devicetree/bindings/phy/qcom,dwc3-usb-phy.txt 21 + Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt 22 22 23 23 Example device nodes: 24 24
+2
Documentation/usb/gadget-testing.txt
··· 632 632 p_chmask - playback channel mask 633 633 p_srate - playback sampling rate 634 634 p_ssize - playback sample size (bytes) 635 + req_number - the number of pre-allocated request for both capture 636 + and playback 635 637 636 638 The attributes have sane default values. 637 639
+17 -22
drivers/usb/dwc2/core.c
··· 104 104 gr = &hsotg->gr_backup; 105 105 if (!gr->valid) { 106 106 dev_err(hsotg->dev, "%s: no global registers to restore\n", 107 - __func__); 107 + __func__); 108 108 return -EINVAL; 109 109 } 110 110 gr->valid = false; ··· 155 155 ret = dwc2_restore_global_registers(hsotg); 156 156 if (ret) { 157 157 dev_err(hsotg->dev, "%s: failed to restore registers\n", 158 - __func__); 158 + __func__); 159 159 return ret; 160 160 } 161 161 if (dwc2_is_host_mode(hsotg)) { 162 162 ret = dwc2_restore_host_registers(hsotg); 163 163 if (ret) { 164 164 dev_err(hsotg->dev, "%s: failed to restore host registers\n", 165 - __func__); 165 + __func__); 166 166 return ret; 167 167 } 168 168 } else { 169 169 ret = dwc2_restore_device_registers(hsotg); 170 170 if (ret) { 171 171 dev_err(hsotg->dev, "%s: failed to restore device registers\n", 172 - __func__); 172 + __func__); 173 173 return ret; 174 174 } 175 175 } ··· 195 195 ret = dwc2_backup_global_registers(hsotg); 196 196 if (ret) { 197 197 dev_err(hsotg->dev, "%s: failed to backup global registers\n", 198 - __func__); 198 + __func__); 199 199 return ret; 200 200 } 201 201 ··· 203 203 ret = dwc2_backup_host_registers(hsotg); 204 204 if (ret) { 205 205 dev_err(hsotg->dev, "%s: failed to backup host registers\n", 206 - __func__); 206 + __func__); 207 207 return ret; 208 208 } 209 209 } else { 210 210 ret = dwc2_backup_device_registers(hsotg); 211 211 if (ret) { 212 212 dev_err(hsotg->dev, "%s: failed to backup device registers\n", 213 - __func__); 213 + __func__); 214 214 return ret; 215 215 } 216 216 } ··· 313 313 * Do core a soft reset of the core. Be careful with this because it 314 314 * resets all the internal state machines of the core. 
315 315 */ 316 - int dwc2_core_reset(struct dwc2_hsotg *hsotg) 316 + int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait) 317 317 { 318 318 u32 greset; 319 319 int count = 0; ··· 369 369 } 370 370 } while (!(greset & GRSTCTL_AHBIDLE)); 371 371 372 - if (wait_for_host_mode) 372 + if (wait_for_host_mode && !skip_wait) 373 373 dwc2_wait_for_mode(hsotg, true); 374 374 375 375 return 0; ··· 455 455 dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); 456 456 457 457 if (dwc2_iddig_filter_enabled(hsotg)) 458 - usleep_range(100000, 110000); 458 + msleep(100); 459 459 } 460 460 461 461 /* ··· 500 500 { 501 501 int retval; 502 502 503 - retval = dwc2_core_reset(hsotg); 503 + retval = dwc2_core_reset(hsotg, false); 504 504 if (retval) 505 505 return retval; 506 506 ··· 541 541 addr = hsotg->regs + HAINTMSK; 542 542 dev_dbg(hsotg->dev, "HAINTMSK @0x%08lX : 0x%08X\n", 543 543 (unsigned long)addr, dwc2_readl(addr)); 544 - if (hsotg->params.dma_desc_enable > 0) { 544 + if (hsotg->params.dma_desc_enable) { 545 545 addr = hsotg->regs + HFLBADDR; 546 546 dev_dbg(hsotg->dev, "HFLBADDR @0x%08lX : 0x%08X\n", 547 547 (unsigned long)addr, dwc2_readl(addr)); ··· 571 571 addr = hsotg->regs + HCDMA(i); 572 572 dev_dbg(hsotg->dev, "HCDMA @0x%08lX : 0x%08X\n", 573 573 (unsigned long)addr, dwc2_readl(addr)); 574 - if (hsotg->params.dma_desc_enable > 0) { 574 + if (hsotg->params.dma_desc_enable) { 575 575 addr = hsotg->regs + HCDMAB(i); 576 576 dev_dbg(hsotg->dev, "HCDMAB @0x%08lX : 0x%08X\n", 577 577 (unsigned long)addr, dwc2_readl(addr)); ··· 751 751 return dwc2_force_mode(hsotg, host); 752 752 } 753 753 754 - u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg) 755 - { 756 - return hsotg->params.otg_ver == 1 ? 0x0200 : 0x0103; 757 - } 758 - 759 754 bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg) 760 755 { 761 756 if (dwc2_readl(hsotg->regs + GSNPSID) == 0xffffffff) ··· 788 793 } 789 794 790 795 /* Returns the controller's GHWCFG2.OTG_MODE. 
*/ 791 - unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg) 796 + unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg) 792 797 { 793 798 u32 ghwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); 794 799 ··· 799 804 /* Returns true if the controller is capable of DRD. */ 800 805 bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg) 801 806 { 802 - unsigned op_mode = dwc2_op_mode(hsotg); 807 + unsigned int op_mode = dwc2_op_mode(hsotg); 803 808 804 809 return (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) || 805 810 (op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE) || ··· 809 814 /* Returns true if the controller is host-only. */ 810 815 bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg) 811 816 { 812 - unsigned op_mode = dwc2_op_mode(hsotg); 817 + unsigned int op_mode = dwc2_op_mode(hsotg); 813 818 814 819 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) || 815 820 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST); ··· 818 823 /* Returns true if the controller is device-only. */ 819 824 bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg) 820 825 { 821 - unsigned op_mode = dwc2_op_mode(hsotg); 826 + unsigned int op_mode = dwc2_op_mode(hsotg); 822 827 823 828 return (op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || 824 829 (op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE);
+101 -107
drivers/usb/dwc2/core.h
··· 127 127 "vusb_a", /* analog USB supply, 1.1V */ 128 128 }; 129 129 130 + #define DWC2_NUM_SUPPLIES ARRAY_SIZE(dwc2_hsotg_supply_names) 131 + 130 132 /* 131 133 * EP0_MPS_LIMIT 132 134 * ··· 248 246 void *saved_req_buf; 249 247 }; 250 248 251 - #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 249 + #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 250 + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 252 251 #define call_gadget(_hs, _entry) \ 253 252 do { \ 254 253 if ((_hs)->gadget.speed != USB_SPEED_UNKNOWN && \ ··· 274 271 DWC2_L3, /* Off state */ 275 272 }; 276 273 277 - /* 278 - * Gadget periodic tx fifo sizes as used by legacy driver 279 - * EP0 is not included 280 - */ 281 - #define DWC2_G_P_LEGACY_TX_FIFO_SIZE {256, 256, 256, 256, 768, 768, 768, \ 282 - 768, 0, 0, 0, 0, 0, 0, 0} 283 - 284 274 /* Gadget ep0 states */ 285 275 enum dwc2_ep0_state { 286 276 DWC2_EP0_SETUP, ··· 291 295 * 1 - SRP Only capable 292 296 * 2 - No HNP/SRP capable (always available) 293 297 * Defaults to best available option (0, 1, then 2) 294 - * @otg_ver: OTG version supported 295 - * 0 - 1.3 (default) 296 - * 1 - 2.0 297 298 * @host_dma: Specifies whether to use slave or DMA mode for accessing 298 299 * the data FIFOs. The driver will automatically detect the 299 300 * value for this parameter if none is specified. ··· 437 444 * in DWORDS with possible values from from 438 445 * 16-32768 (default: 256, 256, 256, 256, 768, 439 446 * 768, 768, 768, 0, 0, 0, 0, 0, 0, 0). 447 + * @change_speed_quirk: Change speed configuration to DWC2_SPEED_PARAM_FULL 448 + * while full&low speed device connect. And change speed 449 + * back to DWC2_SPEED_PARAM_HIGH while device is gone. 450 + * 0 - No (default) 451 + * 1 - Yes 440 452 * 441 453 * The following parameters may be specified when starting the module. These 442 454 * parameters define how the DWC_otg controller should be configured. A ··· 450 452 * default described above. 
451 453 */ 452 454 struct dwc2_core_params { 453 - /* 454 - * Don't add any non-int members here, this will break 455 - * dwc2_set_all_params! 456 - */ 457 - int otg_cap; 455 + u8 otg_cap; 458 456 #define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0 459 457 #define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1 460 458 #define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 461 459 462 - int otg_ver; 463 - int dma_desc_enable; 464 - int dma_desc_fs_enable; 465 - int speed; 466 - #define DWC2_SPEED_PARAM_HIGH 0 467 - #define DWC2_SPEED_PARAM_FULL 1 468 - #define DWC2_SPEED_PARAM_LOW 2 469 - 470 - int enable_dynamic_fifo; 471 - int en_multiple_tx_fifo; 472 - int host_rx_fifo_size; 473 - int host_nperio_tx_fifo_size; 474 - int host_perio_tx_fifo_size; 475 - int max_transfer_size; 476 - int max_packet_count; 477 - int host_channels; 478 - int phy_type; 460 + u8 phy_type; 479 461 #define DWC2_PHY_TYPE_PARAM_FS 0 480 462 #define DWC2_PHY_TYPE_PARAM_UTMI 1 481 463 #define DWC2_PHY_TYPE_PARAM_ULPI 2 482 464 483 - int phy_utmi_width; 484 - int phy_ulpi_ddr; 485 - int phy_ulpi_ext_vbus; 486 - #define DWC2_PHY_ULPI_INTERNAL_VBUS 0 487 - #define DWC2_PHY_ULPI_EXTERNAL_VBUS 1 465 + u8 speed; 466 + #define DWC2_SPEED_PARAM_HIGH 0 467 + #define DWC2_SPEED_PARAM_FULL 1 468 + #define DWC2_SPEED_PARAM_LOW 2 488 469 489 - int i2c_enable; 490 - int ulpi_fs_ls; 491 - int host_support_fs_ls_low_power; 492 - int host_ls_low_power_phy_clk; 493 - #define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 494 - #define DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 495 - 496 - int ts_dline; 497 - int reload_ctl; 498 - int ahbcfg; 499 - int uframe_sched; 500 - int external_id_pin_ctl; 501 - int hibernation; 502 - 503 - /* 504 - * The following parameters are *only* set via device 505 - * properties and cannot be set directly in this structure. 
506 - */ 470 + u8 phy_utmi_width; 471 + bool phy_ulpi_ddr; 472 + bool phy_ulpi_ext_vbus; 473 + bool enable_dynamic_fifo; 474 + bool en_multiple_tx_fifo; 475 + bool i2c_enable; 476 + bool ulpi_fs_ls; 477 + bool ts_dline; 478 + bool reload_ctl; 479 + bool uframe_sched; 480 + bool external_id_pin_ctl; 481 + bool hibernation; 482 + u16 max_packet_count; 483 + u32 max_transfer_size; 484 + u32 ahbcfg; 507 485 508 486 /* Host parameters */ 509 487 bool host_dma; 488 + bool dma_desc_enable; 489 + bool dma_desc_fs_enable; 490 + bool host_support_fs_ls_low_power; 491 + bool host_ls_low_power_phy_clk; 492 + 493 + u8 host_channels; 494 + u16 host_rx_fifo_size; 495 + u16 host_nperio_tx_fifo_size; 496 + u16 host_perio_tx_fifo_size; 510 497 511 498 /* Gadget parameters */ 512 499 bool g_dma; ··· 499 516 u32 g_rx_fifo_size; 500 517 u32 g_np_tx_fifo_size; 501 518 u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; 519 + 520 + bool change_speed_quirk; 502 521 }; 503 522 504 523 /** ··· 588 603 #define DWC2_CTRL_BUFF_SIZE 8 589 604 590 605 /** 591 - * struct dwc2_gregs_backup - Holds global registers state before entering partial 592 - * power down 606 + * struct dwc2_gregs_backup - Holds global registers state before 607 + * entering partial power down 593 608 * @gotgctl: Backup of GOTGCTL register 594 609 * @gintmsk: Backup of GINTMSK register 595 610 * @gahbcfg: Backup of GAHBCFG register ··· 619 634 }; 620 635 621 636 /** 622 - * struct dwc2_dregs_backup - Holds device registers state before entering partial 623 - * power down 637 + * struct dwc2_dregs_backup - Holds device registers state before 638 + * entering partial power down 624 639 * @dcfg: Backup of DCFG register 625 640 * @dctl: Backup of DCTL register 626 641 * @daintmsk: Backup of DAINTMSK register ··· 649 664 }; 650 665 651 666 /** 652 - * struct dwc2_hregs_backup - Holds host registers state before entering partial 653 - * power down 667 + * struct dwc2_hregs_backup - Holds host registers state before 668 + * entering partial 
power down 654 669 * @hcfg: Backup of HCFG register 655 670 * @haintmsk: Backup of HAINTMSK register 656 671 * @hcintmsk: Backup of HCINTMSK register ··· 767 782 * @gadget_enabled Peripheral mode sub-driver initialization indicator. 768 783 * @ll_hw_enabled Status of low-level hardware resources. 769 784 * @phy: The otg phy transceiver structure for phy control. 770 - * @uphy: The otg phy transceiver structure for old USB phy control. 771 - * @plat: The platform specific configuration data. This can be removed once 772 - * all SoCs support usb transceiver. 785 + * @uphy: The otg phy transceiver structure for old USB phy 786 + * control. 787 + * @plat: The platform specific configuration data. This can be 788 + * removed once all SoCs support usb transceiver. 773 789 * @supplies: Definition of USB power supplies 774 790 * @phyif: PHY interface width 775 791 * @lock: Spinlock that protects all the driver data structures ··· 907 921 struct phy *phy; 908 922 struct usb_phy *uphy; 909 923 struct dwc2_hsotg_plat *plat; 910 - struct regulator_bulk_data supplies[ARRAY_SIZE(dwc2_hsotg_supply_names)]; 924 + struct regulator_bulk_data supplies[DWC2_NUM_SUPPLIES]; 911 925 u32 phyif; 912 926 913 927 spinlock_t lock; ··· 933 947 /* DWC OTG HW Release versions */ 934 948 #define DWC2_CORE_REV_2_71a 0x4f54271a 935 949 #define DWC2_CORE_REV_2_90a 0x4f54290a 950 + #define DWC2_CORE_REV_2_91a 0x4f54291a 936 951 #define DWC2_CORE_REV_2_92a 0x4f54292a 937 952 #define DWC2_CORE_REV_2_94a 0x4f54294a 938 953 #define DWC2_CORE_REV_3_00a 0x4f54300a ··· 1020 1033 #endif 1021 1034 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */ 1022 1035 1023 - #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1036 + #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 1037 + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1024 1038 /* Gadget structures */ 1025 1039 struct usb_gadget_driver *driver; 1026 1040 int fifo_mem; ··· 1089 1101 * The following functions support 
initialization of the core driver component 1090 1102 * and the DWC_otg controller 1091 1103 */ 1092 - extern int dwc2_core_reset(struct dwc2_hsotg *hsotg); 1093 - extern int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); 1094 - extern int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); 1095 - extern int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); 1104 + int dwc2_core_reset(struct dwc2_hsotg *hsotg, bool skip_wait); 1105 + int dwc2_core_reset_and_force_dr_mode(struct dwc2_hsotg *hsotg); 1106 + int dwc2_enter_hibernation(struct dwc2_hsotg *hsotg); 1107 + int dwc2_exit_hibernation(struct dwc2_hsotg *hsotg, bool restore); 1096 1108 1097 1109 bool dwc2_force_mode_if_needed(struct dwc2_hsotg *hsotg, bool host); 1098 1110 void dwc2_clear_force_mode(struct dwc2_hsotg *hsotg); 1099 1111 void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg); 1100 1112 1101 - extern bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); 1113 + bool dwc2_is_controller_alive(struct dwc2_hsotg *hsotg); 1102 1114 1103 1115 /* 1104 1116 * Common core Functions. 1105 1117 * The following functions support managing the DWC_otg controller in either 1106 1118 * device or host mode. 
1107 1119 */ 1108 - extern void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes); 1109 - extern void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num); 1110 - extern void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg); 1120 + void dwc2_read_packet(struct dwc2_hsotg *hsotg, u8 *dest, u16 bytes); 1121 + void dwc2_flush_tx_fifo(struct dwc2_hsotg *hsotg, const int num); 1122 + void dwc2_flush_rx_fifo(struct dwc2_hsotg *hsotg); 1111 1123 1112 - extern void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd); 1113 - extern void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); 1124 + void dwc2_enable_global_interrupts(struct dwc2_hsotg *hcd); 1125 + void dwc2_disable_global_interrupts(struct dwc2_hsotg *hcd); 1114 1126 1115 1127 /* This function should be called on every hardware interrupt. */ 1116 - extern irqreturn_t dwc2_handle_common_intr(int irq, void *dev); 1128 + irqreturn_t dwc2_handle_common_intr(int irq, void *dev); 1117 1129 1118 1130 /* The device ID match table */ 1119 1131 extern const struct of_device_id dwc2_of_match_table[]; 1120 1132 1121 - extern int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg); 1122 - extern int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg); 1133 + int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg); 1134 + int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg); 1123 1135 1124 1136 /* Parameters */ 1125 1137 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg); ··· 1133 1145 * are read in and cached so they always read directly from the 1134 1146 * GHWCFG2 register. 
1135 1147 */ 1136 - unsigned dwc2_op_mode(struct dwc2_hsotg *hsotg); 1148 + unsigned int dwc2_op_mode(struct dwc2_hsotg *hsotg); 1137 1149 bool dwc2_hw_is_otg(struct dwc2_hsotg *hsotg); 1138 1150 bool dwc2_hw_is_host(struct dwc2_hsotg *hsotg); 1139 1151 bool dwc2_hw_is_device(struct dwc2_hsotg *hsotg); ··· 1145 1157 { 1146 1158 return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) != 0; 1147 1159 } 1160 + 1148 1161 static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg) 1149 1162 { 1150 1163 return (dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_CURMODE_HOST) == 0; ··· 1154 1165 /* 1155 1166 * Dump core registers and SPRAM 1156 1167 */ 1157 - extern void dwc2_dump_dev_registers(struct dwc2_hsotg *hsotg); 1158 - extern void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg); 1159 - extern void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg); 1160 - 1161 - /* 1162 - * Return OTG version - either 1.3 or 2.0 1163 - */ 1164 - extern u16 dwc2_get_otg_version(struct dwc2_hsotg *hsotg); 1168 + void dwc2_dump_dev_registers(struct dwc2_hsotg *hsotg); 1169 + void dwc2_dump_host_registers(struct dwc2_hsotg *hsotg); 1170 + void dwc2_dump_global_registers(struct dwc2_hsotg *hsotg); 1165 1171 1166 1172 /* Gadget defines */ 1167 - #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1168 - extern int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg); 1169 - extern int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2); 1170 - extern int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2); 1171 - extern int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq); 1172 - extern void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2, 1173 - bool reset); 1174 - extern void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); 1175 - extern void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); 1176 - extern int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); 1173 + #if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ 1174 + 
IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1175 + int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg); 1176 + int dwc2_hsotg_suspend(struct dwc2_hsotg *dwc2); 1177 + int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2); 1178 + int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq); 1179 + void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2, 1180 + bool reset); 1181 + void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); 1182 + void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); 1183 + int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); 1177 1184 #define dwc2_is_device_connected(hsotg) (hsotg->connected) 1178 1185 int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg); 1179 1186 int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg); 1187 + int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg); 1188 + int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg); 1189 + int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg); 1180 1190 #else 1181 1191 static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2) 1182 1192 { return 0; } ··· 1186 1198 static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq) 1187 1199 { return 0; } 1188 1200 static inline void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2, 1189 - bool reset) {} 1201 + bool reset) {} 1190 1202 static inline void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) {} 1191 1203 static inline void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2) {} 1192 1204 static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, 1193 - int testmode) 1205 + int testmode) 1194 1206 { return 0; } 1195 1207 #define dwc2_is_device_connected(hsotg) (0) 1196 1208 static inline int dwc2_backup_device_registers(struct dwc2_hsotg *hsotg) 1197 1209 { return 0; } 1198 1210 static inline int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg) 1199 1211 { return 0; } 1212 + static inline int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg) 1213 + { return 0; } 
1214 + static inline int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) 1215 + { return 0; } 1216 + static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg) 1217 + { return 0; } 1200 1218 #endif 1201 1219 1202 1220 #if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) 1203 - extern int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg); 1204 - extern int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us); 1205 - extern void dwc2_hcd_connect(struct dwc2_hsotg *hsotg); 1206 - extern void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force); 1207 - extern void dwc2_hcd_start(struct dwc2_hsotg *hsotg); 1221 + int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg); 1222 + int dwc2_hcd_get_future_frame_number(struct dwc2_hsotg *hsotg, int us); 1223 + void dwc2_hcd_connect(struct dwc2_hsotg *hsotg); 1224 + void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force); 1225 + void dwc2_hcd_start(struct dwc2_hsotg *hsotg); 1208 1226 int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg); 1209 1227 int dwc2_restore_host_registers(struct dwc2_hsotg *hsotg); 1210 1228 #else ··· 1223 1229 static inline void dwc2_hcd_disconnect(struct dwc2_hsotg *hsotg, bool force) {} 1224 1230 static inline void dwc2_hcd_start(struct dwc2_hsotg *hsotg) {} 1225 1231 static inline void dwc2_hcd_remove(struct dwc2_hsotg *hsotg) {} 1226 - static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) 1232 + static inline int dwc2_hcd_init(struct dwc2_hsotg *hsotg) 1227 1233 { return 0; } 1228 1234 static inline int dwc2_backup_host_registers(struct dwc2_hsotg *hsotg) 1229 1235 { return 0; }
+5 -6
drivers/usb/dwc2/core_intr.c
··· 159 159 " ++OTG Interrupt: Session Request Success Status Change++\n"); 160 160 gotgctl = dwc2_readl(hsotg->regs + GOTGCTL); 161 161 if (gotgctl & GOTGCTL_SESREQSCS) { 162 - if (hsotg->params.phy_type == 163 - DWC2_PHY_TYPE_PARAM_FS 164 - && hsotg->params.i2c_enable > 0) { 162 + if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS && 163 + hsotg->params.i2c_enable) { 165 164 hsotg->srp_success = 1; 166 165 } else { 167 166 /* Clear Session Request */ ··· 316 317 dwc2_writel(GINTSTS_SESSREQINT, hsotg->regs + GINTSTS); 317 318 318 319 dev_dbg(hsotg->dev, "Session request interrupt - lx_state=%d\n", 319 - hsotg->lx_state); 320 + hsotg->lx_state); 320 321 321 322 if (dwc2_is_device_mode(hsotg)) { 322 323 if (hsotg->lx_state == DWC2_L2) { ··· 436 437 /* Ignore suspend request before enumeration */ 437 438 if (!dwc2_is_device_connected(hsotg)) { 438 439 dev_dbg(hsotg->dev, 439 - "ignore suspend request before enumeration\n"); 440 + "ignore suspend request before enumeration\n"); 440 441 return; 441 442 } 442 443 ··· 444 445 if (ret) { 445 446 if (ret != -ENOTSUPP) 446 447 dev_err(hsotg->dev, 447 - "enter hibernation failed\n"); 448 + "enter hibernation failed\n"); 448 449 goto skip_power_saving; 449 450 } 450 451
+2 -2
drivers/usb/dwc2/debug.h
··· 17 17 #include "core.h" 18 18 19 19 #ifdef CONFIG_DEBUG_FS 20 - extern int dwc2_debugfs_init(struct dwc2_hsotg *); 21 - extern void dwc2_debugfs_exit(struct dwc2_hsotg *); 20 + int dwc2_debugfs_init(struct dwc2_hsotg *hsotg); 21 + void dwc2_debugfs_exit(struct dwc2_hsotg *hsotg); 22 22 #else 23 23 static inline int dwc2_debugfs_init(struct dwc2_hsotg *hsotg) 24 24 { return 0; }
+169 -13
drivers/usb/dwc2/debugfs.c
··· 137 137 int idx; 138 138 139 139 seq_printf(seq, "DCFG=0x%08x, DCTL=0x%08x, DSTS=0x%08x\n", 140 - dwc2_readl(regs + DCFG), 140 + dwc2_readl(regs + DCFG), 141 141 dwc2_readl(regs + DCTL), 142 142 dwc2_readl(regs + DSTS)); 143 143 ··· 338 338 { 339 339 struct dentry *root; 340 340 struct dentry *file; 341 - unsigned epidx; 341 + unsigned int epidx; 342 342 343 343 root = hsotg->debug_root; 344 344 345 345 /* create general state file */ 346 346 347 - file = debugfs_create_file("state", S_IRUGO, root, hsotg, &state_fops); 347 + file = debugfs_create_file("state", 0444, root, hsotg, &state_fops); 348 348 if (IS_ERR(file)) 349 349 dev_err(hsotg->dev, "%s: failed to create state\n", __func__); 350 350 351 - file = debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, hsotg, 352 - &testmode_fops); 351 + file = debugfs_create_file("testmode", 0644, root, hsotg, 352 + &testmode_fops); 353 353 if (IS_ERR(file)) 354 354 dev_err(hsotg->dev, "%s: failed to create testmode\n", 355 - __func__); 355 + __func__); 356 356 357 - file = debugfs_create_file("fifo", S_IRUGO, root, hsotg, &fifo_fops); 357 + file = debugfs_create_file("fifo", 0444, root, hsotg, &fifo_fops); 358 358 if (IS_ERR(file)) 359 359 dev_err(hsotg->dev, "%s: failed to create fifo\n", __func__); 360 360 ··· 364 364 365 365 ep = hsotg->eps_out[epidx]; 366 366 if (ep) { 367 - file = debugfs_create_file(ep->name, S_IRUGO, 368 - root, ep, &ep_fops); 367 + file = debugfs_create_file(ep->name, 0444, 368 + root, ep, &ep_fops); 369 369 if (IS_ERR(file)) 370 370 dev_err(hsotg->dev, "failed to create %s debug file\n", 371 371 ep->name); ··· 377 377 378 378 ep = hsotg->eps_in[epidx]; 379 379 if (ep) { 380 - file = debugfs_create_file(ep->name, S_IRUGO, 381 - root, ep, &ep_fops); 380 + file = debugfs_create_file(ep->name, 0444, 381 + root, ep, &ep_fops); 382 382 if (IS_ERR(file)) 383 383 dev_err(hsotg->dev, "failed to create %s debug file\n", 384 384 ep->name); ··· 725 725 dump_register(HCDMAB(15)), 726 726 }; 727 727 
728 + #define print_param(_seq, _ptr, _param) \ 729 + seq_printf((_seq), "%-30s: %d\n", #_param, (_ptr)->_param) 730 + 731 + #define print_param_hex(_seq, _ptr, _param) \ 732 + seq_printf((_seq), "%-30s: 0x%x\n", #_param, (_ptr)->_param) 733 + 734 + static int params_show(struct seq_file *seq, void *v) 735 + { 736 + struct dwc2_hsotg *hsotg = seq->private; 737 + struct dwc2_core_params *p = &hsotg->params; 738 + int i; 739 + 740 + print_param(seq, p, otg_cap); 741 + print_param(seq, p, dma_desc_enable); 742 + print_param(seq, p, dma_desc_fs_enable); 743 + print_param(seq, p, speed); 744 + print_param(seq, p, enable_dynamic_fifo); 745 + print_param(seq, p, en_multiple_tx_fifo); 746 + print_param(seq, p, host_rx_fifo_size); 747 + print_param(seq, p, host_nperio_tx_fifo_size); 748 + print_param(seq, p, host_perio_tx_fifo_size); 749 + print_param(seq, p, max_transfer_size); 750 + print_param(seq, p, max_packet_count); 751 + print_param(seq, p, host_channels); 752 + print_param(seq, p, phy_type); 753 + print_param(seq, p, phy_utmi_width); 754 + print_param(seq, p, phy_ulpi_ddr); 755 + print_param(seq, p, phy_ulpi_ext_vbus); 756 + print_param(seq, p, i2c_enable); 757 + print_param(seq, p, ulpi_fs_ls); 758 + print_param(seq, p, host_support_fs_ls_low_power); 759 + print_param(seq, p, host_ls_low_power_phy_clk); 760 + print_param(seq, p, ts_dline); 761 + print_param(seq, p, reload_ctl); 762 + print_param_hex(seq, p, ahbcfg); 763 + print_param(seq, p, uframe_sched); 764 + print_param(seq, p, external_id_pin_ctl); 765 + print_param(seq, p, hibernation); 766 + print_param(seq, p, host_dma); 767 + print_param(seq, p, g_dma); 768 + print_param(seq, p, g_dma_desc); 769 + print_param(seq, p, g_rx_fifo_size); 770 + print_param(seq, p, g_np_tx_fifo_size); 771 + 772 + for (i = 0; i < MAX_EPS_CHANNELS; i++) { 773 + char str[32]; 774 + 775 + snprintf(str, 32, "g_tx_fifo_size[%d]", i); 776 + seq_printf(seq, "%-30s: %d\n", str, p->g_tx_fifo_size[i]); 777 + } 778 + 779 + return 0; 780 + 
} 781 + 782 + static int params_open(struct inode *inode, struct file *file) 783 + { 784 + return single_open(file, params_show, inode->i_private); 785 + } 786 + 787 + static const struct file_operations params_fops = { 788 + .owner = THIS_MODULE, 789 + .open = params_open, 790 + .read = seq_read, 791 + .llseek = seq_lseek, 792 + .release = single_release, 793 + }; 794 + 795 + static int hw_params_show(struct seq_file *seq, void *v) 796 + { 797 + struct dwc2_hsotg *hsotg = seq->private; 798 + struct dwc2_hw_params *hw = &hsotg->hw_params; 799 + 800 + print_param(seq, hw, op_mode); 801 + print_param(seq, hw, arch); 802 + print_param(seq, hw, dma_desc_enable); 803 + print_param(seq, hw, enable_dynamic_fifo); 804 + print_param(seq, hw, en_multiple_tx_fifo); 805 + print_param(seq, hw, rx_fifo_size); 806 + print_param(seq, hw, host_nperio_tx_fifo_size); 807 + print_param(seq, hw, dev_nperio_tx_fifo_size); 808 + print_param(seq, hw, host_perio_tx_fifo_size); 809 + print_param(seq, hw, nperio_tx_q_depth); 810 + print_param(seq, hw, host_perio_tx_q_depth); 811 + print_param(seq, hw, dev_token_q_depth); 812 + print_param(seq, hw, max_transfer_size); 813 + print_param(seq, hw, max_packet_count); 814 + print_param(seq, hw, host_channels); 815 + print_param(seq, hw, hs_phy_type); 816 + print_param(seq, hw, fs_phy_type); 817 + print_param(seq, hw, i2c_enable); 818 + print_param(seq, hw, num_dev_ep); 819 + print_param(seq, hw, num_dev_perio_in_ep); 820 + print_param(seq, hw, total_fifo_size); 821 + print_param(seq, hw, power_optimized); 822 + print_param(seq, hw, utmi_phy_data_width); 823 + print_param_hex(seq, hw, snpsid); 824 + print_param_hex(seq, hw, dev_ep_dirs); 825 + 826 + return 0; 827 + } 828 + 829 + static int hw_params_open(struct inode *inode, struct file *file) 830 + { 831 + return single_open(file, hw_params_show, inode->i_private); 832 + } 833 + 834 + static const struct file_operations hw_params_fops = { 835 + .owner = THIS_MODULE, 836 + .open = hw_params_open, 
837 + .read = seq_read, 838 + .llseek = seq_lseek, 839 + .release = single_release, 840 + }; 841 + 842 + static int dr_mode_show(struct seq_file *seq, void *v) 843 + { 844 + struct dwc2_hsotg *hsotg = seq->private; 845 + const char *dr_mode = ""; 846 + 847 + device_property_read_string(hsotg->dev, "dr_mode", &dr_mode); 848 + seq_printf(seq, "%s\n", dr_mode); 849 + return 0; 850 + } 851 + 852 + static int dr_mode_open(struct inode *inode, struct file *file) 853 + { 854 + return single_open(file, dr_mode_show, inode->i_private); 855 + } 856 + 857 + static const struct file_operations dr_mode_fops = { 858 + .owner = THIS_MODULE, 859 + .open = dr_mode_open, 860 + .read = seq_read, 861 + .llseek = seq_lseek, 862 + .release = single_release, 863 + }; 864 + 728 865 int dwc2_debugfs_init(struct dwc2_hsotg *hsotg) 729 866 { 730 867 int ret; ··· 872 735 ret = -ENOMEM; 873 736 goto err0; 874 737 } 738 + 739 + file = debugfs_create_file("params", 0444, 740 + hsotg->debug_root, 741 + hsotg, &params_fops); 742 + if (IS_ERR(file)) 743 + dev_err(hsotg->dev, "%s: failed to create params\n", __func__); 744 + 745 + file = debugfs_create_file("hw_params", 0444, 746 + hsotg->debug_root, 747 + hsotg, &hw_params_fops); 748 + if (IS_ERR(file)) 749 + dev_err(hsotg->dev, "%s: failed to create hw_params\n", 750 + __func__); 751 + 752 + file = debugfs_create_file("dr_mode", 0444, 753 + hsotg->debug_root, 754 + hsotg, &dr_mode_fops); 755 + if (IS_ERR(file)) 756 + dev_err(hsotg->dev, "%s: failed to create dr_mode\n", __func__); 875 757 876 758 /* Add gadget debugfs nodes */ 877 759 dwc2_hsotg_create_debug(hsotg); ··· 906 750 hsotg->regset->nregs = ARRAY_SIZE(dwc2_regs); 907 751 hsotg->regset->base = hsotg->regs; 908 752 909 - file = debugfs_create_regset32("regdump", S_IRUGO, hsotg->debug_root, 910 - hsotg->regset); 753 + file = debugfs_create_regset32("regdump", 0444, hsotg->debug_root, 754 + hsotg->regset); 911 755 if (!file) { 912 756 ret = -ENOMEM; 913 757 goto err1;
+206 -107
drivers/usb/dwc2/gadget.c
··· 171 171 * request. 172 172 */ 173 173 static void dwc2_hsotg_ctrl_epint(struct dwc2_hsotg *hsotg, 174 - unsigned int ep, unsigned int dir_in, 174 + unsigned int ep, unsigned int dir_in, 175 175 unsigned int en) 176 176 { 177 177 unsigned long flags; ··· 189 189 daint &= ~bit; 190 190 dwc2_writel(daint, hsotg->regs + DAINTMSK); 191 191 local_irq_restore(flags); 192 + } 193 + 194 + /** 195 + * dwc2_hsotg_tx_fifo_count - return count of TX FIFOs in device mode 196 + */ 197 + int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg) 198 + { 199 + if (hsotg->hw_params.en_multiple_tx_fifo) 200 + /* In dedicated FIFO mode we need count of IN EPs */ 201 + return (dwc2_readl(hsotg->regs + GHWCFG4) & 202 + GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT; 203 + else 204 + /* In shared FIFO mode we need count of Periodic IN EPs */ 205 + return hsotg->hw_params.num_dev_perio_in_ep; 206 + } 207 + 208 + /** 209 + * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs 210 + */ 211 + static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg) 212 + { 213 + int val = 0; 214 + int i; 215 + u32 ep_dirs; 216 + 217 + /* 218 + * Don't need additional space for ep info control registers in 219 + * slave mode. 220 + */ 221 + if (!using_dma(hsotg)) { 222 + dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n"); 223 + return 0; 224 + } 225 + 226 + /* 227 + * Buffer DMA mode - 1 location per endpoit 228 + * Descriptor DMA mode - 4 locations per endpoint 229 + */ 230 + ep_dirs = hsotg->hw_params.dev_ep_dirs; 231 + 232 + for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) { 233 + val += ep_dirs & 3 ? 
1 : 2; 234 + ep_dirs >>= 2; 235 + } 236 + 237 + if (using_desc_dma(hsotg)) 238 + val = val * 4; 239 + 240 + return val; 241 + } 242 + 243 + /** 244 + * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for 245 + * device mode TX FIFOs 246 + */ 247 + int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) 248 + { 249 + int ep_info_size; 250 + int addr; 251 + int tx_addr_max; 252 + u32 np_tx_fifo_size; 253 + 254 + np_tx_fifo_size = min_t(u32, hsotg->hw_params.dev_nperio_tx_fifo_size, 255 + hsotg->params.g_np_tx_fifo_size); 256 + 257 + /* Get Endpoint Info Control block size in DWORDs. */ 258 + ep_info_size = dwc2_hsotg_ep_info_size(hsotg); 259 + tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size; 260 + 261 + addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size; 262 + if (tx_addr_max <= addr) 263 + return 0; 264 + 265 + return tx_addr_max - addr; 266 + } 267 + 268 + /** 269 + * dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode 270 + * TX FIFOs 271 + */ 272 + int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg) 273 + { 274 + int tx_fifo_count; 275 + int tx_fifo_depth; 276 + 277 + tx_fifo_depth = dwc2_hsotg_tx_fifo_total_depth(hsotg); 278 + 279 + tx_fifo_count = dwc2_hsotg_tx_fifo_count(hsotg); 280 + 281 + if (!tx_fifo_count) 282 + return tx_fifo_depth; 283 + else 284 + return tx_fifo_depth / tx_fifo_count; 192 285 } 193 286 194 287 /** ··· 334 241 val = dwc2_readl(hsotg->regs + DPTXFSIZN(ep)); 335 242 } 336 243 244 + dwc2_writel(hsotg->hw_params.total_fifo_size | 245 + addr << GDFIFOCFG_EPINFOBASE_SHIFT, 246 + hsotg->regs + GDFIFOCFG); 337 247 /* 338 248 * according to p428 of the design guide, we need to ensure that 339 249 * all fifos are flushed before continuing ··· 373 277 * Allocate a new USB request structure appropriate for the specified endpoint 374 278 */ 375 279 static struct usb_request *dwc2_hsotg_ep_alloc_request(struct usb_ep *ep, 376 - gfp_t flags) 280 + gfp_t flags) 377 281 { 378 282 
struct dwc2_hsotg_req *req; 379 283 380 - req = kzalloc(sizeof(struct dwc2_hsotg_req), flags); 284 + req = kzalloc(sizeof(*req), flags); 381 285 if (!req) 382 286 return NULL; 383 287 ··· 408 312 * of a request to ensure the buffer is ready for access by the caller. 409 313 */ 410 314 static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg, 411 - struct dwc2_hsotg_ep *hs_ep, 315 + struct dwc2_hsotg_ep *hs_ep, 412 316 struct dwc2_hsotg_req *hs_req) 413 317 { 414 318 struct usb_request *req = &hs_req->req; 319 + 415 320 usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); 416 321 } 417 322 ··· 481 384 * This routine is only needed for PIO 482 385 */ 483 386 static int dwc2_hsotg_write_fifo(struct dwc2_hsotg *hsotg, 484 - struct dwc2_hsotg_ep *hs_ep, 387 + struct dwc2_hsotg_ep *hs_ep, 485 388 struct dwc2_hsotg_req *hs_req) 486 389 { 487 390 bool periodic = is_ep_periodic(hs_ep); ··· 563 466 max_transfer = hs_ep->ep.maxpacket * hs_ep->mc; 564 467 565 468 dev_dbg(hsotg->dev, "%s: GNPTXSTS=%08x, can=%d, to=%d, max_transfer %d\n", 566 - __func__, gnptxsts, can_write, to_write, max_transfer); 469 + __func__, gnptxsts, can_write, to_write, max_transfer); 567 470 568 471 /* 569 472 * limit to 512 bytes of data, it seems at least on the non-periodic ··· 584 487 /* it's needed only when we do not use dedicated fifos */ 585 488 if (!hsotg->dedicated_fifos) 586 489 dwc2_hsotg_en_gsint(hsotg, 587 - periodic ? GINTSTS_PTXFEMP : 490 + periodic ? GINTSTS_PTXFEMP : 588 491 GINTSTS_NPTXFEMP); 589 492 } 590 493 ··· 613 516 /* it's needed only when we do not use dedicated fifos */ 614 517 if (!hsotg->dedicated_fifos) 615 518 dwc2_hsotg_en_gsint(hsotg, 616 - periodic ? GINTSTS_PTXFEMP : 519 + periodic ? 
GINTSTS_PTXFEMP : 617 520 GINTSTS_NPTXFEMP); 618 521 } 619 522 620 523 dev_dbg(hsotg->dev, "write %d/%d, can_write %d, done %d\n", 621 - to_write, hs_req->req.length, can_write, buf_pos); 524 + to_write, hs_req->req.length, can_write, buf_pos); 622 525 623 526 if (to_write <= 0) 624 527 return -ENOSPC; ··· 644 547 * Return the maximum data that can be queued in one go on a given endpoint 645 548 * so that transfers that are too long can be split. 646 549 */ 647 - static unsigned get_ep_limit(struct dwc2_hsotg_ep *hs_ep) 550 + static unsigned int get_ep_limit(struct dwc2_hsotg_ep *hs_ep) 648 551 { 649 552 int index = hs_ep->index; 650 - unsigned maxsize; 651 - unsigned maxpkt; 553 + unsigned int maxsize; 554 + unsigned int maxpkt; 652 555 653 556 if (index != 0) { 654 557 maxsize = DXEPTSIZ_XFERSIZE_LIMIT + 1; 655 558 maxpkt = DXEPTSIZ_PKTCNT_LIMIT + 1; 656 559 } else { 657 - maxsize = 64+64; 560 + maxsize = 64 + 64; 658 561 if (hs_ep->dir_in) 659 562 maxpkt = DIEPTSIZ0_PKTCNT_LIMIT + 1; 660 563 else ··· 677 580 } 678 581 679 582 /** 680 - * dwc2_hsotg_read_frameno - read current frame number 681 - * @hsotg: The device instance 682 - * 683 - * Return the current frame number 684 - */ 583 + * dwc2_hsotg_read_frameno - read current frame number 584 + * @hsotg: The device instance 585 + * 586 + * Return the current frame number 587 + */ 685 588 static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) 686 589 { 687 590 u32 dsts; ··· 971 874 * appropriately, and writing any data to the FIFOs. 
972 875 */ 973 876 static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, 974 - struct dwc2_hsotg_ep *hs_ep, 877 + struct dwc2_hsotg_ep *hs_ep, 975 878 struct dwc2_hsotg_req *hs_req, 976 879 bool continuing) 977 880 { ··· 982 885 u32 epsize_reg; 983 886 u32 epsize; 984 887 u32 ctrl; 985 - unsigned length; 986 - unsigned packets; 987 - unsigned maxreq; 888 + unsigned int length; 889 + unsigned int packets; 890 + unsigned int maxreq; 988 891 unsigned int dma_reg; 989 892 990 893 if (index != 0) { ··· 1063 966 if (dir_in && ureq->zero && !continuing) { 1064 967 /* Test if zlp is actually required. */ 1065 968 if ((ureq->length >= hs_ep->ep.maxpacket) && 1066 - !(ureq->length % hs_ep->ep.maxpacket)) 969 + !(ureq->length % hs_ep->ep.maxpacket)) 1067 970 hs_ep->send_zlp = 1; 1068 971 } 1069 972 ··· 1167 1070 /* check ep is enabled */ 1168 1071 if (!(dwc2_readl(hsotg->regs + epctrl_reg) & DXEPCTL_EPENA)) 1169 1072 dev_dbg(hsotg->dev, 1170 - "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n", 1073 + "ep%d: failed to become enabled (DXEPCTL=0x%08x)?\n", 1171 1074 index, dwc2_readl(hsotg->regs + epctrl_reg)); 1172 1075 1173 1076 dev_dbg(hsotg->dev, "%s: DXEPCTL=0x%08x\n", ··· 1190 1093 * cleanup on completion. 
1191 1094 */ 1192 1095 static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg, 1193 - struct dwc2_hsotg_ep *hs_ep, 1096 + struct dwc2_hsotg_ep *hs_ep, 1194 1097 struct usb_request *req) 1195 1098 { 1196 1099 int ret; ··· 1209 1112 } 1210 1113 1211 1114 static int dwc2_hsotg_handle_unaligned_buf_start(struct dwc2_hsotg *hsotg, 1212 - struct dwc2_hsotg_ep *hs_ep, struct dwc2_hsotg_req *hs_req) 1115 + struct dwc2_hsotg_ep *hs_ep, 1116 + struct dwc2_hsotg_req *hs_req) 1213 1117 { 1214 1118 void *req_buf = hs_req->req.buf; 1215 1119 ··· 1221 1123 WARN_ON(hs_req->saved_req_buf); 1222 1124 1223 1125 dev_dbg(hsotg->dev, "%s: %s: buf=%p length=%d\n", __func__, 1224 - hs_ep->ep.name, req_buf, hs_req->req.length); 1126 + hs_ep->ep.name, req_buf, hs_req->req.length); 1225 1127 1226 1128 hs_req->req.buf = kmalloc(hs_req->req.length, GFP_ATOMIC); 1227 1129 if (!hs_req->req.buf) { ··· 1240 1142 return 0; 1241 1143 } 1242 1144 1243 - static void dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg, 1244 - struct dwc2_hsotg_ep *hs_ep, struct dwc2_hsotg_req *hs_req) 1145 + static void 1146 + dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg, 1147 + struct dwc2_hsotg_ep *hs_ep, 1148 + struct dwc2_hsotg_req *hs_req) 1245 1149 { 1246 1150 /* If dma is not being used or buffer was aligned */ 1247 1151 if (!using_dma(hsotg) || !hs_req->saved_req_buf) ··· 1255 1155 /* Copy data from bounce buffer on successful out transfer */ 1256 1156 if (!hs_ep->dir_in && !hs_req->req.status) 1257 1157 memcpy(hs_req->saved_req_buf, hs_req->req.buf, 1258 - hs_req->req.actual); 1158 + hs_req->req.actual); 1259 1159 1260 1160 /* Free bounce buffer */ 1261 1161 kfree(hs_req->req.buf); ··· 1324 1224 } 1325 1225 1326 1226 static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req, 1327 - gfp_t gfp_flags) 1227 + gfp_t gfp_flags) 1328 1228 { 1329 1229 struct dwc2_hsotg_req *hs_req = our_req(req); 1330 1230 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); ··· 1339 1239 /* 
Prevent new request submission when controller is suspended */ 1340 1240 if (hs->lx_state == DWC2_L2) { 1341 1241 dev_dbg(hs->dev, "%s: don't submit request while suspended\n", 1342 - __func__); 1242 + __func__); 1343 1243 return -EAGAIN; 1344 1244 } 1345 1245 ··· 1400 1300 } 1401 1301 1402 1302 static int dwc2_hsotg_ep_queue_lock(struct usb_ep *ep, struct usb_request *req, 1403 - gfp_t gfp_flags) 1303 + gfp_t gfp_flags) 1404 1304 { 1405 1305 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1406 1306 struct dwc2_hsotg *hs = hs_ep->parent; ··· 1415 1315 } 1416 1316 1417 1317 static void dwc2_hsotg_ep_free_request(struct usb_ep *ep, 1418 - struct usb_request *req) 1318 + struct usb_request *req) 1419 1319 { 1420 1320 struct dwc2_hsotg_req *hs_req = our_req(req); 1421 1321 ··· 1431 1331 * submitted that need cleaning up. 1432 1332 */ 1433 1333 static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, 1434 - struct usb_request *req) 1334 + struct usb_request *req) 1435 1335 { 1436 1336 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1437 1337 struct dwc2_hsotg *hsotg = hs_ep->parent; ··· 1450 1350 * structure, or return NULL if it is not a valid endpoint. 1451 1351 */ 1452 1352 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, 1453 - u32 windex) 1353 + u32 windex) 1454 1354 { 1455 1355 struct dwc2_hsotg_ep *ep; 1456 1356 int dir = (windex & USB_DIR_IN) ? 1 : 0; ··· 1507 1407 * an internal method of sending replies to certain control requests, etc. 
1508 1408 */ 1509 1409 static int dwc2_hsotg_send_reply(struct dwc2_hsotg *hsotg, 1510 - struct dwc2_hsotg_ep *ep, 1410 + struct dwc2_hsotg_ep *ep, 1511 1411 void *buff, 1512 1412 int length) 1513 1413 { ··· 1550 1450 * @ctrl: USB control request 1551 1451 */ 1552 1452 static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg, 1553 - struct usb_ctrlrequest *ctrl) 1453 + struct usb_ctrlrequest *ctrl) 1554 1454 { 1555 1455 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1556 1456 struct dwc2_hsotg_ep *ep; ··· 1566 1466 1567 1467 switch (ctrl->bRequestType & USB_RECIP_MASK) { 1568 1468 case USB_RECIP_DEVICE: 1569 - reply = cpu_to_le16(0); /* bit 0 => self powered, 1570 - * bit 1 => remote wakeup */ 1469 + /* 1470 + * bit 0 => self powered 1471 + * bit 1 => remote wakeup 1472 + */ 1473 + reply = cpu_to_le16(0); 1571 1474 break; 1572 1475 1573 1476 case USB_RECIP_INTERFACE: ··· 1658 1555 * @ctrl: USB control request 1659 1556 */ 1660 1557 static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg, 1661 - struct usb_ctrlrequest *ctrl) 1558 + struct usb_ctrlrequest *ctrl) 1662 1559 { 1663 1560 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1664 1561 struct dwc2_hsotg_req *hs_req; ··· 1743 1640 } 1744 1641 1745 1642 /* If we have pending request, then start it */ 1746 - if (!ep->req) { 1643 + if (!ep->req) 1747 1644 dwc2_gadget_start_next_request(ep); 1748 - } 1749 1645 } 1750 1646 1751 1647 break; ··· 1807 1705 * gadget driver). 
1808 1706 */ 1809 1707 static void dwc2_hsotg_process_control(struct dwc2_hsotg *hsotg, 1810 - struct usb_ctrlrequest *ctrl) 1708 + struct usb_ctrlrequest *ctrl) 1811 1709 { 1812 1710 struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0]; 1813 1711 int ret = 0; ··· 1883 1781 * EP0 setup packets 1884 1782 */ 1885 1783 static void dwc2_hsotg_complete_setup(struct usb_ep *ep, 1886 - struct usb_request *req) 1784 + struct usb_request *req) 1887 1785 { 1888 1786 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 1889 1787 struct dwc2_hsotg *hsotg = hs_ep->parent; ··· 1941 1839 } 1942 1840 1943 1841 static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg, 1944 - struct dwc2_hsotg_ep *hs_ep) 1842 + struct dwc2_hsotg_ep *hs_ep) 1945 1843 { 1946 1844 u32 ctrl; 1947 1845 u8 index = hs_ep->index; ··· 1987 1885 * Note, expects the ep to already be locked as appropriate. 1988 1886 */ 1989 1887 static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg, 1990 - struct dwc2_hsotg_ep *hs_ep, 1888 + struct dwc2_hsotg_ep *hs_ep, 1991 1889 struct dwc2_hsotg_req *hs_req, 1992 1890 int result) 1993 1891 { 1994 - 1995 1892 if (!hs_req) { 1996 1893 dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__); 1997 1894 return; ··· 2036 1935 * so be careful when doing this. 
2037 1936 */ 2038 1937 2039 - if (!hs_ep->req && result >= 0) { 1938 + if (!hs_ep->req && result >= 0) 2040 1939 dwc2_gadget_start_next_request(hs_ep); 2041 - } 2042 1940 } 2043 1941 2044 1942 /* ··· 2168 2068 int max_req; 2169 2069 int read_ptr; 2170 2070 2171 - 2172 2071 if (!hs_req) { 2173 2072 u32 epctl = dwc2_readl(hsotg->regs + DOEPCTL(ep_idx)); 2174 2073 int ptr; 2175 2074 2176 2075 dev_dbg(hsotg->dev, 2177 - "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n", 2076 + "%s: FIFO %d bytes on ep%d but no req (DXEPCTl=0x%08x)\n", 2178 2077 __func__, size, ep_idx, epctl); 2179 2078 2180 2079 /* dump the data from the FIFO, we've nothing we can do */ ··· 2233 2134 } 2234 2135 2235 2136 static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, 2236 - u32 epctl_reg) 2137 + u32 epctl_reg) 2237 2138 { 2238 2139 u32 ctrl; 2239 2140 ··· 2290 2191 struct dwc2_hsotg_ep *hs_ep = hsotg->eps_out[epnum]; 2291 2192 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2292 2193 struct usb_request *req = &hs_req->req; 2293 - unsigned size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 2194 + unsigned int size_left = DXEPTSIZ_XFERSIZE_GET(epsize); 2294 2195 int result = 0; 2295 2196 2296 2197 if (!hs_req) { ··· 2309 2210 size_left = dwc2_gadget_get_xfersize_ddma(hs_ep); 2310 2211 2311 2212 if (using_dma(hsotg)) { 2312 - unsigned size_done; 2213 + unsigned int size_done; 2313 2214 2314 2215 /* 2315 2216 * Calculate the size of the transfer by checking how much ··· 2394 2295 size >>= GRXSTS_BYTECNT_SHIFT; 2395 2296 2396 2297 dev_dbg(hsotg->dev, "%s: GRXSTSP=0x%08x (%d@%d)\n", 2397 - __func__, grxstsr, size, epnum); 2298 + __func__, grxstsr, size, epnum); 2398 2299 2399 2300 switch ((status & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT) { 2400 2301 case GRXSTS_PKTSTS_GLOBALOUTNAK: ··· 2569 2470 * make an attempt to write data into the FIFO. 
2570 2471 */ 2571 2472 static int dwc2_hsotg_trytx(struct dwc2_hsotg *hsotg, 2572 - struct dwc2_hsotg_ep *hs_ep) 2473 + struct dwc2_hsotg_ep *hs_ep) 2573 2474 { 2574 2475 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2575 2476 ··· 2580 2481 */ 2581 2482 if (hs_ep->index != 0) 2582 2483 dwc2_hsotg_ctrl_epint(hsotg, hs_ep->index, 2583 - hs_ep->dir_in, 0); 2484 + hs_ep->dir_in, 0); 2584 2485 return 0; 2585 2486 } 2586 2487 ··· 2602 2503 * call the relevant completion routines. 2603 2504 */ 2604 2505 static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, 2605 - struct dwc2_hsotg_ep *hs_ep) 2506 + struct dwc2_hsotg_ep *hs_ep) 2606 2507 { 2607 2508 struct dwc2_hsotg_req *hs_req = hs_ep->req; 2608 2509 u32 epsize = dwc2_readl(hsotg->regs + DIEPTSIZ(hs_ep->index)); ··· 2630 2531 ret = dwc2_hsotg_set_test_mode(hsotg, hsotg->test_mode); 2631 2532 if (ret < 0) { 2632 2533 dev_dbg(hsotg->dev, "Invalid Test #%d\n", 2633 - hsotg->test_mode); 2534 + hsotg->test_mode); 2634 2535 dwc2_hsotg_stall_ep0(hsotg); 2635 2536 return; 2636 2537 } ··· 2850 2751 } 2851 2752 2852 2753 /** 2853 - * dwc2_gadget_handle_nak - handle NAK interrupt 2854 - * @hs_ep: The endpoint on which interrupt is asserted. 2855 - * 2856 - * This is starting point for ISOC-IN transfer, synchronization done with 2857 - * first IN token received from host while corresponding EP is disabled. 2858 - * 2859 - * Device does not know when first one token will arrive from host. On first 2860 - * token arrival HW generates 2 interrupts: 'in token received while FIFO empty' 2861 - * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was 2862 - * sent in response to that as there was no data in FIFO. SW is basing on this 2863 - * interrupt to obtain frame in which token has come and then based on the 2864 - * interval calculates next frame for transfer. 2865 - */ 2754 + * dwc2_gadget_handle_nak - handle NAK interrupt 2755 + * @hs_ep: The endpoint on which interrupt is asserted. 
2756 + * 2757 + * This is starting point for ISOC-IN transfer, synchronization done with 2758 + * first IN token received from host while corresponding EP is disabled. 2759 + * 2760 + * Device does not know when first one token will arrive from host. On first 2761 + * token arrival HW generates 2 interrupts: 'in token received while FIFO empty' 2762 + * and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived and ZLP was 2763 + * sent in response to that as there was no data in FIFO. SW is basing on this 2764 + * interrupt to obtain frame in which token has come and then based on the 2765 + * interval calculates next frame for transfer. 2766 + */ 2866 2767 static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep) 2867 2768 { 2868 2769 struct dwc2_hsotg *hsotg = hs_ep->parent; ··· 2906 2807 * Process and clear any interrupt pending for an individual endpoint 2907 2808 */ 2908 2809 static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx, 2909 - int dir_in) 2810 + int dir_in) 2910 2811 { 2911 2812 struct dwc2_hsotg_ep *hs_ep = index_to_ep(hsotg, idx, dir_in); 2912 2813 u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx); ··· 2923 2824 2924 2825 if (!hs_ep) { 2925 2826 dev_err(hsotg->dev, "%s:Interrupt for unconfigured ep%d(%s)\n", 2926 - __func__, idx, dir_in ? "in" : "out"); 2827 + __func__, idx, dir_in ? 
"in" : "out"); 2927 2828 return; 2928 2829 } 2929 2830 ··· 3158 3059 int result) 3159 3060 { 3160 3061 struct dwc2_hsotg_req *req, *treq; 3161 - unsigned size; 3062 + unsigned int size; 3162 3063 3163 3064 ep->req = NULL; 3164 3065 3165 3066 list_for_each_entry_safe(req, treq, &ep->queue, queue) 3166 3067 dwc2_hsotg_complete_request(hsotg, ep, req, 3167 - result); 3068 + result); 3168 3069 3169 3070 if (!hsotg->dedicated_fifos) 3170 3071 return; ··· 3183 3084 */ 3184 3085 void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg) 3185 3086 { 3186 - unsigned ep; 3087 + unsigned int ep; 3187 3088 3188 3089 if (!hsotg->connected) 3189 3090 return; ··· 3194 3095 for (ep = 0; ep < hsotg->num_of_eps; ep++) { 3195 3096 if (hsotg->eps_in[ep]) 3196 3097 kill_all_requests(hsotg, hsotg->eps_in[ep], 3197 - -ESHUTDOWN); 3098 + -ESHUTDOWN); 3198 3099 if (hsotg->eps_out[ep]) 3199 3100 kill_all_requests(hsotg, hsotg->eps_out[ep], 3200 - -ESHUTDOWN); 3101 + -ESHUTDOWN); 3201 3102 } 3202 3103 3203 3104 call_gadget(hsotg, disconnect); ··· 3246 3147 * Issue a soft reset to the core, and await the core finishing it. 
3247 3148 */ 3248 3149 void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, 3249 - bool is_usb_reset) 3150 + bool is_usb_reset) 3250 3151 { 3251 3152 u32 intmsk; 3252 3153 u32 val; ··· 3257 3158 kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); 3258 3159 3259 3160 if (!is_usb_reset) 3260 - if (dwc2_core_reset(hsotg)) 3161 + if (dwc2_core_reset(hsotg, true)) 3261 3162 return; 3262 3163 3263 3164 /* ··· 3320 3221 if (!using_desc_dma(hsotg)) 3321 3222 intmsk |= GINTSTS_INCOMPL_SOIN | GINTSTS_INCOMPL_SOOUT; 3322 3223 3323 - if (hsotg->params.external_id_pin_ctl <= 0) 3224 + if (!hsotg->params.external_id_pin_ctl) 3324 3225 intmsk |= GINTSTS_CONIDSTSCHNG; 3325 3226 3326 3227 dwc2_writel(intmsk, hsotg->regs + GINTMSK); ··· 3561 3462 } 3562 3463 3563 3464 if (gintsts & (GINTSTS_USBRST | GINTSTS_RESETDET)) { 3564 - 3565 3465 u32 usb_status = dwc2_readl(hsotg->regs + GOTGCTL); 3566 3466 u32 connected = hsotg->connected; 3567 3467 ··· 3699 3601 */ 3700 3602 3701 3603 if (gintsts & IRQ_RETRY_MASK && --retry_count > 0) 3702 - goto irq_retry; 3604 + goto irq_retry; 3703 3605 3704 3606 spin_unlock(&hsotg->lock); 3705 3607 ··· 3803 3705 * This is called from the USB gadget code's usb_ep_enable(). 
3804 3706 */ 3805 3707 static int dwc2_hsotg_ep_enable(struct usb_ep *ep, 3806 - const struct usb_endpoint_descriptor *desc) 3708 + const struct usb_endpoint_descriptor *desc) 3807 3709 { 3808 3710 struct dwc2_hsotg_ep *hs_ep = our_ep(ep); 3809 3711 struct dwc2_hsotg *hsotg = hs_ep->parent; ··· 3925 3827 if (dir_in && hsotg->dedicated_fifos) { 3926 3828 u32 fifo_index = 0; 3927 3829 u32 fifo_size = UINT_MAX; 3928 - size = hs_ep->ep.maxpacket*hs_ep->mc; 3830 + 3831 + size = hs_ep->ep.maxpacket * hs_ep->mc; 3929 3832 for (i = 1; i < hsotg->num_of_eps; ++i) { 3930 - if (hsotg->fifo_map & (1<<i)) 3833 + if (hsotg->fifo_map & (1 << i)) 3931 3834 continue; 3932 3835 val = dwc2_readl(hsotg->regs + DPTXFSIZN(i)); 3933 - val = (val >> FIFOSIZE_DEPTH_SHIFT)*4; 3836 + val = (val >> FIFOSIZE_DEPTH_SHIFT) * 4; 3934 3837 if (val < size) 3935 3838 continue; 3936 3839 /* Search for smallest acceptable fifo */ ··· 4132 4033 epctl &= ~DXEPCTL_STALL; 4133 4034 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 4134 4035 if (xfertype == DXEPCTL_EPTYPE_BULK || 4135 - xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4136 - epctl |= DXEPCTL_SETD0PID; 4036 + xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4037 + epctl |= DXEPCTL_SETD0PID; 4137 4038 } 4138 4039 dwc2_writel(epctl, hs->regs + epreg); 4139 4040 } else { 4140 - 4141 4041 epreg = DOEPCTL(index); 4142 4042 epctl = dwc2_readl(hs->regs + epreg); 4143 4043 4144 - if (value) 4044 + if (value) { 4145 4045 epctl |= DXEPCTL_STALL; 4146 - else { 4046 + } else { 4147 4047 epctl &= ~DXEPCTL_STALL; 4148 4048 xfertype = epctl & DXEPCTL_EPTYPE_MASK; 4149 4049 if (xfertype == DXEPCTL_EPTYPE_BULK || 4150 - xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4151 - epctl |= DXEPCTL_SETD0PID; 4050 + xfertype == DXEPCTL_EPTYPE_INTERRUPT) 4051 + epctl |= DXEPCTL_SETD0PID; 4152 4052 } 4153 4053 dwc2_writel(epctl, hs->regs + epreg); 4154 4054 } ··· 4188 4090 }; 4189 4091 4190 4092 /** 4191 - * dwc2_hsotg_init - initalize the usb core 4093 + * dwc2_hsotg_init - initialize the usb core 4192 4094 
* @hsotg: The driver state 4193 4095 */ 4194 4096 static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) ··· 4242 4144 * to work. 4243 4145 */ 4244 4146 static int dwc2_hsotg_udc_start(struct usb_gadget *gadget, 4245 - struct usb_gadget_driver *driver) 4147 + struct usb_gadget_driver *driver) 4246 4148 { 4247 4149 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4248 4150 unsigned long flags; ··· 4365 4267 unsigned long flags = 0; 4366 4268 4367 4269 dev_dbg(hsotg->dev, "%s: is_on: %d op_state: %d\n", __func__, is_on, 4368 - hsotg->op_state); 4270 + hsotg->op_state); 4369 4271 4370 4272 /* Don't modify pullup state while in host mode */ 4371 4273 if (hsotg->op_state != OTG_STATE_B_PERIPHERAL) { ··· 4427 4329 * 4428 4330 * Report how much power the device may consume to the phy. 4429 4331 */ 4430 - static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned mA) 4332 + static int dwc2_hsotg_vbus_draw(struct usb_gadget *gadget, unsigned int mA) 4431 4333 { 4432 4334 struct dwc2_hsotg *hsotg = to_hsotg(gadget); 4433 4335 ··· 4456 4358 * direction information and other state that may be required. 
4457 4359 */ 4458 4360 static void dwc2_hsotg_initep(struct dwc2_hsotg *hsotg, 4459 - struct dwc2_hsotg_ep *hs_ep, 4361 + struct dwc2_hsotg_ep *hs_ep, 4460 4362 int epnum, 4461 4363 bool dir_in) 4462 4364 { ··· 4513 4415 4514 4416 if (using_dma(hsotg)) { 4515 4417 u32 next = DXEPCTL_NEXTEP((epnum + 1) % 15); 4418 + 4516 4419 if (dir_in) 4517 4420 dwc2_writel(next, hsotg->regs + DIEPCTL(epnum)); 4518 4421 else ··· 4540 4441 /* Add ep0 */ 4541 4442 hsotg->num_of_eps++; 4542 4443 4543 - hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, sizeof(struct dwc2_hsotg_ep), 4544 - GFP_KERNEL); 4444 + hsotg->eps_in[0] = devm_kzalloc(hsotg->dev, 4445 + sizeof(struct dwc2_hsotg_ep), 4446 + GFP_KERNEL); 4545 4447 if (!hsotg->eps_in[0]) 4546 4448 return -ENOMEM; 4547 4449 /* Same dwc2_hsotg_ep is used in both directions for ep0 */ ··· 4621 4521 idx, dwc2_readl(regs + DOEPCTL(idx)), 4622 4522 dwc2_readl(regs + DOEPTSIZ(idx)), 4623 4523 dwc2_readl(regs + DOEPDMA(idx))); 4624 - 4625 4524 } 4626 4525 4627 4526 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", ··· 4675 4576 } 4676 4577 4677 4578 ret = devm_request_irq(hsotg->dev, irq, dwc2_hsotg_irq, IRQF_SHARED, 4678 - dev_name(hsotg->dev), hsotg); 4579 + dev_name(hsotg->dev), hsotg); 4679 4580 if (ret < 0) { 4680 4581 dev_err(dev, "cannot claim IRQ for gadget\n"); 4681 4582 return ret; ··· 4706 4607 for (epnum = 0; epnum < hsotg->num_of_eps; epnum++) { 4707 4608 if (hsotg->eps_in[epnum]) 4708 4609 dwc2_hsotg_initep(hsotg, hsotg->eps_in[epnum], 4709 - epnum, 1); 4610 + epnum, 1); 4710 4611 if (hsotg->eps_out[epnum]) 4711 4612 dwc2_hsotg_initep(hsotg, hsotg->eps_out[epnum], 4712 - epnum, 0); 4613 + epnum, 0); 4713 4614 } 4714 4615 4715 4616 ret = usb_add_gadget_udc(dev, &hsotg->gadget);
+171 -92
drivers/usb/dwc2/hcd.c
··· 42 42 #include <linux/module.h> 43 43 #include <linux/spinlock.h> 44 44 #include <linux/interrupt.h> 45 + #include <linux/platform_device.h> 45 46 #include <linux/dma-mapping.h> 46 47 #include <linux/delay.h> 47 48 #include <linux/io.h> ··· 54 53 55 54 #include "core.h" 56 55 #include "hcd.h" 56 + 57 + static void dwc2_port_resume(struct dwc2_hsotg *hsotg); 57 58 58 59 /* 59 60 * ========================================================================= ··· 82 79 /* Enable the interrupts in the GINTMSK */ 83 80 intmsk = GINTSTS_MODEMIS | GINTSTS_OTGINT; 84 81 85 - if (hsotg->params.host_dma <= 0) 82 + if (!hsotg->params.host_dma) 86 83 intmsk |= GINTSTS_RXFLVL; 87 - if (hsotg->params.external_id_pin_ctl <= 0) 84 + if (!hsotg->params.external_id_pin_ctl) 88 85 intmsk |= GINTSTS_CONIDSTSCHNG; 89 86 90 87 intmsk |= GINTSTS_WKUPINT | GINTSTS_USBSUSP | ··· 103 100 104 101 if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 105 102 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 106 - hsotg->params.ulpi_fs_ls > 0) || 103 + hsotg->params.ulpi_fs_ls) || 107 104 hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS) { 108 105 /* Full speed PHY */ 109 106 val = HCFG_FSLSPCLKSEL_48_MHZ; ··· 155 152 if (dwc2_is_host_mode(hsotg)) 156 153 dwc2_init_fs_ls_pclk_sel(hsotg); 157 154 158 - if (hsotg->params.i2c_enable > 0) { 155 + if (hsotg->params.i2c_enable) { 159 156 dev_dbg(hsotg->dev, "FS PHY enabling I2C\n"); 160 157 161 158 /* Program GUSBCFG.OtgUtmiFsSel to I2C */ ··· 198 195 dev_dbg(hsotg->dev, "HS ULPI PHY selected\n"); 199 196 usbcfg |= GUSBCFG_ULPI_UTMI_SEL; 200 197 usbcfg &= ~(GUSBCFG_PHYIF16 | GUSBCFG_DDRSEL); 201 - if (hsotg->params.phy_ulpi_ddr > 0) 198 + if (hsotg->params.phy_ulpi_ddr) 202 199 usbcfg |= GUSBCFG_DDRSEL; 203 200 break; 204 201 case DWC2_PHY_TYPE_PARAM_UTMI: ··· 249 246 250 247 if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI && 251 248 hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED && 252 - 
hsotg->params.ulpi_fs_ls > 0) { 249 + hsotg->params.ulpi_fs_ls) { 253 250 dev_dbg(hsotg->dev, "Setting ULPI FSLS\n"); 254 251 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); 255 252 usbcfg |= GUSBCFG_ULPI_FS_LS; ··· 293 290 hsotg->params.host_dma, 294 291 hsotg->params.dma_desc_enable); 295 292 296 - if (hsotg->params.host_dma > 0) { 297 - if (hsotg->params.dma_desc_enable > 0) 293 + if (hsotg->params.host_dma) { 294 + if (hsotg->params.dma_desc_enable) 298 295 dev_dbg(hsotg->dev, "Using Descriptor DMA mode\n"); 299 296 else 300 297 dev_dbg(hsotg->dev, "Using Buffer DMA mode\n"); 301 298 } else { 302 299 dev_dbg(hsotg->dev, "Using Slave mode\n"); 303 - hsotg->params.dma_desc_enable = 0; 300 + hsotg->params.dma_desc_enable = false; 304 301 } 305 302 306 - if (hsotg->params.host_dma > 0) 303 + if (hsotg->params.host_dma) 307 304 ahbcfg |= GAHBCFG_DMA_EN; 308 305 309 306 dwc2_writel(ahbcfg, hsotg->regs + GAHBCFG); ··· 494 491 dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n", 495 492 dwc2_readl(hsotg->regs + HPTXFSIZ)); 496 493 497 - if (hsotg->params.en_multiple_tx_fifo > 0 && 498 - hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) { 494 + if (hsotg->params.en_multiple_tx_fifo && 495 + hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_91a) { 499 496 /* 497 + * This feature was implemented in 2.91a version 500 498 * Global DFIFOCFG calculation for Host mode - 501 499 * include RxFIFO, NPTXFIFO and HPTXFIFO 502 500 */ ··· 775 771 * For Descriptor DMA mode core halts the channel on AHB error. 776 772 * Interrupt is not required. 
777 773 */ 778 - if (hsotg->params.dma_desc_enable <= 0) { 774 + if (!hsotg->params.dma_desc_enable) { 779 775 if (dbg_hc(chan)) 780 776 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 781 777 hcintmsk |= HCINTMSK_AHBERR; ··· 808 804 { 809 805 u32 intmsk; 810 806 811 - if (hsotg->params.host_dma > 0) { 807 + if (hsotg->params.host_dma) { 812 808 if (dbg_hc(chan)) 813 809 dev_vdbg(hsotg->dev, "DMA enabled\n"); 814 810 dwc2_hc_enable_dma_ints(hsotg, chan); ··· 1028 1024 1029 1025 /* No need to set the bit in DDMA for disabling the channel */ 1030 1026 /* TODO check it everywhere channel is disabled */ 1031 - if (hsotg->params.dma_desc_enable <= 0) { 1027 + if (!hsotg->params.dma_desc_enable) { 1032 1028 if (dbg_hc(chan)) 1033 1029 dev_vdbg(hsotg->dev, "desc DMA disabled\n"); 1034 1030 hcchar |= HCCHAR_CHENA; ··· 1038 1034 } 1039 1035 hcchar |= HCCHAR_CHDIS; 1040 1036 1041 - if (hsotg->params.host_dma <= 0) { 1037 + if (!hsotg->params.host_dma) { 1042 1038 if (dbg_hc(chan)) 1043 1039 dev_vdbg(hsotg->dev, "DMA not enabled\n"); 1044 1040 hcchar |= HCCHAR_CHENA; ··· 1384 1380 dev_vdbg(hsotg->dev, "%s()\n", __func__); 1385 1381 1386 1382 if (chan->do_ping) { 1387 - if (hsotg->params.host_dma <= 0) { 1383 + if (!hsotg->params.host_dma) { 1388 1384 if (dbg_hc(chan)) 1389 1385 dev_vdbg(hsotg->dev, "ping, no DMA\n"); 1390 1386 dwc2_hc_do_ping(hsotg, chan); ··· 1512 1508 TSIZ_SC_MC_PID_SHIFT); 1513 1509 } 1514 1510 1515 - if (hsotg->params.host_dma > 0) { 1511 + if (hsotg->params.host_dma) { 1516 1512 dwc2_writel((u32)chan->xfer_dma, 1517 1513 hsotg->regs + HCDMA(chan->hc_num)); 1518 1514 if (dbg_hc(chan)) ··· 1555 1551 chan->xfer_started = 1; 1556 1552 chan->requests++; 1557 1553 1558 - if (hsotg->params.host_dma <= 0 && 1554 + if (!hsotg->params.host_dma && 1559 1555 !chan->ep_is_in && chan->xfer_len > 0) 1560 1556 /* Load OUT packet into the appropriate Tx FIFO */ 1561 1557 dwc2_hc_write_packet(hsotg, chan); ··· 1838 1834 u32 hcchar; 1839 1835 int i; 1840 1836 1841 - if 
(hsotg->params.host_dma <= 0) { 1837 + if (!hsotg->params.host_dma) { 1842 1838 /* Flush out any channel requests in slave mode */ 1843 1839 for (i = 0; i < num_channels; i++) { 1844 1840 channel = hsotg->hc_ptr_array[i]; ··· 1874 1870 channel->qh = NULL; 1875 1871 } 1876 1872 /* All channels have been freed, mark them available */ 1877 - if (hsotg->params.uframe_sched > 0) { 1873 + if (hsotg->params.uframe_sched) { 1878 1874 hsotg->available_host_channels = 1879 1875 hsotg->params.host_channels; 1880 1876 } else { ··· 2111 2107 * Free the QTD and clean up the associated QH. Leave the QH in the 2112 2108 * schedule if it has any remaining QTDs. 2113 2109 */ 2114 - if (hsotg->params.dma_desc_enable <= 0) { 2110 + if (!hsotg->params.dma_desc_enable) { 2115 2111 u8 in_process = urb_qtd->in_process; 2116 2112 2117 2113 dwc2_hcd_qtd_unlink_and_free(hsotg, urb_qtd, qh); ··· 2154 2150 } 2155 2151 2156 2152 spin_unlock_irqrestore(&hsotg->lock, flags); 2157 - usleep_range(20000, 40000); 2153 + msleep(20); 2158 2154 spin_lock_irqsave(&hsotg->lock, flags); 2159 2155 qh = ep->hcpriv; 2160 2156 if (!qh) { ··· 2219 2215 2220 2216 /* Set ULPI External VBUS bit if needed */ 2221 2217 usbcfg &= ~GUSBCFG_ULPI_EXT_VBUS_DRV; 2222 - if (hsotg->params.phy_ulpi_ext_vbus == 2223 - DWC2_PHY_ULPI_EXTERNAL_VBUS) 2218 + if (hsotg->params.phy_ulpi_ext_vbus) 2224 2219 usbcfg |= GUSBCFG_ULPI_EXT_VBUS_DRV; 2225 2220 2226 2221 /* Set external TS Dline pulsing bit if needed */ 2227 2222 usbcfg &= ~GUSBCFG_TERMSELDLPULSE; 2228 - if (hsotg->params.ts_dline > 0) 2223 + if (hsotg->params.ts_dline) 2229 2224 usbcfg |= GUSBCFG_TERMSELDLPULSE; 2230 2225 2231 2226 dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); ··· 2263 2260 /* Program the GOTGCTL register */ 2264 2261 otgctl = dwc2_readl(hsotg->regs + GOTGCTL); 2265 2262 otgctl &= ~GOTGCTL_OTGVER; 2266 - if (hsotg->params.otg_ver > 0) 2267 - otgctl |= GOTGCTL_OTGVER; 2268 2263 dwc2_writel(otgctl, hsotg->regs + GOTGCTL); 2269 - dev_dbg(hsotg->dev, "OTG VER 
PARAM: %d\n", hsotg->params.otg_ver); 2270 2264 2271 2265 /* Clear the SRP success bit for FS-I2c */ 2272 2266 hsotg->srp_success = 0; ··· 2319 2319 * runtime. This bit needs to be programmed during initial configuration 2320 2320 * and its value must not be changed during runtime. 2321 2321 */ 2322 - if (hsotg->params.reload_ctl > 0) { 2322 + if (hsotg->params.reload_ctl) { 2323 2323 hfir = dwc2_readl(hsotg->regs + HFIR); 2324 2324 hfir |= HFIR_RLDCTRL; 2325 2325 dwc2_writel(hfir, hsotg->regs + HFIR); 2326 2326 } 2327 2327 2328 - if (hsotg->params.dma_desc_enable > 0) { 2328 + if (hsotg->params.dma_desc_enable) { 2329 2329 u32 op_mode = hsotg->hw_params.op_mode; 2330 2330 2331 2331 if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a || ··· 2337 2337 "Hardware does not support descriptor DMA mode -\n"); 2338 2338 dev_err(hsotg->dev, 2339 2339 "falling back to buffer DMA mode.\n"); 2340 - hsotg->params.dma_desc_enable = 0; 2340 + hsotg->params.dma_desc_enable = false; 2341 2341 } else { 2342 2342 hcfg = dwc2_readl(hsotg->regs + HCFG); 2343 2343 hcfg |= HCFG_DESCDMA; ··· 2363 2363 otgctl &= ~GOTGCTL_HSTSETHNPEN; 2364 2364 dwc2_writel(otgctl, hsotg->regs + GOTGCTL); 2365 2365 2366 - if (hsotg->params.dma_desc_enable <= 0) { 2366 + if (!hsotg->params.dma_desc_enable) { 2367 2367 int num_channels, i; 2368 2368 u32 hcchar; 2369 2369 ··· 2430 2430 hsotg->flags.d32 = 0; 2431 2431 hsotg->non_periodic_qh_ptr = &hsotg->non_periodic_sched_active; 2432 2432 2433 - if (hsotg->params.uframe_sched > 0) { 2433 + if (hsotg->params.uframe_sched) { 2434 2434 hsotg->available_host_channels = 2435 2435 hsotg->params.host_channels; 2436 2436 } else { ··· 2488 2488 chan->do_ping = 0; 2489 2489 chan->ep_is_in = 0; 2490 2490 chan->data_pid_start = DWC2_HC_PID_SETUP; 2491 - if (hsotg->params.host_dma > 0) 2491 + if (hsotg->params.host_dma) 2492 2492 chan->xfer_dma = urb->setup_dma; 2493 2493 else 2494 2494 chan->xfer_buf = urb->setup_packet; ··· 2515 2515 chan->do_ping = 0; 2516 2516 
chan->data_pid_start = DWC2_HC_PID_DATA1; 2517 2517 chan->xfer_len = 0; 2518 - if (hsotg->params.host_dma > 0) 2518 + if (hsotg->params.host_dma) 2519 2519 chan->xfer_dma = hsotg->status_buf_dma; 2520 2520 else 2521 2521 chan->xfer_buf = hsotg->status_buf; ··· 2533 2533 2534 2534 case USB_ENDPOINT_XFER_ISOC: 2535 2535 chan->ep_type = USB_ENDPOINT_XFER_ISOC; 2536 - if (hsotg->params.dma_desc_enable > 0) 2536 + if (hsotg->params.dma_desc_enable) 2537 2537 break; 2538 2538 2539 2539 frame_desc = &urb->iso_descs[qtd->isoc_frame_index]; 2540 2540 frame_desc->status = 0; 2541 2541 2542 - if (hsotg->params.host_dma > 0) { 2542 + if (hsotg->params.host_dma) { 2543 2543 chan->xfer_dma = urb->dma; 2544 2544 chan->xfer_dma += frame_desc->offset + 2545 2545 qtd->isoc_split_offset; ··· 2577 2577 return; 2578 2578 2579 2579 temp = container_of(urb->transfer_buffer, 2580 - struct dma_aligned_buffer, data); 2580 + struct dma_aligned_buffer, data); 2581 2581 2582 2582 if (usb_urb_dir_in(urb)) 2583 2583 memcpy(temp->old_xfer_buffer, temp->data, ··· 2621 2621 } 2622 2622 2623 2623 static int dwc2_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, 2624 - gfp_t mem_flags) 2624 + gfp_t mem_flags) 2625 2625 { 2626 2626 int ret; 2627 2627 ··· 2718 2718 chan->multi_count = 1; 2719 2719 2720 2720 if (urb->actual_length > urb->length && 2721 - !dwc2_hcd_is_pipe_in(&urb->pipe_info)) 2721 + !dwc2_hcd_is_pipe_in(&urb->pipe_info)) 2722 2722 urb->actual_length = urb->length; 2723 2723 2724 - if (hsotg->params.host_dma > 0) 2724 + if (hsotg->params.host_dma) 2725 2725 chan->xfer_dma = urb->dma + urb->actual_length; 2726 2726 else 2727 2727 chan->xfer_buf = (u8 *)urb->buf + urb->actual_length; ··· 2746 2746 */ 2747 2747 chan->multi_count = dwc2_hb_mult(qh->maxp); 2748 2748 2749 - if (hsotg->params.dma_desc_enable > 0) { 2749 + if (hsotg->params.dma_desc_enable) { 2750 2750 chan->desc_list_addr = qh->desc_list_dma; 2751 2751 chan->desc_list_sz = qh->desc_list_sz; 2752 2752 } ··· 2783 2783 while 
(qh_ptr != &hsotg->periodic_sched_ready) { 2784 2784 if (list_empty(&hsotg->free_hc_list)) 2785 2785 break; 2786 - if (hsotg->params.uframe_sched > 0) { 2786 + if (hsotg->params.uframe_sched) { 2787 2787 if (hsotg->available_host_channels <= 1) 2788 2788 break; 2789 2789 hsotg->available_host_channels--; ··· 2810 2810 num_channels = hsotg->params.host_channels; 2811 2811 qh_ptr = hsotg->non_periodic_sched_inactive.next; 2812 2812 while (qh_ptr != &hsotg->non_periodic_sched_inactive) { 2813 - if (hsotg->params.uframe_sched <= 0 && 2813 + if (!hsotg->params.uframe_sched && 2814 2814 hsotg->non_periodic_channels >= num_channels - 2815 2815 hsotg->periodic_channels) 2816 2816 break; 2817 2817 if (list_empty(&hsotg->free_hc_list)) 2818 2818 break; 2819 2819 qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry); 2820 - if (hsotg->params.uframe_sched > 0) { 2820 + if (hsotg->params.uframe_sched) { 2821 2821 if (hsotg->available_host_channels < 1) 2822 2822 break; 2823 2823 hsotg->available_host_channels--; ··· 2839 2839 else 2840 2840 ret_val = DWC2_TRANSACTION_ALL; 2841 2841 2842 - if (hsotg->params.uframe_sched <= 0) 2842 + if (!hsotg->params.uframe_sched) 2843 2843 hsotg->non_periodic_channels++; 2844 2844 } 2845 2845 ··· 2878 2878 list_move_tail(&chan->split_order_list_entry, 2879 2879 &hsotg->split_order); 2880 2880 2881 - if (hsotg->params.host_dma > 0) { 2882 - if (hsotg->params.dma_desc_enable > 0) { 2881 + if (hsotg->params.host_dma) { 2882 + if (hsotg->params.dma_desc_enable) { 2883 2883 if (!chan->xfer_started || 2884 2884 chan->ep_type == USB_ENDPOINT_XFER_ISOC) { 2885 2885 dwc2_hcd_start_xfer_ddma(hsotg, chan->qh); ··· 2967 2967 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 2968 2968 TXSTS_QSPCAVAIL_SHIFT; 2969 2969 if (qspcavail == 0) { 2970 - no_queue_space = 1; 2970 + no_queue_space = true; 2971 2971 break; 2972 2972 } 2973 2973 ··· 2988 2988 * The flag prevents any halts to get into the request queue in 2989 2989 * the middle of multiple 
high-bandwidth packets getting queued. 2990 2990 */ 2991 - if (hsotg->params.host_dma <= 0 && 2992 - qh->channel->multi_count > 1) 2991 + if (!hsotg->params.host_dma && 2992 + qh->channel->multi_count > 1) 2993 2993 hsotg->queuing_high_bandwidth = 1; 2994 2994 2995 2995 fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >> 2996 2996 TXSTS_FSPCAVAIL_SHIFT; 2997 2997 status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail); 2998 2998 if (status < 0) { 2999 - no_fifo_space = 1; 2999 + no_fifo_space = true; 3000 3000 break; 3001 3001 } 3002 3002 ··· 3007 3007 * controller automatically handles multiple packets for 3008 3008 * high-bandwidth transfers. 3009 3009 */ 3010 - if (hsotg->params.host_dma > 0 || status == 0 || 3010 + if (hsotg->params.host_dma || status == 0 || 3011 3011 qh->channel->requests == qh->channel->multi_count) { 3012 3012 qh_ptr = qh_ptr->next; 3013 3013 /* ··· 3024 3024 3025 3025 exit: 3026 3026 if (no_queue_space || no_fifo_space || 3027 - (hsotg->params.host_dma <= 0 && 3027 + (!hsotg->params.host_dma && 3028 3028 !list_empty(&hsotg->periodic_sched_assigned))) { 3029 3029 /* 3030 3030 * May need to queue more transactions as the request ··· 3045 3045 * now. This function is called from interrupt 3046 3046 * handlers to queue more transactions as transfer 3047 3047 * states change. 
3048 - */ 3048 + */ 3049 3049 gintmsk = dwc2_readl(hsotg->regs + GINTMSK); 3050 3050 if (gintmsk & GINTSTS_PTXFEMP) { 3051 3051 gintmsk &= ~GINTSTS_PTXFEMP; ··· 3104 3104 tx_status = dwc2_readl(hsotg->regs + GNPTXSTS); 3105 3105 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 3106 3106 TXSTS_QSPCAVAIL_SHIFT; 3107 - if (hsotg->params.host_dma <= 0 && qspcavail == 0) { 3107 + if (!hsotg->params.host_dma && qspcavail == 0) { 3108 3108 no_queue_space = 1; 3109 3109 break; 3110 3110 } ··· 3137 3137 hsotg->non_periodic_qh_ptr->next; 3138 3138 } while (hsotg->non_periodic_qh_ptr != orig_qh_ptr); 3139 3139 3140 - if (hsotg->params.host_dma <= 0) { 3140 + if (!hsotg->params.host_dma) { 3141 3141 tx_status = dwc2_readl(hsotg->regs + GNPTXSTS); 3142 3142 qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >> 3143 3143 TXSTS_QSPCAVAIL_SHIFT; ··· 3235 3235 if (gotgctl & GOTGCTL_CONID_B) { 3236 3236 /* Wait for switch to device mode */ 3237 3237 dev_dbg(hsotg->dev, "connId B\n"); 3238 + if (hsotg->bus_suspended) { 3239 + dev_info(hsotg->dev, 3240 + "Do port resume before switching to device mode\n"); 3241 + dwc2_port_resume(hsotg); 3242 + } 3238 3243 while (!dwc2_is_device_mode(hsotg)) { 3239 3244 dev_info(hsotg->dev, 3240 3245 "Waiting for Peripheral Mode, Mode=%s\n", 3241 3246 dwc2_is_host_mode(hsotg) ? "Host" : 3242 3247 "Peripheral"); 3243 - usleep_range(20000, 40000); 3248 + msleep(20); 3249 + /* 3250 + * Sometimes the initial GOTGCTRL read is wrong, so 3251 + * check it again and jump to host mode if that was 3252 + * the case. 
3253 + */ 3254 + gotgctl = dwc2_readl(hsotg->regs + GOTGCTL); 3255 + if (!(gotgctl & GOTGCTL_CONID_B)) 3256 + goto host; 3244 3257 if (++count > 250) 3245 3258 break; 3246 3259 } ··· 3268 3255 spin_unlock_irqrestore(&hsotg->lock, flags); 3269 3256 dwc2_hsotg_core_connect(hsotg); 3270 3257 } else { 3258 + host: 3271 3259 /* A-Device connector (Host Mode) */ 3272 3260 dev_dbg(hsotg->dev, "connId A\n"); 3273 3261 while (!dwc2_is_host_mode(hsotg)) { 3274 3262 dev_info(hsotg->dev, "Waiting for Host Mode, Mode=%s\n", 3275 3263 dwc2_is_host_mode(hsotg) ? 3276 3264 "Host" : "Peripheral"); 3277 - usleep_range(20000, 40000); 3265 + msleep(20); 3278 3266 if (++count > 250) 3279 3267 break; 3280 3268 } ··· 3310 3296 dwc2_readl(hsotg->regs + HPRT0)); 3311 3297 3312 3298 dwc2_hcd_rem_wakeup(hsotg); 3313 - hsotg->bus_suspended = 0; 3299 + hsotg->bus_suspended = false; 3314 3300 3315 3301 /* Change to L0 state */ 3316 3302 hsotg->lx_state = DWC2_L0; ··· 3346 3332 hprt0 |= HPRT0_SUSP; 3347 3333 dwc2_writel(hprt0, hsotg->regs + HPRT0); 3348 3334 3349 - hsotg->bus_suspended = 1; 3335 + hsotg->bus_suspended = true; 3350 3336 3351 3337 /* 3352 3338 * If hibernation is supported, Phy clock will be suspended ··· 3368 3354 3369 3355 spin_unlock_irqrestore(&hsotg->lock, flags); 3370 3356 3371 - usleep_range(200000, 250000); 3357 + msleep(200); 3372 3358 } else { 3373 3359 spin_unlock_irqrestore(&hsotg->lock, flags); 3374 3360 } ··· 3392 3378 pcgctl &= ~PCGCTL_STOPPCLK; 3393 3379 dwc2_writel(pcgctl, hsotg->regs + PCGCTL); 3394 3380 spin_unlock_irqrestore(&hsotg->lock, flags); 3395 - usleep_range(20000, 40000); 3381 + msleep(20); 3396 3382 spin_lock_irqsave(&hsotg->lock, flags); 3397 3383 } 3398 3384 ··· 3408 3394 hprt0 = dwc2_read_hprt0(hsotg); 3409 3395 hprt0 &= ~(HPRT0_RES | HPRT0_SUSP); 3410 3396 dwc2_writel(hprt0, hsotg->regs + HPRT0); 3411 - hsotg->bus_suspended = 0; 3397 + hsotg->bus_suspended = false; 3412 3398 spin_unlock_irqrestore(&hsotg->lock, flags); 3413 3399 } 3414 3400 ··· 
3628 3614 u32 hcfg; 3629 3615 3630 3616 dev_info(hsotg->dev, "Enabling descriptor DMA mode\n"); 3631 - hsotg->params.dma_desc_enable = 1; 3617 + hsotg->params.dma_desc_enable = true; 3632 3618 hcfg = dwc2_readl(hsotg->regs + HCFG); 3633 3619 hcfg |= HCFG_DESCDMA; 3634 3620 dwc2_writel(hcfg, hsotg->regs + HCFG); ··· 3705 3691 } 3706 3692 3707 3693 /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ 3708 - usleep_range(50000, 70000); 3694 + msleep(50); 3709 3695 hprt0 &= ~HPRT0_RST; 3710 3696 dwc2_writel(hprt0, hsotg->regs + HPRT0); 3711 3697 hsotg->lx_state = DWC2_L0; /* Now back to On state */ ··· 4061 4047 { 4062 4048 struct wrapper_priv_data *p; 4063 4049 4064 - p = (struct wrapper_priv_data *) &hcd->hcd_priv; 4050 + p = (struct wrapper_priv_data *)&hcd->hcd_priv; 4065 4051 return p->hsotg; 4066 4052 } 4067 4053 ··· 4096 4082 *ttport = urb->dev->ttport; 4097 4083 4098 4084 dwc_tt = urb->dev->tt->hcpriv; 4099 - if (dwc_tt == NULL) { 4085 + if (!dwc_tt) { 4100 4086 size_t bitmap_size; 4101 4087 4102 4088 /* ··· 4110 4096 4111 4097 dwc_tt = kzalloc(sizeof(*dwc_tt) + bitmap_size, 4112 4098 mem_flags); 4113 - if (dwc_tt == NULL) 4099 + if (!dwc_tt) 4114 4100 return NULL; 4115 4101 4116 4102 dwc_tt->usb_tt = urb->dev->tt; ··· 4137 4123 void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, struct dwc2_tt *dwc_tt) 4138 4124 { 4139 4125 /* Model kfree and make put of NULL a no-op */ 4140 - if (dwc_tt == NULL) 4126 + if (!dwc_tt) 4141 4127 return; 4142 4128 4143 4129 WARN_ON(dwc_tt->refcount < 1); ··· 4219 4205 usb_pipeendpoint(urb->pipe), 4220 4206 usb_pipein(urb->pipe) ? 
"IN" : "OUT", status, 4221 4207 urb->actual_length); 4222 - 4223 4208 4224 4209 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { 4225 4210 urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb); ··· 4600 4587 dwc2_dump_urb_info(hcd, urb, "urb_enqueue"); 4601 4588 } 4602 4589 4603 - if (ep == NULL) 4590 + if (!ep) 4604 4591 return -EINVAL; 4605 4592 4606 4593 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS || ··· 4670 4657 urb->iso_frame_desc[i].length); 4671 4658 4672 4659 urb->hcpriv = dwc2_urb; 4673 - qh = (struct dwc2_qh *) ep->hcpriv; 4660 + qh = (struct dwc2_qh *)ep->hcpriv; 4674 4661 /* Create QH for the endpoint if it doesn't exist */ 4675 4662 if (!qh) { 4676 4663 qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags); ··· 4725 4712 dwc2_hcd_qh_unlink(hsotg, qh); 4726 4713 /* Free each QTD in the QH's QTD list */ 4727 4714 list_for_each_entry_safe(qtd2, qtd2_tmp, &qh->qtd_list, 4728 - qtd_list_entry) 4715 + qtd_list_entry) 4729 4716 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh); 4730 4717 dwc2_hcd_qh_free(hsotg, qh); 4731 4718 } ··· 4873 4860 spin_unlock_irqrestore(&hsotg->lock, flags); 4874 4861 } 4875 4862 4863 + /* 4864 + * HPRT0_SPD_HIGH_SPEED: high speed 4865 + * HPRT0_SPD_FULL_SPEED: full speed 4866 + */ 4867 + static void dwc2_change_bus_speed(struct usb_hcd *hcd, int speed) 4868 + { 4869 + struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 4870 + 4871 + if (hsotg->params.speed == speed) 4872 + return; 4873 + 4874 + hsotg->params.speed = speed; 4875 + queue_work(hsotg->wq_otg, &hsotg->wf_otg); 4876 + } 4877 + 4878 + static void dwc2_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 4879 + { 4880 + struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 4881 + 4882 + if (!hsotg->params.change_speed_quirk) 4883 + return; 4884 + 4885 + /* 4886 + * On removal, set speed to default high-speed. 
4887 + */ 4888 + if (udev->parent && udev->parent->speed > USB_SPEED_UNKNOWN && 4889 + udev->parent->speed < USB_SPEED_HIGH) { 4890 + dev_info(hsotg->dev, "Set speed to default high-speed\n"); 4891 + dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED); 4892 + } 4893 + } 4894 + 4895 + static int dwc2_reset_device(struct usb_hcd *hcd, struct usb_device *udev) 4896 + { 4897 + struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd); 4898 + 4899 + if (!hsotg->params.change_speed_quirk) 4900 + return 0; 4901 + 4902 + if (udev->speed == USB_SPEED_HIGH) { 4903 + dev_info(hsotg->dev, "Set speed to high-speed\n"); 4904 + dwc2_change_bus_speed(hcd, HPRT0_SPD_HIGH_SPEED); 4905 + } else if ((udev->speed == USB_SPEED_FULL || 4906 + udev->speed == USB_SPEED_LOW)) { 4907 + /* 4908 + * Change speed setting to full-speed if there's 4909 + * a full-speed or low-speed device plugged in. 4910 + */ 4911 + dev_info(hsotg->dev, "Set speed to full-speed\n"); 4912 + dwc2_change_bus_speed(hcd, HPRT0_SPD_FULL_SPEED); 4913 + } 4914 + 4915 + return 0; 4916 + } 4917 + 4876 4918 static struct hc_driver dwc2_hc_driver = { 4877 4919 .description = "dwc2_hsotg", 4878 4920 .product_desc = "DWC OTG Controller", ··· 4979 4911 for (i = 0; i < MAX_EPS_CHANNELS; i++) { 4980 4912 struct dwc2_host_chan *chan = hsotg->hc_ptr_array[i]; 4981 4913 4982 - if (chan != NULL) { 4914 + if (chan) { 4983 4915 dev_dbg(hsotg->dev, "HCD Free channel #%i, chan=%p\n", 4984 4916 i, chan); 4985 4917 hsotg->hc_ptr_array[i] = NULL; ··· 4987 4919 } 4988 4920 } 4989 4921 4990 - if (hsotg->params.host_dma > 0) { 4922 + if (hsotg->params.host_dma) { 4991 4923 if (hsotg->status_buf) { 4992 4924 dma_free_coherent(hsotg->dev, DWC2_HCD_STATUS_BUF_SIZE, 4993 4925 hsotg->status_buf, ··· 5035 4967 * USB bus with the core and calls the hc_driver->start() function. It returns 5036 4968 * a negative error on failure. 
5037 4969 */ 5038 - int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq) 4970 + int dwc2_hcd_init(struct dwc2_hsotg *hsotg) 5039 4971 { 4972 + struct platform_device *pdev = to_platform_device(hsotg->dev); 4973 + struct resource *res; 5040 4974 struct usb_hcd *hcd; 5041 4975 struct dwc2_host_chan *channel; 5042 4976 u32 hcfg; ··· 5069 4999 hsotg->last_frame_num = HFNUM_MAX_FRNUM; 5070 5000 5071 5001 /* Check if the bus driver or platform code has setup a dma_mask */ 5072 - if (hsotg->params.host_dma > 0 && 5073 - hsotg->dev->dma_mask == NULL) { 5002 + if (hsotg->params.host_dma && 5003 + !hsotg->dev->dma_mask) { 5074 5004 dev_warn(hsotg->dev, 5075 5005 "dma_mask not set, disabling DMA\n"); 5076 - hsotg->params.host_dma = 0; 5077 - hsotg->params.dma_desc_enable = 0; 5006 + hsotg->params.host_dma = false; 5007 + hsotg->params.dma_desc_enable = false; 5078 5008 } 5079 5009 5080 5010 /* Set device flags indicating whether the HCD supports DMA */ 5081 - if (hsotg->params.host_dma > 0) { 5011 + if (hsotg->params.host_dma) { 5082 5012 if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) 5083 5013 dev_warn(hsotg->dev, "can't set DMA mask\n"); 5084 5014 if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0) 5085 5015 dev_warn(hsotg->dev, "can't set coherent DMA mask\n"); 5086 5016 } 5087 5017 5018 + if (hsotg->params.change_speed_quirk) { 5019 + dwc2_hc_driver.free_dev = dwc2_free_dev; 5020 + dwc2_hc_driver.reset_device = dwc2_reset_device; 5021 + } 5022 + 5088 5023 hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev)); 5089 5024 if (!hcd) 5090 5025 goto error1; 5091 5026 5092 - if (hsotg->params.host_dma <= 0) 5027 + if (!hsotg->params.host_dma) 5093 5028 hcd->self.uses_dma = 0; 5094 5029 5095 5030 hcd->has_tt = 1; 5096 5031 5097 - ((struct wrapper_priv_data *) &hcd->hcd_priv)->hsotg = hsotg; 5032 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 5033 + hcd->rsrc_start = res->start; 5034 + hcd->rsrc_len = resource_size(res); 5035 + 5036 + 
((struct wrapper_priv_data *)&hcd->hcd_priv)->hsotg = hsotg; 5098 5037 hsotg->priv = hcd; 5099 5038 5100 5039 /* ··· 5151 5072 5152 5073 for (i = 0; i < num_channels; i++) { 5153 5074 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 5154 - if (channel == NULL) 5075 + if (!channel) 5155 5076 goto error3; 5156 5077 channel->hc_num = i; 5157 5078 INIT_LIST_HEAD(&channel->split_order_list_entry); ··· 5170 5091 * done after usb_add_hcd since that function allocates the DMA buffer 5171 5092 * pool. 5172 5093 */ 5173 - if (hsotg->params.host_dma > 0) 5094 + if (hsotg->params.host_dma) 5174 5095 hsotg->status_buf = dma_alloc_coherent(hsotg->dev, 5175 5096 DWC2_HCD_STATUS_BUF_SIZE, 5176 5097 &hsotg->status_buf_dma, GFP_KERNEL); ··· 5200 5121 * Disable descriptor dma mode since it will not be 5201 5122 * usable. 5202 5123 */ 5203 - hsotg->params.dma_desc_enable = 0; 5204 - hsotg->params.dma_desc_fs_enable = 0; 5124 + hsotg->params.dma_desc_enable = false; 5125 + hsotg->params.dma_desc_fs_enable = false; 5205 5126 } 5206 5127 5207 5128 hsotg->desc_hsisoc_cache = kmem_cache_create("dwc2-hsisoc-desc", ··· 5217 5138 * Disable descriptor dma mode since it will not be 5218 5139 * usable. 5219 5140 */ 5220 - hsotg->params.dma_desc_enable = 0; 5221 - hsotg->params.dma_desc_fs_enable = 0; 5141 + hsotg->params.dma_desc_enable = false; 5142 + hsotg->params.dma_desc_fs_enable = false; 5222 5143 } 5223 5144 } 5224 5145 ··· 5243 5164 * allocates the DMA buffer pool, registers the USB bus, requests the 5244 5165 * IRQ line, and calls hcd_start method. 5245 5166 */ 5246 - retval = usb_add_hcd(hcd, irq, IRQF_SHARED); 5167 + retval = usb_add_hcd(hcd, hsotg->irq, IRQF_SHARED); 5247 5168 if (retval < 0) 5248 5169 goto error4; 5249 5170
+37 -37
drivers/usb/dwc2/hcd.h
··· 521 521 return !dwc2_hcd_is_pipe_in(pipe); 522 522 } 523 523 524 - extern int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq); 525 - extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg); 524 + int dwc2_hcd_init(struct dwc2_hsotg *hsotg); 525 + void dwc2_hcd_remove(struct dwc2_hsotg *hsotg); 526 526 527 527 /* Transaction Execution Functions */ 528 - extern enum dwc2_transaction_type dwc2_hcd_select_transactions( 528 + enum dwc2_transaction_type dwc2_hcd_select_transactions( 529 529 struct dwc2_hsotg *hsotg); 530 - extern void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, 531 - enum dwc2_transaction_type tr_type); 530 + void dwc2_hcd_queue_transactions(struct dwc2_hsotg *hsotg, 531 + enum dwc2_transaction_type tr_type); 532 532 533 533 /* Schedule Queue Functions */ 534 534 /* Implemented in hcd_queue.c */ 535 - extern struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, 536 - struct dwc2_hcd_urb *urb, 535 + struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, 536 + struct dwc2_hcd_urb *urb, 537 537 gfp_t mem_flags); 538 - extern void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 539 - extern int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 540 - extern void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 541 - extern void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 542 - int sched_csplit); 538 + void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 539 + int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 540 + void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 541 + void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 542 + int sched_csplit); 543 543 544 - extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb); 545 - extern int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 546 - struct dwc2_qh *qh); 544 + void dwc2_hcd_qtd_init(struct dwc2_qtd 
*qtd, struct dwc2_hcd_urb *urb); 545 + int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 546 + struct dwc2_qh *qh); 547 547 548 548 /* Unlinks and frees a QTD */ 549 549 static inline void dwc2_hcd_qtd_unlink_and_free(struct dwc2_hsotg *hsotg, ··· 556 556 } 557 557 558 558 /* Descriptor DMA support functions */ 559 - extern void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, 560 - struct dwc2_qh *qh); 561 - extern void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg, 562 - struct dwc2_host_chan *chan, int chnum, 559 + void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, 560 + struct dwc2_qh *qh); 561 + void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg, 562 + struct dwc2_host_chan *chan, int chnum, 563 563 enum dwc2_halt_status halt_status); 564 564 565 - extern int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 566 - gfp_t mem_flags); 567 - extern void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 565 + int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh, 566 + gfp_t mem_flags); 567 + void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh); 568 568 569 569 /* Check if QH is non-periodic */ 570 570 #define dwc2_qh_is_non_per(_qh_ptr_) \ ··· 732 732 return qh->host_us; 733 733 } 734 734 735 - extern void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, 736 - struct dwc2_host_chan *chan, int chnum, 735 + void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg, 736 + struct dwc2_host_chan *chan, int chnum, 737 737 struct dwc2_qtd *qtd); 738 738 739 739 /* HCD Core API */ ··· 746 746 * Returns IRQ_HANDLED if interrupt is handled 747 747 * Return IRQ_NONE if interrupt is not handled 748 748 */ 749 - extern irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg); 749 + irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg); 750 750 751 751 /** 752 752 * dwc2_hcd_stop() - Halts the DWC_otg host mode operation 753 753 * 754 754 * @hsotg: The DWC2 
HCD 755 755 */ 756 - extern void dwc2_hcd_stop(struct dwc2_hsotg *hsotg); 756 + void dwc2_hcd_stop(struct dwc2_hsotg *hsotg); 757 757 758 758 /** 759 759 * dwc2_hcd_is_b_host() - Returns 1 if core currently is acting as B host, ··· 761 761 * 762 762 * @hsotg: The DWC2 HCD 763 763 */ 764 - extern int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg); 764 + int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg); 765 765 766 766 /** 767 767 * dwc2_hcd_dump_state() - Dumps hsotg state ··· 771 771 * NOTE: This function will be removed once the peripheral controller code 772 772 * is integrated and the driver is stable 773 773 */ 774 - extern void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg); 774 + void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg); 775 775 776 776 /** 777 777 * dwc2_hcd_dump_frrem() - Dumps the average frame remaining at SOF ··· 784 784 * NOTE: This function will be removed once the peripheral controller code 785 785 * is integrated and the driver is stable 786 786 */ 787 - extern void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg); 787 + void dwc2_hcd_dump_frrem(struct dwc2_hsotg *hsotg); 788 788 789 789 /* URB interface */ 790 790 ··· 793 793 #define URB_SEND_ZERO_PACKET 0x2 794 794 795 795 /* Host driver callbacks */ 796 - extern struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, 797 - void *context, gfp_t mem_flags, 798 - int *ttport); 796 + struct dwc2_tt *dwc2_host_get_tt_info(struct dwc2_hsotg *hsotg, 797 + void *context, gfp_t mem_flags, 798 + int *ttport); 799 799 800 - extern void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, 801 - struct dwc2_tt *dwc_tt); 802 - extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context); 803 - extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 804 - int status); 800 + void dwc2_host_put_tt_info(struct dwc2_hsotg *hsotg, 801 + struct dwc2_tt *dwc_tt); 802 + int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context); 803 + void dwc2_host_complete(struct 
dwc2_hsotg *hsotg, struct dwc2_qtd *qtd, 804 + int status); 805 805 806 806 #ifdef DEBUG 807 807 /*
+11 -12
drivers/usb/dwc2/hcd_ddma.c
··· 89 89 { 90 90 struct kmem_cache *desc_cache; 91 91 92 - if (qh->ep_type == USB_ENDPOINT_XFER_ISOC 93 - && qh->dev_speed == USB_SPEED_HIGH) 92 + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && 93 + qh->dev_speed == USB_SPEED_HIGH) 94 94 desc_cache = hsotg->desc_hsisoc_cache; 95 95 else 96 96 desc_cache = hsotg->desc_gen_cache; ··· 106 106 qh->desc_list_sz, 107 107 DMA_TO_DEVICE); 108 108 109 - qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags); 109 + qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags); 110 110 if (!qh->n_bytes) { 111 111 dma_unmap_single(hsotg->dev, qh->desc_list_dma, 112 112 qh->desc_list_sz, ··· 123 123 { 124 124 struct kmem_cache *desc_cache; 125 125 126 - if (qh->ep_type == USB_ENDPOINT_XFER_ISOC 127 - && qh->dev_speed == USB_SPEED_HIGH) 126 + if (qh->ep_type == USB_ENDPOINT_XFER_ISOC && 127 + qh->dev_speed == USB_SPEED_HIGH) 128 128 desc_cache = hsotg->desc_hsisoc_cache; 129 129 else 130 130 desc_cache = hsotg->desc_gen_cache; ··· 175 175 hsotg->frame_list = NULL; 176 176 177 177 spin_unlock_irqrestore(&hsotg->lock, flags); 178 - 179 178 } 180 179 181 180 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en) ··· 296 297 struct dwc2_host_chan *chan = qh->channel; 297 298 298 299 if (dwc2_qh_is_non_per(qh)) { 299 - if (hsotg->params.uframe_sched > 0) 300 + if (hsotg->params.uframe_sched) 300 301 hsotg->available_host_channels++; 301 302 else 302 303 hsotg->non_periodic_channels--; ··· 403 404 404 405 if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC || 405 406 qh->ep_type == USB_ENDPOINT_XFER_INT) && 406 - (hsotg->params.uframe_sched > 0 || 407 + (hsotg->params.uframe_sched || 407 408 !hsotg->periodic_channels) && hsotg->frame_list) { 408 409 dwc2_per_sched_disable(hsotg); 409 410 dwc2_frame_list_free(hsotg); ··· 569 570 #endif 570 571 571 572 dma_sync_single_for_device(hsotg->dev, 572 - qh->desc_list_dma + 573 + qh->desc_list_dma + 573 574 (idx * sizeof(struct dwc2_dma_desc)), 574 575 sizeof(struct 
dwc2_dma_desc), 575 576 DMA_TO_DEVICE); ··· 775 776 n_desc - 1, 776 777 &qh->desc_list[n_desc - 1]); 777 778 dma_sync_single_for_device(hsotg->dev, 778 - qh->desc_list_dma + 779 + qh->desc_list_dma + 779 780 ((n_desc - 1) * 780 781 sizeof(struct dwc2_dma_desc)), 781 782 sizeof(struct dwc2_dma_desc), ··· 815 816 dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n", 816 817 &qh->desc_list[0]); 817 818 dma_sync_single_for_device(hsotg->dev, 818 - qh->desc_list_dma, 819 + qh->desc_list_dma, 819 820 sizeof(struct dwc2_dma_desc), 820 821 DMA_TO_DEVICE); 821 822 } ··· 1063 1064 } 1064 1065 1065 1066 static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg, 1066 - struct dwc2_host_chan *chan, 1067 + struct dwc2_host_chan *chan, 1067 1068 struct dwc2_qtd *qtd, 1068 1069 struct dwc2_dma_desc *dma_desc, 1069 1070 enum dwc2_halt_status halt_status,
+51 -47
drivers/usb/dwc2/hcd_intr.c
··· 60 60 61 61 if (expected != curr_frame_number) 62 62 dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n", 63 - expected, curr_frame_number); 63 + expected, curr_frame_number); 64 64 65 65 #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 66 66 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { ··· 163 163 * (micro)frame 164 164 */ 165 165 list_move_tail(&qh->qh_list_entry, 166 - &hsotg->periodic_sched_ready); 166 + &hsotg->periodic_sched_ready); 167 167 } 168 168 } 169 169 tr_type = dwc2_hcd_select_transactions(hsotg); ··· 297 297 HCFG_FSLSPCLKSEL_SHIFT; 298 298 299 299 if (prtspd == HPRT0_SPD_LOW_SPEED && 300 - params->host_ls_low_power_phy_clk == 301 - DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { 300 + params->host_ls_low_power_phy_clk) { 302 301 /* 6 MHZ */ 303 302 dev_vdbg(hsotg->dev, 304 303 "FS_PHY programming HCFG to 6 MHz\n"); ··· 397 398 if (hsotg->params.dma_desc_fs_enable) { 398 399 u32 hcfg; 399 400 400 - hsotg->params.dma_desc_enable = 0; 401 + hsotg->params.dma_desc_enable = false; 401 402 hsotg->new_connection = false; 402 403 hcfg = dwc2_readl(hsotg->regs + HCFG); 403 404 hcfg &= ~HCFG_DESCDMA; ··· 441 442 count = (hctsiz & TSIZ_XFERSIZE_MASK) >> 442 443 TSIZ_XFERSIZE_SHIFT; 443 444 length = chan->xfer_len - count; 444 - if (short_read != NULL) 445 + if (short_read) 445 446 *short_read = (count != 0); 446 447 } else if (chan->qh->do_split) { 447 448 length = qtd->ssplit_out_xfer_count; ··· 603 604 /* Skip whole frame */ 604 605 if (chan->qh->do_split && 605 606 chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && 606 - hsotg->params.host_dma > 0) { 607 + hsotg->params.host_dma) { 607 608 qtd->complete_split = 0; 608 609 qtd->isoc_split_offset = 0; 609 610 } ··· 742 743 dwc2_hc_cleanup(hsotg, chan); 743 744 list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list); 744 745 745 - if (hsotg->params.uframe_sched > 0) { 746 + if (hsotg->params.uframe_sched) { 746 747 hsotg->available_host_channels++; 747 748 } else { 748 749 switch (chan->ep_type) { ··· 
788 789 if (dbg_hc(chan)) 789 790 dev_vdbg(hsotg->dev, "%s()\n", __func__); 790 791 791 - if (hsotg->params.host_dma > 0) { 792 + if (hsotg->params.host_dma) { 792 793 if (dbg_hc(chan)) 793 794 dev_vdbg(hsotg->dev, "DMA enabled\n"); 794 795 dwc2_release_channel(hsotg, chan, qtd, halt_status); ··· 822 823 * processed. 823 824 */ 824 825 list_move_tail(&chan->qh->qh_list_entry, 825 - &hsotg->periodic_sched_assigned); 826 + &hsotg->periodic_sched_assigned); 826 827 827 828 /* 828 829 * Make sure the Periodic Tx FIFO Empty interrupt is ··· 978 979 979 980 pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info); 980 981 981 - if (hsotg->params.dma_desc_enable > 0) { 982 + if (hsotg->params.dma_desc_enable) { 982 983 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status); 983 984 if (pipe_type == USB_ENDPOINT_XFER_ISOC) 984 985 /* Do not disable the interrupt, just clear it */ ··· 989 990 /* Handle xfer complete on CSPLIT */ 990 991 if (chan->qh->do_split) { 991 992 if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in && 992 - hsotg->params.host_dma > 0) { 993 + hsotg->params.host_dma) { 993 994 if (qtd->complete_split && 994 995 dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum, 995 996 qtd)) ··· 1077 1078 dev_vdbg(hsotg->dev, " Isochronous transfer complete\n"); 1078 1079 if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL) 1079 1080 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, 1080 - chnum, qtd, DWC2_HC_XFER_COMPLETE); 1081 + chnum, qtd, 1082 + DWC2_HC_XFER_COMPLETE); 1081 1083 dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd, 1082 1084 halt_status); 1083 1085 break; ··· 1102 1102 dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n", 1103 1103 chnum); 1104 1104 1105 - if (hsotg->params.dma_desc_enable > 0) { 1105 + if (hsotg->params.dma_desc_enable) { 1106 1106 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1107 1107 DWC2_HC_XFER_STALL); 1108 1108 goto handle_stall_done; ··· 1212 1212 switch 
(dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) { 1213 1213 case USB_ENDPOINT_XFER_CONTROL: 1214 1214 case USB_ENDPOINT_XFER_BULK: 1215 - if (hsotg->params.host_dma > 0 && chan->ep_is_in) { 1215 + if (hsotg->params.host_dma && chan->ep_is_in) { 1216 1216 /* 1217 1217 * NAK interrupts are enabled on bulk/control IN 1218 1218 * transfers in DMA mode for the sole purpose of ··· 1358 1358 */ 1359 1359 if (chan->do_split && chan->complete_split) { 1360 1360 if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC && 1361 - hsotg->params.host_dma > 0) { 1361 + hsotg->params.host_dma) { 1362 1362 qtd->complete_split = 0; 1363 1363 qtd->isoc_split_offset = 0; 1364 1364 qtd->isoc_frame_index++; ··· 1379 1379 struct dwc2_qh *qh = chan->qh; 1380 1380 bool past_end; 1381 1381 1382 - if (hsotg->params.uframe_sched <= 0) { 1382 + if (!hsotg->params.uframe_sched) { 1383 1383 int frnum = dwc2_hcd_get_frame_number(hsotg); 1384 1384 1385 1385 /* Don't have num_hs_transfers; simple logic */ ··· 1389 1389 int end_frnum; 1390 1390 1391 1391 /* 1392 - * Figure out the end frame based on schedule. 1393 - * 1394 - * We don't want to go on trying again and again 1395 - * forever. Let's stop when we've done all the 1396 - * transfers that were scheduled. 1397 - * 1398 - * We're going to be comparing start_active_frame 1399 - * and next_active_frame, both of which are 1 1400 - * before the time the packet goes on the wire, 1401 - * so that cancels out. Basically if had 1 1402 - * transfer and we saw 1 NYET then we're done. 1403 - * We're getting a NYET here so if next >= 1404 - * (start + num_transfers) we're done. The 1405 - * complexity is that for all but ISOC_OUT we 1406 - * skip one slot. 1407 - */ 1392 + * Figure out the end frame based on 1393 + * schedule. 1394 + * 1395 + * We don't want to go on trying again 1396 + * and again forever. Let's stop when 1397 + * we've done all the transfers that 1398 + * were scheduled. 
1399 + * 1400 + * We're going to be comparing 1401 + * start_active_frame and 1402 + * next_active_frame, both of which 1403 + * are 1 before the time the packet 1404 + * goes on the wire, so that cancels 1405 + * out. Basically if had 1 transfer 1406 + * and we saw 1 NYET then we're done. 1407 + * We're getting a NYET here so if 1408 + * next >= (start + num_transfers) 1409 + * we're done. The complexity is that 1410 + * for all but ISOC_OUT we skip one 1411 + * slot. 1412 + */ 1408 1413 end_frnum = dwc2_frame_num_inc( 1409 1414 qh->start_active_frame, 1410 1415 qh->num_hs_transfers); ··· 1477 1472 1478 1473 dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1479 1474 1480 - if (hsotg->params.dma_desc_enable > 0) { 1475 + if (hsotg->params.dma_desc_enable) { 1481 1476 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1482 1477 DWC2_HC_XFER_BABBLE_ERR); 1483 1478 goto disable_int; ··· 1582 1577 dev_err(hsotg->dev, " Interval: %d\n", urb->interval); 1583 1578 1584 1579 /* Core halts the channel for Descriptor DMA mode */ 1585 - if (hsotg->params.dma_desc_enable > 0) { 1580 + if (hsotg->params.dma_desc_enable) { 1586 1581 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1587 1582 DWC2_HC_XFER_AHB_ERR); 1588 1583 goto handle_ahberr_done; ··· 1614 1609 1615 1610 dwc2_hc_handle_tt_clear(hsotg, chan, qtd); 1616 1611 1617 - if (hsotg->params.dma_desc_enable > 0) { 1612 + if (hsotg->params.dma_desc_enable) { 1618 1613 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1619 1614 DWC2_HC_XFER_XACT_ERR); 1620 1615 goto handle_xacterr_done; ··· 1625 1620 case USB_ENDPOINT_XFER_BULK: 1626 1621 qtd->error_count++; 1627 1622 if (!chan->qh->ping_state) { 1628 - 1629 1623 dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, 1630 1624 qtd, DWC2_HC_XFER_XACT_ERR); 1631 1625 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); ··· 1649 1645 enum dwc2_halt_status halt_status; 1650 1646 1651 1647 halt_status = dwc2_update_isoc_urb_state(hsotg, chan, 1652 - chnum, qtd, DWC2_HC_XFER_XACT_ERR); 1648 + 
chnum, qtd, DWC2_HC_XFER_XACT_ERR); 1653 1649 dwc2_halt_channel(hsotg, chan, qtd, halt_status); 1654 1650 } 1655 1651 break; ··· 1807 1803 1808 1804 if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE || 1809 1805 (chan->halt_status == DWC2_HC_XFER_AHB_ERR && 1810 - hsotg->params.dma_desc_enable <= 0)) { 1811 - if (hsotg->params.dma_desc_enable > 0) 1806 + !hsotg->params.dma_desc_enable)) { 1807 + if (hsotg->params.dma_desc_enable) 1812 1808 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 1813 1809 chan->halt_status); 1814 1810 else ··· 1839 1835 } else if (chan->hcint & HCINTMSK_STALL) { 1840 1836 dwc2_hc_stall_intr(hsotg, chan, chnum, qtd); 1841 1837 } else if ((chan->hcint & HCINTMSK_XACTERR) && 1842 - hsotg->params.dma_desc_enable <= 0) { 1838 + !hsotg->params.dma_desc_enable) { 1843 1839 if (out_nak_enh) { 1844 1840 if (chan->hcint & 1845 1841 (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) { ··· 1859 1855 */ 1860 1856 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); 1861 1857 } else if ((chan->hcint & HCINTMSK_XCS_XACT) && 1862 - hsotg->params.dma_desc_enable > 0) { 1858 + hsotg->params.dma_desc_enable) { 1863 1859 dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd); 1864 1860 } else if ((chan->hcint & HCINTMSK_AHBERR) && 1865 - hsotg->params.dma_desc_enable > 0) { 1861 + hsotg->params.dma_desc_enable) { 1866 1862 dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd); 1867 1863 } else if (chan->hcint & HCINTMSK_BBLERR) { 1868 1864 dwc2_hc_babble_intr(hsotg, chan, chnum, qtd); ··· 1955 1951 dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n", 1956 1952 chnum); 1957 1953 1958 - if (hsotg->params.host_dma > 0) { 1954 + if (hsotg->params.host_dma) { 1959 1955 dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd); 1960 1956 } else { 1961 1957 if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd)) ··· 1974 1970 { 1975 1971 struct dwc2_qtd *cur_head; 1976 1972 1977 - if (qh == NULL) 1973 + if (!qh) 1978 1974 return false; 1979 1975 1980 1976 cur_head = 
list_first_entry(&qh->qtd_list, struct dwc2_qtd, ··· 2032 2028 * interrupt unmasked 2033 2029 */ 2034 2030 WARN_ON(hcint != HCINTMSK_CHHLTD); 2035 - if (hsotg->params.dma_desc_enable > 0) 2031 + if (hsotg->params.dma_desc_enable) 2036 2032 dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, 2037 2033 chan->halt_status); 2038 2034 else ··· 2060 2056 qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd, 2061 2057 qtd_list_entry); 2062 2058 2063 - if (hsotg->params.host_dma <= 0) { 2059 + if (!hsotg->params.host_dma) { 2064 2060 if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD) 2065 2061 hcint &= ~HCINTMSK_CHHLTD; 2066 2062 }
+18 -22
drivers/usb/dwc2/hcd_queue.c
··· 76 76 int num_channels; 77 77 78 78 num_channels = hsotg->params.host_channels; 79 - if (hsotg->periodic_channels + hsotg->non_periodic_channels < 80 - num_channels 81 - && hsotg->periodic_channels < num_channels - 1) { 79 + if ((hsotg->periodic_channels + hsotg->non_periodic_channels < 80 + num_channels) && (hsotg->periodic_channels < num_channels - 1)) { 82 81 status = 0; 83 82 } else { 84 83 dev_dbg(hsotg->dev, 85 - "%s: Total channels: %d, Periodic: %d, " 86 - "Non-periodic: %d\n", __func__, num_channels, 84 + "%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n", 85 + __func__, num_channels, 87 86 hsotg->periodic_channels, hsotg->non_periodic_channels); 88 87 status = -ENOSPC; 89 88 } ··· 484 485 } 485 486 } 486 487 487 - 488 488 struct dwc2_qh_print_data { 489 489 struct dwc2_hsotg *hsotg; 490 490 struct dwc2_qh *qh; ··· 556 558 DWC2_HS_SCHEDULE_UFRAMES, "uFrame", "us", 557 559 dwc2_qh_print, &print_data); 558 560 } 559 - return; 560 561 } 561 562 #else 562 563 static inline void dwc2_qh_schedule_print(struct dwc2_hsotg *hsotg, ··· 584 587 unsigned long *map = dwc2_get_ls_map(hsotg, qh); 585 588 int slice; 586 589 587 - if (map == NULL) 590 + if (!map) 588 591 return -EINVAL; 589 592 590 593 /* ··· 623 626 unsigned long *map = dwc2_get_ls_map(hsotg, qh); 624 627 625 628 /* Schedule should have failed, so no worries about no error code */ 626 - if (map == NULL) 629 + if (!map) 627 630 return; 628 631 629 632 pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, ··· 1104 1107 next_active_frame = earliest_frame; 1105 1108 1106 1109 /* Get the "no microframe schduler" out of the way... 
*/ 1107 - if (hsotg->params.uframe_sched <= 0) { 1110 + if (!hsotg->params.uframe_sched) { 1108 1111 if (qh->do_split) 1109 1112 /* Splits are active at microframe 0 minus 1 */ 1110 1113 next_active_frame |= 0x7; ··· 1179 1182 qh->start_active_frame = next_active_frame; 1180 1183 1181 1184 dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n", 1182 - qh, frame_number, qh->next_active_frame); 1185 + qh, frame_number, qh->next_active_frame); 1183 1186 } 1184 1187 1185 1188 /** ··· 1197 1200 { 1198 1201 int status; 1199 1202 1200 - if (hsotg->params.uframe_sched > 0) { 1203 + if (hsotg->params.uframe_sched) { 1201 1204 status = dwc2_uframe_schedule(hsotg, qh); 1202 1205 } else { 1203 1206 status = dwc2_periodic_channel_available(hsotg); ··· 1218 1221 return status; 1219 1222 } 1220 1223 1221 - if (hsotg->params.uframe_sched <= 0) 1224 + if (!hsotg->params.uframe_sched) 1222 1225 /* Reserve periodic channel */ 1223 1226 hsotg->periodic_channels++; 1224 1227 ··· 1254 1257 /* Update claimed usecs per (micro)frame */ 1255 1258 hsotg->periodic_usecs -= qh->host_us; 1256 1259 1257 - if (hsotg->params.uframe_sched > 0) { 1260 + if (hsotg->params.uframe_sched) { 1258 1261 dwc2_uframe_unschedule(hsotg, qh); 1259 1262 } else { 1260 1263 /* Release periodic channel reservation */ ··· 1391 1394 1392 1395 qh->unreserve_pending = 0; 1393 1396 1394 - if (hsotg->params.dma_desc_enable > 0) 1397 + if (hsotg->params.dma_desc_enable) 1395 1398 /* Don't rely on SOF and start in ready schedule */ 1396 1399 list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready); 1397 1400 else ··· 1498 1501 device_ns += dwc_tt->usb_tt->think_time; 1499 1502 qh->device_us = NS_TO_US(device_ns); 1500 1503 1501 - 1502 1504 qh->device_interval = urb->interval; 1503 1505 qh->host_interval = urb->interval * (do_split ? 
8 : 1); 1504 1506 ··· 1583 1587 * Return: Pointer to the newly allocated QH, or NULL on error 1584 1588 */ 1585 1589 struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg, 1586 - struct dwc2_hcd_urb *urb, 1590 + struct dwc2_hcd_urb *urb, 1587 1591 gfp_t mem_flags) 1588 1592 { 1589 1593 struct dwc2_qh *qh; ··· 1598 1602 1599 1603 dwc2_qh_init(hsotg, qh, urb, mem_flags); 1600 1604 1601 - if (hsotg->params.dma_desc_enable > 0 && 1605 + if (hsotg->params.dma_desc_enable && 1602 1606 dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) { 1603 1607 dwc2_hcd_qh_free(hsotg, qh); 1604 1608 return NULL; ··· 1710 1714 dwc2_deschedule_periodic(hsotg, qh); 1711 1715 hsotg->periodic_qh_count--; 1712 1716 if (!hsotg->periodic_qh_count && 1713 - hsotg->params.dma_desc_enable <= 0) { 1717 + !hsotg->params.dma_desc_enable) { 1714 1718 intr_mask = dwc2_readl(hsotg->regs + GINTMSK); 1715 1719 intr_mask &= ~GINTSTS_SOF; 1716 1720 dwc2_writel(intr_mask, hsotg->regs + GINTMSK); ··· 1737 1741 * Return: number missed by (or 0 if we didn't miss). 1738 1742 */ 1739 1743 static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg, 1740 - struct dwc2_qh *qh, u16 frame_number) 1744 + struct dwc2_qh *qh, u16 frame_number) 1741 1745 { 1742 1746 u16 old_frame = qh->next_active_frame; 1743 1747 u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1); ··· 1800 1804 * Return: number missed by (or 0 if we didn't miss). 
1801 1805 */ 1802 1806 static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg, 1803 - struct dwc2_qh *qh, u16 frame_number) 1807 + struct dwc2_qh *qh, u16 frame_number) 1804 1808 { 1805 1809 int missed = 0; 1806 1810 u16 interval = qh->host_interval; ··· 1922 1926 missed = dwc2_next_periodic_start(hsotg, qh, frame_number); 1923 1927 1924 1928 dwc2_sch_vdbg(hsotg, 1925 - "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n", 1929 + "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n", 1926 1930 qh, sched_next_periodic_split, frame_number, old_frame, 1927 1931 qh->next_active_frame, 1928 1932 dwc2_frame_num_dec(qh->next_active_frame, old_frame),
+298 -298
drivers/usb/dwc2/hw.h
··· 40 40 #define HSOTG_REG(x) (x) 41 41 42 42 #define GOTGCTL HSOTG_REG(0x000) 43 - #define GOTGCTL_CHIRPEN (1 << 27) 43 + #define GOTGCTL_CHIRPEN BIT(27) 44 44 #define GOTGCTL_MULT_VALID_BC_MASK (0x1f << 22) 45 45 #define GOTGCTL_MULT_VALID_BC_SHIFT 22 46 - #define GOTGCTL_OTGVER (1 << 20) 47 - #define GOTGCTL_BSESVLD (1 << 19) 48 - #define GOTGCTL_ASESVLD (1 << 18) 49 - #define GOTGCTL_DBNC_SHORT (1 << 17) 50 - #define GOTGCTL_CONID_B (1 << 16) 51 - #define GOTGCTL_DBNCE_FLTR_BYPASS (1 << 15) 52 - #define GOTGCTL_DEVHNPEN (1 << 11) 53 - #define GOTGCTL_HSTSETHNPEN (1 << 10) 54 - #define GOTGCTL_HNPREQ (1 << 9) 55 - #define GOTGCTL_HSTNEGSCS (1 << 8) 56 - #define GOTGCTL_SESREQ (1 << 1) 57 - #define GOTGCTL_SESREQSCS (1 << 0) 46 + #define GOTGCTL_OTGVER BIT(20) 47 + #define GOTGCTL_BSESVLD BIT(19) 48 + #define GOTGCTL_ASESVLD BIT(18) 49 + #define GOTGCTL_DBNC_SHORT BIT(17) 50 + #define GOTGCTL_CONID_B BIT(16) 51 + #define GOTGCTL_DBNCE_FLTR_BYPASS BIT(15) 52 + #define GOTGCTL_DEVHNPEN BIT(11) 53 + #define GOTGCTL_HSTSETHNPEN BIT(10) 54 + #define GOTGCTL_HNPREQ BIT(9) 55 + #define GOTGCTL_HSTNEGSCS BIT(8) 56 + #define GOTGCTL_SESREQ BIT(1) 57 + #define GOTGCTL_SESREQSCS BIT(0) 58 58 59 59 #define GOTGINT HSOTG_REG(0x004) 60 - #define GOTGINT_DBNCE_DONE (1 << 19) 61 - #define GOTGINT_A_DEV_TOUT_CHG (1 << 18) 62 - #define GOTGINT_HST_NEG_DET (1 << 17) 63 - #define GOTGINT_HST_NEG_SUC_STS_CHNG (1 << 9) 64 - #define GOTGINT_SES_REQ_SUC_STS_CHNG (1 << 8) 65 - #define GOTGINT_SES_END_DET (1 << 2) 60 + #define GOTGINT_DBNCE_DONE BIT(19) 61 + #define GOTGINT_A_DEV_TOUT_CHG BIT(18) 62 + #define GOTGINT_HST_NEG_DET BIT(17) 63 + #define GOTGINT_HST_NEG_SUC_STS_CHNG BIT(9) 64 + #define GOTGINT_SES_REQ_SUC_STS_CHNG BIT(8) 65 + #define GOTGINT_SES_END_DET BIT(2) 66 66 67 67 #define GAHBCFG HSOTG_REG(0x008) 68 - #define GAHBCFG_AHB_SINGLE (1 << 23) 69 - #define GAHBCFG_NOTI_ALL_DMA_WRIT (1 << 22) 70 - #define GAHBCFG_REM_MEM_SUPP (1 << 21) 71 - #define GAHBCFG_P_TXF_EMP_LVL (1 
<< 8) 72 - #define GAHBCFG_NP_TXF_EMP_LVL (1 << 7) 73 - #define GAHBCFG_DMA_EN (1 << 5) 68 + #define GAHBCFG_AHB_SINGLE BIT(23) 69 + #define GAHBCFG_NOTI_ALL_DMA_WRIT BIT(22) 70 + #define GAHBCFG_REM_MEM_SUPP BIT(21) 71 + #define GAHBCFG_P_TXF_EMP_LVL BIT(8) 72 + #define GAHBCFG_NP_TXF_EMP_LVL BIT(7) 73 + #define GAHBCFG_DMA_EN BIT(5) 74 74 #define GAHBCFG_HBSTLEN_MASK (0xf << 1) 75 75 #define GAHBCFG_HBSTLEN_SHIFT 1 76 76 #define GAHBCFG_HBSTLEN_SINGLE 0 ··· 78 78 #define GAHBCFG_HBSTLEN_INCR4 3 79 79 #define GAHBCFG_HBSTLEN_INCR8 5 80 80 #define GAHBCFG_HBSTLEN_INCR16 7 81 - #define GAHBCFG_GLBL_INTR_EN (1 << 0) 81 + #define GAHBCFG_GLBL_INTR_EN BIT(0) 82 82 #define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \ 83 83 GAHBCFG_NP_TXF_EMP_LVL | \ 84 84 GAHBCFG_DMA_EN | \ 85 85 GAHBCFG_GLBL_INTR_EN) 86 86 87 87 #define GUSBCFG HSOTG_REG(0x00C) 88 - #define GUSBCFG_FORCEDEVMODE (1 << 30) 89 - #define GUSBCFG_FORCEHOSTMODE (1 << 29) 90 - #define GUSBCFG_TXENDDELAY (1 << 28) 91 - #define GUSBCFG_ICTRAFFICPULLREMOVE (1 << 27) 92 - #define GUSBCFG_ICUSBCAP (1 << 26) 93 - #define GUSBCFG_ULPI_INT_PROT_DIS (1 << 25) 94 - #define GUSBCFG_INDICATORPASSTHROUGH (1 << 24) 95 - #define GUSBCFG_INDICATORCOMPLEMENT (1 << 23) 96 - #define GUSBCFG_TERMSELDLPULSE (1 << 22) 97 - #define GUSBCFG_ULPI_INT_VBUS_IND (1 << 21) 98 - #define GUSBCFG_ULPI_EXT_VBUS_DRV (1 << 20) 99 - #define GUSBCFG_ULPI_CLK_SUSP_M (1 << 19) 100 - #define GUSBCFG_ULPI_AUTO_RES (1 << 18) 101 - #define GUSBCFG_ULPI_FS_LS (1 << 17) 102 - #define GUSBCFG_OTG_UTMI_FS_SEL (1 << 16) 103 - #define GUSBCFG_PHY_LP_CLK_SEL (1 << 15) 88 + #define GUSBCFG_FORCEDEVMODE BIT(30) 89 + #define GUSBCFG_FORCEHOSTMODE BIT(29) 90 + #define GUSBCFG_TXENDDELAY BIT(28) 91 + #define GUSBCFG_ICTRAFFICPULLREMOVE BIT(27) 92 + #define GUSBCFG_ICUSBCAP BIT(26) 93 + #define GUSBCFG_ULPI_INT_PROT_DIS BIT(25) 94 + #define GUSBCFG_INDICATORPASSTHROUGH BIT(24) 95 + #define GUSBCFG_INDICATORCOMPLEMENT BIT(23) 96 + #define GUSBCFG_TERMSELDLPULSE 
BIT(22) 97 + #define GUSBCFG_ULPI_INT_VBUS_IND BIT(21) 98 + #define GUSBCFG_ULPI_EXT_VBUS_DRV BIT(20) 99 + #define GUSBCFG_ULPI_CLK_SUSP_M BIT(19) 100 + #define GUSBCFG_ULPI_AUTO_RES BIT(18) 101 + #define GUSBCFG_ULPI_FS_LS BIT(17) 102 + #define GUSBCFG_OTG_UTMI_FS_SEL BIT(16) 103 + #define GUSBCFG_PHY_LP_CLK_SEL BIT(15) 104 104 #define GUSBCFG_USBTRDTIM_MASK (0xf << 10) 105 105 #define GUSBCFG_USBTRDTIM_SHIFT 10 106 - #define GUSBCFG_HNPCAP (1 << 9) 107 - #define GUSBCFG_SRPCAP (1 << 8) 108 - #define GUSBCFG_DDRSEL (1 << 7) 109 - #define GUSBCFG_PHYSEL (1 << 6) 110 - #define GUSBCFG_FSINTF (1 << 5) 111 - #define GUSBCFG_ULPI_UTMI_SEL (1 << 4) 112 - #define GUSBCFG_PHYIF16 (1 << 3) 106 + #define GUSBCFG_HNPCAP BIT(9) 107 + #define GUSBCFG_SRPCAP BIT(8) 108 + #define GUSBCFG_DDRSEL BIT(7) 109 + #define GUSBCFG_PHYSEL BIT(6) 110 + #define GUSBCFG_FSINTF BIT(5) 111 + #define GUSBCFG_ULPI_UTMI_SEL BIT(4) 112 + #define GUSBCFG_PHYIF16 BIT(3) 113 113 #define GUSBCFG_PHYIF8 (0 << 3) 114 114 #define GUSBCFG_TOUTCAL_MASK (0x7 << 0) 115 115 #define GUSBCFG_TOUTCAL_SHIFT 0 ··· 117 117 #define GUSBCFG_TOUTCAL(_x) ((_x) << 0) 118 118 119 119 #define GRSTCTL HSOTG_REG(0x010) 120 - #define GRSTCTL_AHBIDLE (1 << 31) 121 - #define GRSTCTL_DMAREQ (1 << 30) 120 + #define GRSTCTL_AHBIDLE BIT(31) 121 + #define GRSTCTL_DMAREQ BIT(30) 122 122 #define GRSTCTL_TXFNUM_MASK (0x1f << 6) 123 123 #define GRSTCTL_TXFNUM_SHIFT 6 124 124 #define GRSTCTL_TXFNUM_LIMIT 0x1f 125 125 #define GRSTCTL_TXFNUM(_x) ((_x) << 6) 126 - #define GRSTCTL_TXFFLSH (1 << 5) 127 - #define GRSTCTL_RXFFLSH (1 << 4) 128 - #define GRSTCTL_IN_TKNQ_FLSH (1 << 3) 129 - #define GRSTCTL_FRMCNTRRST (1 << 2) 130 - #define GRSTCTL_HSFTRST (1 << 1) 131 - #define GRSTCTL_CSFTRST (1 << 0) 126 + #define GRSTCTL_TXFFLSH BIT(5) 127 + #define GRSTCTL_RXFFLSH BIT(4) 128 + #define GRSTCTL_IN_TKNQ_FLSH BIT(3) 129 + #define GRSTCTL_FRMCNTRRST BIT(2) 130 + #define GRSTCTL_HSFTRST BIT(1) 131 + #define GRSTCTL_CSFTRST BIT(0) 132 132 133 133 
#define GINTSTS HSOTG_REG(0x014) 134 134 #define GINTMSK HSOTG_REG(0x018) 135 - #define GINTSTS_WKUPINT (1 << 31) 136 - #define GINTSTS_SESSREQINT (1 << 30) 137 - #define GINTSTS_DISCONNINT (1 << 29) 138 - #define GINTSTS_CONIDSTSCHNG (1 << 28) 139 - #define GINTSTS_LPMTRANRCVD (1 << 27) 140 - #define GINTSTS_PTXFEMP (1 << 26) 141 - #define GINTSTS_HCHINT (1 << 25) 142 - #define GINTSTS_PRTINT (1 << 24) 143 - #define GINTSTS_RESETDET (1 << 23) 144 - #define GINTSTS_FET_SUSP (1 << 22) 145 - #define GINTSTS_INCOMPL_IP (1 << 21) 146 - #define GINTSTS_INCOMPL_SOOUT (1 << 21) 147 - #define GINTSTS_INCOMPL_SOIN (1 << 20) 148 - #define GINTSTS_OEPINT (1 << 19) 149 - #define GINTSTS_IEPINT (1 << 18) 150 - #define GINTSTS_EPMIS (1 << 17) 151 - #define GINTSTS_RESTOREDONE (1 << 16) 152 - #define GINTSTS_EOPF (1 << 15) 153 - #define GINTSTS_ISOUTDROP (1 << 14) 154 - #define GINTSTS_ENUMDONE (1 << 13) 155 - #define GINTSTS_USBRST (1 << 12) 156 - #define GINTSTS_USBSUSP (1 << 11) 157 - #define GINTSTS_ERLYSUSP (1 << 10) 158 - #define GINTSTS_I2CINT (1 << 9) 159 - #define GINTSTS_ULPI_CK_INT (1 << 8) 160 - #define GINTSTS_GOUTNAKEFF (1 << 7) 161 - #define GINTSTS_GINNAKEFF (1 << 6) 162 - #define GINTSTS_NPTXFEMP (1 << 5) 163 - #define GINTSTS_RXFLVL (1 << 4) 164 - #define GINTSTS_SOF (1 << 3) 165 - #define GINTSTS_OTGINT (1 << 2) 166 - #define GINTSTS_MODEMIS (1 << 1) 167 - #define GINTSTS_CURMODE_HOST (1 << 0) 135 + #define GINTSTS_WKUPINT BIT(31) 136 + #define GINTSTS_SESSREQINT BIT(30) 137 + #define GINTSTS_DISCONNINT BIT(29) 138 + #define GINTSTS_CONIDSTSCHNG BIT(28) 139 + #define GINTSTS_LPMTRANRCVD BIT(27) 140 + #define GINTSTS_PTXFEMP BIT(26) 141 + #define GINTSTS_HCHINT BIT(25) 142 + #define GINTSTS_PRTINT BIT(24) 143 + #define GINTSTS_RESETDET BIT(23) 144 + #define GINTSTS_FET_SUSP BIT(22) 145 + #define GINTSTS_INCOMPL_IP BIT(21) 146 + #define GINTSTS_INCOMPL_SOOUT BIT(21) 147 + #define GINTSTS_INCOMPL_SOIN BIT(20) 148 + #define GINTSTS_OEPINT BIT(19) 149 + #define 
GINTSTS_IEPINT BIT(18) 150 + #define GINTSTS_EPMIS BIT(17) 151 + #define GINTSTS_RESTOREDONE BIT(16) 152 + #define GINTSTS_EOPF BIT(15) 153 + #define GINTSTS_ISOUTDROP BIT(14) 154 + #define GINTSTS_ENUMDONE BIT(13) 155 + #define GINTSTS_USBRST BIT(12) 156 + #define GINTSTS_USBSUSP BIT(11) 157 + #define GINTSTS_ERLYSUSP BIT(10) 158 + #define GINTSTS_I2CINT BIT(9) 159 + #define GINTSTS_ULPI_CK_INT BIT(8) 160 + #define GINTSTS_GOUTNAKEFF BIT(7) 161 + #define GINTSTS_GINNAKEFF BIT(6) 162 + #define GINTSTS_NPTXFEMP BIT(5) 163 + #define GINTSTS_RXFLVL BIT(4) 164 + #define GINTSTS_SOF BIT(3) 165 + #define GINTSTS_OTGINT BIT(2) 166 + #define GINTSTS_MODEMIS BIT(1) 167 + #define GINTSTS_CURMODE_HOST BIT(0) 168 168 169 169 #define GRXSTSR HSOTG_REG(0x01C) 170 170 #define GRXSTSP HSOTG_REG(0x020) ··· 208 208 #define GNPTXSTS_NP_TXF_SPC_AVAIL_GET(_v) (((_v) >> 0) & 0xffff) 209 209 210 210 #define GI2CCTL HSOTG_REG(0x0030) 211 - #define GI2CCTL_BSYDNE (1 << 31) 212 - #define GI2CCTL_RW (1 << 30) 213 - #define GI2CCTL_I2CDATSE0 (1 << 28) 211 + #define GI2CCTL_BSYDNE BIT(31) 212 + #define GI2CCTL_RW BIT(30) 213 + #define GI2CCTL_I2CDATSE0 BIT(28) 214 214 #define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26) 215 215 #define GI2CCTL_I2CDEVADDR_SHIFT 26 216 - #define GI2CCTL_I2CSUSPCTL (1 << 25) 217 - #define GI2CCTL_ACK (1 << 24) 218 - #define GI2CCTL_I2CEN (1 << 23) 216 + #define GI2CCTL_I2CSUSPCTL BIT(25) 217 + #define GI2CCTL_ACK BIT(24) 218 + #define GI2CCTL_I2CEN BIT(23) 219 219 #define GI2CCTL_ADDR_MASK (0x7f << 16) 220 220 #define GI2CCTL_ADDR_SHIFT 16 221 221 #define GI2CCTL_REGADDR_MASK (0xff << 8) ··· 230 230 #define GHWCFG1 HSOTG_REG(0x0044) 231 231 232 232 #define GHWCFG2 HSOTG_REG(0x0048) 233 - #define GHWCFG2_OTG_ENABLE_IC_USB (1 << 31) 233 + #define GHWCFG2_OTG_ENABLE_IC_USB BIT(31) 234 234 #define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26) 235 235 #define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26 236 236 #define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 24) 237 237 #define 
GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24 238 238 #define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22) 239 239 #define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22 240 - #define GHWCFG2_MULTI_PROC_INT (1 << 20) 241 - #define GHWCFG2_DYNAMIC_FIFO (1 << 19) 242 - #define GHWCFG2_PERIO_EP_SUPPORTED (1 << 18) 240 + #define GHWCFG2_MULTI_PROC_INT BIT(20) 241 + #define GHWCFG2_DYNAMIC_FIFO BIT(19) 242 + #define GHWCFG2_PERIO_EP_SUPPORTED BIT(18) 243 243 #define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14) 244 244 #define GHWCFG2_NUM_HOST_CHAN_SHIFT 14 245 245 #define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10) ··· 256 256 #define GHWCFG2_HS_PHY_TYPE_UTMI 1 257 257 #define GHWCFG2_HS_PHY_TYPE_ULPI 2 258 258 #define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 259 - #define GHWCFG2_POINT2POINT (1 << 5) 259 + #define GHWCFG2_POINT2POINT BIT(5) 260 260 #define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3) 261 261 #define GHWCFG2_ARCHITECTURE_SHIFT 3 262 262 #define GHWCFG2_SLAVE_ONLY_ARCH 0 ··· 276 276 #define GHWCFG3 HSOTG_REG(0x004c) 277 277 #define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16) 278 278 #define GHWCFG3_DFIFO_DEPTH_SHIFT 16 279 - #define GHWCFG3_OTG_LPM_EN (1 << 15) 280 - #define GHWCFG3_BC_SUPPORT (1 << 14) 281 - #define GHWCFG3_OTG_ENABLE_HSIC (1 << 13) 282 - #define GHWCFG3_ADP_SUPP (1 << 12) 283 - #define GHWCFG3_SYNCH_RESET_TYPE (1 << 11) 284 - #define GHWCFG3_OPTIONAL_FEATURES (1 << 10) 285 - #define GHWCFG3_VENDOR_CTRL_IF (1 << 9) 286 - #define GHWCFG3_I2C (1 << 8) 287 - #define GHWCFG3_OTG_FUNC (1 << 7) 279 + #define GHWCFG3_OTG_LPM_EN BIT(15) 280 + #define GHWCFG3_BC_SUPPORT BIT(14) 281 + #define GHWCFG3_OTG_ENABLE_HSIC BIT(13) 282 + #define GHWCFG3_ADP_SUPP BIT(12) 283 + #define GHWCFG3_SYNCH_RESET_TYPE BIT(11) 284 + #define GHWCFG3_OPTIONAL_FEATURES BIT(10) 285 + #define GHWCFG3_VENDOR_CTRL_IF BIT(9) 286 + #define GHWCFG3_I2C BIT(8) 287 + #define GHWCFG3_OTG_FUNC BIT(7) 288 288 #define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4) 289 289 #define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4 290 290 
#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0) 291 291 #define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0 292 292 293 293 #define GHWCFG4 HSOTG_REG(0x0050) 294 - #define GHWCFG4_DESC_DMA_DYN (1 << 31) 295 - #define GHWCFG4_DESC_DMA (1 << 30) 294 + #define GHWCFG4_DESC_DMA_DYN BIT(31) 295 + #define GHWCFG4_DESC_DMA BIT(30) 296 296 #define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26) 297 297 #define GHWCFG4_NUM_IN_EPS_SHIFT 26 298 - #define GHWCFG4_DED_FIFO_EN (1 << 25) 298 + #define GHWCFG4_DED_FIFO_EN BIT(25) 299 299 #define GHWCFG4_DED_FIFO_SHIFT 25 300 - #define GHWCFG4_SESSION_END_FILT_EN (1 << 24) 301 - #define GHWCFG4_B_VALID_FILT_EN (1 << 23) 302 - #define GHWCFG4_A_VALID_FILT_EN (1 << 22) 303 - #define GHWCFG4_VBUS_VALID_FILT_EN (1 << 21) 304 - #define GHWCFG4_IDDIG_FILT_EN (1 << 20) 300 + #define GHWCFG4_SESSION_END_FILT_EN BIT(24) 301 + #define GHWCFG4_B_VALID_FILT_EN BIT(23) 302 + #define GHWCFG4_A_VALID_FILT_EN BIT(22) 303 + #define GHWCFG4_VBUS_VALID_FILT_EN BIT(21) 304 + #define GHWCFG4_IDDIG_FILT_EN BIT(20) 305 305 #define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16) 306 306 #define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16 307 307 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14) ··· 309 309 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0 310 310 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1 311 311 #define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2 312 - #define GHWCFG4_XHIBER (1 << 7) 313 - #define GHWCFG4_HIBER (1 << 6) 314 - #define GHWCFG4_MIN_AHB_FREQ (1 << 5) 315 - #define GHWCFG4_POWER_OPTIMIZ (1 << 4) 312 + #define GHWCFG4_XHIBER BIT(7) 313 + #define GHWCFG4_HIBER BIT(6) 314 + #define GHWCFG4_MIN_AHB_FREQ BIT(5) 315 + #define GHWCFG4_POWER_OPTIMIZ BIT(4) 316 316 #define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0) 317 317 #define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0 318 318 319 319 #define GLPMCFG HSOTG_REG(0x0054) 320 - #define GLPMCFG_INV_SEL_HSIC (1 << 31) 321 - #define GLPMCFG_HSIC_CONNECT (1 << 30) 320 + #define GLPMCFG_INV_SEL_HSIC BIT(31) 321 + #define 
GLPMCFG_HSIC_CONNECT BIT(30) 322 322 #define GLPMCFG_RETRY_COUNT_STS_MASK (0x7 << 25) 323 323 #define GLPMCFG_RETRY_COUNT_STS_SHIFT 25 324 - #define GLPMCFG_SEND_LPM (1 << 24) 324 + #define GLPMCFG_SEND_LPM BIT(24) 325 325 #define GLPMCFG_RETRY_COUNT_MASK (0x7 << 21) 326 326 #define GLPMCFG_RETRY_COUNT_SHIFT 21 327 327 #define GLPMCFG_LPM_CHAN_INDEX_MASK (0xf << 17) 328 328 #define GLPMCFG_LPM_CHAN_INDEX_SHIFT 17 329 - #define GLPMCFG_SLEEP_STATE_RESUMEOK (1 << 16) 330 - #define GLPMCFG_PRT_SLEEP_STS (1 << 15) 329 + #define GLPMCFG_SLEEP_STATE_RESUMEOK BIT(16) 330 + #define GLPMCFG_PRT_SLEEP_STS BIT(15) 331 331 #define GLPMCFG_LPM_RESP_MASK (0x3 << 13) 332 332 #define GLPMCFG_LPM_RESP_SHIFT 13 333 333 #define GLPMCFG_HIRD_THRES_MASK (0x1f << 8) 334 334 #define GLPMCFG_HIRD_THRES_SHIFT 8 335 335 #define GLPMCFG_HIRD_THRES_EN (0x10 << 8) 336 - #define GLPMCFG_EN_UTMI_SLEEP (1 << 7) 337 - #define GLPMCFG_REM_WKUP_EN (1 << 6) 336 + #define GLPMCFG_EN_UTMI_SLEEP BIT(7) 337 + #define GLPMCFG_REM_WKUP_EN BIT(6) 338 338 #define GLPMCFG_HIRD_MASK (0xf << 2) 339 339 #define GLPMCFG_HIRD_SHIFT 2 340 - #define GLPMCFG_APPL_RESP (1 << 1) 341 - #define GLPMCFG_LPM_CAP_EN (1 << 0) 340 + #define GLPMCFG_APPL_RESP BIT(1) 341 + #define GLPMCFG_LPM_CAP_EN BIT(0) 342 342 343 343 #define GPWRDN HSOTG_REG(0x0058) 344 344 #define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24) 345 345 #define GPWRDN_MULT_VAL_ID_BC_SHIFT 24 346 - #define GPWRDN_ADP_INT (1 << 23) 347 - #define GPWRDN_BSESSVLD (1 << 22) 348 - #define GPWRDN_IDSTS (1 << 21) 346 + #define GPWRDN_ADP_INT BIT(23) 347 + #define GPWRDN_BSESSVLD BIT(22) 348 + #define GPWRDN_IDSTS BIT(21) 349 349 #define GPWRDN_LINESTATE_MASK (0x3 << 19) 350 350 #define GPWRDN_LINESTATE_SHIFT 19 351 - #define GPWRDN_STS_CHGINT_MSK (1 << 18) 352 - #define GPWRDN_STS_CHGINT (1 << 17) 353 - #define GPWRDN_SRP_DET_MSK (1 << 16) 354 - #define GPWRDN_SRP_DET (1 << 15) 355 - #define GPWRDN_CONNECT_DET_MSK (1 << 14) 356 - #define GPWRDN_CONNECT_DET (1 << 13) 357 - 
#define GPWRDN_DISCONN_DET_MSK (1 << 12) 358 - #define GPWRDN_DISCONN_DET (1 << 11) 359 - #define GPWRDN_RST_DET_MSK (1 << 10) 360 - #define GPWRDN_RST_DET (1 << 9) 361 - #define GPWRDN_LNSTSCHG_MSK (1 << 8) 362 - #define GPWRDN_LNSTSCHG (1 << 7) 363 - #define GPWRDN_DIS_VBUS (1 << 6) 364 - #define GPWRDN_PWRDNSWTCH (1 << 5) 365 - #define GPWRDN_PWRDNRSTN (1 << 4) 366 - #define GPWRDN_PWRDNCLMP (1 << 3) 367 - #define GPWRDN_RESTORE (1 << 2) 368 - #define GPWRDN_PMUACTV (1 << 1) 369 - #define GPWRDN_PMUINTSEL (1 << 0) 351 + #define GPWRDN_STS_CHGINT_MSK BIT(18) 352 + #define GPWRDN_STS_CHGINT BIT(17) 353 + #define GPWRDN_SRP_DET_MSK BIT(16) 354 + #define GPWRDN_SRP_DET BIT(15) 355 + #define GPWRDN_CONNECT_DET_MSK BIT(14) 356 + #define GPWRDN_CONNECT_DET BIT(13) 357 + #define GPWRDN_DISCONN_DET_MSK BIT(12) 358 + #define GPWRDN_DISCONN_DET BIT(11) 359 + #define GPWRDN_RST_DET_MSK BIT(10) 360 + #define GPWRDN_RST_DET BIT(9) 361 + #define GPWRDN_LNSTSCHG_MSK BIT(8) 362 + #define GPWRDN_LNSTSCHG BIT(7) 363 + #define GPWRDN_DIS_VBUS BIT(6) 364 + #define GPWRDN_PWRDNSWTCH BIT(5) 365 + #define GPWRDN_PWRDNRSTN BIT(4) 366 + #define GPWRDN_PWRDNCLMP BIT(3) 367 + #define GPWRDN_RESTORE BIT(2) 368 + #define GPWRDN_PMUACTV BIT(1) 369 + #define GPWRDN_PMUINTSEL BIT(0) 370 370 371 371 #define GDFIFOCFG HSOTG_REG(0x005c) 372 372 #define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16) ··· 377 377 #define ADPCTL HSOTG_REG(0x0060) 378 378 #define ADPCTL_AR_MASK (0x3 << 27) 379 379 #define ADPCTL_AR_SHIFT 27 380 - #define ADPCTL_ADP_TMOUT_INT_MSK (1 << 26) 381 - #define ADPCTL_ADP_SNS_INT_MSK (1 << 25) 382 - #define ADPCTL_ADP_PRB_INT_MSK (1 << 24) 383 - #define ADPCTL_ADP_TMOUT_INT (1 << 23) 384 - #define ADPCTL_ADP_SNS_INT (1 << 22) 385 - #define ADPCTL_ADP_PRB_INT (1 << 21) 386 - #define ADPCTL_ADPENA (1 << 20) 387 - #define ADPCTL_ADPRES (1 << 19) 388 - #define ADPCTL_ENASNS (1 << 18) 389 - #define ADPCTL_ENAPRB (1 << 17) 380 + #define ADPCTL_ADP_TMOUT_INT_MSK BIT(26) 381 + #define 
ADPCTL_ADP_SNS_INT_MSK BIT(25) 382 + #define ADPCTL_ADP_PRB_INT_MSK BIT(24) 383 + #define ADPCTL_ADP_TMOUT_INT BIT(23) 384 + #define ADPCTL_ADP_SNS_INT BIT(22) 385 + #define ADPCTL_ADP_PRB_INT BIT(21) 386 + #define ADPCTL_ADPENA BIT(20) 387 + #define ADPCTL_ADPRES BIT(19) 388 + #define ADPCTL_ENASNS BIT(18) 389 + #define ADPCTL_ENAPRB BIT(17) 390 390 #define ADPCTL_RTIM_MASK (0x7ff << 6) 391 391 #define ADPCTL_RTIM_SHIFT 6 392 392 #define ADPCTL_PRB_PER_MASK (0x3 << 4) ··· 412 412 /* Device mode registers */ 413 413 414 414 #define DCFG HSOTG_REG(0x800) 415 - #define DCFG_DESCDMA_EN (1 << 23) 415 + #define DCFG_DESCDMA_EN BIT(23) 416 416 #define DCFG_EPMISCNT_MASK (0x1f << 18) 417 417 #define DCFG_EPMISCNT_SHIFT 18 418 418 #define DCFG_EPMISCNT_LIMIT 0x1f ··· 425 425 #define DCFG_DEVADDR_SHIFT 4 426 426 #define DCFG_DEVADDR_LIMIT 0x7f 427 427 #define DCFG_DEVADDR(_x) ((_x) << 4) 428 - #define DCFG_NZ_STS_OUT_HSHK (1 << 2) 428 + #define DCFG_NZ_STS_OUT_HSHK BIT(2) 429 429 #define DCFG_DEVSPD_MASK (0x3 << 0) 430 430 #define DCFG_DEVSPD_SHIFT 0 431 431 #define DCFG_DEVSPD_HS 0 ··· 434 434 #define DCFG_DEVSPD_FS48 3 435 435 436 436 #define DCTL HSOTG_REG(0x804) 437 - #define DCTL_PWRONPRGDONE (1 << 11) 438 - #define DCTL_CGOUTNAK (1 << 10) 439 - #define DCTL_SGOUTNAK (1 << 9) 440 - #define DCTL_CGNPINNAK (1 << 8) 441 - #define DCTL_SGNPINNAK (1 << 7) 437 + #define DCTL_PWRONPRGDONE BIT(11) 438 + #define DCTL_CGOUTNAK BIT(10) 439 + #define DCTL_SGOUTNAK BIT(9) 440 + #define DCTL_CGNPINNAK BIT(8) 441 + #define DCTL_SGNPINNAK BIT(7) 442 442 #define DCTL_TSTCTL_MASK (0x7 << 4) 443 443 #define DCTL_TSTCTL_SHIFT 4 444 - #define DCTL_GOUTNAKSTS (1 << 3) 445 - #define DCTL_GNPINNAKSTS (1 << 2) 446 - #define DCTL_SFTDISCON (1 << 1) 447 - #define DCTL_RMTWKUPSIG (1 << 0) 444 + #define DCTL_GOUTNAKSTS BIT(3) 445 + #define DCTL_GNPINNAKSTS BIT(2) 446 + #define DCTL_SFTDISCON BIT(1) 447 + #define DCTL_RMTWKUPSIG BIT(0) 448 448 449 449 #define DSTS HSOTG_REG(0x808) 450 450 #define 
DSTS_SOFFN_MASK (0x3fff << 8) 451 451 #define DSTS_SOFFN_SHIFT 8 452 452 #define DSTS_SOFFN_LIMIT 0x3fff 453 453 #define DSTS_SOFFN(_x) ((_x) << 8) 454 - #define DSTS_ERRATICERR (1 << 3) 454 + #define DSTS_ERRATICERR BIT(3) 455 455 #define DSTS_ENUMSPD_MASK (0x3 << 1) 456 456 #define DSTS_ENUMSPD_SHIFT 1 457 457 #define DSTS_ENUMSPD_HS 0 458 458 #define DSTS_ENUMSPD_FS 1 459 459 #define DSTS_ENUMSPD_LS 2 460 460 #define DSTS_ENUMSPD_FS48 3 461 - #define DSTS_SUSPSTS (1 << 0) 461 + #define DSTS_SUSPSTS BIT(0) 462 462 463 463 #define DIEPMSK HSOTG_REG(0x810) 464 - #define DIEPMSK_NAKMSK (1 << 13) 465 - #define DIEPMSK_BNAININTRMSK (1 << 9) 466 - #define DIEPMSK_TXFIFOUNDRNMSK (1 << 8) 467 - #define DIEPMSK_TXFIFOEMPTY (1 << 7) 468 - #define DIEPMSK_INEPNAKEFFMSK (1 << 6) 469 - #define DIEPMSK_INTKNEPMISMSK (1 << 5) 470 - #define DIEPMSK_INTKNTXFEMPMSK (1 << 4) 471 - #define DIEPMSK_TIMEOUTMSK (1 << 3) 472 - #define DIEPMSK_AHBERRMSK (1 << 2) 473 - #define DIEPMSK_EPDISBLDMSK (1 << 1) 474 - #define DIEPMSK_XFERCOMPLMSK (1 << 0) 464 + #define DIEPMSK_NAKMSK BIT(13) 465 + #define DIEPMSK_BNAININTRMSK BIT(9) 466 + #define DIEPMSK_TXFIFOUNDRNMSK BIT(8) 467 + #define DIEPMSK_TXFIFOEMPTY BIT(7) 468 + #define DIEPMSK_INEPNAKEFFMSK BIT(6) 469 + #define DIEPMSK_INTKNEPMISMSK BIT(5) 470 + #define DIEPMSK_INTKNTXFEMPMSK BIT(4) 471 + #define DIEPMSK_TIMEOUTMSK BIT(3) 472 + #define DIEPMSK_AHBERRMSK BIT(2) 473 + #define DIEPMSK_EPDISBLDMSK BIT(1) 474 + #define DIEPMSK_XFERCOMPLMSK BIT(0) 475 475 476 476 #define DOEPMSK HSOTG_REG(0x814) 477 - #define DOEPMSK_BNAMSK (1 << 9) 478 - #define DOEPMSK_BACK2BACKSETUP (1 << 6) 479 - #define DOEPMSK_STSPHSERCVDMSK (1 << 5) 480 - #define DOEPMSK_OUTTKNEPDISMSK (1 << 4) 481 - #define DOEPMSK_SETUPMSK (1 << 3) 482 - #define DOEPMSK_AHBERRMSK (1 << 2) 483 - #define DOEPMSK_EPDISBLDMSK (1 << 1) 484 - #define DOEPMSK_XFERCOMPLMSK (1 << 0) 477 + #define DOEPMSK_BNAMSK BIT(9) 478 + #define DOEPMSK_BACK2BACKSETUP BIT(6) 479 + #define 
DOEPMSK_STSPHSERCVDMSK BIT(5) 480 + #define DOEPMSK_OUTTKNEPDISMSK BIT(4) 481 + #define DOEPMSK_SETUPMSK BIT(3) 482 + #define DOEPMSK_AHBERRMSK BIT(2) 483 + #define DOEPMSK_EPDISBLDMSK BIT(1) 484 + #define DOEPMSK_XFERCOMPLMSK BIT(0) 485 485 486 486 #define DAINT HSOTG_REG(0x818) 487 487 #define DAINTMSK HSOTG_REG(0x81C) ··· 516 516 #define D0EPCTL_MPS_16 2 517 517 #define D0EPCTL_MPS_8 3 518 518 519 - #define DXEPCTL_EPENA (1 << 31) 520 - #define DXEPCTL_EPDIS (1 << 30) 521 - #define DXEPCTL_SETD1PID (1 << 29) 522 - #define DXEPCTL_SETODDFR (1 << 29) 523 - #define DXEPCTL_SETD0PID (1 << 28) 524 - #define DXEPCTL_SETEVENFR (1 << 28) 525 - #define DXEPCTL_SNAK (1 << 27) 526 - #define DXEPCTL_CNAK (1 << 26) 519 + #define DXEPCTL_EPENA BIT(31) 520 + #define DXEPCTL_EPDIS BIT(30) 521 + #define DXEPCTL_SETD1PID BIT(29) 522 + #define DXEPCTL_SETODDFR BIT(29) 523 + #define DXEPCTL_SETD0PID BIT(28) 524 + #define DXEPCTL_SETEVENFR BIT(28) 525 + #define DXEPCTL_SNAK BIT(27) 526 + #define DXEPCTL_CNAK BIT(26) 527 527 #define DXEPCTL_TXFNUM_MASK (0xf << 22) 528 528 #define DXEPCTL_TXFNUM_SHIFT 22 529 529 #define DXEPCTL_TXFNUM_LIMIT 0xf 530 530 #define DXEPCTL_TXFNUM(_x) ((_x) << 22) 531 - #define DXEPCTL_STALL (1 << 21) 532 - #define DXEPCTL_SNP (1 << 20) 531 + #define DXEPCTL_STALL BIT(21) 532 + #define DXEPCTL_SNP BIT(20) 533 533 #define DXEPCTL_EPTYPE_MASK (0x3 << 18) 534 534 #define DXEPCTL_EPTYPE_CONTROL (0x0 << 18) 535 535 #define DXEPCTL_EPTYPE_ISO (0x1 << 18) 536 536 #define DXEPCTL_EPTYPE_BULK (0x2 << 18) 537 537 #define DXEPCTL_EPTYPE_INTERRUPT (0x3 << 18) 538 538 539 - #define DXEPCTL_NAKSTS (1 << 17) 540 - #define DXEPCTL_DPID (1 << 16) 541 - #define DXEPCTL_EOFRNUM (1 << 16) 542 - #define DXEPCTL_USBACTEP (1 << 15) 539 + #define DXEPCTL_NAKSTS BIT(17) 540 + #define DXEPCTL_DPID BIT(16) 541 + #define DXEPCTL_EOFRNUM BIT(16) 542 + #define DXEPCTL_USBACTEP BIT(15) 543 543 #define DXEPCTL_NEXTEP_MASK (0xf << 11) 544 544 #define DXEPCTL_NEXTEP_SHIFT 11 545 545 #define 
DXEPCTL_NEXTEP_LIMIT 0xf ··· 551 551 552 552 #define DIEPINT(_a) HSOTG_REG(0x908 + ((_a) * 0x20)) 553 553 #define DOEPINT(_a) HSOTG_REG(0xB08 + ((_a) * 0x20)) 554 - #define DXEPINT_SETUP_RCVD (1 << 15) 555 - #define DXEPINT_NYETINTRPT (1 << 14) 556 - #define DXEPINT_NAKINTRPT (1 << 13) 557 - #define DXEPINT_BBLEERRINTRPT (1 << 12) 558 - #define DXEPINT_PKTDRPSTS (1 << 11) 559 - #define DXEPINT_BNAINTR (1 << 9) 560 - #define DXEPINT_TXFIFOUNDRN (1 << 8) 561 - #define DXEPINT_OUTPKTERR (1 << 8) 562 - #define DXEPINT_TXFEMP (1 << 7) 563 - #define DXEPINT_INEPNAKEFF (1 << 6) 564 - #define DXEPINT_BACK2BACKSETUP (1 << 6) 565 - #define DXEPINT_INTKNEPMIS (1 << 5) 566 - #define DXEPINT_STSPHSERCVD (1 << 5) 567 - #define DXEPINT_INTKNTXFEMP (1 << 4) 568 - #define DXEPINT_OUTTKNEPDIS (1 << 4) 569 - #define DXEPINT_TIMEOUT (1 << 3) 570 - #define DXEPINT_SETUP (1 << 3) 571 - #define DXEPINT_AHBERR (1 << 2) 572 - #define DXEPINT_EPDISBLD (1 << 1) 573 - #define DXEPINT_XFERCOMPL (1 << 0) 554 + #define DXEPINT_SETUP_RCVD BIT(15) 555 + #define DXEPINT_NYETINTRPT BIT(14) 556 + #define DXEPINT_NAKINTRPT BIT(13) 557 + #define DXEPINT_BBLEERRINTRPT BIT(12) 558 + #define DXEPINT_PKTDRPSTS BIT(11) 559 + #define DXEPINT_BNAINTR BIT(9) 560 + #define DXEPINT_TXFIFOUNDRN BIT(8) 561 + #define DXEPINT_OUTPKTERR BIT(8) 562 + #define DXEPINT_TXFEMP BIT(7) 563 + #define DXEPINT_INEPNAKEFF BIT(6) 564 + #define DXEPINT_BACK2BACKSETUP BIT(6) 565 + #define DXEPINT_INTKNEPMIS BIT(5) 566 + #define DXEPINT_STSPHSERCVD BIT(5) 567 + #define DXEPINT_INTKNTXFEMP BIT(4) 568 + #define DXEPINT_OUTTKNEPDIS BIT(4) 569 + #define DXEPINT_TIMEOUT BIT(3) 570 + #define DXEPINT_SETUP BIT(3) 571 + #define DXEPINT_AHBERR BIT(2) 572 + #define DXEPINT_EPDISBLD BIT(1) 573 + #define DXEPINT_XFERCOMPL BIT(0) 574 574 575 575 #define DIEPTSIZ0 HSOTG_REG(0x910) 576 576 #define DIEPTSIZ0_PKTCNT_MASK (0x3 << 19) ··· 587 587 #define DOEPTSIZ0_SUPCNT_SHIFT 29 588 588 #define DOEPTSIZ0_SUPCNT_LIMIT 0x3 589 589 #define 
DOEPTSIZ0_SUPCNT(_x) ((_x) << 29) 590 - #define DOEPTSIZ0_PKTCNT (1 << 19) 590 + #define DOEPTSIZ0_PKTCNT BIT(19) 591 591 #define DOEPTSIZ0_XFERSIZE_MASK (0x7f << 0) 592 592 #define DOEPTSIZ0_XFERSIZE_SHIFT 0 593 593 ··· 614 614 #define DTXFSTS(_a) HSOTG_REG(0x918 + ((_a) * 0x20)) 615 615 616 616 #define PCGCTL HSOTG_REG(0x0e00) 617 - #define PCGCTL_IF_DEV_MODE (1 << 31) 617 + #define PCGCTL_IF_DEV_MODE BIT(31) 618 618 #define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29) 619 619 #define PCGCTL_P2HD_PRT_SPD_SHIFT 29 620 620 #define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27) 621 621 #define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27 622 622 #define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20) 623 623 #define PCGCTL_MAC_DEV_ADDR_SHIFT 20 624 - #define PCGCTL_MAX_TERMSEL (1 << 19) 624 + #define PCGCTL_MAX_TERMSEL BIT(19) 625 625 #define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17) 626 626 #define PCGCTL_MAX_XCVRSELECT_SHIFT 17 627 - #define PCGCTL_PORT_POWER (1 << 16) 627 + #define PCGCTL_PORT_POWER BIT(16) 628 628 #define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14) 629 629 #define PCGCTL_PRT_CLK_SEL_SHIFT 14 630 - #define PCGCTL_ESS_REG_RESTORED (1 << 13) 631 - #define PCGCTL_EXTND_HIBER_SWITCH (1 << 12) 632 - #define PCGCTL_EXTND_HIBER_PWRCLMP (1 << 11) 633 - #define PCGCTL_ENBL_EXTND_HIBER (1 << 10) 634 - #define PCGCTL_RESTOREMODE (1 << 9) 635 - #define PCGCTL_RESETAFTSUSP (1 << 8) 636 - #define PCGCTL_DEEP_SLEEP (1 << 7) 637 - #define PCGCTL_PHY_IN_SLEEP (1 << 6) 638 - #define PCGCTL_ENBL_SLEEP_GATING (1 << 5) 639 - #define PCGCTL_RSTPDWNMODULE (1 << 3) 640 - #define PCGCTL_PWRCLMP (1 << 2) 641 - #define PCGCTL_GATEHCLK (1 << 1) 642 - #define PCGCTL_STOPPCLK (1 << 0) 630 + #define PCGCTL_ESS_REG_RESTORED BIT(13) 631 + #define PCGCTL_EXTND_HIBER_SWITCH BIT(12) 632 + #define PCGCTL_EXTND_HIBER_PWRCLMP BIT(11) 633 + #define PCGCTL_ENBL_EXTND_HIBER BIT(10) 634 + #define PCGCTL_RESTOREMODE BIT(9) 635 + #define PCGCTL_RESETAFTSUSP BIT(8) 636 + #define PCGCTL_DEEP_SLEEP BIT(7) 637 + #define 
PCGCTL_PHY_IN_SLEEP BIT(6) 638 + #define PCGCTL_ENBL_SLEEP_GATING BIT(5) 639 + #define PCGCTL_RSTPDWNMODULE BIT(3) 640 + #define PCGCTL_PWRCLMP BIT(2) 641 + #define PCGCTL_GATEHCLK BIT(1) 642 + #define PCGCTL_STOPPCLK BIT(0) 643 643 644 644 #define EPFIFO(_a) HSOTG_REG(0x1000 + ((_a) * 0x1000)) 645 645 646 646 /* Host Mode Registers */ 647 647 648 648 #define HCFG HSOTG_REG(0x0400) 649 - #define HCFG_MODECHTIMEN (1 << 31) 650 - #define HCFG_PERSCHEDENA (1 << 26) 649 + #define HCFG_MODECHTIMEN BIT(31) 650 + #define HCFG_PERSCHEDENA BIT(26) 651 651 #define HCFG_FRLISTEN_MASK (0x3 << 24) 652 652 #define HCFG_FRLISTEN_SHIFT 24 653 653 #define HCFG_FRLISTEN_8 (0 << 24) 654 654 #define FRLISTEN_8_SIZE 8 655 - #define HCFG_FRLISTEN_16 (1 << 24) 655 + #define HCFG_FRLISTEN_16 BIT(24) 656 656 #define FRLISTEN_16_SIZE 16 657 657 #define HCFG_FRLISTEN_32 (2 << 24) 658 658 #define FRLISTEN_32_SIZE 32 659 659 #define HCFG_FRLISTEN_64 (3 << 24) 660 660 #define FRLISTEN_64_SIZE 64 661 - #define HCFG_DESCDMA (1 << 23) 661 + #define HCFG_DESCDMA BIT(23) 662 662 #define HCFG_RESVALID_MASK (0xff << 8) 663 663 #define HCFG_RESVALID_SHIFT 8 664 - #define HCFG_ENA32KHZ (1 << 7) 665 - #define HCFG_FSLSSUPP (1 << 2) 664 + #define HCFG_ENA32KHZ BIT(7) 665 + #define HCFG_FSLSSUPP BIT(2) 666 666 #define HCFG_FSLSPCLKSEL_MASK (0x3 << 0) 667 667 #define HCFG_FSLSPCLKSEL_SHIFT 0 668 668 #define HCFG_FSLSPCLKSEL_30_60_MHZ 0 ··· 672 672 #define HFIR HSOTG_REG(0x0404) 673 673 #define HFIR_FRINT_MASK (0xffff << 0) 674 674 #define HFIR_FRINT_SHIFT 0 675 - #define HFIR_RLDCTRL (1 << 16) 675 + #define HFIR_RLDCTRL BIT(16) 676 676 677 677 #define HFNUM HSOTG_REG(0x0408) 678 678 #define HFNUM_FRREM_MASK (0xffff << 16) ··· 682 682 #define HFNUM_MAX_FRNUM 0x3fff 683 683 684 684 #define HPTXSTS HSOTG_REG(0x0410) 685 - #define TXSTS_QTOP_ODD (1 << 31) 685 + #define TXSTS_QTOP_ODD BIT(31) 686 686 #define TXSTS_QTOP_CHNEP_MASK (0xf << 27) 687 687 #define TXSTS_QTOP_CHNEP_SHIFT 27 688 688 #define 
TXSTS_QTOP_TOKEN_MASK (0x3 << 25) 689 689 #define TXSTS_QTOP_TOKEN_SHIFT 25 690 - #define TXSTS_QTOP_TERMINATE (1 << 24) 690 + #define TXSTS_QTOP_TERMINATE BIT(24) 691 691 #define TXSTS_QSPCAVAIL_MASK (0xff << 16) 692 692 #define TXSTS_QSPCAVAIL_SHIFT 16 693 693 #define TXSTS_FSPCAVAIL_MASK (0xffff << 0) ··· 705 705 #define HPRT0_SPD_LOW_SPEED 2 706 706 #define HPRT0_TSTCTL_MASK (0xf << 13) 707 707 #define HPRT0_TSTCTL_SHIFT 13 708 - #define HPRT0_PWR (1 << 12) 708 + #define HPRT0_PWR BIT(12) 709 709 #define HPRT0_LNSTS_MASK (0x3 << 10) 710 710 #define HPRT0_LNSTS_SHIFT 10 711 - #define HPRT0_RST (1 << 8) 712 - #define HPRT0_SUSP (1 << 7) 713 - #define HPRT0_RES (1 << 6) 714 - #define HPRT0_OVRCURRCHG (1 << 5) 715 - #define HPRT0_OVRCURRACT (1 << 4) 716 - #define HPRT0_ENACHG (1 << 3) 717 - #define HPRT0_ENA (1 << 2) 718 - #define HPRT0_CONNDET (1 << 1) 719 - #define HPRT0_CONNSTS (1 << 0) 711 + #define HPRT0_RST BIT(8) 712 + #define HPRT0_SUSP BIT(7) 713 + #define HPRT0_RES BIT(6) 714 + #define HPRT0_OVRCURRCHG BIT(5) 715 + #define HPRT0_OVRCURRACT BIT(4) 716 + #define HPRT0_ENACHG BIT(3) 717 + #define HPRT0_ENA BIT(2) 718 + #define HPRT0_CONNDET BIT(1) 719 + #define HPRT0_CONNSTS BIT(0) 720 720 721 721 #define HCCHAR(_ch) HSOTG_REG(0x0500 + 0x20 * (_ch)) 722 - #define HCCHAR_CHENA (1 << 31) 723 - #define HCCHAR_CHDIS (1 << 30) 724 - #define HCCHAR_ODDFRM (1 << 29) 722 + #define HCCHAR_CHENA BIT(31) 723 + #define HCCHAR_CHDIS BIT(30) 724 + #define HCCHAR_ODDFRM BIT(29) 725 725 #define HCCHAR_DEVADDR_MASK (0x7f << 22) 726 726 #define HCCHAR_DEVADDR_SHIFT 22 727 727 #define HCCHAR_MULTICNT_MASK (0x3 << 20) 728 728 #define HCCHAR_MULTICNT_SHIFT 20 729 729 #define HCCHAR_EPTYPE_MASK (0x3 << 18) 730 730 #define HCCHAR_EPTYPE_SHIFT 18 731 - #define HCCHAR_LSPDDEV (1 << 17) 732 - #define HCCHAR_EPDIR (1 << 15) 731 + #define HCCHAR_LSPDDEV BIT(17) 732 + #define HCCHAR_EPDIR BIT(15) 733 733 #define HCCHAR_EPNUM_MASK (0xf << 11) 734 734 #define HCCHAR_EPNUM_SHIFT 11 735 735 
#define HCCHAR_MPS_MASK (0x7ff << 0) 736 736 #define HCCHAR_MPS_SHIFT 0 737 737 738 738 #define HCSPLT(_ch) HSOTG_REG(0x0504 + 0x20 * (_ch)) 739 - #define HCSPLT_SPLTENA (1 << 31) 740 - #define HCSPLT_COMPSPLT (1 << 16) 739 + #define HCSPLT_SPLTENA BIT(31) 740 + #define HCSPLT_COMPSPLT BIT(16) 741 741 #define HCSPLT_XACTPOS_MASK (0x3 << 14) 742 742 #define HCSPLT_XACTPOS_SHIFT 14 743 743 #define HCSPLT_XACTPOS_MID 0 ··· 752 752 #define HCINT(_ch) HSOTG_REG(0x0508 + 0x20 * (_ch)) 753 753 #define HCINTMSK(_ch) HSOTG_REG(0x050c + 0x20 * (_ch)) 754 754 #define HCINTMSK_RESERVED14_31 (0x3ffff << 14) 755 - #define HCINTMSK_FRM_LIST_ROLL (1 << 13) 756 - #define HCINTMSK_XCS_XACT (1 << 12) 757 - #define HCINTMSK_BNA (1 << 11) 758 - #define HCINTMSK_DATATGLERR (1 << 10) 759 - #define HCINTMSK_FRMOVRUN (1 << 9) 760 - #define HCINTMSK_BBLERR (1 << 8) 761 - #define HCINTMSK_XACTERR (1 << 7) 762 - #define HCINTMSK_NYET (1 << 6) 763 - #define HCINTMSK_ACK (1 << 5) 764 - #define HCINTMSK_NAK (1 << 4) 765 - #define HCINTMSK_STALL (1 << 3) 766 - #define HCINTMSK_AHBERR (1 << 2) 767 - #define HCINTMSK_CHHLTD (1 << 1) 768 - #define HCINTMSK_XFERCOMPL (1 << 0) 755 + #define HCINTMSK_FRM_LIST_ROLL BIT(13) 756 + #define HCINTMSK_XCS_XACT BIT(12) 757 + #define HCINTMSK_BNA BIT(11) 758 + #define HCINTMSK_DATATGLERR BIT(10) 759 + #define HCINTMSK_FRMOVRUN BIT(9) 760 + #define HCINTMSK_BBLERR BIT(8) 761 + #define HCINTMSK_XACTERR BIT(7) 762 + #define HCINTMSK_NYET BIT(6) 763 + #define HCINTMSK_ACK BIT(5) 764 + #define HCINTMSK_NAK BIT(4) 765 + #define HCINTMSK_STALL BIT(3) 766 + #define HCINTMSK_AHBERR BIT(2) 767 + #define HCINTMSK_CHHLTD BIT(1) 768 + #define HCINTMSK_XFERCOMPL BIT(0) 769 769 770 770 #define HCTSIZ(_ch) HSOTG_REG(0x0510 + 0x20 * (_ch)) 771 - #define TSIZ_DOPNG (1 << 31) 771 + #define TSIZ_DOPNG BIT(31) 772 772 #define TSIZ_SC_MC_PID_MASK (0x3 << 29) 773 773 #define TSIZ_SC_MC_PID_SHIFT 29 774 774 #define TSIZ_SC_MC_PID_DATA0 0 ··· 808 808 809 809 /* Host Mode DMA descriptor 
status quadlet */ 810 810 811 - #define HOST_DMA_A (1 << 31) 811 + #define HOST_DMA_A BIT(31) 812 812 #define HOST_DMA_STS_MASK (0x3 << 28) 813 813 #define HOST_DMA_STS_SHIFT 28 814 - #define HOST_DMA_STS_PKTERR (1 << 28) 815 - #define HOST_DMA_EOL (1 << 26) 816 - #define HOST_DMA_IOC (1 << 25) 817 - #define HOST_DMA_SUP (1 << 24) 818 - #define HOST_DMA_ALT_QTD (1 << 23) 814 + #define HOST_DMA_STS_PKTERR BIT(28) 815 + #define HOST_DMA_EOL BIT(26) 816 + #define HOST_DMA_IOC BIT(25) 817 + #define HOST_DMA_SUP BIT(24) 818 + #define HOST_DMA_ALT_QTD BIT(23) 819 819 #define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17) 820 820 #define HOST_DMA_QTD_OFFSET_SHIFT 17 821 821 #define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0) ··· 837 837 #define DEV_DMA_STS_SUCC 0 838 838 #define DEV_DMA_STS_BUFF_FLUSH 1 839 839 #define DEV_DMA_STS_BUFF_ERR 3 840 - #define DEV_DMA_L (1 << 27) 841 - #define DEV_DMA_SHORT (1 << 26) 842 - #define DEV_DMA_IOC (1 << 25) 843 - #define DEV_DMA_SR (1 << 24) 844 - #define DEV_DMA_MTRF (1 << 23) 840 + #define DEV_DMA_L BIT(27) 841 + #define DEV_DMA_SHORT BIT(26) 842 + #define DEV_DMA_IOC BIT(25) 843 + #define DEV_DMA_SR BIT(24) 844 + #define DEV_DMA_MTRF BIT(23) 845 845 #define DEV_DMA_ISOC_PID_MASK (0x3 << 23) 846 846 #define DEV_DMA_ISOC_PID_SHIFT 23 847 847 #define DEV_DMA_ISOC_PID_DATA0 0
+388 -1104
drivers/usb/dwc2/params.c
··· 38 38 39 39 #include "core.h" 40 40 41 - static const struct dwc2_core_params params_hi6220 = { 42 - .otg_cap = 2, /* No HNP/SRP capable */ 43 - .otg_ver = 0, /* 1.3 */ 44 - .dma_desc_enable = 0, 45 - .dma_desc_fs_enable = 0, 46 - .speed = 0, /* High Speed */ 47 - .enable_dynamic_fifo = 1, 48 - .en_multiple_tx_fifo = 1, 49 - .host_rx_fifo_size = 512, 50 - .host_nperio_tx_fifo_size = 512, 51 - .host_perio_tx_fifo_size = 512, 52 - .max_transfer_size = 65535, 53 - .max_packet_count = 511, 54 - .host_channels = 16, 55 - .phy_type = 1, /* UTMI */ 56 - .phy_utmi_width = 8, 57 - .phy_ulpi_ddr = 0, /* Single */ 58 - .phy_ulpi_ext_vbus = 0, 59 - .i2c_enable = 0, 60 - .ulpi_fs_ls = 0, 61 - .host_support_fs_ls_low_power = 0, 62 - .host_ls_low_power_phy_clk = 0, /* 48 MHz */ 63 - .ts_dline = 0, 64 - .reload_ctl = 0, 65 - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 66 - GAHBCFG_HBSTLEN_SHIFT, 67 - .uframe_sched = 0, 68 - .external_id_pin_ctl = -1, 69 - .hibernation = -1, 70 - }; 41 + static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg) 42 + { 43 + struct dwc2_core_params *p = &hsotg->params; 71 44 72 - static const struct dwc2_core_params params_bcm2835 = { 73 - .otg_cap = 0, /* HNP/SRP capable */ 74 - .otg_ver = 0, /* 1.3 */ 75 - .dma_desc_enable = 0, 76 - .dma_desc_fs_enable = 0, 77 - .speed = 0, /* High Speed */ 78 - .enable_dynamic_fifo = 1, 79 - .en_multiple_tx_fifo = 1, 80 - .host_rx_fifo_size = 774, /* 774 DWORDs */ 81 - .host_nperio_tx_fifo_size = 256, /* 256 DWORDs */ 82 - .host_perio_tx_fifo_size = 512, /* 512 DWORDs */ 83 - .max_transfer_size = 65535, 84 - .max_packet_count = 511, 85 - .host_channels = 8, 86 - .phy_type = 1, /* UTMI */ 87 - .phy_utmi_width = 8, /* 8 bits */ 88 - .phy_ulpi_ddr = 0, /* Single */ 89 - .phy_ulpi_ext_vbus = 0, 90 - .i2c_enable = 0, 91 - .ulpi_fs_ls = 0, 92 - .host_support_fs_ls_low_power = 0, 93 - .host_ls_low_power_phy_clk = 0, /* 48 MHz */ 94 - .ts_dline = 0, 95 - .reload_ctl = 0, 96 - .ahbcfg = 0x10, 97 - .uframe_sched = 0, 98 - 
.external_id_pin_ctl = -1, 99 - .hibernation = -1, 100 - }; 45 + p->host_rx_fifo_size = 774; 46 + p->max_transfer_size = 65535; 47 + p->max_packet_count = 511; 48 + p->ahbcfg = 0x10; 49 + p->uframe_sched = false; 50 + } 101 51 102 - static const struct dwc2_core_params params_rk3066 = { 103 - .otg_cap = 2, /* non-HNP/non-SRP */ 104 - .otg_ver = -1, 105 - .dma_desc_enable = 0, 106 - .dma_desc_fs_enable = 0, 107 - .speed = -1, 108 - .enable_dynamic_fifo = 1, 109 - .en_multiple_tx_fifo = -1, 110 - .host_rx_fifo_size = 525, /* 525 DWORDs */ 111 - .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ 112 - .host_perio_tx_fifo_size = 256, /* 256 DWORDs */ 113 - .max_transfer_size = -1, 114 - .max_packet_count = -1, 115 - .host_channels = -1, 116 - .phy_type = -1, 117 - .phy_utmi_width = -1, 118 - .phy_ulpi_ddr = -1, 119 - .phy_ulpi_ext_vbus = -1, 120 - .i2c_enable = -1, 121 - .ulpi_fs_ls = -1, 122 - .host_support_fs_ls_low_power = -1, 123 - .host_ls_low_power_phy_clk = -1, 124 - .ts_dline = -1, 125 - .reload_ctl = -1, 126 - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 127 - GAHBCFG_HBSTLEN_SHIFT, 128 - .uframe_sched = -1, 129 - .external_id_pin_ctl = -1, 130 - .hibernation = -1, 131 - }; 52 + static void dwc2_set_his_params(struct dwc2_hsotg *hsotg) 53 + { 54 + struct dwc2_core_params *p = &hsotg->params; 132 55 133 - static const struct dwc2_core_params params_ltq = { 134 - .otg_cap = 2, /* non-HNP/non-SRP */ 135 - .otg_ver = -1, 136 - .dma_desc_enable = -1, 137 - .dma_desc_fs_enable = -1, 138 - .speed = -1, 139 - .enable_dynamic_fifo = -1, 140 - .en_multiple_tx_fifo = -1, 141 - .host_rx_fifo_size = 288, /* 288 DWORDs */ 142 - .host_nperio_tx_fifo_size = 128, /* 128 DWORDs */ 143 - .host_perio_tx_fifo_size = 96, /* 96 DWORDs */ 144 - .max_transfer_size = 65535, 145 - .max_packet_count = 511, 146 - .host_channels = -1, 147 - .phy_type = -1, 148 - .phy_utmi_width = -1, 149 - .phy_ulpi_ddr = -1, 150 - .phy_ulpi_ext_vbus = -1, 151 - .i2c_enable = -1, 152 - .ulpi_fs_ls = -1, 153 - 
.host_support_fs_ls_low_power = -1, 154 - .host_ls_low_power_phy_clk = -1, 155 - .ts_dline = -1, 156 - .reload_ctl = -1, 157 - .ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 158 - GAHBCFG_HBSTLEN_SHIFT, 159 - .uframe_sched = -1, 160 - .external_id_pin_ctl = -1, 161 - .hibernation = -1, 162 - }; 56 + p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 57 + p->speed = DWC2_SPEED_PARAM_HIGH; 58 + p->host_rx_fifo_size = 512; 59 + p->host_nperio_tx_fifo_size = 512; 60 + p->host_perio_tx_fifo_size = 512; 61 + p->max_transfer_size = 65535; 62 + p->max_packet_count = 511; 63 + p->host_channels = 16; 64 + p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI; 65 + p->phy_utmi_width = 8; 66 + p->i2c_enable = false; 67 + p->reload_ctl = false; 68 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 69 + GAHBCFG_HBSTLEN_SHIFT; 70 + p->uframe_sched = false; 71 + p->change_speed_quirk = true; 72 + } 163 73 164 - static const struct dwc2_core_params params_amlogic = { 165 - .otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE, 166 - .otg_ver = -1, 167 - .dma_desc_enable = 0, 168 - .dma_desc_fs_enable = 0, 169 - .speed = DWC2_SPEED_PARAM_HIGH, 170 - .enable_dynamic_fifo = 1, 171 - .en_multiple_tx_fifo = -1, 172 - .host_rx_fifo_size = 512, 173 - .host_nperio_tx_fifo_size = 500, 174 - .host_perio_tx_fifo_size = 500, 175 - .max_transfer_size = -1, 176 - .max_packet_count = -1, 177 - .host_channels = 16, 178 - .phy_type = DWC2_PHY_TYPE_PARAM_UTMI, 179 - .phy_utmi_width = -1, 180 - .phy_ulpi_ddr = -1, 181 - .phy_ulpi_ext_vbus = -1, 182 - .i2c_enable = -1, 183 - .ulpi_fs_ls = -1, 184 - .host_support_fs_ls_low_power = -1, 185 - .host_ls_low_power_phy_clk = -1, 186 - .ts_dline = -1, 187 - .reload_ctl = 1, 188 - .ahbcfg = GAHBCFG_HBSTLEN_INCR8 << 189 - GAHBCFG_HBSTLEN_SHIFT, 190 - .uframe_sched = 0, 191 - .external_id_pin_ctl = -1, 192 - .hibernation = -1, 193 - }; 74 + static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg) 75 + { 76 + struct dwc2_core_params *p = &hsotg->params; 194 77 195 - static const struct dwc2_core_params 
params_default = { 196 - .otg_cap = -1, 197 - .otg_ver = -1, 78 + p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 79 + p->host_rx_fifo_size = 525; 80 + p->host_nperio_tx_fifo_size = 128; 81 + p->host_perio_tx_fifo_size = 256; 82 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 83 + GAHBCFG_HBSTLEN_SHIFT; 84 + } 198 85 199 - /* 200 - * Disable descriptor dma mode by default as the HW can support 201 - * it, but does not support it for SPLIT transactions. 202 - * Disable it for FS devices as well. 203 - */ 204 - .dma_desc_enable = 0, 205 - .dma_desc_fs_enable = 0, 86 + static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg) 87 + { 88 + struct dwc2_core_params *p = &hsotg->params; 206 89 207 - .speed = -1, 208 - .enable_dynamic_fifo = -1, 209 - .en_multiple_tx_fifo = -1, 210 - .host_rx_fifo_size = -1, 211 - .host_nperio_tx_fifo_size = -1, 212 - .host_perio_tx_fifo_size = -1, 213 - .max_transfer_size = -1, 214 - .max_packet_count = -1, 215 - .host_channels = -1, 216 - .phy_type = -1, 217 - .phy_utmi_width = -1, 218 - .phy_ulpi_ddr = -1, 219 - .phy_ulpi_ext_vbus = -1, 220 - .i2c_enable = -1, 221 - .ulpi_fs_ls = -1, 222 - .host_support_fs_ls_low_power = -1, 223 - .host_ls_low_power_phy_clk = -1, 224 - .ts_dline = -1, 225 - .reload_ctl = -1, 226 - .ahbcfg = -1, 227 - .uframe_sched = -1, 228 - .external_id_pin_ctl = -1, 229 - .hibernation = -1, 230 - }; 90 + p->otg_cap = 2; 91 + p->host_rx_fifo_size = 288; 92 + p->host_nperio_tx_fifo_size = 128; 93 + p->host_perio_tx_fifo_size = 96; 94 + p->max_transfer_size = 65535; 95 + p->max_packet_count = 511; 96 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << 97 + GAHBCFG_HBSTLEN_SHIFT; 98 + } 99 + 100 + static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg) 101 + { 102 + struct dwc2_core_params *p = &hsotg->params; 103 + 104 + p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 105 + p->speed = DWC2_SPEED_PARAM_HIGH; 106 + p->host_rx_fifo_size = 512; 107 + p->host_nperio_tx_fifo_size = 500; 108 + p->host_perio_tx_fifo_size = 500; 109 + 
p->host_channels = 16; 110 + p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI; 111 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 << 112 + GAHBCFG_HBSTLEN_SHIFT; 113 + p->uframe_sched = false; 114 + } 115 + 116 + static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg) 117 + { 118 + struct dwc2_core_params *p = &hsotg->params; 119 + 120 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT; 121 + } 231 122 232 123 const struct of_device_id dwc2_of_match_table[] = { 233 - { .compatible = "brcm,bcm2835-usb", .data = &params_bcm2835 }, 234 - { .compatible = "hisilicon,hi6220-usb", .data = &params_hi6220 }, 235 - { .compatible = "rockchip,rk3066-usb", .data = &params_rk3066 }, 236 - { .compatible = "lantiq,arx100-usb", .data = &params_ltq }, 237 - { .compatible = "lantiq,xrx200-usb", .data = &params_ltq }, 238 - { .compatible = "snps,dwc2", .data = NULL }, 239 - { .compatible = "samsung,s3c6400-hsotg", .data = NULL}, 240 - { .compatible = "amlogic,meson8b-usb", .data = &params_amlogic }, 241 - { .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic }, 242 - { .compatible = "amcc,dwc-otg", .data = NULL }, 124 + { .compatible = "brcm,bcm2835-usb", .data = dwc2_set_bcm_params }, 125 + { .compatible = "hisilicon,hi6220-usb", .data = dwc2_set_his_params }, 126 + { .compatible = "rockchip,rk3066-usb", .data = dwc2_set_rk_params }, 127 + { .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params }, 128 + { .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params }, 129 + { .compatible = "snps,dwc2" }, 130 + { .compatible = "samsung,s3c6400-hsotg" }, 131 + { .compatible = "amlogic,meson8b-usb", 132 + .data = dwc2_set_amlogic_params }, 133 + { .compatible = "amlogic,meson-gxbb-usb", 134 + .data = dwc2_set_amlogic_params }, 135 + { .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params }, 243 136 {}, 244 137 }; 245 138 MODULE_DEVICE_TABLE(of, dwc2_of_match_table); 246 139 247 - static void dwc2_get_device_property(struct dwc2_hsotg *hsotg, 248 - char *property, u8 
size, u64 *value) 140 + static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg) 249 141 { 250 - u32 val32; 142 + u8 val; 251 143 252 - switch (size) { 253 - case 0: 254 - *value = device_property_read_bool(hsotg->dev, property); 144 + switch (hsotg->hw_params.op_mode) { 145 + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 146 + val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 255 147 break; 256 - case 1: 257 - case 2: 258 - case 4: 259 - if (device_property_read_u32(hsotg->dev, property, &val32)) 260 - return; 261 - 262 - *value = val32; 263 - break; 264 - case 8: 265 - if (device_property_read_u64(hsotg->dev, property, value)) 266 - return; 267 - 148 + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 149 + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 150 + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 151 + val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 268 152 break; 269 153 default: 270 - /* 271 - * The size is checked by the only function that calls 272 - * this so this should never happen. 273 - */ 274 - WARN_ON(1); 275 - return; 154 + val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 155 + break; 276 156 } 157 + 158 + hsotg->params.otg_cap = val; 277 159 } 278 160 279 - static void dwc2_set_core_param(void *param, u8 size, u64 value) 161 + static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg) 280 162 { 281 - switch (size) { 282 - case 0: 283 - *((bool *)param) = !!value; 284 - break; 285 - case 1: 286 - *((u8 *)param) = (u8)value; 287 - break; 288 - case 2: 289 - *((u16 *)param) = (u16)value; 290 - break; 291 - case 4: 292 - *((u32 *)param) = (u32)value; 293 - break; 294 - case 8: 295 - *((u64 *)param) = (u64)value; 296 - break; 297 - default: 163 + int val; 164 + u32 hs_phy_type = hsotg->hw_params.hs_phy_type; 165 + 166 + val = DWC2_PHY_TYPE_PARAM_FS; 167 + if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { 168 + if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 169 + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) 170 + val = DWC2_PHY_TYPE_PARAM_UTMI; 171 + else 172 + val = DWC2_PHY_TYPE_PARAM_ULPI; 
173 + } 174 + 175 + if (dwc2_is_fs_iot(hsotg)) 176 + hsotg->params.phy_type = DWC2_PHY_TYPE_PARAM_FS; 177 + 178 + hsotg->params.phy_type = val; 179 + } 180 + 181 + static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg) 182 + { 183 + int val; 184 + 185 + val = hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS ? 186 + DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; 187 + 188 + if (dwc2_is_fs_iot(hsotg)) 189 + val = DWC2_SPEED_PARAM_FULL; 190 + 191 + if (dwc2_is_hs_iot(hsotg)) 192 + val = DWC2_SPEED_PARAM_HIGH; 193 + 194 + hsotg->params.speed = val; 195 + } 196 + 197 + static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg) 198 + { 199 + int val; 200 + 201 + val = (hsotg->hw_params.utmi_phy_data_width == 202 + GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16; 203 + 204 + hsotg->params.phy_utmi_width = val; 205 + } 206 + 207 + static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg) 208 + { 209 + struct dwc2_core_params *p = &hsotg->params; 210 + int depth_average; 211 + int fifo_count; 212 + int i; 213 + 214 + fifo_count = dwc2_hsotg_tx_fifo_count(hsotg); 215 + 216 + memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size)); 217 + depth_average = dwc2_hsotg_tx_fifo_average_depth(hsotg); 218 + for (i = 1; i <= fifo_count; i++) 219 + p->g_tx_fifo_size[i] = depth_average; 220 + } 221 + 222 + /** 223 + * dwc2_set_default_params() - Set all core parameters to their 224 + * auto-detected default values. 
225 + */ 226 + static void dwc2_set_default_params(struct dwc2_hsotg *hsotg) 227 + { 228 + struct dwc2_hw_params *hw = &hsotg->hw_params; 229 + struct dwc2_core_params *p = &hsotg->params; 230 + bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); 231 + 232 + dwc2_set_param_otg_cap(hsotg); 233 + dwc2_set_param_phy_type(hsotg); 234 + dwc2_set_param_speed(hsotg); 235 + dwc2_set_param_phy_utmi_width(hsotg); 236 + p->phy_ulpi_ddr = false; 237 + p->phy_ulpi_ext_vbus = false; 238 + 239 + p->enable_dynamic_fifo = hw->enable_dynamic_fifo; 240 + p->en_multiple_tx_fifo = hw->en_multiple_tx_fifo; 241 + p->i2c_enable = hw->i2c_enable; 242 + p->ulpi_fs_ls = false; 243 + p->ts_dline = false; 244 + p->reload_ctl = (hw->snpsid >= DWC2_CORE_REV_2_92a); 245 + p->uframe_sched = true; 246 + p->external_id_pin_ctl = false; 247 + p->hibernation = false; 248 + p->max_packet_count = hw->max_packet_count; 249 + p->max_transfer_size = hw->max_transfer_size; 250 + p->ahbcfg = GAHBCFG_HBSTLEN_INCR4 << GAHBCFG_HBSTLEN_SHIFT; 251 + 252 + if ((hsotg->dr_mode == USB_DR_MODE_HOST) || 253 + (hsotg->dr_mode == USB_DR_MODE_OTG)) { 254 + p->host_dma = dma_capable; 255 + p->dma_desc_enable = false; 256 + p->dma_desc_fs_enable = false; 257 + p->host_support_fs_ls_low_power = false; 258 + p->host_ls_low_power_phy_clk = false; 259 + p->host_channels = hw->host_channels; 260 + p->host_rx_fifo_size = hw->rx_fifo_size; 261 + p->host_nperio_tx_fifo_size = hw->host_nperio_tx_fifo_size; 262 + p->host_perio_tx_fifo_size = hw->host_perio_tx_fifo_size; 263 + } 264 + 265 + if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) || 266 + (hsotg->dr_mode == USB_DR_MODE_OTG)) { 267 + p->g_dma = dma_capable; 268 + p->g_dma_desc = hw->dma_desc_enable; 269 + 298 270 /* 299 - * The size is checked by the only function that calls 300 - * this so this should never happen. 271 + * The values for g_rx_fifo_size (2048) and 272 + * g_np_tx_fifo_size (1024) come from the legacy s3c 273 + * gadget driver. 
These defaults have been hard-coded 274 + * for some time so many platforms depend on these 275 + * values. Leave them as defaults for now and only 276 + * auto-detect if the hardware does not support the 277 + * default. 301 278 */ 302 - WARN_ON(1); 303 - return; 279 + p->g_rx_fifo_size = 2048; 280 + p->g_np_tx_fifo_size = 1024; 281 + dwc2_set_param_tx_fifo_sizes(hsotg); 304 282 } 305 283 } 306 284 307 285 /** 308 - * dwc2_set_param() - Set a core parameter 286 + * dwc2_get_device_properties() - Read in device properties. 309 287 * 310 - * @hsotg: Programming view of the DWC_otg controller 311 - * @param: Pointer to the parameter to set 312 - * @lookup: True if the property should be looked up 313 - * @property: The device property to read 314 - * @legacy: The param value to set if @property is not available. This 315 - * will typically be the legacy value set in the static 316 - * params structure. 317 - * @def: The default value 318 - * @min: The minimum value 319 - * @max: The maximum value 320 - * @size: The size of the core parameter in bytes, or 0 for bool. 321 - * 322 - * This function looks up @property and sets the @param to that value. 323 - * If the property doesn't exist it uses the passed-in @value. It will 324 - * verify that the value falls between @min and @max. If it doesn't, 325 - * it will output an error and set the parameter to either @def or, 326 - * failing that, to @min. 327 - * 328 - * The @size is used to write to @param and to query the device 329 - * properties so that this same function can be used with different 330 - * types of parameters. 288 + * Read in the device properties and adjust core parameters if needed. 
331 289 */ 332 - static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param, 333 - bool lookup, char *property, u64 legacy, 334 - u64 def, u64 min, u64 max, u8 size) 290 + static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg) 335 291 { 336 - u64 sizemax; 337 - u64 value; 292 + struct dwc2_core_params *p = &hsotg->params; 293 + int num; 338 294 339 - if (WARN_ON(!hsotg || !param || !property)) 340 - return; 295 + if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) || 296 + (hsotg->dr_mode == USB_DR_MODE_OTG)) { 297 + device_property_read_u32(hsotg->dev, "g-rx-fifo-size", 298 + &p->g_rx_fifo_size); 341 299 342 - if (WARN((size > 8) || ((size & (size - 1)) != 0), 343 - "Invalid size %d for %s\n", size, property)) 344 - return; 300 + device_property_read_u32(hsotg->dev, "g-np-tx-fifo-size", 301 + &p->g_np_tx_fifo_size); 345 302 346 - dev_vdbg(hsotg->dev, "%s: Setting %s: legacy=%llu, def=%llu, min=%llu, max=%llu, size=%d\n", 347 - __func__, property, legacy, def, min, max, size); 303 + num = device_property_read_u32_array(hsotg->dev, 304 + "g-tx-fifo-size", 305 + NULL, 0); 348 306 349 - sizemax = (1ULL << (size * 8)) - 1; 350 - value = legacy; 351 - 352 - /* Override legacy settings. */ 353 - if (lookup) 354 - dwc2_get_device_property(hsotg, property, size, &value); 355 - 356 - /* 357 - * While the value is not valid, try setting it to the default 358 - * value, and failing that, set it to the minimum. 359 - */ 360 - while ((value < min) || (value > max)) { 361 - /* Print an error unless the value is set to auto. */ 362 - if (value != sizemax) 363 - dev_err(hsotg->dev, "Invalid value %llu for param %s\n", 364 - value, property); 365 - 366 - /* 367 - * If we are already the default, just set it to the 368 - * minimum. 
369 - */ 370 - if (value == def) { 371 - dev_vdbg(hsotg->dev, "%s: setting value to min=%llu\n", 372 - __func__, min); 373 - value = min; 374 - break; 307 + if (num > 0) { 308 + num = min(num, 15); 309 + memset(p->g_tx_fifo_size, 0, 310 + sizeof(p->g_tx_fifo_size)); 311 + device_property_read_u32_array(hsotg->dev, 312 + "g-tx-fifo-size", 313 + &p->g_tx_fifo_size[1], 314 + num); 375 315 } 376 - 377 - /* Try the default value */ 378 - dev_vdbg(hsotg->dev, "%s: setting value to default=%llu\n", 379 - __func__, def); 380 - value = def; 381 316 } 382 - 383 - dev_dbg(hsotg->dev, "Setting %s to %llu\n", property, value); 384 - dwc2_set_core_param(param, size, value); 385 317 } 386 318 387 - /** 388 - * dwc2_set_param_u32() - Set a u32 parameter 389 - * 390 - * See dwc2_set_param(). 391 - */ 392 - static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param, 393 - bool lookup, char *property, u16 legacy, 394 - u16 def, u16 min, u16 max) 395 - { 396 - dwc2_set_param(hsotg, param, lookup, property, 397 - legacy, def, min, max, 4); 398 - } 399 - 400 - /** 401 - * dwc2_set_param_bool() - Set a bool parameter 402 - * 403 - * See dwc2_set_param(). 404 - * 405 - * Note: there is no 'legacy' argument here because there is no legacy 406 - * source of bool params. 
407 - */ 408 - static void dwc2_set_param_bool(struct dwc2_hsotg *hsotg, bool *param, 409 - bool lookup, char *property, 410 - bool def, bool min, bool max) 411 - { 412 - dwc2_set_param(hsotg, param, lookup, property, 413 - def, def, min, max, 0); 414 - } 415 - 416 - #define DWC2_OUT_OF_BOUNDS(a, b, c) ((a) < (b) || (a) > (c)) 417 - 418 - /* Parameter access functions */ 419 - static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val) 319 + static void dwc2_check_param_otg_cap(struct dwc2_hsotg *hsotg) 420 320 { 421 321 int valid = 1; 422 322 423 - switch (val) { 323 + switch (hsotg->params.otg_cap) { 424 324 case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: 425 325 if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) 426 326 valid = 0; ··· 345 445 break; 346 446 } 347 447 348 - if (!valid) { 349 - if (val >= 0) 350 - dev_err(hsotg->dev, 351 - "%d invalid for otg_cap parameter. Check HW configuration.\n", 352 - val); 353 - switch (hsotg->hw_params.op_mode) { 354 - case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: 355 - val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE; 356 - break; 357 - case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: 358 - case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: 359 - case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: 360 - val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE; 361 - break; 362 - default: 363 - val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; 364 - break; 365 - } 366 - dev_dbg(hsotg->dev, "Setting otg_cap to %d\n", val); 367 - } 368 - 369 - hsotg->params.otg_cap = val; 448 + if (!valid) 449 + dwc2_set_param_otg_cap(hsotg); 370 450 } 371 451 372 - static void dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val) 373 - { 374 - int valid = 1; 375 - 376 - if (val > 0 && (hsotg->params.host_dma <= 0 || 377 - !hsotg->hw_params.dma_desc_enable)) 378 - valid = 0; 379 - if (val < 0) 380 - valid = 0; 381 - 382 - if (!valid) { 383 - if (val >= 0) 384 - dev_err(hsotg->dev, 385 - "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", 386 - val); 387 - val = (hsotg->params.host_dma > 0 && 388 - hsotg->hw_params.dma_desc_enable); 389 - dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val); 390 - } 391 - 392 - hsotg->params.dma_desc_enable = val; 393 - } 394 - 395 - static void dwc2_set_param_dma_desc_fs_enable(struct dwc2_hsotg *hsotg, int val) 396 - { 397 - int valid = 1; 398 - 399 - if (val > 0 && (hsotg->params.host_dma <= 0 || 400 - !hsotg->hw_params.dma_desc_enable)) 401 - valid = 0; 402 - if (val < 0) 403 - valid = 0; 404 - 405 - if (!valid) { 406 - if (val >= 0) 407 - dev_err(hsotg->dev, 408 - "%d invalid for dma_desc_fs_enable parameter. Check HW configuration.\n", 409 - val); 410 - val = (hsotg->params.host_dma > 0 && 411 - hsotg->hw_params.dma_desc_enable); 412 - } 413 - 414 - hsotg->params.dma_desc_fs_enable = val; 415 - dev_dbg(hsotg->dev, "Setting dma_desc_fs_enable to %d\n", val); 416 - } 417 - 418 - static void 419 - dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hsotg *hsotg, 420 - int val) 421 - { 422 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 423 - if (val >= 0) { 424 - dev_err(hsotg->dev, 425 - "Wrong value for host_support_fs_low_power\n"); 426 - dev_err(hsotg->dev, 427 - "host_support_fs_low_power must be 0 or 1\n"); 428 - } 429 - val = 0; 430 - dev_dbg(hsotg->dev, 431 - "Setting host_support_fs_low_power to %d\n", val); 432 - } 433 - 434 - hsotg->params.host_support_fs_ls_low_power = val; 435 - } 436 - 437 - static void dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, 438 - int val) 439 - { 440 - int valid = 1; 441 - 442 - if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo) 443 - valid = 0; 444 - if (val < 0) 445 - valid = 0; 446 - 447 - if (!valid) { 448 - if (val >= 0) 449 - dev_err(hsotg->dev, 450 - "%d invalid for enable_dynamic_fifo parameter. 
Check HW configuration.\n", 451 - val); 452 - val = hsotg->hw_params.enable_dynamic_fifo; 453 - dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val); 454 - } 455 - 456 - hsotg->params.enable_dynamic_fifo = val; 457 - } 458 - 459 - static void dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val) 460 - { 461 - int valid = 1; 462 - 463 - if (val < 16 || val > hsotg->hw_params.rx_fifo_size) 464 - valid = 0; 465 - 466 - if (!valid) { 467 - if (val >= 0) 468 - dev_err(hsotg->dev, 469 - "%d invalid for host_rx_fifo_size. Check HW configuration.\n", 470 - val); 471 - val = hsotg->hw_params.rx_fifo_size; 472 - dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val); 473 - } 474 - 475 - hsotg->params.host_rx_fifo_size = val; 476 - } 477 - 478 - static void dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, 479 - int val) 480 - { 481 - int valid = 1; 482 - 483 - if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size) 484 - valid = 0; 485 - 486 - if (!valid) { 487 - if (val >= 0) 488 - dev_err(hsotg->dev, 489 - "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", 490 - val); 491 - val = hsotg->hw_params.host_nperio_tx_fifo_size; 492 - dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n", 493 - val); 494 - } 495 - 496 - hsotg->params.host_nperio_tx_fifo_size = val; 497 - } 498 - 499 - static void dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, 500 - int val) 501 - { 502 - int valid = 1; 503 - 504 - if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size) 505 - valid = 0; 506 - 507 - if (!valid) { 508 - if (val >= 0) 509 - dev_err(hsotg->dev, 510 - "%d invalid for host_perio_tx_fifo_size. 
Check HW configuration.\n", 511 - val); 512 - val = hsotg->hw_params.host_perio_tx_fifo_size; 513 - dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n", 514 - val); 515 - } 516 - 517 - hsotg->params.host_perio_tx_fifo_size = val; 518 - } 519 - 520 - static void dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val) 521 - { 522 - int valid = 1; 523 - 524 - if (val < 2047 || val > hsotg->hw_params.max_transfer_size) 525 - valid = 0; 526 - 527 - if (!valid) { 528 - if (val >= 0) 529 - dev_err(hsotg->dev, 530 - "%d invalid for max_transfer_size. Check HW configuration.\n", 531 - val); 532 - val = hsotg->hw_params.max_transfer_size; 533 - dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val); 534 - } 535 - 536 - hsotg->params.max_transfer_size = val; 537 - } 538 - 539 - static void dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val) 540 - { 541 - int valid = 1; 542 - 543 - if (val < 15 || val > hsotg->hw_params.max_packet_count) 544 - valid = 0; 545 - 546 - if (!valid) { 547 - if (val >= 0) 548 - dev_err(hsotg->dev, 549 - "%d invalid for max_packet_count. Check HW configuration.\n", 550 - val); 551 - val = hsotg->hw_params.max_packet_count; 552 - dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val); 553 - } 554 - 555 - hsotg->params.max_packet_count = val; 556 - } 557 - 558 - static void dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val) 559 - { 560 - int valid = 1; 561 - 562 - if (val < 1 || val > hsotg->hw_params.host_channels) 563 - valid = 0; 564 - 565 - if (!valid) { 566 - if (val >= 0) 567 - dev_err(hsotg->dev, 568 - "%d invalid for host_channels. 
Check HW configuration.\n", 569 - val); 570 - val = hsotg->hw_params.host_channels; 571 - dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val); 572 - } 573 - 574 - hsotg->params.host_channels = val; 575 - } 576 - 577 - static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val) 452 + static void dwc2_check_param_phy_type(struct dwc2_hsotg *hsotg) 578 453 { 579 454 int valid = 0; 580 - u32 hs_phy_type, fs_phy_type; 581 - 582 - if (DWC2_OUT_OF_BOUNDS(val, DWC2_PHY_TYPE_PARAM_FS, 583 - DWC2_PHY_TYPE_PARAM_ULPI)) { 584 - if (val >= 0) { 585 - dev_err(hsotg->dev, "Wrong value for phy_type\n"); 586 - dev_err(hsotg->dev, "phy_type must be 0, 1 or 2\n"); 587 - } 588 - 589 - valid = 0; 590 - } 455 + u32 hs_phy_type; 456 + u32 fs_phy_type; 591 457 592 458 hs_phy_type = hsotg->hw_params.hs_phy_type; 593 459 fs_phy_type = hsotg->hw_params.fs_phy_type; 594 - if (val == DWC2_PHY_TYPE_PARAM_UTMI && 595 - (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 596 - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 597 - valid = 1; 598 - else if (val == DWC2_PHY_TYPE_PARAM_ULPI && 599 - (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || 600 - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 601 - valid = 1; 602 - else if (val == DWC2_PHY_TYPE_PARAM_FS && 603 - fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 604 - valid = 1; 605 460 606 - if (!valid) { 607 - if (val >= 0) 608 - dev_err(hsotg->dev, 609 - "%d invalid for phy_type. 
Check HW configuration.\n", 610 - val); 611 - val = DWC2_PHY_TYPE_PARAM_FS; 612 - if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { 613 - if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || 614 - hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) 615 - val = DWC2_PHY_TYPE_PARAM_UTMI; 616 - else 617 - val = DWC2_PHY_TYPE_PARAM_ULPI; 618 - } 619 - dev_dbg(hsotg->dev, "Setting phy_type to %d\n", val); 461 + switch (hsotg->params.phy_type) { 462 + case DWC2_PHY_TYPE_PARAM_FS: 463 + if (fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) 464 + valid = 1; 465 + break; 466 + case DWC2_PHY_TYPE_PARAM_UTMI: 467 + if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) || 468 + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 469 + valid = 1; 470 + break; 471 + case DWC2_PHY_TYPE_PARAM_ULPI: 472 + if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI) || 473 + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) 474 + valid = 1; 475 + break; 476 + default: 477 + break; 620 478 } 621 479 622 - hsotg->params.phy_type = val; 480 + if (!valid) 481 + dwc2_set_param_phy_type(hsotg); 623 482 } 624 483 625 - static int dwc2_get_param_phy_type(struct dwc2_hsotg *hsotg) 626 - { 627 - return hsotg->params.phy_type; 628 - } 629 - 630 - static void dwc2_set_param_speed(struct dwc2_hsotg *hsotg, int val) 484 + static void dwc2_check_param_speed(struct dwc2_hsotg *hsotg) 631 485 { 632 486 int valid = 1; 487 + int phy_type = hsotg->params.phy_type; 488 + int speed = hsotg->params.speed; 633 489 634 - if (DWC2_OUT_OF_BOUNDS(val, 0, 2)) { 635 - if (val >= 0) { 636 - dev_err(hsotg->dev, "Wrong value for speed parameter\n"); 637 - dev_err(hsotg->dev, "max_speed parameter must be 0, 1, or 2\n"); 638 - } 490 + switch (speed) { 491 + case DWC2_SPEED_PARAM_HIGH: 492 + if ((hsotg->params.speed == DWC2_SPEED_PARAM_HIGH) && 493 + (phy_type == DWC2_PHY_TYPE_PARAM_FS)) 494 + valid = 0; 495 + break; 496 + case DWC2_SPEED_PARAM_FULL: 497 + case DWC2_SPEED_PARAM_LOW: 498 + break; 499 + default: 639 500 valid = 0; 501 + break; 640 502 } 641 
503 642 - if (dwc2_is_hs_iot(hsotg) && 643 - val == DWC2_SPEED_PARAM_LOW) 644 - valid = 0; 645 - 646 - if (val == DWC2_SPEED_PARAM_HIGH && 647 - dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 648 - valid = 0; 649 - 650 - if (!valid) { 651 - if (val >= 0) 652 - dev_err(hsotg->dev, 653 - "%d invalid for speed parameter. Check HW configuration.\n", 654 - val); 655 - val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS ? 656 - DWC2_SPEED_PARAM_FULL : DWC2_SPEED_PARAM_HIGH; 657 - dev_dbg(hsotg->dev, "Setting speed to %d\n", val); 658 - } 659 - 660 - hsotg->params.speed = val; 504 + if (!valid) 505 + dwc2_set_param_speed(hsotg); 661 506 } 662 507 663 - static void dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hsotg *hsotg, 664 - int val) 665 - { 666 - int valid = 1; 667 - 668 - if (DWC2_OUT_OF_BOUNDS(val, DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ, 669 - DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) { 670 - if (val >= 0) { 671 - dev_err(hsotg->dev, 672 - "Wrong value for host_ls_low_power_phy_clk parameter\n"); 673 - dev_err(hsotg->dev, 674 - "host_ls_low_power_phy_clk must be 0 or 1\n"); 675 - } 676 - valid = 0; 677 - } 678 - 679 - if (val == DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && 680 - dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS) 681 - valid = 0; 682 - 683 - if (!valid) { 684 - if (val >= 0) 685 - dev_err(hsotg->dev, 686 - "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", 687 - val); 688 - val = dwc2_get_param_phy_type(hsotg) == DWC2_PHY_TYPE_PARAM_FS 689 - ? 
DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 690 - : DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ; 691 - dev_dbg(hsotg->dev, "Setting host_ls_low_power_phy_clk to %d\n", 692 - val); 693 - } 694 - 695 - hsotg->params.host_ls_low_power_phy_clk = val; 696 - } 697 - 698 - static void dwc2_set_param_phy_ulpi_ddr(struct dwc2_hsotg *hsotg, int val) 699 - { 700 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 701 - if (val >= 0) { 702 - dev_err(hsotg->dev, "Wrong value for phy_ulpi_ddr\n"); 703 - dev_err(hsotg->dev, "phy_upli_ddr must be 0 or 1\n"); 704 - } 705 - val = 0; 706 - dev_dbg(hsotg->dev, "Setting phy_upli_ddr to %d\n", val); 707 - } 708 - 709 - hsotg->params.phy_ulpi_ddr = val; 710 - } 711 - 712 - static void dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val) 713 - { 714 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 715 - if (val >= 0) { 716 - dev_err(hsotg->dev, 717 - "Wrong value for phy_ulpi_ext_vbus\n"); 718 - dev_err(hsotg->dev, 719 - "phy_ulpi_ext_vbus must be 0 or 1\n"); 720 - } 721 - val = 0; 722 - dev_dbg(hsotg->dev, "Setting phy_ulpi_ext_vbus to %d\n", val); 723 - } 724 - 725 - hsotg->params.phy_ulpi_ext_vbus = val; 726 - } 727 - 728 - static void dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val) 508 + static void dwc2_check_param_phy_utmi_width(struct dwc2_hsotg *hsotg) 729 509 { 730 510 int valid = 0; 511 + int param = hsotg->params.phy_utmi_width; 512 + int width = hsotg->hw_params.utmi_phy_data_width; 731 513 732 - switch (hsotg->hw_params.utmi_phy_data_width) { 514 + switch (width) { 733 515 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8: 734 - valid = (val == 8); 516 + valid = (param == 8); 735 517 break; 736 518 case GHWCFG4_UTMI_PHY_DATA_WIDTH_16: 737 - valid = (val == 16); 519 + valid = (param == 16); 738 520 break; 739 521 case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16: 740 - valid = (val == 8 || val == 16); 522 + valid = (param == 8 || param == 16); 741 523 break; 742 524 } 743 525 744 - if (!valid) { 745 - if (val >= 0) { 746 - dev_err(hsotg->dev, 
747 - "%d invalid for phy_utmi_width. Check HW configuration.\n", 748 - val); 526 + if (!valid) 527 + dwc2_set_param_phy_utmi_width(hsotg); 528 + } 529 + 530 + static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg) 531 + { 532 + int fifo_count; 533 + int fifo; 534 + int min; 535 + u32 total = 0; 536 + u32 dptxfszn; 537 + 538 + fifo_count = dwc2_hsotg_tx_fifo_count(hsotg); 539 + min = hsotg->hw_params.en_multiple_tx_fifo ? 16 : 4; 540 + 541 + for (fifo = 1; fifo <= fifo_count; fifo++) 542 + total += hsotg->params.g_tx_fifo_size[fifo]; 543 + 544 + if (total > dwc2_hsotg_tx_fifo_total_depth(hsotg) || !total) { 545 + dev_warn(hsotg->dev, "%s: Invalid parameter g-tx-fifo-size, setting to default average\n", 546 + __func__); 547 + dwc2_set_param_tx_fifo_sizes(hsotg); 548 + } 549 + 550 + for (fifo = 1; fifo <= fifo_count; fifo++) { 551 + dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) & 552 + FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT; 553 + 554 + if (hsotg->params.g_tx_fifo_size[fifo] < min || 555 + hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) { 556 + dev_warn(hsotg->dev, "%s: Invalid parameter g_tx_fifo_size[%d]=%d\n", 557 + __func__, fifo, 558 + hsotg->params.g_tx_fifo_size[fifo]); 559 + hsotg->params.g_tx_fifo_size[fifo] = dptxfszn; 749 560 } 750 - val = (hsotg->hw_params.utmi_phy_data_width == 751 - GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 
8 : 16; 752 - dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val); 753 - } 754 - 755 - hsotg->params.phy_utmi_width = val; 756 - } 757 - 758 - static void dwc2_set_param_ulpi_fs_ls(struct dwc2_hsotg *hsotg, int val) 759 - { 760 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 761 - if (val >= 0) { 762 - dev_err(hsotg->dev, "Wrong value for ulpi_fs_ls\n"); 763 - dev_err(hsotg->dev, "ulpi_fs_ls must be 0 or 1\n"); 764 - } 765 - val = 0; 766 - dev_dbg(hsotg->dev, "Setting ulpi_fs_ls to %d\n", val); 767 - } 768 - 769 - hsotg->params.ulpi_fs_ls = val; 770 - } 771 - 772 - static void dwc2_set_param_ts_dline(struct dwc2_hsotg *hsotg, int val) 773 - { 774 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 775 - if (val >= 0) { 776 - dev_err(hsotg->dev, "Wrong value for ts_dline\n"); 777 - dev_err(hsotg->dev, "ts_dline must be 0 or 1\n"); 778 - } 779 - val = 0; 780 - dev_dbg(hsotg->dev, "Setting ts_dline to %d\n", val); 781 - } 782 - 783 - hsotg->params.ts_dline = val; 784 - } 785 - 786 - static void dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val) 787 - { 788 - int valid = 1; 789 - 790 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 791 - if (val >= 0) { 792 - dev_err(hsotg->dev, "Wrong value for i2c_enable\n"); 793 - dev_err(hsotg->dev, "i2c_enable must be 0 or 1\n"); 794 - } 795 - 796 - valid = 0; 797 - } 798 - 799 - if (val == 1 && !(hsotg->hw_params.i2c_enable)) 800 - valid = 0; 801 - 802 - if (!valid) { 803 - if (val >= 0) 804 - dev_err(hsotg->dev, 805 - "%d invalid for i2c_enable. 
Check HW configuration.\n", 806 - val); 807 - val = hsotg->hw_params.i2c_enable; 808 - dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val); 809 - } 810 - 811 - hsotg->params.i2c_enable = val; 812 - } 813 - 814 - static void dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, 815 - int val) 816 - { 817 - int valid = 1; 818 - 819 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 820 - if (val >= 0) { 821 - dev_err(hsotg->dev, 822 - "Wrong value for en_multiple_tx_fifo,\n"); 823 - dev_err(hsotg->dev, 824 - "en_multiple_tx_fifo must be 0 or 1\n"); 825 - } 826 - valid = 0; 827 - } 828 - 829 - if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo) 830 - valid = 0; 831 - 832 - if (!valid) { 833 - if (val >= 0) 834 - dev_err(hsotg->dev, 835 - "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n", 836 - val); 837 - val = hsotg->hw_params.en_multiple_tx_fifo; 838 - dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val); 839 - } 840 - 841 - hsotg->params.en_multiple_tx_fifo = val; 842 - } 843 - 844 - static void dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val) 845 - { 846 - int valid = 1; 847 - 848 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 849 - if (val >= 0) { 850 - dev_err(hsotg->dev, 851 - "'%d' invalid for parameter reload_ctl\n", val); 852 - dev_err(hsotg->dev, "reload_ctl must be 0 or 1\n"); 853 - } 854 - valid = 0; 855 - } 856 - 857 - if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a) 858 - valid = 0; 859 - 860 - if (!valid) { 861 - if (val >= 0) 862 - dev_err(hsotg->dev, 863 - "%d invalid for parameter reload_ctl. 
Check HW configuration.\n", 864 - val); 865 - val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a; 866 - dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val); 867 - } 868 - 869 - hsotg->params.reload_ctl = val; 870 - } 871 - 872 - static void dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val) 873 - { 874 - if (val != -1) 875 - hsotg->params.ahbcfg = val; 876 - else 877 - hsotg->params.ahbcfg = GAHBCFG_HBSTLEN_INCR4 << 878 - GAHBCFG_HBSTLEN_SHIFT; 879 - } 880 - 881 - static void dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val) 882 - { 883 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 884 - if (val >= 0) { 885 - dev_err(hsotg->dev, 886 - "'%d' invalid for parameter otg_ver\n", val); 887 - dev_err(hsotg->dev, 888 - "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); 889 - } 890 - val = 0; 891 - dev_dbg(hsotg->dev, "Setting otg_ver to %d\n", val); 892 - } 893 - 894 - hsotg->params.otg_ver = val; 895 - } 896 - 897 - static void dwc2_set_param_uframe_sched(struct dwc2_hsotg *hsotg, int val) 898 - { 899 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 900 - if (val >= 0) { 901 - dev_err(hsotg->dev, 902 - "'%d' invalid for parameter uframe_sched\n", 903 - val); 904 - dev_err(hsotg->dev, "uframe_sched must be 0 or 1\n"); 905 - } 906 - val = 1; 907 - dev_dbg(hsotg->dev, "Setting uframe_sched to %d\n", val); 908 - } 909 - 910 - hsotg->params.uframe_sched = val; 911 - } 912 - 913 - static void dwc2_set_param_external_id_pin_ctl(struct dwc2_hsotg *hsotg, 914 - int val) 915 - { 916 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 917 - if (val >= 0) { 918 - dev_err(hsotg->dev, 919 - "'%d' invalid for parameter external_id_pin_ctl\n", 920 - val); 921 - dev_err(hsotg->dev, "external_id_pin_ctl must be 0 or 1\n"); 922 - } 923 - val = 0; 924 - dev_dbg(hsotg->dev, "Setting external_id_pin_ctl to %d\n", val); 925 - } 926 - 927 - hsotg->params.external_id_pin_ctl = val; 928 - } 929 - 930 - static void dwc2_set_param_hibernation(struct dwc2_hsotg *hsotg, 931 - int val) 932 - 
{ 933 - if (DWC2_OUT_OF_BOUNDS(val, 0, 1)) { 934 - if (val >= 0) { 935 - dev_err(hsotg->dev, 936 - "'%d' invalid for parameter hibernation\n", 937 - val); 938 - dev_err(hsotg->dev, "hibernation must be 0 or 1\n"); 939 - } 940 - val = 0; 941 - dev_dbg(hsotg->dev, "Setting hibernation to %d\n", val); 942 - } 943 - 944 - hsotg->params.hibernation = val; 945 - } 946 - 947 - static void dwc2_set_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg) 948 - { 949 - int i; 950 - int num; 951 - char *property = "g-tx-fifo-size"; 952 - struct dwc2_core_params *p = &hsotg->params; 953 - 954 - memset(p->g_tx_fifo_size, 0, sizeof(p->g_tx_fifo_size)); 955 - 956 - /* Read tx fifo sizes */ 957 - num = device_property_read_u32_array(hsotg->dev, property, NULL, 0); 958 - 959 - if (num > 0) { 960 - device_property_read_u32_array(hsotg->dev, property, 961 - &p->g_tx_fifo_size[1], 962 - num); 963 - } else { 964 - u32 p_tx_fifo[] = DWC2_G_P_LEGACY_TX_FIFO_SIZE; 965 - 966 - memcpy(&p->g_tx_fifo_size[1], 967 - p_tx_fifo, 968 - sizeof(p_tx_fifo)); 969 - 970 - num = ARRAY_SIZE(p_tx_fifo); 971 - } 972 - 973 - for (i = 0; i < num; i++) { 974 - if ((i + 1) >= ARRAY_SIZE(p->g_tx_fifo_size)) 975 - break; 976 - 977 - dev_dbg(hsotg->dev, "Setting %s[%d] to %d\n", 978 - property, i + 1, p->g_tx_fifo_size[i + 1]); 979 561 } 980 562 } 981 563 982 - static void dwc2_set_gadget_dma(struct dwc2_hsotg *hsotg) 564 + #define CHECK_RANGE(_param, _min, _max, _def) do { \ 565 + if ((hsotg->params._param) < (_min) || \ 566 + (hsotg->params._param) > (_max)) { \ 567 + dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \ 568 + __func__, #_param, hsotg->params._param); \ 569 + hsotg->params._param = (_def); \ 570 + } \ 571 + } while (0) 572 + 573 + #define CHECK_BOOL(_param, _check) do { \ 574 + if (hsotg->params._param && !(_check)) { \ 575 + dev_warn(hsotg->dev, "%s: Invalid parameter %s=%d\n", \ 576 + __func__, #_param, hsotg->params._param); \ 577 + hsotg->params._param = false; \ 578 + } \ 579 + } while (0) 580 + 
581 + static void dwc2_check_params(struct dwc2_hsotg *hsotg) 983 582 { 984 583 struct dwc2_hw_params *hw = &hsotg->hw_params; 985 584 struct dwc2_core_params *p = &hsotg->params; 986 585 bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); 987 586 988 - /* Buffer DMA */ 989 - dwc2_set_param_bool(hsotg, &p->g_dma, 990 - false, "gadget-dma", 991 - dma_capable, false, 992 - dma_capable); 587 + dwc2_check_param_otg_cap(hsotg); 588 + dwc2_check_param_phy_type(hsotg); 589 + dwc2_check_param_speed(hsotg); 590 + dwc2_check_param_phy_utmi_width(hsotg); 591 + CHECK_BOOL(enable_dynamic_fifo, hw->enable_dynamic_fifo); 592 + CHECK_BOOL(en_multiple_tx_fifo, hw->en_multiple_tx_fifo); 593 + CHECK_BOOL(i2c_enable, hw->i2c_enable); 594 + CHECK_BOOL(reload_ctl, (hsotg->hw_params.snpsid > DWC2_CORE_REV_2_92a)); 595 + CHECK_RANGE(max_packet_count, 596 + 15, hw->max_packet_count, 597 + hw->max_packet_count); 598 + CHECK_RANGE(max_transfer_size, 599 + 2047, hw->max_transfer_size, 600 + hw->max_transfer_size); 993 601 994 - /* DMA Descriptor */ 995 - dwc2_set_param_bool(hsotg, &p->g_dma_desc, false, 996 - "gadget-dma-desc", 997 - !!hw->dma_desc_enable, false, 998 - !!hw->dma_desc_enable); 999 - } 1000 - 1001 - /** 1002 - * dwc2_set_parameters() - Set all core parameters. 
1003 - * 1004 - * @hsotg: Programming view of the DWC_otg controller 1005 - * @params: The parameters to set 1006 - */ 1007 - static void dwc2_set_parameters(struct dwc2_hsotg *hsotg, 1008 - const struct dwc2_core_params *params) 1009 - { 1010 - struct dwc2_hw_params *hw = &hsotg->hw_params; 1011 - struct dwc2_core_params *p = &hsotg->params; 1012 - bool dma_capable = !(hw->arch == GHWCFG2_SLAVE_ONLY_ARCH); 1013 - 1014 - dwc2_set_param_otg_cap(hsotg, params->otg_cap); 1015 602 if ((hsotg->dr_mode == USB_DR_MODE_HOST) || 1016 603 (hsotg->dr_mode == USB_DR_MODE_OTG)) { 1017 - dev_dbg(hsotg->dev, "Setting HOST parameters\n"); 1018 - 1019 - dwc2_set_param_bool(hsotg, &p->host_dma, 1020 - false, "host-dma", 1021 - dma_capable, false, 1022 - dma_capable); 1023 - dwc2_set_param_host_rx_fifo_size(hsotg, 1024 - params->host_rx_fifo_size); 1025 - dwc2_set_param_host_nperio_tx_fifo_size(hsotg, 1026 - params->host_nperio_tx_fifo_size); 1027 - dwc2_set_param_host_perio_tx_fifo_size(hsotg, 1028 - params->host_perio_tx_fifo_size); 604 + CHECK_BOOL(host_dma, dma_capable); 605 + CHECK_BOOL(dma_desc_enable, p->host_dma); 606 + CHECK_BOOL(dma_desc_fs_enable, p->dma_desc_enable); 607 + CHECK_BOOL(host_ls_low_power_phy_clk, 608 + p->phy_type == DWC2_PHY_TYPE_PARAM_FS); 609 + CHECK_RANGE(host_channels, 610 + 1, hw->host_channels, 611 + hw->host_channels); 612 + CHECK_RANGE(host_rx_fifo_size, 613 + 16, hw->rx_fifo_size, 614 + hw->rx_fifo_size); 615 + CHECK_RANGE(host_nperio_tx_fifo_size, 616 + 16, hw->host_nperio_tx_fifo_size, 617 + hw->host_nperio_tx_fifo_size); 618 + CHECK_RANGE(host_perio_tx_fifo_size, 619 + 16, hw->host_perio_tx_fifo_size, 620 + hw->host_perio_tx_fifo_size); 1029 621 } 1030 - dwc2_set_param_dma_desc_enable(hsotg, params->dma_desc_enable); 1031 - dwc2_set_param_dma_desc_fs_enable(hsotg, params->dma_desc_fs_enable); 1032 622 1033 - dwc2_set_param_host_support_fs_ls_low_power(hsotg, 1034 - params->host_support_fs_ls_low_power); 1035 - 
dwc2_set_param_enable_dynamic_fifo(hsotg, 1036 - params->enable_dynamic_fifo); 1037 - dwc2_set_param_max_transfer_size(hsotg, 1038 - params->max_transfer_size); 1039 - dwc2_set_param_max_packet_count(hsotg, 1040 - params->max_packet_count); 1041 - dwc2_set_param_host_channels(hsotg, params->host_channels); 1042 - dwc2_set_param_phy_type(hsotg, params->phy_type); 1043 - dwc2_set_param_speed(hsotg, params->speed); 1044 - dwc2_set_param_host_ls_low_power_phy_clk(hsotg, 1045 - params->host_ls_low_power_phy_clk); 1046 - dwc2_set_param_phy_ulpi_ddr(hsotg, params->phy_ulpi_ddr); 1047 - dwc2_set_param_phy_ulpi_ext_vbus(hsotg, 1048 - params->phy_ulpi_ext_vbus); 1049 - dwc2_set_param_phy_utmi_width(hsotg, params->phy_utmi_width); 1050 - dwc2_set_param_ulpi_fs_ls(hsotg, params->ulpi_fs_ls); 1051 - dwc2_set_param_ts_dline(hsotg, params->ts_dline); 1052 - dwc2_set_param_i2c_enable(hsotg, params->i2c_enable); 1053 - dwc2_set_param_en_multiple_tx_fifo(hsotg, 1054 - params->en_multiple_tx_fifo); 1055 - dwc2_set_param_reload_ctl(hsotg, params->reload_ctl); 1056 - dwc2_set_param_ahbcfg(hsotg, params->ahbcfg); 1057 - dwc2_set_param_otg_ver(hsotg, params->otg_ver); 1058 - dwc2_set_param_uframe_sched(hsotg, params->uframe_sched); 1059 - dwc2_set_param_external_id_pin_ctl(hsotg, params->external_id_pin_ctl); 1060 - dwc2_set_param_hibernation(hsotg, params->hibernation); 1061 - 1062 - /* 1063 - * Set devicetree-only parameters. These parameters do not 1064 - * take any values from @params. 1065 - */ 1066 623 if ((hsotg->dr_mode == USB_DR_MODE_PERIPHERAL) || 1067 624 (hsotg->dr_mode == USB_DR_MODE_OTG)) { 1068 - dev_dbg(hsotg->dev, "Setting peripheral device properties\n"); 1069 - 1070 - dwc2_set_gadget_dma(hsotg); 1071 - 1072 - /* 1073 - * The values for g_rx_fifo_size (2048) and 1074 - * g_np_tx_fifo_size (1024) come from the legacy s3c 1075 - * gadget driver. These defaults have been hard-coded 1076 - * for some time so many platforms depend on these 1077 - * values. 
Leave them as defaults for now and only 1078 - * auto-detect if the hardware does not support the 1079 - * default. 1080 - */ 1081 - dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size, 1082 - true, "g-rx-fifo-size", 2048, 1083 - hw->rx_fifo_size, 1084 - 16, hw->rx_fifo_size); 1085 - 1086 - dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size, 1087 - true, "g-np-tx-fifo-size", 1024, 1088 - hw->dev_nperio_tx_fifo_size, 1089 - 16, hw->dev_nperio_tx_fifo_size); 1090 - 1091 - dwc2_set_param_tx_fifo_sizes(hsotg); 625 + CHECK_BOOL(g_dma, dma_capable); 626 + CHECK_BOOL(g_dma_desc, (p->g_dma && hw->dma_desc_enable)); 627 + CHECK_RANGE(g_rx_fifo_size, 628 + 16, hw->rx_fifo_size, 629 + hw->rx_fifo_size); 630 + CHECK_RANGE(g_np_tx_fifo_size, 631 + 16, hw->dev_nperio_tx_fifo_size, 632 + hw->dev_nperio_tx_fifo_size); 633 + dwc2_check_param_tx_fifo_sizes(hsotg); 1092 634 } 1093 635 } 1094 636 ··· 553 1211 554 1212 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); 555 1213 hptxfsiz = dwc2_readl(hsotg->regs + HPTXFSIZ); 556 - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 557 - dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz); 558 1214 559 1215 if (forced) 560 1216 dwc2_clear_force_mode(hsotg); ··· 580 1240 forced = dwc2_force_mode_if_needed(hsotg, false); 581 1241 582 1242 gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); 583 - dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz); 584 1243 585 1244 if (forced) 586 1245 dwc2_clear_force_mode(hsotg); ··· 624 1285 hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); 625 1286 hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); 626 1287 grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); 627 - 628 - dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1); 629 - dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2); 630 - dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3); 631 - dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4); 632 - dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz); 633 1288 634 1289 /* 635 1290 * Host specific hardware parameters. 
Reading these parameters ··· 684 1351 hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> 685 1352 GRXFSIZ_DEPTH_SHIFT; 686 1353 687 - dev_dbg(hsotg->dev, "Detected values from hardware:\n"); 688 - dev_dbg(hsotg->dev, " op_mode=%d\n", 689 - hw->op_mode); 690 - dev_dbg(hsotg->dev, " arch=%d\n", 691 - hw->arch); 692 - dev_dbg(hsotg->dev, " dma_desc_enable=%d\n", 693 - hw->dma_desc_enable); 694 - dev_dbg(hsotg->dev, " power_optimized=%d\n", 695 - hw->power_optimized); 696 - dev_dbg(hsotg->dev, " i2c_enable=%d\n", 697 - hw->i2c_enable); 698 - dev_dbg(hsotg->dev, " hs_phy_type=%d\n", 699 - hw->hs_phy_type); 700 - dev_dbg(hsotg->dev, " fs_phy_type=%d\n", 701 - hw->fs_phy_type); 702 - dev_dbg(hsotg->dev, " utmi_phy_data_width=%d\n", 703 - hw->utmi_phy_data_width); 704 - dev_dbg(hsotg->dev, " num_dev_ep=%d\n", 705 - hw->num_dev_ep); 706 - dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n", 707 - hw->num_dev_perio_in_ep); 708 - dev_dbg(hsotg->dev, " host_channels=%d\n", 709 - hw->host_channels); 710 - dev_dbg(hsotg->dev, " max_transfer_size=%d\n", 711 - hw->max_transfer_size); 712 - dev_dbg(hsotg->dev, " max_packet_count=%d\n", 713 - hw->max_packet_count); 714 - dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n", 715 - hw->nperio_tx_q_depth); 716 - dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n", 717 - hw->host_perio_tx_q_depth); 718 - dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n", 719 - hw->dev_token_q_depth); 720 - dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n", 721 - hw->enable_dynamic_fifo); 722 - dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n", 723 - hw->en_multiple_tx_fifo); 724 - dev_dbg(hsotg->dev, " total_fifo_size=%d\n", 725 - hw->total_fifo_size); 726 - dev_dbg(hsotg->dev, " rx_fifo_size=%d\n", 727 - hw->rx_fifo_size); 728 - dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n", 729 - hw->host_nperio_tx_fifo_size); 730 - dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n", 731 - hw->host_perio_tx_fifo_size); 732 - dev_dbg(hsotg->dev, "\n"); 733 - 734 1354 
return 0; 735 1355 } 736 1356 737 1357 int dwc2_init_params(struct dwc2_hsotg *hsotg) 738 1358 { 739 1359 const struct of_device_id *match; 740 - struct dwc2_core_params params; 1360 + void (*set_params)(void *data); 1361 + 1362 + dwc2_set_default_params(hsotg); 1363 + dwc2_get_device_properties(hsotg); 741 1364 742 1365 match = of_match_device(dwc2_of_match_table, hsotg->dev); 743 - if (match && match->data) 744 - params = *((struct dwc2_core_params *)match->data); 745 - else 746 - params = params_default; 747 - 748 - if (dwc2_is_fs_iot(hsotg)) { 749 - params.speed = DWC2_SPEED_PARAM_FULL; 750 - params.phy_type = DWC2_PHY_TYPE_PARAM_FS; 1366 + if (match && match->data) { 1367 + set_params = match->data; 1368 + set_params(hsotg); 751 1369 } 752 1370 753 - dwc2_set_parameters(hsotg, &params); 1371 + dwc2_check_params(hsotg); 754 1372 755 1373 return 0; 756 1374 }
+1 -1
drivers/usb/dwc2/pci.c
··· 87 87 } 88 88 89 89 static int dwc2_pci_probe(struct pci_dev *pci, 90 - const struct pci_device_id *id) 90 + const struct pci_device_id *id) 91 91 { 92 92 struct resource res[2]; 93 93 struct platform_device *dwc2;
+8 -8
drivers/usb/dwc2/platform.c
··· 111 111 112 112 if (mode != hsotg->dr_mode) { 113 113 dev_warn(hsotg->dev, 114 - "Configuration mismatch. dr_mode forced to %s\n", 114 + "Configuration mismatch. dr_mode forced to %s\n", 115 115 mode == USB_DR_MODE_HOST ? "host" : "device"); 116 116 117 117 hsotg->dr_mode = mode; ··· 136 136 return ret; 137 137 } 138 138 139 - if (hsotg->uphy) 139 + if (hsotg->uphy) { 140 140 ret = usb_phy_init(hsotg->uphy); 141 - else if (hsotg->plat && hsotg->plat->phy_init) 141 + } else if (hsotg->plat && hsotg->plat->phy_init) { 142 142 ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); 143 - else { 143 + } else { 144 144 ret = phy_power_on(hsotg->phy); 145 145 if (ret == 0) 146 146 ret = phy_init(hsotg->phy); ··· 170 170 struct platform_device *pdev = to_platform_device(hsotg->dev); 171 171 int ret = 0; 172 172 173 - if (hsotg->uphy) 173 + if (hsotg->uphy) { 174 174 usb_phy_shutdown(hsotg->uphy); 175 - else if (hsotg->plat && hsotg->plat->phy_exit) 175 + } else if (hsotg->plat && hsotg->plat->phy_exit) { 176 176 ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); 177 - else { 177 + } else { 178 178 ret = phy_exit(hsotg->phy); 179 179 if (ret == 0) 180 180 ret = phy_power_off(hsotg->phy); ··· 445 445 } 446 446 447 447 if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL) { 448 - retval = dwc2_hcd_init(hsotg, hsotg->irq); 448 + retval = dwc2_hcd_init(hsotg); 449 449 if (retval) { 450 450 if (hsotg->gadget_enabled) 451 451 dwc2_hsotg_remove(hsotg);
+5
drivers/usb/dwc3/core.h
··· 40 40 /* Global constants */ 41 41 #define DWC3_PULL_UP_TIMEOUT 500 /* ms */ 42 42 #define DWC3_ZLP_BUF_SIZE 1024 /* size of a superspeed bulk */ 43 + #define DWC3_BOUNCE_SIZE 1024 /* size of a superspeed bulk */ 43 44 #define DWC3_EP0_BOUNCE_SIZE 512 44 45 #define DWC3_ENDPOINTS_NUM 32 45 46 #define DWC3_XHCI_RESOURCES_NUM 2 ··· 725 724 * @epnum: endpoint number to which this request refers 726 725 * @trb: pointer to struct dwc3_trb 727 726 * @trb_dma: DMA address of @trb 727 + * @unaligned: true for OUT endpoints with length not divisible by maxp 728 728 * @direction: IN or OUT direction flag 729 729 * @mapped: true when request has been dma-mapped 730 730 * @queued: true when request has been queued to HW ··· 742 740 struct dwc3_trb *trb; 743 741 dma_addr_t trb_dma; 744 742 743 + unsigned unaligned:1; 745 744 unsigned direction:1; 746 745 unsigned mapped:1; 747 746 unsigned started:1; ··· 860 857 struct dwc3 { 861 858 struct usb_ctrlrequest *ctrl_req; 862 859 struct dwc3_trb *ep0_trb; 860 + void *bounce; 863 861 void *ep0_bounce; 864 862 void *zlp_buf; 865 863 void *scratchbuf; 866 864 u8 *setup_buf; 867 865 dma_addr_t ctrl_req_addr; 868 866 dma_addr_t ep0_trb_addr; 867 + dma_addr_t bounce_addr; 869 868 dma_addr_t ep0_bounce_addr; 870 869 dma_addr_t scratch_addr; 871 870 struct dwc3_request ep0_usb_req;
+1 -4
drivers/usb/dwc3/dwc3-exynos.c
··· 128 128 clk_prepare_enable(exynos->clk); 129 129 130 130 exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk"); 131 - if (IS_ERR(exynos->susp_clk)) { 132 - dev_info(dev, "no suspend clk specified\n"); 131 + if (IS_ERR(exynos->susp_clk)) 133 132 exynos->susp_clk = NULL; 134 - } 135 133 clk_prepare_enable(exynos->susp_clk); 136 134 137 135 if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) { ··· 288 290 289 291 module_platform_driver(dwc3_exynos_driver); 290 292 291 - MODULE_ALIAS("platform:exynos-dwc3"); 292 293 MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>"); 293 294 MODULE_LICENSE("GPL v2"); 294 295 MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
+7 -13
drivers/usb/dwc3/dwc3-omap.c
··· 426 426 } 427 427 428 428 omap->vbus_nb.notifier_call = dwc3_omap_vbus_notifier; 429 - ret = extcon_register_notifier(edev, EXTCON_USB, 430 - &omap->vbus_nb); 429 + ret = devm_extcon_register_notifier(omap->dev, edev, 430 + EXTCON_USB, &omap->vbus_nb); 431 431 if (ret < 0) 432 432 dev_vdbg(omap->dev, "failed to register notifier for USB\n"); 433 433 434 434 omap->id_nb.notifier_call = dwc3_omap_id_notifier; 435 - ret = extcon_register_notifier(edev, EXTCON_USB_HOST, 436 - &omap->id_nb); 435 + ret = devm_extcon_register_notifier(omap->dev, edev, 436 + EXTCON_USB_HOST, &omap->id_nb); 437 437 if (ret < 0) 438 438 dev_vdbg(omap->dev, "failed to register notifier for USB-HOST\n"); 439 439 440 - if (extcon_get_cable_state_(edev, EXTCON_USB) == true) 440 + if (extcon_get_state(edev, EXTCON_USB) == true) 441 441 dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID); 442 - if (extcon_get_cable_state_(edev, EXTCON_USB_HOST) == true) 442 + if (extcon_get_state(edev, EXTCON_USB_HOST) == true) 443 443 dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND); 444 444 445 445 omap->edev = edev; ··· 528 528 ret = of_platform_populate(node, NULL, NULL, dev); 529 529 if (ret) { 530 530 dev_err(&pdev->dev, "failed to create dwc3 core\n"); 531 - goto err2; 531 + goto err1; 532 532 } 533 533 534 534 dwc3_omap_enable_irqs(omap); 535 535 enable_irq(omap->irq); 536 536 return 0; 537 - 538 - err2: 539 - extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 540 - extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 541 537 542 538 err1: 543 539 pm_runtime_put_sync(dev); ··· 546 550 { 547 551 struct dwc3_omap *omap = platform_get_drvdata(pdev); 548 552 549 - extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb); 550 - extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb); 551 553 dwc3_omap_disable_irqs(omap); 552 554 disable_irq(omap->irq); 553 555 of_platform_depopulate(omap->dev);
+14
drivers/usb/dwc3/ep0.c
··· 1123 1123 dwc->ep0state = EP0_STATUS_PHASE; 1124 1124 1125 1125 if (dwc->delayed_status) { 1126 + struct dwc3_ep *dep = dwc->eps[0]; 1127 + 1126 1128 WARN_ON_ONCE(event->endpoint_number != 1); 1129 + /* 1130 + * We should handle the delay STATUS phase here if the 1131 + * request for handling delay STATUS has been queued 1132 + * into the list. 1133 + */ 1134 + if (!list_empty(&dep->pending_list)) { 1135 + dwc->delayed_status = false; 1136 + usb_gadget_set_state(&dwc->gadget, 1137 + USB_STATE_CONFIGURED); 1138 + dwc3_ep0_do_control_status(dwc, event); 1139 + } 1140 + 1127 1141 return; 1128 1142 } 1129 1143
+152 -42
drivers/usb/dwc3/gadget.c
··· 833 833 834 834 static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep); 835 835 836 - /** 837 - * dwc3_prepare_one_trb - setup one TRB from one request 838 - * @dep: endpoint for which this request is prepared 839 - * @req: dwc3_request pointer 840 - */ 841 - static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 842 - struct dwc3_request *req, dma_addr_t dma, 843 - unsigned length, unsigned chain, unsigned node) 836 + static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, 837 + dma_addr_t dma, unsigned length, unsigned chain, unsigned node, 838 + unsigned stream_id, unsigned short_not_ok, unsigned no_interrupt) 844 839 { 845 - struct dwc3_trb *trb; 846 840 struct dwc3 *dwc = dep->dwc; 847 841 struct usb_gadget *gadget = &dwc->gadget; 848 842 enum usb_device_speed speed = gadget->speed; 849 - 850 - trb = &dep->trb_pool[dep->trb_enqueue]; 851 - 852 - if (!req->trb) { 853 - dwc3_gadget_move_started_request(req); 854 - req->trb = trb; 855 - req->trb_dma = dwc3_trb_dma_offset(dep, trb); 856 - dep->queued_requests++; 857 - } 858 843 859 844 dwc3_ep_inc_enq(dep); 860 845 ··· 885 900 if (usb_endpoint_dir_out(dep->endpoint.desc)) { 886 901 trb->ctrl |= DWC3_TRB_CTRL_CSP; 887 902 888 - if (req->request.short_not_ok) 903 + if (short_not_ok) 889 904 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; 890 905 } 891 906 892 - if ((!req->request.no_interrupt && !chain) || 907 + if ((!no_interrupt && !chain) || 893 908 (dwc3_calc_trbs_left(dep) == 0)) 894 909 trb->ctrl |= DWC3_TRB_CTRL_IOC; 895 910 ··· 897 912 trb->ctrl |= DWC3_TRB_CTRL_CHN; 898 913 899 914 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) 900 - trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id); 915 + trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); 901 916 902 917 trb->ctrl |= DWC3_TRB_CTRL_HWO; 903 918 904 919 trace_dwc3_prepare_trb(dep, trb); 920 + } 921 + 922 + /** 923 + * dwc3_prepare_one_trb - setup one TRB from one request 924 + * @dep: endpoint for which this request 
is prepared 925 + * @req: dwc3_request pointer 926 + * @chain: should this TRB be chained to the next? 927 + * @node: only for isochronous endpoints. First TRB needs different type. 928 + */ 929 + static void dwc3_prepare_one_trb(struct dwc3_ep *dep, 930 + struct dwc3_request *req, unsigned chain, unsigned node) 931 + { 932 + struct dwc3_trb *trb; 933 + unsigned length = req->request.length; 934 + unsigned stream_id = req->request.stream_id; 935 + unsigned short_not_ok = req->request.short_not_ok; 936 + unsigned no_interrupt = req->request.no_interrupt; 937 + dma_addr_t dma = req->request.dma; 938 + 939 + trb = &dep->trb_pool[dep->trb_enqueue]; 940 + 941 + if (!req->trb) { 942 + dwc3_gadget_move_started_request(req); 943 + req->trb = trb; 944 + req->trb_dma = dwc3_trb_dma_offset(dep, trb); 945 + dep->queued_requests++; 946 + } 947 + 948 + __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node, 949 + stream_id, short_not_ok, no_interrupt); 905 950 } 906 951 907 952 /** ··· 989 974 { 990 975 struct scatterlist *sg = req->sg; 991 976 struct scatterlist *s; 992 - unsigned int length; 993 - dma_addr_t dma; 994 977 int i; 995 978 996 979 for_each_sg(sg, s, req->num_pending_sgs, i) { 980 + unsigned int length = req->request.length; 981 + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 982 + unsigned int rem = length % maxp; 997 983 unsigned chain = true; 998 - 999 - length = sg_dma_len(s); 1000 - dma = sg_dma_address(s); 1001 984 1002 985 if (sg_is_last(s)) 1003 986 chain = false; 1004 987 1005 - dwc3_prepare_one_trb(dep, req, dma, length, 1006 - chain, i); 988 + if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { 989 + struct dwc3 *dwc = dep->dwc; 990 + struct dwc3_trb *trb; 991 + 992 + req->unaligned = true; 993 + 994 + /* prepare normal TRB */ 995 + dwc3_prepare_one_trb(dep, req, true, i); 996 + 997 + /* Now prepare one extra TRB to align transfer size */ 998 + trb = &dep->trb_pool[dep->trb_enqueue]; 999 + __dwc3_prepare_one_trb(dep, trb, 
dwc->bounce_addr, 1000 + maxp - rem, false, 0, 1001 + req->request.stream_id, 1002 + req->request.short_not_ok, 1003 + req->request.no_interrupt); 1004 + } else { 1005 + dwc3_prepare_one_trb(dep, req, chain, i); 1006 + } 1007 1007 1008 1008 if (!dwc3_calc_trbs_left(dep)) 1009 1009 break; ··· 1028 998 static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, 1029 999 struct dwc3_request *req) 1030 1000 { 1031 - unsigned int length; 1032 - dma_addr_t dma; 1001 + unsigned int length = req->request.length; 1002 + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); 1003 + unsigned int rem = length % maxp; 1033 1004 1034 - dma = req->request.dma; 1035 - length = req->request.length; 1005 + if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { 1006 + struct dwc3 *dwc = dep->dwc; 1007 + struct dwc3_trb *trb; 1036 1008 1037 - dwc3_prepare_one_trb(dep, req, dma, length, 1038 - false, 0); 1009 + req->unaligned = true; 1010 + 1011 + /* prepare normal TRB */ 1012 + dwc3_prepare_one_trb(dep, req, true, 0); 1013 + 1014 + /* Now prepare one extra TRB to align transfer size */ 1015 + trb = &dep->trb_pool[dep->trb_enqueue]; 1016 + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, 1017 + false, 0, req->request.stream_id, 1018 + req->request.short_not_ok, 1019 + req->request.no_interrupt); 1020 + } else { 1021 + dwc3_prepare_one_trb(dep, req, false, 0); 1022 + } 1039 1023 } 1040 1024 1041 1025 /* ··· 1379 1335 unsigned transfer_in_flight; 1380 1336 unsigned started; 1381 1337 1338 + if (dep->flags & DWC3_EP_STALL) 1339 + return 0; 1340 + 1382 1341 if (dep->number > 1) 1383 1342 trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); 1384 1343 else ··· 1403 1356 else 1404 1357 dep->flags |= DWC3_EP_STALL; 1405 1358 } else { 1359 + if (!(dep->flags & DWC3_EP_STALL)) 1360 + return 0; 1406 1361 1407 1362 ret = dwc3_send_clear_stall_ep_cmd(dep); 1408 1363 if (ret) ··· 1967 1918 dep->endpoint.ops = &dwc3_gadget_ep0_ops; 1968 1919 if (!epnum) 1969 1920 dwc->gadget.ep0 = 
&dep->endpoint; 1921 + } else if (direction) { 1922 + int mdwidth; 1923 + int size; 1924 + int ret; 1925 + int num; 1926 + 1927 + mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0); 1928 + /* MDWIDTH is represented in bits, we need it in bytes */ 1929 + mdwidth /= 8; 1930 + 1931 + size = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(i)); 1932 + size = DWC3_GTXFIFOSIZ_TXFDEF(size); 1933 + 1934 + /* FIFO Depth is in MDWDITH bytes. Multiply */ 1935 + size *= mdwidth; 1936 + 1937 + num = size / 1024; 1938 + if (num == 0) 1939 + num = 1; 1940 + 1941 + /* 1942 + * FIFO sizes account an extra MDWIDTH * (num + 1) bytes for 1943 + * internal overhead. We don't really know how these are used, 1944 + * but documentation say it exists. 1945 + */ 1946 + size -= mdwidth * (num + 1); 1947 + size /= num; 1948 + 1949 + usb_ep_set_maxpacket_limit(&dep->endpoint, size); 1950 + 1951 + dep->endpoint.max_streams = 15; 1952 + dep->endpoint.ops = &dwc3_gadget_ep_ops; 1953 + list_add_tail(&dep->endpoint.ep_list, 1954 + &dwc->gadget.ep_list); 1955 + 1956 + ret = dwc3_alloc_trb_pool(dep); 1957 + if (ret) 1958 + return ret; 1970 1959 } else { 1971 1960 int ret; 1972 1961 ··· 2116 2029 if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO)) 2117 2030 trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2118 2031 2032 + /* 2033 + * If we're dealing with unaligned size OUT transfer, we will be left 2034 + * with one TRB pending in the ring. We need to manually clear HWO bit 2035 + * from that TRB. 
2036 + */ 2037 + if (req->unaligned && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { 2038 + trb->ctrl &= ~DWC3_TRB_CTRL_HWO; 2039 + return 1; 2040 + } 2041 + 2119 2042 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN) 2120 2043 return 1; 2121 2044 ··· 2213 2116 trb = &dep->trb_pool[dep->trb_dequeue]; 2214 2117 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2215 2118 event, status, chain); 2119 + } 2120 + 2121 + if (req->unaligned) { 2122 + trb = &dep->trb_pool[dep->trb_dequeue]; 2123 + ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb, 2124 + event, status, false); 2125 + req->unaligned = false; 2216 2126 } 2217 2127 2218 2128 req->request.actual = length - req->remaining; ··· 3123 3019 goto err4; 3124 3020 } 3125 3021 3022 + dwc->bounce = dma_alloc_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, 3023 + &dwc->bounce_addr, GFP_KERNEL); 3024 + if (!dwc->bounce) { 3025 + ret = -ENOMEM; 3026 + goto err5; 3027 + } 3028 + 3126 3029 init_completion(&dwc->ep0_in_setup); 3127 3030 3128 3031 dwc->gadget.ops = &dwc3_gadget_ops; ··· 3161 3050 dwc->gadget.max_speed = dwc->maximum_speed; 3162 3051 3163 3052 /* 3164 - * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize 3165 - * on ep out. 3166 - */ 3167 - dwc->gadget.quirk_ep_out_aligned_size = true; 3168 - 3169 - /* 3170 3053 * REVISIT: Here we should clear all pending IRQs to be 3171 3054 * sure we're starting from a well known location. 
3172 3055 */ 3173 3056 3174 3057 ret = dwc3_gadget_init_endpoints(dwc); 3175 3058 if (ret) 3176 - goto err5; 3059 + goto err6; 3177 3060 3178 3061 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); 3179 3062 if (ret) { 3180 3063 dev_err(dwc->dev, "failed to register udc\n"); 3181 - goto err5; 3064 + goto err6; 3182 3065 } 3183 3066 3184 3067 return 0; 3068 + err6: 3069 + dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3070 + dwc->bounce_addr); 3185 3071 3186 3072 err5: 3187 3073 kfree(dwc->zlp_buf); ··· 3211 3103 3212 3104 dwc3_gadget_free_endpoints(dwc); 3213 3105 3106 + dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, 3107 + dwc->bounce_addr); 3214 3108 dma_free_coherent(dwc->sysdev, DWC3_EP0_BOUNCE_SIZE, 3215 3109 dwc->ep0_bounce, dwc->ep0_bounce_addr); 3216 3110
+18 -3
drivers/usb/dwc3/host.c
··· 54 54 55 55 int dwc3_host_init(struct dwc3 *dwc) 56 56 { 57 - struct property_entry props[2]; 57 + struct property_entry props[3]; 58 58 struct platform_device *xhci; 59 59 int ret, irq; 60 60 struct resource *res; 61 61 struct platform_device *dwc3_pdev = to_platform_device(dwc->dev); 62 + int prop_idx = 0; 62 63 63 64 irq = dwc3_host_get_irq(dwc); 64 65 if (irq < 0) ··· 98 97 99 98 memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); 100 99 101 - if (dwc->usb3_lpm_capable) { 102 - props[0].name = "usb3-lpm-capable"; 100 + if (dwc->usb3_lpm_capable) 101 + props[prop_idx++].name = "usb3-lpm-capable"; 102 + 103 + /** 104 + * WORKAROUND: dwc3 revisions <=3.00a have a limitation 105 + * where Port Disable command doesn't work. 106 + * 107 + * The suggested workaround is that we avoid Port Disable 108 + * completely. 109 + * 110 + * This following flag tells XHCI to do just that. 111 + */ 112 + if (dwc->revision <= DWC3_REVISION_300A) 113 + props[prop_idx++].name = "quirk-broken-port-ped"; 114 + 115 + if (prop_idx) { 103 116 ret = platform_device_add_properties(xhci, props); 104 117 if (ret) { 105 118 dev_err(dwc->dev, "failed to add properties to xHCI\n");
+2 -2
drivers/usb/gadget/function/f_fs.c
··· 1230 1230 desc = epfile->ep->descs[desc_idx]; 1231 1231 1232 1232 spin_unlock_irq(&epfile->ffs->eps_lock); 1233 - ret = copy_to_user((void *)value, desc, sizeof(*desc)); 1233 + ret = copy_to_user((void *)value, desc, desc->bLength); 1234 1234 if (ret) 1235 1235 ret = -EFAULT; 1236 1236 return ret; ··· 2101 2101 case FFS_ENDPOINT: 2102 2102 d = (void *)desc; 2103 2103 helper->eps_count++; 2104 - if (helper->eps_count >= 15) 2104 + if (helper->eps_count >= FFS_MAX_EPS_COUNT) 2105 2105 return -EINVAL; 2106 2106 /* Check if descriptors for any speed were already parsed */ 2107 2107 if (!helper->ffs->eps_count && !helper->ffs->interfaces_count)
+140 -52
drivers/usb/gadget/function/f_hid.c
··· 50 50 51 51 /* recv report */ 52 52 struct list_head completed_out_req; 53 - spinlock_t spinlock; 53 + spinlock_t read_spinlock; 54 54 wait_queue_head_t read_queue; 55 55 unsigned int qlen; 56 56 57 57 /* send report */ 58 - struct mutex lock; 58 + spinlock_t write_spinlock; 59 59 bool write_pending; 60 60 wait_queue_head_t write_queue; 61 61 struct usb_request *req; ··· 258 258 if (!access_ok(VERIFY_WRITE, buffer, count)) 259 259 return -EFAULT; 260 260 261 - spin_lock_irqsave(&hidg->spinlock, flags); 261 + spin_lock_irqsave(&hidg->read_spinlock, flags); 262 262 263 263 #define READ_COND (!list_empty(&hidg->completed_out_req)) 264 264 265 265 /* wait for at least one buffer to complete */ 266 266 while (!READ_COND) { 267 - spin_unlock_irqrestore(&hidg->spinlock, flags); 267 + spin_unlock_irqrestore(&hidg->read_spinlock, flags); 268 268 if (file->f_flags & O_NONBLOCK) 269 269 return -EAGAIN; 270 270 271 271 if (wait_event_interruptible(hidg->read_queue, READ_COND)) 272 272 return -ERESTARTSYS; 273 273 274 - spin_lock_irqsave(&hidg->spinlock, flags); 274 + spin_lock_irqsave(&hidg->read_spinlock, flags); 275 275 } 276 276 277 277 /* pick the first one */ 278 278 list = list_first_entry(&hidg->completed_out_req, 279 279 struct f_hidg_req_list, list); 280 + 281 + /* 282 + * Remove this from list to protect it from beign free() 283 + * while host disables our function 284 + */ 285 + list_del(&list->list); 286 + 280 287 req = list->req; 281 288 count = min_t(unsigned int, count, req->actual - list->pos); 282 - spin_unlock_irqrestore(&hidg->spinlock, flags); 289 + spin_unlock_irqrestore(&hidg->read_spinlock, flags); 283 290 284 291 /* copy to user outside spinlock */ 285 292 count -= copy_to_user(buffer, req->buf + list->pos, count); ··· 299 292 * call, taking into account its current read position. 
300 293 */ 301 294 if (list->pos == req->actual) { 302 - spin_lock_irqsave(&hidg->spinlock, flags); 303 - list_del(&list->list); 304 295 kfree(list); 305 - spin_unlock_irqrestore(&hidg->spinlock, flags); 306 296 307 297 req->length = hidg->report_length; 308 298 ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); 309 - if (ret < 0) 299 + if (ret < 0) { 300 + free_ep_req(hidg->out_ep, req); 310 301 return ret; 302 + } 303 + } else { 304 + spin_lock_irqsave(&hidg->read_spinlock, flags); 305 + list_add(&list->list, &hidg->completed_out_req); 306 + spin_unlock_irqrestore(&hidg->read_spinlock, flags); 307 + 308 + wake_up(&hidg->read_queue); 311 309 } 312 310 313 311 return count; ··· 321 309 static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) 322 310 { 323 311 struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; 312 + unsigned long flags; 324 313 325 314 if (req->status != 0) { 326 315 ERROR(hidg->func.config->cdev, 327 316 "End Point Request ERROR: %d\n", req->status); 328 317 } 329 318 319 + spin_lock_irqsave(&hidg->write_spinlock, flags); 330 320 hidg->write_pending = 0; 321 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 331 322 wake_up(&hidg->write_queue); 332 323 } 333 324 ··· 338 323 size_t count, loff_t *offp) 339 324 { 340 325 struct f_hidg *hidg = file->private_data; 326 + struct usb_request *req; 327 + unsigned long flags; 341 328 ssize_t status = -ENOMEM; 342 329 343 330 if (!access_ok(VERIFY_READ, buffer, count)) 344 331 return -EFAULT; 345 332 346 - mutex_lock(&hidg->lock); 333 + spin_lock_irqsave(&hidg->write_spinlock, flags); 347 334 348 335 #define WRITE_COND (!hidg->write_pending) 349 - 336 + try_again: 350 337 /* write queue */ 351 338 while (!WRITE_COND) { 352 - mutex_unlock(&hidg->lock); 339 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 353 340 if (file->f_flags & O_NONBLOCK) 354 341 return -EAGAIN; 355 342 ··· 359 342 hidg->write_queue, WRITE_COND)) 360 343 return -ERESTARTSYS; 361 344 362 - 
mutex_lock(&hidg->lock); 345 + spin_lock_irqsave(&hidg->write_spinlock, flags); 363 346 } 364 347 348 + hidg->write_pending = 1; 349 + req = hidg->req; 365 350 count = min_t(unsigned, count, hidg->report_length); 351 + 352 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 366 353 status = copy_from_user(hidg->req->buf, buffer, count); 367 354 368 355 if (status != 0) { 369 356 ERROR(hidg->func.config->cdev, 370 357 "copy_from_user error\n"); 371 - mutex_unlock(&hidg->lock); 372 - return -EINVAL; 358 + status = -EINVAL; 359 + goto release_write_pending; 373 360 } 374 361 375 - hidg->req->status = 0; 376 - hidg->req->zero = 0; 377 - hidg->req->length = count; 378 - hidg->req->complete = f_hidg_req_complete; 379 - hidg->req->context = hidg; 380 - hidg->write_pending = 1; 362 + spin_lock_irqsave(&hidg->write_spinlock, flags); 363 + 364 + /* we our function has been disabled by host */ 365 + if (!hidg->req) { 366 + free_ep_req(hidg->in_ep, hidg->req); 367 + /* 368 + * TODO 369 + * Should we fail with error here? 
370 + */ 371 + goto try_again; 372 + } 373 + 374 + req->status = 0; 375 + req->zero = 0; 376 + req->length = count; 377 + req->complete = f_hidg_req_complete; 378 + req->context = hidg; 381 379 382 380 status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); 383 381 if (status < 0) { 384 382 ERROR(hidg->func.config->cdev, 385 383 "usb_ep_queue error on int endpoint %zd\n", status); 386 - hidg->write_pending = 0; 387 - wake_up(&hidg->write_queue); 384 + goto release_write_pending_unlocked; 388 385 } else { 389 386 status = count; 390 387 } 388 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 391 389 392 - mutex_unlock(&hidg->lock); 390 + return status; 391 + release_write_pending: 392 + spin_lock_irqsave(&hidg->write_spinlock, flags); 393 + release_write_pending_unlocked: 394 + hidg->write_pending = 0; 395 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 396 + 397 + wake_up(&hidg->write_queue); 393 398 394 399 return status; 395 400 } ··· 464 425 static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) 465 426 { 466 427 struct f_hidg *hidg = (struct f_hidg *) req->context; 428 + struct usb_composite_dev *cdev = hidg->func.config->cdev; 467 429 struct f_hidg_req_list *req_list; 468 430 unsigned long flags; 469 431 470 - req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC); 471 - if (!req_list) 432 + switch (req->status) { 433 + case 0: 434 + req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC); 435 + if (!req_list) { 436 + ERROR(cdev, "Unable to allocate mem for req_list\n"); 437 + goto free_req; 438 + } 439 + 440 + req_list->req = req; 441 + 442 + spin_lock_irqsave(&hidg->read_spinlock, flags); 443 + list_add_tail(&req_list->list, &hidg->completed_out_req); 444 + spin_unlock_irqrestore(&hidg->read_spinlock, flags); 445 + 446 + wake_up(&hidg->read_queue); 447 + break; 448 + default: 449 + ERROR(cdev, "Set report failed %d\n", req->status); 450 + /* FALLTHROUGH */ 451 + case -ECONNABORTED: /* hardware forced ep reset */ 452 + case 
-ECONNRESET: /* request dequeued */ 453 + case -ESHUTDOWN: /* disconnect from host */ 454 + free_req: 455 + free_ep_req(ep, req); 472 456 return; 473 - 474 - req_list->req = req; 475 - 476 - spin_lock_irqsave(&hidg->spinlock, flags); 477 - list_add_tail(&req_list->list, &hidg->completed_out_req); 478 - spin_unlock_irqrestore(&hidg->spinlock, flags); 479 - 480 - wake_up(&hidg->read_queue); 457 + } 481 458 } 482 459 483 460 static int hidg_setup(struct usb_function *f, ··· 599 544 { 600 545 struct f_hidg *hidg = func_to_hidg(f); 601 546 struct f_hidg_req_list *list, *next; 547 + unsigned long flags; 602 548 603 549 usb_ep_disable(hidg->in_ep); 604 550 usb_ep_disable(hidg->out_ep); 605 551 552 + spin_lock_irqsave(&hidg->read_spinlock, flags); 606 553 list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { 554 + free_ep_req(hidg->out_ep, list->req); 607 555 list_del(&list->list); 608 556 kfree(list); 609 557 } 558 + spin_unlock_irqrestore(&hidg->read_spinlock, flags); 559 + 560 + spin_lock_irqsave(&hidg->write_spinlock, flags); 561 + if (!hidg->write_pending) { 562 + free_ep_req(hidg->in_ep, hidg->req); 563 + hidg->write_pending = 1; 564 + } 565 + 566 + hidg->req = NULL; 567 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 610 568 } 611 569 612 570 static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) 613 571 { 614 572 struct usb_composite_dev *cdev = f->config->cdev; 615 573 struct f_hidg *hidg = func_to_hidg(f); 574 + struct usb_request *req_in = NULL; 575 + unsigned long flags; 616 576 int i, status = 0; 617 577 618 578 VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt); ··· 648 578 goto fail; 649 579 } 650 580 hidg->in_ep->driver_data = hidg; 581 + 582 + req_in = hidg_alloc_ep_req(hidg->in_ep, hidg->report_length); 583 + if (!req_in) { 584 + status = -ENOMEM; 585 + goto disable_ep_in; 586 + } 651 587 } 652 588 653 589 ··· 665 589 hidg->out_ep); 666 590 if (status) { 667 591 ERROR(cdev, "config_ep_by_speed 
FAILED!\n"); 668 - goto fail; 592 + goto free_req_in; 669 593 } 670 594 status = usb_ep_enable(hidg->out_ep); 671 595 if (status < 0) { 672 596 ERROR(cdev, "Enable OUT endpoint FAILED!\n"); 673 - goto fail; 597 + goto free_req_in; 674 598 } 675 599 hidg->out_ep->driver_data = hidg; 676 600 ··· 686 610 req->context = hidg; 687 611 status = usb_ep_queue(hidg->out_ep, req, 688 612 GFP_ATOMIC); 689 - if (status) 613 + if (status) { 690 614 ERROR(cdev, "%s queue req --> %d\n", 691 615 hidg->out_ep->name, status); 616 + free_ep_req(hidg->out_ep, req); 617 + } 692 618 } else { 693 - usb_ep_disable(hidg->out_ep); 694 619 status = -ENOMEM; 695 - goto fail; 620 + goto disable_out_ep; 696 621 } 697 622 } 698 623 } 624 + 625 + if (hidg->in_ep != NULL) { 626 + spin_lock_irqsave(&hidg->write_spinlock, flags); 627 + hidg->req = req_in; 628 + hidg->write_pending = 0; 629 + spin_unlock_irqrestore(&hidg->write_spinlock, flags); 630 + 631 + wake_up(&hidg->write_queue); 632 + } 633 + return 0; 634 + disable_out_ep: 635 + usb_ep_disable(hidg->out_ep); 636 + free_req_in: 637 + if (req_in) 638 + free_ep_req(hidg->in_ep, req_in); 639 + 640 + disable_ep_in: 641 + if (hidg->in_ep) 642 + usb_ep_disable(hidg->in_ep); 699 643 700 644 fail: 701 645 return status; ··· 765 669 goto fail; 766 670 hidg->out_ep = ep; 767 671 768 - /* preallocate request and buffer */ 769 - status = -ENOMEM; 770 - hidg->req = alloc_ep_req(hidg->in_ep, hidg->report_length); 771 - if (!hidg->req) 772 - goto fail; 773 - 774 672 /* set descriptor dynamic values */ 775 673 hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; 776 674 hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; ··· 801 711 if (status) 802 712 goto fail; 803 713 804 - mutex_init(&hidg->lock); 805 - spin_lock_init(&hidg->spinlock); 714 + spin_lock_init(&hidg->write_spinlock); 715 + hidg->write_pending = 1; 716 + hidg->req = NULL; 717 + spin_lock_init(&hidg->read_spinlock); 806 718 init_waitqueue_head(&hidg->write_queue); 
807 719 init_waitqueue_head(&hidg->read_queue); 808 720 INIT_LIST_HEAD(&hidg->completed_out_req); ··· 1067 975 1068 976 device_destroy(hidg_class, MKDEV(major, hidg->minor)); 1069 977 cdev_del(&hidg->cdev); 1070 - 1071 - /* disable/free request and end point */ 1072 - usb_ep_disable(hidg->in_ep); 1073 - free_ep_req(hidg->in_ep, hidg->req); 1074 978 1075 979 usb_free_all_descriptors(f); 1076 980 }
+43 -14
drivers/usb/gadget/function/f_printer.c
··· 49 49 50 50 #include "u_printer.h" 51 51 52 - #define PNP_STRING_LEN 1024 53 52 #define PRINTER_MINORS 4 54 53 #define GET_DEVICE_ID 0 55 54 #define GET_PORT_STATUS 1 ··· 906 907 switch (ctrl->bRequest) { 907 908 case GET_DEVICE_ID: 908 909 w_index >>= 8; 909 - if (w_length <= PNP_STRING_LEN && 910 - (USB_DIR_IN & ctrl->bRequestType)) 910 + if (USB_DIR_IN & ctrl->bRequestType) 911 911 break; 912 912 return false; 913 913 case GET_PORT_STATUS: ··· 935 937 struct printer_dev *dev = func_to_printer(f); 936 938 struct usb_composite_dev *cdev = f->config->cdev; 937 939 struct usb_request *req = cdev->req; 940 + u8 *buf = req->buf; 938 941 int value = -EOPNOTSUPP; 939 942 u16 wIndex = le16_to_cpu(ctrl->wIndex); 940 943 u16 wValue = le16_to_cpu(ctrl->wValue); ··· 952 953 if ((wIndex>>8) != dev->interface) 953 954 break; 954 955 955 - value = (dev->pnp_string[0] << 8) | dev->pnp_string[1]; 956 - memcpy(req->buf, dev->pnp_string, value); 956 + if (!dev->pnp_string) { 957 + value = 0; 958 + break; 959 + } 960 + value = strlen(dev->pnp_string); 961 + buf[0] = (value >> 8) & 0xFF; 962 + buf[1] = value & 0xFF; 963 + memcpy(buf + 2, dev->pnp_string, value); 957 964 DBG(dev, "1284 PNP String: %x %s\n", value, 958 - &dev->pnp_string[2]); 965 + dev->pnp_string); 959 966 break; 960 967 961 968 case GET_PORT_STATUS: /* Get Port Status */ ··· 969 964 if (wIndex != dev->interface) 970 965 break; 971 966 972 - *(u8 *)req->buf = dev->printer_status; 967 + buf[0] = dev->printer_status; 973 968 value = min_t(u16, wLength, 1); 974 969 break; 975 970 ··· 1162 1157 char *page) 1163 1158 { 1164 1159 struct f_printer_opts *opts = to_f_printer_opts(item); 1165 - int result; 1160 + int result = 0; 1166 1161 1167 1162 mutex_lock(&opts->lock); 1168 - result = strlcpy(page, opts->pnp_string + 2, PNP_STRING_LEN - 2); 1163 + if (!opts->pnp_string) 1164 + goto unlock; 1165 + 1166 + result = strlcpy(page, opts->pnp_string, PAGE_SIZE); 1167 + if (result >= PAGE_SIZE) { 1168 + result = PAGE_SIZE; 1169 
+ } else if (page[result - 1] != '\n' && result + 1 < PAGE_SIZE) { 1170 + page[result++] = '\n'; 1171 + page[result] = '\0'; 1172 + } 1173 + 1174 + unlock: 1169 1175 mutex_unlock(&opts->lock); 1170 1176 1171 1177 return result; ··· 1186 1170 const char *page, size_t len) 1187 1171 { 1188 1172 struct f_printer_opts *opts = to_f_printer_opts(item); 1189 - int result, l; 1173 + char *new_pnp; 1174 + int result; 1190 1175 1191 1176 mutex_lock(&opts->lock); 1192 - result = strlcpy(opts->pnp_string + 2, page, PNP_STRING_LEN - 2); 1193 - l = strlen(opts->pnp_string + 2) + 2; 1194 - opts->pnp_string[0] = (l >> 8) & 0xFF; 1195 - opts->pnp_string[1] = l & 0xFF; 1177 + 1178 + new_pnp = kstrndup(page, len, GFP_KERNEL); 1179 + if (!new_pnp) { 1180 + result = -ENOMEM; 1181 + goto unlock; 1182 + } 1183 + 1184 + if (opts->pnp_string_allocated) 1185 + kfree(opts->pnp_string); 1186 + 1187 + opts->pnp_string_allocated = true; 1188 + opts->pnp_string = new_pnp; 1189 + result = len; 1190 + unlock: 1196 1191 mutex_unlock(&opts->lock); 1197 1192 1198 1193 return result; ··· 1297 1270 1298 1271 mutex_unlock(&printer_ida_lock); 1299 1272 1273 + if (opts->pnp_string_allocated) 1274 + kfree(opts->pnp_string); 1300 1275 kfree(opts); 1301 1276 } 1302 1277
+35 -14
drivers/usb/gadget/function/f_uac2.c
··· 22 22 23 23 #include "u_uac2.h" 24 24 25 - /* Keep everyone on toes */ 26 - #define USB_XFERS 2 27 - 28 25 /* 29 26 * The driver implements a simple UAC_2 topology. 30 27 * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture ··· 75 78 size_t period_size; 76 79 77 80 unsigned max_psize; 78 - struct uac2_req ureq[USB_XFERS]; 81 + struct uac2_req *ureq; 79 82 80 83 spinlock_t lock; 81 84 }; ··· 266 269 uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd) 267 270 { 268 271 struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream); 272 + struct audio_dev *agdev = uac2_to_agdev(uac2); 273 + struct f_uac2_opts *uac2_opts = agdev_to_uac2_opts(agdev); 269 274 struct uac2_rtd_params *prm; 270 275 unsigned long flags; 271 276 int err = 0; ··· 299 300 300 301 /* Clear buffer after Play stops */ 301 302 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss) 302 - memset(prm->rbuf, 0, prm->max_psize * USB_XFERS); 303 + memset(prm->rbuf, 0, prm->max_psize * uac2_opts->req_number); 303 304 304 305 return err; 305 306 } ··· 942 943 free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep) 943 944 { 944 945 struct snd_uac2_chip *uac2 = prm->uac2; 946 + struct audio_dev *agdev = uac2_to_agdev(uac2); 947 + struct f_uac2_opts *uac2_opts = agdev_to_uac2_opts(agdev); 945 948 int i; 946 949 947 950 if (!prm->ep_enabled) ··· 951 950 952 951 prm->ep_enabled = false; 953 952 954 - for (i = 0; i < USB_XFERS; i++) { 953 + for (i = 0; i < uac2_opts->req_number; i++) { 955 954 if (prm->ureq[i].req) { 956 955 usb_ep_dequeue(ep, prm->ureq[i].req); 957 956 usb_ep_free_request(ep, prm->ureq[i].req); ··· 1096 1095 1097 1096 prm = &agdev->uac2.c_prm; 1098 1097 prm->max_psize = hs_epout_desc.wMaxPacketSize; 1099 - prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); 1098 + prm->ureq = kcalloc(uac2_opts->req_number, sizeof(struct uac2_req), 1099 + GFP_KERNEL); 1100 + if (!prm->ureq) { 1101 + ret = -ENOMEM; 1102 + goto err_free_descs; 1103 + } 1104 + prm->rbuf = 
kcalloc(uac2_opts->req_number, prm->max_psize, GFP_KERNEL); 1100 1105 if (!prm->rbuf) { 1101 1106 prm->max_psize = 0; 1107 + ret = -ENOMEM; 1102 1108 goto err_free_descs; 1103 1109 } 1104 1110 1105 1111 prm = &agdev->uac2.p_prm; 1106 1112 prm->max_psize = hs_epin_desc.wMaxPacketSize; 1107 - prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); 1113 + prm->ureq = kcalloc(uac2_opts->req_number, sizeof(struct uac2_req), 1114 + GFP_KERNEL); 1115 + if (!prm->ureq) { 1116 + ret = -ENOMEM; 1117 + goto err_free_descs; 1118 + } 1119 + prm->rbuf = kcalloc(uac2_opts->req_number, prm->max_psize, GFP_KERNEL); 1108 1120 if (!prm->rbuf) { 1109 1121 prm->max_psize = 0; 1110 - goto err; 1122 + ret = -ENOMEM; 1123 + goto err_no_memory; 1111 1124 } 1112 1125 1113 1126 ret = alsa_uac2_init(agdev); 1114 1127 if (ret) 1115 - goto err; 1128 + goto err_no_memory; 1116 1129 return 0; 1117 1130 1118 - err: 1131 + err_no_memory: 1132 + kfree(agdev->uac2.p_prm.ureq); 1133 + kfree(agdev->uac2.c_prm.ureq); 1119 1134 kfree(agdev->uac2.p_prm.rbuf); 1120 1135 kfree(agdev->uac2.c_prm.rbuf); 1121 1136 err_free_descs: 1122 1137 usb_free_all_descriptors(fn); 1123 - return -EINVAL; 1138 + return ret; 1124 1139 } 1125 1140 1126 1141 static int ··· 1144 1127 { 1145 1128 struct usb_composite_dev *cdev = fn->config->cdev; 1146 1129 struct audio_dev *agdev = func_to_agdev(fn); 1130 + struct f_uac2_opts *opts = agdev_to_uac2_opts(agdev); 1147 1131 struct snd_uac2_chip *uac2 = &agdev->uac2; 1148 1132 struct usb_gadget *gadget = cdev->gadget; 1149 1133 struct device *dev = &uac2->pdev.dev; ··· 1175 1157 agdev->as_out_alt = alt; 1176 1158 req_len = prm->max_psize; 1177 1159 } else if (intf == agdev->as_in_intf) { 1178 - struct f_uac2_opts *opts = agdev_to_uac2_opts(agdev); 1179 1160 unsigned int factor, rate; 1180 1161 struct usb_endpoint_descriptor *ep_desc; 1181 1162 ··· 1220 1203 prm->ep_enabled = true; 1221 1204 usb_ep_enable(ep); 1222 1205 1223 - for (i = 0; i < USB_XFERS; i++) { 1206 + for (i = 0; 
i < opts->req_number; i++) { 1224 1207 if (!prm->ureq[i].req) { 1225 1208 req = usb_ep_alloc_request(ep, GFP_ATOMIC); 1226 1209 if (req == NULL) ··· 1504 1487 UAC2_ATTRIBUTE(c_chmask); 1505 1488 UAC2_ATTRIBUTE(c_srate); 1506 1489 UAC2_ATTRIBUTE(c_ssize); 1490 + UAC2_ATTRIBUTE(req_number); 1507 1491 1508 1492 static struct configfs_attribute *f_uac2_attrs[] = { 1509 1493 &f_uac2_opts_attr_p_chmask, ··· 1513 1495 &f_uac2_opts_attr_c_chmask, 1514 1496 &f_uac2_opts_attr_c_srate, 1515 1497 &f_uac2_opts_attr_c_ssize, 1498 + &f_uac2_opts_attr_req_number, 1516 1499 NULL, 1517 1500 }; 1518 1501 ··· 1551 1532 opts->c_chmask = UAC2_DEF_CCHMASK; 1552 1533 opts->c_srate = UAC2_DEF_CSRATE; 1553 1534 opts->c_ssize = UAC2_DEF_CSSIZE; 1535 + opts->req_number = UAC2_DEF_REQ_NUM; 1554 1536 return &opts->func_inst; 1555 1537 } 1556 1538 ··· 1580 1560 1581 1561 prm = &agdev->uac2.c_prm; 1582 1562 kfree(prm->rbuf); 1563 + kfree(prm->ureq); 1583 1564 usb_free_all_descriptors(f); 1584 1565 } 1585 1566
+20 -4
drivers/usb/gadget/function/u_ether.c
··· 913 913 int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) 914 914 { 915 915 struct eth_dev *dev; 916 + int ret; 916 917 917 918 dev = netdev_priv(net); 918 - return get_ether_addr_str(dev->dev_mac, dev_addr, len); 919 + ret = get_ether_addr_str(dev->dev_mac, dev_addr, len); 920 + if (ret + 1 < len) { 921 + dev_addr[ret++] = '\n'; 922 + dev_addr[ret] = '\0'; 923 + } 924 + 925 + return ret; 919 926 } 920 927 EXPORT_SYMBOL_GPL(gether_get_dev_addr); 921 928 ··· 942 935 int gether_get_host_addr(struct net_device *net, char *host_addr, int len) 943 936 { 944 937 struct eth_dev *dev; 938 + int ret; 945 939 946 940 dev = netdev_priv(net); 947 - return get_ether_addr_str(dev->host_mac, host_addr, len); 941 + ret = get_ether_addr_str(dev->host_mac, host_addr, len); 942 + if (ret + 1 < len) { 943 + host_addr[ret++] = '\n'; 944 + host_addr[ret] = '\0'; 945 + } 946 + 947 + return ret; 948 948 } 949 949 EXPORT_SYMBOL_GPL(gether_get_host_addr); 950 950 ··· 998 984 999 985 int gether_get_ifname(struct net_device *net, char *name, int len) 1000 986 { 987 + int ret; 988 + 1001 989 rtnl_lock(); 1002 - strlcpy(name, netdev_name(net), len); 990 + ret = snprintf(name, len, "%s\n", netdev_name(net)); 1003 991 rtnl_unlock(); 1004 - return strlen(name); 992 + return ret < len ? ret : len; 1005 993 } 1006 994 EXPORT_SYMBOL_GPL(gether_get_ifname); 1007 995
+1 -1
drivers/usb/gadget/function/u_ether_configfs.h
··· 108 108 mutex_lock(&opts->lock); \ 109 109 qmult = gether_get_qmult(opts->net); \ 110 110 mutex_unlock(&opts->lock); \ 111 - return sprintf(page, "%d", qmult); \ 111 + return sprintf(page, "%d\n", qmult); \ 112 112 } \ 113 113 \ 114 114 static ssize_t _f_##_opts_qmult_store(struct config_item *item, \
+2 -1
drivers/usb/gadget/function/u_fs.h
··· 247 247 248 248 unsigned user_flags; 249 249 250 - u8 eps_addrmap[15]; 250 + #define FFS_MAX_EPS_COUNT 31 251 + u8 eps_addrmap[FFS_MAX_EPS_COUNT]; 251 252 252 253 unsigned short strings_count; 253 254 unsigned short interfaces_count;
+2 -3
drivers/usb/gadget/function/u_printer.h
··· 18 18 19 19 #include <linux/usb/composite.h> 20 20 21 - #define PNP_STRING_LEN 1024 22 - 23 21 struct f_printer_opts { 24 22 struct usb_function_instance func_inst; 25 23 int minor; 26 - char pnp_string[PNP_STRING_LEN]; 24 + char *pnp_string; 25 + bool pnp_string_allocated; 27 26 unsigned q_len; 28 27 29 28 /*
+2
drivers/usb/gadget/function/u_uac2.h
··· 24 24 #define UAC2_DEF_CCHMASK 0x3 25 25 #define UAC2_DEF_CSRATE 64000 26 26 #define UAC2_DEF_CSSIZE 2 27 + #define UAC2_DEF_REQ_NUM 2 27 28 28 29 struct f_uac2_opts { 29 30 struct usb_function_instance func_inst; ··· 34 33 int c_chmask; 35 34 int c_srate; 36 35 int c_ssize; 36 + int req_number; 37 37 bool bound; 38 38 39 39 struct mutex lock;
+1
drivers/usb/gadget/legacy/audio.c
··· 229 229 uac2_opts->c_chmask = c_chmask; 230 230 uac2_opts->c_srate = c_srate; 231 231 uac2_opts->c_ssize = c_ssize; 232 + uac2_opts->req_number = UAC2_DEF_REQ_NUM; 232 233 #else 233 234 uac1_opts = container_of(fi_uac1, struct f_uac1_opts, func_inst); 234 235 uac1_opts->fn_play = fn_play;
+17 -11
drivers/usb/gadget/legacy/printer.c
··· 88 88 89 89 static char product_desc [40] = DRIVER_DESC; 90 90 static char serial_num [40] = "1"; 91 - static char pnp_string[PNP_STRING_LEN] = 92 - "XXMFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;"; 91 + static char *pnp_string = 92 + "MFG:linux;MDL:g_printer;CLS:PRINTER;SN:1;"; 93 93 94 94 /* static strings, in UTF-8 */ 95 95 static struct usb_string strings [] = { ··· 143 143 static int printer_bind(struct usb_composite_dev *cdev) 144 144 { 145 145 struct f_printer_opts *opts; 146 - int ret, len; 146 + int ret; 147 147 148 148 fi_printer = usb_get_function_instance("printer"); 149 149 if (IS_ERR(fi_printer)) 150 150 return PTR_ERR(fi_printer); 151 151 152 - if (iPNPstring) 153 - strlcpy(&pnp_string[2], iPNPstring, PNP_STRING_LEN - 2); 154 - 155 - len = strlen(pnp_string); 156 - pnp_string[0] = (len >> 8) & 0xFF; 157 - pnp_string[1] = len & 0xFF; 158 - 159 152 opts = container_of(fi_printer, struct f_printer_opts, func_inst); 160 153 opts->minor = 0; 161 - memcpy(opts->pnp_string, pnp_string, PNP_STRING_LEN); 162 154 opts->q_len = QLEN; 155 + if (iPNPstring) { 156 + opts->pnp_string = kstrdup(iPNPstring, GFP_KERNEL); 157 + if (!opts->pnp_string) { 158 + ret = -ENOMEM; 159 + goto fail_put_func_inst; 160 + } 161 + opts->pnp_string_allocated = true; 162 + /* 163 + * we don't free this memory in case of error 164 + * as printer cleanup func will do this for us 165 + */ 166 + } else { 167 + opts->pnp_string = pnp_string; 168 + } 163 169 164 170 ret = usb_string_ids_tab(cdev, strings); 165 171 if (ret < 0)
+14
drivers/usb/gadget/udc/Kconfig
··· 60 60 USBA is the integrated high-speed USB Device controller on 61 61 the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel. 62 62 63 + The fifo_mode parameter is used to select endpoint allocation mode. 64 + fifo_mode = 0 is used to let the driver autoconfigure the endpoints. 65 + In this case 2 banks are allocated for isochronous endpoints and 66 + only one bank is allocated for the rest of the endpoints. 67 + 68 + fifo_mode = 1 is a generic maximum fifo size (1024 bytes) configuration 69 + allowing the usage of ep1 - ep6 70 + 71 + fifo_mode = 2 is a generic performance maximum fifo size (1024 bytes) 72 + configuration allowing the usage of ep1 - ep3 73 + 74 + fifo_mode = 3 is a balanced performance configuration allowing the 75 + the usage of ep1 - ep8 76 + 63 77 config USB_BCM63XX_UDC 64 78 tristate "Broadcom BCM63xx Peripheral Controller" 65 79 depends on BCM63XX
+204 -32
drivers/usb/gadget/udc/atmel_usba_udc.c
··· 20 20 #include <linux/mfd/syscon.h> 21 21 #include <linux/platform_device.h> 22 22 #include <linux/regmap.h> 23 + #include <linux/ctype.h> 23 24 #include <linux/usb/ch9.h> 24 25 #include <linux/usb/gadget.h> 25 26 #include <linux/usb/atmel_usba_udc.h> ··· 319 318 } 320 319 #endif 321 320 321 + static ushort fifo_mode; 322 + 323 + /* "modprobe ... fifo_mode=1" etc */ 324 + module_param(fifo_mode, ushort, 0x0); 325 + MODULE_PARM_DESC(fifo_mode, "Endpoint configuration mode"); 326 + 327 + /* mode 0 - uses autoconfig */ 328 + 329 + /* mode 1 - fits in 8KB, generic max fifo configuration */ 330 + static struct usba_fifo_cfg mode_1_cfg[] = { 331 + { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 332 + { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, }, 333 + { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 1, }, 334 + { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 1, }, 335 + { .hw_ep_num = 4, .fifo_size = 1024, .nr_banks = 1, }, 336 + { .hw_ep_num = 5, .fifo_size = 1024, .nr_banks = 1, }, 337 + { .hw_ep_num = 6, .fifo_size = 1024, .nr_banks = 1, }, 338 + }; 339 + 340 + /* mode 2 - fits in 8KB, performance max fifo configuration */ 341 + static struct usba_fifo_cfg mode_2_cfg[] = { 342 + { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 343 + { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 3, }, 344 + { .hw_ep_num = 2, .fifo_size = 1024, .nr_banks = 2, }, 345 + { .hw_ep_num = 3, .fifo_size = 1024, .nr_banks = 2, }, 346 + }; 347 + 348 + /* mode 3 - fits in 8KB, mixed fifo configuration */ 349 + static struct usba_fifo_cfg mode_3_cfg[] = { 350 + { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 351 + { .hw_ep_num = 1, .fifo_size = 1024, .nr_banks = 2, }, 352 + { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, }, 353 + { .hw_ep_num = 3, .fifo_size = 512, .nr_banks = 2, }, 354 + { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, }, 355 + { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, }, 356 + { .hw_ep_num = 6, .fifo_size = 512, .nr_banks = 2, }, 357 
+ }; 358 + 359 + /* mode 4 - fits in 8KB, custom fifo configuration */ 360 + static struct usba_fifo_cfg mode_4_cfg[] = { 361 + { .hw_ep_num = 0, .fifo_size = 64, .nr_banks = 1, }, 362 + { .hw_ep_num = 1, .fifo_size = 512, .nr_banks = 2, }, 363 + { .hw_ep_num = 2, .fifo_size = 512, .nr_banks = 2, }, 364 + { .hw_ep_num = 3, .fifo_size = 8, .nr_banks = 2, }, 365 + { .hw_ep_num = 4, .fifo_size = 512, .nr_banks = 2, }, 366 + { .hw_ep_num = 5, .fifo_size = 512, .nr_banks = 2, }, 367 + { .hw_ep_num = 6, .fifo_size = 16, .nr_banks = 2, }, 368 + { .hw_ep_num = 7, .fifo_size = 8, .nr_banks = 2, }, 369 + { .hw_ep_num = 8, .fifo_size = 8, .nr_banks = 2, }, 370 + }; 371 + /* Add additional configurations here */ 372 + 373 + int usba_config_fifo_table(struct usba_udc *udc) 374 + { 375 + int n; 376 + 377 + switch (fifo_mode) { 378 + default: 379 + fifo_mode = 0; 380 + case 0: 381 + udc->fifo_cfg = NULL; 382 + n = 0; 383 + break; 384 + case 1: 385 + udc->fifo_cfg = mode_1_cfg; 386 + n = ARRAY_SIZE(mode_1_cfg); 387 + break; 388 + case 2: 389 + udc->fifo_cfg = mode_2_cfg; 390 + n = ARRAY_SIZE(mode_2_cfg); 391 + break; 392 + case 3: 393 + udc->fifo_cfg = mode_3_cfg; 394 + n = ARRAY_SIZE(mode_3_cfg); 395 + break; 396 + case 4: 397 + udc->fifo_cfg = mode_4_cfg; 398 + n = ARRAY_SIZE(mode_4_cfg); 399 + break; 400 + } 401 + DBG(DBG_HW, "Setup fifo_mode %d\n", fifo_mode); 402 + 403 + return n; 404 + } 405 + 322 406 static inline u32 usba_int_enb_get(struct usba_udc *udc) 323 407 { 324 408 return udc->int_enb_cache; ··· 629 543 ep->is_isoc = 0; 630 544 ep->is_in = 0; 631 545 632 - if (maxpacket <= 8) 633 - ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); 634 - else 635 - /* LSB is bit 1, not 0 */ 636 - ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3); 637 - 638 - DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n", 546 + DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n", 639 547 ep->ep.name, ept_cfg, maxpacket); 640 548 641 549 if (usb_endpoint_dir_in(desc)) { 642 550 ep->is_in = 1; 
643 - ept_cfg |= USBA_EPT_DIR_IN; 551 + ep->ept_cfg |= USBA_EPT_DIR_IN; 644 552 } 645 553 646 554 switch (usb_endpoint_type(desc)) { 647 555 case USB_ENDPOINT_XFER_CONTROL: 648 - ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL); 649 - ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE); 556 + ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL); 650 557 break; 651 558 case USB_ENDPOINT_XFER_ISOC: 652 559 if (!ep->can_isoc) { ··· 657 578 return -EINVAL; 658 579 659 580 ep->is_isoc = 1; 660 - ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO); 581 + ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO); 582 + ep->ept_cfg |= USBA_BF(NB_TRANS, nr_trans); 661 583 662 - /* 663 - * Do triple-buffering on high-bandwidth iso endpoints. 664 - */ 665 - if (nr_trans > 1 && ep->nr_banks == 3) 666 - ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE); 667 - else 668 - ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE); 669 - ept_cfg |= USBA_BF(NB_TRANS, nr_trans); 670 584 break; 671 585 case USB_ENDPOINT_XFER_BULK: 672 - ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK); 673 - ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE); 586 + ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK); 674 587 break; 675 588 case USB_ENDPOINT_XFER_INT: 676 - ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT); 677 - ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE); 589 + ep->ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT); 678 590 break; 679 591 } 680 592 ··· 674 604 ep->ep.desc = desc; 675 605 ep->ep.maxpacket = maxpacket; 676 606 677 - usba_ep_writel(ep, CFG, ept_cfg); 607 + usba_ep_writel(ep, CFG, ep->ept_cfg); 678 608 usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE); 679 609 680 610 if (ep->can_dma) { ··· 1076 1006 struct usb_gadget_driver *driver); 1077 1007 static int atmel_usba_stop(struct usb_gadget *gadget); 1078 1008 1009 + static struct usb_ep *atmel_usba_match_ep( 1010 + struct usb_gadget *gadget, 1011 + struct usb_endpoint_descriptor *desc, 1012 + struct usb_ss_ep_comp_descriptor 
*ep_comp 1013 + ) 1014 + { 1015 + struct usb_ep *_ep; 1016 + struct usba_ep *ep; 1017 + 1018 + /* Look at endpoints until an unclaimed one looks usable */ 1019 + list_for_each_entry(_ep, &gadget->ep_list, ep_list) { 1020 + if (usb_gadget_ep_match_desc(gadget, _ep, desc, ep_comp)) 1021 + goto found_ep; 1022 + } 1023 + /* Fail */ 1024 + return NULL; 1025 + 1026 + found_ep: 1027 + 1028 + if (fifo_mode == 0) { 1029 + /* Optimize hw fifo size based on ep type and other info */ 1030 + ep = to_usba_ep(_ep); 1031 + 1032 + switch (usb_endpoint_type(desc)) { 1033 + 1034 + case USB_ENDPOINT_XFER_CONTROL: 1035 + break; 1036 + 1037 + case USB_ENDPOINT_XFER_ISOC: 1038 + ep->fifo_size = 1024; 1039 + ep->nr_banks = 2; 1040 + break; 1041 + 1042 + case USB_ENDPOINT_XFER_BULK: 1043 + ep->fifo_size = 512; 1044 + ep->nr_banks = 1; 1045 + break; 1046 + 1047 + case USB_ENDPOINT_XFER_INT: 1048 + if (desc->wMaxPacketSize == 0) 1049 + ep->fifo_size = 1050 + roundup_pow_of_two(_ep->maxpacket_limit); 1051 + else 1052 + ep->fifo_size = 1053 + roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize)); 1054 + ep->nr_banks = 1; 1055 + break; 1056 + } 1057 + 1058 + /* It might be a little bit late to set this */ 1059 + usb_ep_set_maxpacket_limit(&ep->ep, ep->fifo_size); 1060 + 1061 + /* Generate ept_cfg basd on FIFO size and number of banks */ 1062 + if (ep->fifo_size <= 8) 1063 + ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); 1064 + else 1065 + /* LSB is bit 1, not 0 */ 1066 + ep->ept_cfg = 1067 + USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3); 1068 + 1069 + ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks); 1070 + 1071 + ep->udc->configured_ep++; 1072 + } 1073 + 1074 + return _ep; 1075 + } 1076 + 1079 1077 static const struct usb_gadget_ops usba_udc_ops = { 1080 1078 .get_frame = usba_udc_get_frame, 1081 1079 .wakeup = usba_udc_wakeup, 1082 1080 .set_selfpowered = usba_udc_set_selfpowered, 1083 1081 .udc_start = atmel_usba_start, 1084 1082 .udc_stop = atmel_usba_stop, 1083 + .match_ep = 
atmel_usba_match_ep, 1085 1084 }; 1086 1085 1087 1086 static struct usb_endpoint_descriptor usba_ep0_desc = { ··· 1817 1678 } 1818 1679 1819 1680 if (status & USBA_END_OF_RESET) { 1820 - struct usba_ep *ep0; 1681 + struct usba_ep *ep0, *ep; 1682 + int i, n; 1821 1683 1822 1684 usba_writel(udc, INT_CLR, USBA_END_OF_RESET); 1823 1685 generate_bias_pulse(udc); ··· 1857 1717 if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED)) 1858 1718 dev_dbg(&udc->pdev->dev, 1859 1719 "ODD: EP0 configuration is invalid!\n"); 1720 + 1721 + /* Preallocate other endpoints */ 1722 + n = fifo_mode ? udc->num_ep : udc->configured_ep; 1723 + for (i = 1; i < n; i++) { 1724 + ep = &udc->usba_ep[i]; 1725 + usba_ep_writel(ep, CFG, ep->ept_cfg); 1726 + if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) 1727 + dev_dbg(&udc->pdev->dev, 1728 + "ODD: EP%d configuration is invalid!\n", i); 1729 + } 1860 1730 } 1861 1731 1862 1732 spin_unlock(&udc->lock); ··· 2014 1864 if (gpio_is_valid(udc->vbus_pin)) 2015 1865 disable_irq(gpio_to_irq(udc->vbus_pin)); 2016 1866 1867 + if (fifo_mode == 0) 1868 + udc->configured_ep = 1; 1869 + 2017 1870 usba_stop(udc); 2018 1871 2019 1872 udc->driver = NULL; ··· 2084 1931 &flags); 2085 1932 udc->vbus_pin_inverted = (flags & OF_GPIO_ACTIVE_LOW) ? 
1 : 0; 2086 1933 2087 - pp = NULL; 2088 - while ((pp = of_get_next_child(np, pp))) 2089 - udc->num_ep++; 1934 + if (fifo_mode == 0) { 1935 + pp = NULL; 1936 + while ((pp = of_get_next_child(np, pp))) 1937 + udc->num_ep++; 1938 + udc->configured_ep = 1; 1939 + } else 1940 + udc->num_ep = usba_config_fifo_table(udc); 2090 1941 2091 1942 eps = devm_kzalloc(&pdev->dev, sizeof(struct usba_ep) * udc->num_ep, 2092 1943 GFP_KERNEL); ··· 2103 1946 2104 1947 pp = NULL; 2105 1948 i = 0; 2106 - while ((pp = of_get_next_child(np, pp))) { 1949 + while ((pp = of_get_next_child(np, pp)) && i < udc->num_ep) { 2107 1950 ep = &eps[i]; 2108 1951 2109 1952 ret = of_property_read_u32(pp, "reg", &val); ··· 2111 1954 dev_err(&pdev->dev, "of_probe: reg error(%d)\n", ret); 2112 1955 goto err; 2113 1956 } 2114 - ep->index = val; 1957 + ep->index = fifo_mode ? udc->fifo_cfg[i].hw_ep_num : val; 2115 1958 2116 1959 ret = of_property_read_u32(pp, "atmel,fifo-size", &val); 2117 1960 if (ret) { 2118 1961 dev_err(&pdev->dev, "of_probe: fifo-size error(%d)\n", ret); 2119 1962 goto err; 2120 1963 } 2121 - ep->fifo_size = val; 1964 + ep->fifo_size = fifo_mode ? udc->fifo_cfg[i].fifo_size : val; 2122 1965 2123 1966 ret = of_property_read_u32(pp, "atmel,nb-banks", &val); 2124 1967 if (ret) { 2125 1968 dev_err(&pdev->dev, "of_probe: nb-banks error(%d)\n", ret); 2126 1969 goto err; 2127 1970 } 2128 - ep->nr_banks = val; 1971 + ep->nr_banks = fifo_mode ? 
udc->fifo_cfg[i].nr_banks : val; 2129 1972 2130 1973 ep->can_dma = of_property_read_bool(pp, "atmel,can-dma"); 2131 1974 ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc"); ··· 2156 1999 2157 2000 ep->ep.caps.dir_in = true; 2158 2001 ep->ep.caps.dir_out = true; 2002 + 2003 + if (fifo_mode != 0) { 2004 + /* 2005 + * Generate ept_cfg based on FIFO size and 2006 + * banks number 2007 + */ 2008 + if (ep->fifo_size <= 8) 2009 + ep->ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8); 2010 + else 2011 + /* LSB is bit 1, not 0 */ 2012 + ep->ept_cfg = 2013 + USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3); 2014 + 2015 + ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks); 2016 + } 2159 2017 2160 2018 if (i) 2161 2019 list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+9 -1
drivers/usb/gadget/udc/atmel_usba_udc.h
··· 275 275 u32 ctrl; 276 276 }; 277 277 278 + struct usba_fifo_cfg { 279 + u8 hw_ep_num; 280 + u16 fifo_size; 281 + u8 nr_banks; 282 + }; 283 + 278 284 struct usba_ep { 279 285 int state; 280 286 void __iomem *ep_regs; ··· 299 293 unsigned int can_isoc:1; 300 294 unsigned int is_isoc:1; 301 295 unsigned int is_in:1; 302 - 296 + unsigned long ept_cfg; 303 297 #ifdef CONFIG_USB_GADGET_DEBUG_FS 304 298 u32 last_dma_status; 305 299 struct dentry *debugfs_dir; ··· 344 338 int vbus_pin; 345 339 int vbus_pin_inverted; 346 340 int num_ep; 341 + int configured_ep; 342 + struct usba_fifo_cfg *fifo_cfg; 347 343 struct clk *pclk; 348 344 struct clk *hclk; 349 345 struct usba_ep *usba_ep;
+31 -14
drivers/usb/gadget/udc/core.c
··· 1080 1080 dev_vdbg(dev, "%s\n", __func__); 1081 1081 } 1082 1082 1083 + /* should be called with udc_lock held */ 1084 + static int check_pending_gadget_drivers(struct usb_udc *udc) 1085 + { 1086 + struct usb_gadget_driver *driver; 1087 + int ret = 0; 1088 + 1089 + list_for_each_entry(driver, &gadget_driver_pending_list, pending) 1090 + if (!driver->udc_name || strcmp(driver->udc_name, 1091 + dev_name(&udc->dev)) == 0) { 1092 + ret = udc_bind_to_driver(udc, driver); 1093 + if (ret != -EPROBE_DEFER) 1094 + list_del(&driver->pending); 1095 + break; 1096 + } 1097 + 1098 + return ret; 1099 + } 1100 + 1083 1101 /** 1084 1102 * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list 1085 1103 * @parent: the parent device to this udc. Usually the controller driver's ··· 1111 1093 void (*release)(struct device *dev)) 1112 1094 { 1113 1095 struct usb_udc *udc; 1114 - struct usb_gadget_driver *driver; 1115 1096 int ret = -ENOMEM; 1116 1097 1117 1098 udc = kzalloc(sizeof(*udc), GFP_KERNEL); ··· 1153 1136 udc->vbus = true; 1154 1137 1155 1138 /* pick up one of pending gadget drivers */ 1156 - list_for_each_entry(driver, &gadget_driver_pending_list, pending) { 1157 - if (!driver->udc_name || strcmp(driver->udc_name, 1158 - dev_name(&udc->dev)) == 0) { 1159 - ret = udc_bind_to_driver(udc, driver); 1160 - if (ret != -EPROBE_DEFER) 1161 - list_del(&driver->pending); 1162 - if (ret) 1163 - goto err5; 1164 - break; 1165 - } 1166 - } 1139 + ret = check_pending_gadget_drivers(udc); 1140 + if (ret) 1141 + goto err5; 1167 1142 1168 1143 mutex_unlock(&udc_lock); 1169 1144 ··· 1365 1356 return -EINVAL; 1366 1357 1367 1358 mutex_lock(&udc_lock); 1368 - list_for_each_entry(udc, &udc_list, list) 1359 + list_for_each_entry(udc, &udc_list, list) { 1369 1360 if (udc->driver == driver) { 1370 1361 usb_gadget_remove_driver(udc); 1371 1362 usb_gadget_set_state(udc->gadget, 1372 - USB_STATE_NOTATTACHED); 1363 + USB_STATE_NOTATTACHED); 1364 + 1365 + /* Maybe there is someone 
waiting for this UDC? */ 1366 + check_pending_gadget_drivers(udc); 1367 + /* 1368 + * For now we ignore bind errors as probably it's 1369 + * not a valid reason to fail other's gadget unbind 1370 + */ 1373 1371 ret = 0; 1374 1372 break; 1375 1373 } 1374 + } 1376 1375 1377 1376 if (ret) { 1378 1377 list_del(&driver->pending);
+1 -1
drivers/usb/gadget/udc/fotg210-udc.c
··· 527 527 { 528 528 } 529 529 530 - static struct usb_ep_ops fotg210_ep_ops = { 530 + static const struct usb_ep_ops fotg210_ep_ops = { 531 531 .enable = fotg210_ep_enable, 532 532 .disable = fotg210_ep_disable, 533 533
+1 -1
drivers/usb/gadget/udc/fsl_qe_udc.c
··· 1847 1847 return status; 1848 1848 } 1849 1849 1850 - static struct usb_ep_ops qe_ep_ops = { 1850 + static const struct usb_ep_ops qe_ep_ops = { 1851 1851 .enable = qe_ep_enable, 1852 1852 .disable = qe_ep_disable, 1853 1853
+9 -3
drivers/usb/gadget/udc/fsl_udc_core.c
··· 1118 1118 } while (fsl_readl(&dr_regs->endptstatus) & bits); 1119 1119 } 1120 1120 1121 - static struct usb_ep_ops fsl_ep_ops = { 1121 + static const struct usb_ep_ops fsl_ep_ops = { 1122 1122 .enable = fsl_ep_enable, 1123 1123 .disable = fsl_ep_disable, 1124 1124 ··· 1248 1248 .udc_stop = fsl_udc_stop, 1249 1249 }; 1250 1250 1251 + /* 1252 + * Empty complete function used by this driver to fill in the req->complete 1253 + * field when creating a request since the complete field is mandatory. 1254 + */ 1255 + static void fsl_noop_complete(struct usb_ep *ep, struct usb_request *req) { } 1256 + 1251 1257 /* Set protocol stall on ep0, protocol stall will automatically be cleared 1252 1258 on new transaction */ 1253 1259 static void ep0stall(struct fsl_udc *udc) ··· 1288 1282 req->req.length = 0; 1289 1283 req->req.status = -EINPROGRESS; 1290 1284 req->req.actual = 0; 1291 - req->req.complete = NULL; 1285 + req->req.complete = fsl_noop_complete; 1292 1286 req->dtd_count = 0; 1293 1287 1294 1288 ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); ··· 1371 1365 req->req.length = 2; 1372 1366 req->req.status = -EINPROGRESS; 1373 1367 req->req.actual = 0; 1374 - req->req.complete = NULL; 1368 + req->req.complete = fsl_noop_complete; 1375 1369 req->dtd_count = 0; 1376 1370 1377 1371 ret = usb_gadget_map_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
+1 -1
drivers/usb/gadget/udc/fusb300_udc.c
··· 518 518 { 519 519 } 520 520 521 - static struct usb_ep_ops fusb300_ep_ops = { 521 + static const struct usb_ep_ops fusb300_ep_ops = { 522 522 .enable = fusb300_enable, 523 523 .disable = fusb300_disable, 524 524
+1 -1
drivers/usb/gadget/udc/goku_udc.c
··· 968 968 command(regs, COMMAND_FIFO_CLEAR, ep->num); 969 969 } 970 970 971 - static struct usb_ep_ops goku_ep_ops = { 971 + static const struct usb_ep_ops goku_ep_ops = { 972 972 .enable = goku_ep_enable, 973 973 .disable = goku_ep_disable, 974 974
+1 -1
drivers/usb/gadget/udc/gr_udc.c
··· 1841 1841 spin_unlock(&ep->dev->lock); 1842 1842 } 1843 1843 1844 - static struct usb_ep_ops gr_ep_ops = { 1844 + static const struct usb_ep_ops gr_ep_ops = { 1845 1845 .enable = gr_ep_enable, 1846 1846 .disable = gr_ep_disable, 1847 1847
+1 -1
drivers/usb/gadget/udc/m66592-udc.c
··· 1436 1436 spin_unlock_irqrestore(&ep->m66592->lock, flags); 1437 1437 } 1438 1438 1439 - static struct usb_ep_ops m66592_ep_ops = { 1439 + static const struct usb_ep_ops m66592_ep_ops = { 1440 1440 .enable = m66592_enable, 1441 1441 .disable = m66592_disable, 1442 1442
+1 -1
drivers/usb/gadget/udc/mv_u3d_core.c
··· 995 995 return mv_u3d_ep_set_halt_wedge(_ep, 1, 1); 996 996 } 997 997 998 - static struct usb_ep_ops mv_u3d_ep_ops = { 998 + static const struct usb_ep_ops mv_u3d_ep_ops = { 999 999 .enable = mv_u3d_ep_enable, 1000 1000 .disable = mv_u3d_ep_disable, 1001 1001
+1 -1
drivers/usb/gadget/udc/mv_udc_core.c
··· 946 946 return mv_ep_set_halt_wedge(_ep, 1, 1); 947 947 } 948 948 949 - static struct usb_ep_ops mv_ep_ops = { 949 + static const struct usb_ep_ops mv_ep_ops = { 950 950 .enable = mv_ep_enable, 951 951 .disable = mv_ep_disable, 952 952
+2 -2
drivers/usb/gadget/udc/net2272.c
··· 181 181 static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *); 182 182 static int net2272_fifo_status(struct usb_ep *); 183 183 184 - static struct usb_ep_ops net2272_ep_ops; 184 + static const struct usb_ep_ops net2272_ep_ops; 185 185 186 186 /*---------------------------------------------------------------------------*/ 187 187 ··· 1067 1067 net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH); 1068 1068 } 1069 1069 1070 - static struct usb_ep_ops net2272_ep_ops = { 1070 + static const struct usb_ep_ops net2272_ep_ops = { 1071 1071 .enable = net2272_enable, 1072 1072 .disable = net2272_disable, 1073 1073
+1 -1
drivers/usb/gadget/udc/omap_udc.c
··· 1112 1112 return status; 1113 1113 } 1114 1114 1115 - static struct usb_ep_ops omap_ep_ops = { 1115 + static const struct usb_ep_ops omap_ep_ops = { 1116 1116 .enable = omap_ep_enable, 1117 1117 .disable = omap_ep_disable, 1118 1118
+1 -1
drivers/usb/gadget/udc/pxa27x_udc.c
··· 1473 1473 return 0; 1474 1474 } 1475 1475 1476 - static struct usb_ep_ops pxa_ep_ops = { 1476 + static const struct usb_ep_ops pxa_ep_ops = { 1477 1477 .enable = pxa_ep_enable, 1478 1478 .disable = pxa_ep_disable, 1479 1479
+1 -1
drivers/usb/gadget/udc/r8a66597-udc.c
··· 1706 1706 spin_unlock_irqrestore(&ep->r8a66597->lock, flags); 1707 1707 } 1708 1708 1709 - static struct usb_ep_ops r8a66597_ep_ops = { 1709 + static const struct usb_ep_ops r8a66597_ep_ops = { 1710 1710 .enable = r8a66597_enable, 1711 1711 .disable = r8a66597_disable, 1712 1712
+1 -1
drivers/usb/gadget/udc/s3c-hsudc.c
··· 954 954 return 0; 955 955 } 956 956 957 - static struct usb_ep_ops s3c_hsudc_ep_ops = { 957 + static const struct usb_ep_ops s3c_hsudc_ep_ops = { 958 958 .enable = s3c_hsudc_ep_enable, 959 959 .disable = s3c_hsudc_ep_disable, 960 960 .alloc_request = s3c_hsudc_alloc_request,
-33
drivers/usb/phy/phy-ab8500-usb.c
··· 1023 1023 ab->enabled_charging_detection = true; 1024 1024 } 1025 1025 1026 - static unsigned ab8500_eyediagram_workaroud(struct ab8500_usb *ab, unsigned mA) 1027 - { 1028 - /* 1029 - * AB8500 V2 has eye diagram issues when drawing more than 100mA from 1030 - * VBUS. Set charging current to 100mA in case of standard host 1031 - */ 1032 - if (is_ab8500_2p0_or_earlier(ab->ab8500)) 1033 - if (mA > 100) 1034 - mA = 100; 1035 - 1036 - return mA; 1037 - } 1038 - 1039 - static int ab8500_usb_set_power(struct usb_phy *phy, unsigned mA) 1040 - { 1041 - struct ab8500_usb *ab; 1042 - 1043 - if (!phy) 1044 - return -ENODEV; 1045 - 1046 - ab = phy_to_ab(phy); 1047 - 1048 - mA = ab8500_eyediagram_workaroud(ab, mA); 1049 - 1050 - ab->vbus_draw = mA; 1051 - 1052 - atomic_notifier_call_chain(&ab->phy.notifier, 1053 - UX500_MUSB_VBUS, &ab->vbus_draw); 1054 - 1055 - return 0; 1056 - } 1057 - 1058 1026 static int ab8500_usb_set_suspend(struct usb_phy *x, int suspend) 1059 1027 { 1060 1028 /* TODO */ ··· 1360 1392 ab->phy.otg = otg; 1361 1393 ab->phy.label = "ab8500"; 1362 1394 ab->phy.set_suspend = ab8500_usb_set_suspend; 1363 - ab->phy.set_power = ab8500_usb_set_power; 1364 1395 ab->phy.otg->state = OTG_STATE_UNDEFINED; 1365 1396 1366 1397 otg->usb_phy = &ab->phy;
-12
drivers/usb/phy/phy-fsl-usb.c
··· 642 642 return 0; 643 643 } 644 644 645 - /* Set OTG port power, only for B-device */ 646 - static int fsl_otg_set_power(struct usb_phy *phy, unsigned mA) 647 - { 648 - if (!fsl_otg_dev) 649 - return -ENODEV; 650 - if (phy->otg->state == OTG_STATE_B_PERIPHERAL) 651 - pr_info("FSL OTG: Draw %d mA\n", mA); 652 - 653 - return 0; 654 - } 655 - 656 645 /* 657 646 * Delayed pin detect interrupt processing. 658 647 * ··· 810 821 /* initialize the otg structure */ 811 822 fsl_otg_tc->phy.label = DRIVER_DESC; 812 823 fsl_otg_tc->phy.dev = &pdev->dev; 813 - fsl_otg_tc->phy.set_power = fsl_otg_set_power; 814 824 815 825 fsl_otg_tc->phy.otg->usb_phy = &fsl_otg_tc->phy; 816 826 fsl_otg_tc->phy.otg->set_host = fsl_otg_set_host;
+11 -40
drivers/usb/phy/phy-msm-usb.c
··· 842 842 motg->cur_power = mA; 843 843 } 844 844 845 - static int msm_otg_set_power(struct usb_phy *phy, unsigned mA) 846 - { 847 - struct msm_otg *motg = container_of(phy, struct msm_otg, phy); 848 - 849 - /* 850 - * Gadget driver uses set_power method to notify about the 851 - * available current based on suspend/configured states. 852 - * 853 - * IDEV_CHG can be drawn irrespective of suspend/un-configured 854 - * states when CDP/ACA is connected. 855 - */ 856 - if (motg->chg_type == USB_SDP_CHARGER) 857 - msm_otg_notify_charger(motg, mA); 858 - 859 - return 0; 860 - } 861 - 862 845 static void msm_otg_start_host(struct usb_phy *phy, int on) 863 846 { 864 847 struct msm_otg *motg = container_of(phy, struct msm_otg, phy); ··· 1725 1742 if (!IS_ERR(ext_vbus)) { 1726 1743 motg->vbus.extcon = ext_vbus; 1727 1744 motg->vbus.nb.notifier_call = msm_otg_vbus_notifier; 1728 - ret = extcon_register_notifier(ext_vbus, EXTCON_USB, 1729 - &motg->vbus.nb); 1745 + ret = devm_extcon_register_notifier(&pdev->dev, ext_vbus, 1746 + EXTCON_USB, &motg->vbus.nb); 1730 1747 if (ret < 0) { 1731 1748 dev_err(&pdev->dev, "register VBUS notifier failed\n"); 1732 1749 return ret; 1733 1750 } 1734 1751 1735 - ret = extcon_get_cable_state_(ext_vbus, EXTCON_USB); 1752 + ret = extcon_get_state(ext_vbus, EXTCON_USB); 1736 1753 if (ret) 1737 1754 set_bit(B_SESS_VLD, &motg->inputs); 1738 1755 else ··· 1742 1759 if (!IS_ERR(ext_id)) { 1743 1760 motg->id.extcon = ext_id; 1744 1761 motg->id.nb.notifier_call = msm_otg_id_notifier; 1745 - ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST, 1746 - &motg->id.nb); 1762 + ret = devm_extcon_register_notifier(&pdev->dev, ext_id, 1763 + EXTCON_USB_HOST, &motg->id.nb); 1747 1764 if (ret < 0) { 1748 1765 dev_err(&pdev->dev, "register ID notifier failed\n"); 1749 - extcon_unregister_notifier(motg->vbus.extcon, 1750 - EXTCON_USB, &motg->vbus.nb); 1751 1766 return ret; 1752 1767 } 1753 1768 1754 - ret = extcon_get_cable_state_(ext_id, EXTCON_USB_HOST); 1769 + ret = extcon_get_state(ext_id, EXTCON_USB_HOST); 1755 1770 if (ret) 1756 1771 clear_bit(ID, &motg->inputs); 1757 1772 else ··· 1864 1883 */ 1865 1884 if (motg->phy_number) { 1866 1885 phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4); 1867 - if (!phy_select) { 1868 - ret = -ENOMEM; 1869 - goto unregister_extcon; 1870 - } 1886 + if (!phy_select) 1887 + return -ENOMEM; 1888 + 1871 1889 /* Enable second PHY with the OTG port */ 1872 1890 writel(0x1, phy_select); 1873 1891 } ··· 1877 1897 if (motg->irq < 0) { 1878 1898 dev_err(&pdev->dev, "platform_get_irq failed\n"); 1879 1899 ret = motg->irq; 1880 - goto unregister_extcon; 1900 + return motg->irq; 1881 1901 } 1882 1902 1883 1903 regs[0].supply = "vddcx"; ··· 1886 1906 1887 1907 ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); 1888 1908 if (ret) 1889 - goto unregister_extcon; 1909 + return ret; 1890 1910 1891 1911 motg->vddcx = regs[0].consumer; 1892 1912 motg->v3p3 = regs[1].consumer; ··· 1930 1950 } 1931 1951 1932 1952 phy->init = msm_phy_init; 1933 - phy->set_power = msm_otg_set_power; 1934 1953 phy->notify_disconnect = msm_phy_notify_disconnect; 1935 1954 phy->type = USB_PHY_TYPE_USB2; ··· 1982 2003 clk_disable_unprepare(motg->clk); 1983 2004 if (!IS_ERR(motg->core_clk)) 1984 2005 clk_disable_unprepare(motg->core_clk); 1985 - unregister_extcon: 1986 - extcon_unregister_notifier(motg->id.extcon, 1987 - EXTCON_USB_HOST, &motg->id.nb); 1988 - extcon_unregister_notifier(motg->vbus.extcon, 1989 - EXTCON_USB, &motg->vbus.nb); 1990 2006 1991 2007 return ret; 1992 2008 } ··· 2002 2028 * we could load bootloader/kernel at next reboot 2003 2029 */ 2004 2030 gpiod_set_value_cansleep(motg->switch_gpio, 0); 2005 - 2006 - extcon_unregister_notifier(motg->id.extcon, EXTCON_USB_HOST, &motg->id.nb); 2007 - extcon_unregister_notifier(motg->vbus.extcon, EXTCON_USB, &motg->vbus.nb); 2008 2031 2009 2032 msm_otg_debugfs_cleanup(); 2010 2033 cancel_delayed_work_sync(&motg->chg_work);
+6 -18
drivers/usb/phy/phy-omap-otg.c
··· 118 118 otg_dev->id_nb.notifier_call = omap_otg_id_notifier; 119 119 otg_dev->vbus_nb.notifier_call = omap_otg_vbus_notifier; 120 120 121 - ret = extcon_register_notifier(extcon, EXTCON_USB_HOST, &otg_dev->id_nb); 121 + ret = devm_extcon_register_notifier(&pdev->dev, extcon, 122 + EXTCON_USB_HOST, &otg_dev->id_nb); 122 123 if (ret) 123 124 return ret; 124 125 125 - ret = extcon_register_notifier(extcon, EXTCON_USB, &otg_dev->vbus_nb); 126 + ret = devm_extcon_register_notifier(&pdev->dev, extcon, 127 + EXTCON_USB, &otg_dev->vbus_nb); 126 128 if (ret) { 127 - extcon_unregister_notifier(extcon, EXTCON_USB_HOST, 128 - &otg_dev->id_nb); 129 129 return ret; 130 130 } 131 131 132 - otg_dev->id = extcon_get_cable_state_(extcon, EXTCON_USB_HOST); 133 - otg_dev->vbus = extcon_get_cable_state_(extcon, EXTCON_USB); 132 + otg_dev->id = extcon_get_state(extcon, EXTCON_USB_HOST); 133 + otg_dev->vbus = extcon_get_state(extcon, EXTCON_USB); 134 134 omap_otg_set_mode(otg_dev); 135 135 136 136 rev = readl(otg_dev->base); ··· 145 145 return 0; 146 146 } 147 147 148 - static int omap_otg_remove(struct platform_device *pdev) 149 - { 150 - struct otg_device *otg_dev = platform_get_drvdata(pdev); 151 - struct extcon_dev *edev = otg_dev->extcon; 152 - 153 - extcon_unregister_notifier(edev, EXTCON_USB_HOST, &otg_dev->id_nb); 154 - extcon_unregister_notifier(edev, EXTCON_USB, &otg_dev->vbus_nb); 155 - 156 - return 0; 157 - } 158 - 159 148 static struct platform_driver omap_otg_driver = { 160 149 .probe = omap_otg_probe, 161 - .remove = omap_otg_remove, 162 150 .driver = { 163 151 .name = "omap_otg", 164 152 },
+4 -9
drivers/usb/phy/phy-qcom-8x16-usb.c
··· 187 187 val = ULPI_PWR_OTG_COMP_DISABLE; 188 188 usb_phy_io_write(phy, val, ULPI_SET(ULPI_PWR_CLK_MNG_REG)); 189 189 190 - state = extcon_get_cable_state_(qphy->vbus_edev, EXTCON_USB); 190 + state = extcon_get_state(qphy->vbus_edev, EXTCON_USB); 191 191 if (state) 192 192 phy_8x16_vbus_on(qphy); 193 193 else ··· 316 316 goto off_clks; 317 317 318 318 qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify; 319 - ret = extcon_register_notifier(qphy->vbus_edev, EXTCON_USB, 320 - &qphy->vbus_notify); 319 + ret = devm_extcon_register_notifier(&pdev->dev, qphy->vbus_edev, 320 + EXTCON_USB, &qphy->vbus_notify); 321 321 if (ret < 0) 322 322 goto off_power; 323 323 324 324 ret = usb_add_phy_dev(&qphy->phy); 325 325 if (ret) 326 - goto off_extcon; 326 + goto off_power; 327 327 328 328 qphy->reboot_notify.notifier_call = phy_8x16_reboot_notify; 329 329 register_reboot_notifier(&qphy->reboot_notify); 330 330 331 331 return 0; 332 332 333 - off_extcon: 334 - extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB, 335 - &qphy->vbus_notify); 336 333 off_power: 337 334 regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator); 338 335 off_clks: ··· 344 347 struct phy_8x16 *qphy = platform_get_drvdata(pdev); 345 348 346 349 unregister_reboot_notifier(&qphy->reboot_notify); 347 - extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB, 348 - &qphy->vbus_notify); 349 350 350 351 /* 351 352 * Ensure that D+/D- lines are routed to uB connector, so
+5 -5
drivers/usb/phy/phy-tahvo.c
··· 121 121 prev_state = tu->vbus_state; 122 122 tu->vbus_state = reg & TAHVO_STAT_VBUS; 123 123 if (prev_state != tu->vbus_state) { 124 - extcon_set_cable_state_(tu->extcon, EXTCON_USB, tu->vbus_state); 124 + extcon_set_state_sync(tu->extcon, EXTCON_USB, tu->vbus_state); 125 125 sysfs_notify(&tu->pt_dev->dev.kobj, NULL, "vbus_state"); 126 126 } 127 127 } ··· 130 130 { 131 131 struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); 132 132 133 - extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, true); 133 + extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, true); 134 134 135 135 /* Power up the transceiver in USB host mode */ 136 136 retu_write(rdev, TAHVO_REG_USBR, USBR_REGOUT | USBR_NSUSPEND | ··· 149 149 { 150 150 struct retu_dev *rdev = dev_get_drvdata(tu->pt_dev->dev.parent); 151 151 152 - extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, false); 152 + extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, false); 153 153 154 154 /* Power up transceiver and set it in USB peripheral mode */ 155 155 retu_write(rdev, TAHVO_REG_USBR, USBR_SLAVE_CONTROL | USBR_REGOUT | ··· 379 379 } 380 380 381 381 /* Set the initial cable state. */ 382 - extcon_set_cable_state_(tu->extcon, EXTCON_USB_HOST, 382 + extcon_set_state_sync(tu->extcon, EXTCON_USB_HOST, 383 383 tu->tahvo_mode == TAHVO_MODE_HOST); 384 - extcon_set_cable_state_(tu->extcon, EXTCON_USB, tu->vbus_state); 384 + extcon_set_state_sync(tu->extcon, EXTCON_USB, tu->vbus_state); 385 385 386 386 /* Create OTG interface */ 387 387 tahvo_usb_power_off(tu);
+1 -1
drivers/usb/renesas_usbhs/common.c
··· 389 389 390 390 if (enable && !mod) { 391 391 if (priv->edev) { 392 - cable = extcon_get_cable_state_(priv->edev, EXTCON_USB_HOST); 392 + cable = extcon_get_state(priv->edev, EXTCON_USB_HOST); 393 393 if ((cable > 0 && id != USBHS_HOST) || 394 394 (!cable && id != USBHS_GADGET)) { 395 395 dev_info(&pdev->dev,
+50 -2
tools/usb/ffs-test.c
··· 22 22 /* $(CROSS_COMPILE)cc -Wall -Wextra -g -o ffs-test ffs-test.c -lpthread */ 23 23 24 24 25 - #define _BSD_SOURCE /* for endian.h */ 25 + #define _DEFAULT_SOURCE /* for endian.h */ 26 26 27 27 #include <endian.h> 28 28 #include <errno.h> ··· 110 110 struct usb_functionfs_descs_head_v2 header; 111 111 __le32 fs_count; 112 112 __le32 hs_count; 113 + __le32 ss_count; 113 114 struct { 114 115 struct usb_interface_descriptor intf; 115 116 struct usb_endpoint_descriptor_no_audio sink; 116 117 struct usb_endpoint_descriptor_no_audio source; 117 118 } __attribute__((packed)) fs_descs, hs_descs; 119 + struct { 120 + struct usb_interface_descriptor intf; 121 + struct usb_endpoint_descriptor_no_audio sink; 122 + struct usb_ss_ep_comp_descriptor sink_comp; 123 + struct usb_endpoint_descriptor_no_audio source; 124 + struct usb_ss_ep_comp_descriptor source_comp; 125 + } ss_descs; 118 126 } __attribute__((packed)) descriptors = { 119 127 .header = { 120 128 .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2), 121 129 .flags = cpu_to_le32(FUNCTIONFS_HAS_FS_DESC | 122 - FUNCTIONFS_HAS_HS_DESC), 130 + FUNCTIONFS_HAS_HS_DESC | 131 + FUNCTIONFS_HAS_SS_DESC), 123 132 .length = cpu_to_le32(sizeof descriptors), 124 133 }, 125 134 .fs_count = cpu_to_le32(3), ··· 178 169 .bmAttributes = USB_ENDPOINT_XFER_BULK, 179 170 .wMaxPacketSize = cpu_to_le16(512), 180 171 .bInterval = 1, /* NAK every 1 uframe */ 172 + }, 173 + }, 174 + .ss_count = cpu_to_le32(5), 175 + .ss_descs = { 176 + .intf = { 177 + .bLength = sizeof descriptors.fs_descs.intf, 178 + .bDescriptorType = USB_DT_INTERFACE, 179 + .bNumEndpoints = 2, 180 + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, 181 + .iInterface = 1, 182 + }, 183 + .sink = { 184 + .bLength = sizeof descriptors.hs_descs.sink, 185 + .bDescriptorType = USB_DT_ENDPOINT, 186 + .bEndpointAddress = 1 | USB_DIR_IN, 187 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 188 + .wMaxPacketSize = cpu_to_le16(1024), 189 + }, 190 + .sink_comp = { 191 + .bLength = USB_DT_SS_EP_COMP_SIZE, 192 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 193 + .bMaxBurst = 0, 194 + .bmAttributes = 0, 195 + .wBytesPerInterval = 0, 196 + }, 197 + .source = { 198 + .bLength = sizeof descriptors.hs_descs.source, 199 + .bDescriptorType = USB_DT_ENDPOINT, 200 + .bEndpointAddress = 2 | USB_DIR_OUT, 201 + .bmAttributes = USB_ENDPOINT_XFER_BULK, 202 + .wMaxPacketSize = cpu_to_le16(1024), 203 + .bInterval = 1, /* NAK every 1 uframe */ 204 + }, 205 + .source_comp = { 206 + .bLength = USB_DT_SS_EP_COMP_SIZE, 207 + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, 208 + .bMaxBurst = 0, 209 + .bmAttributes = 0, 210 + .wBytesPerInterval = 0, 181 211 }, 182 212 }, 183 213 };